Commit e8d218c1 by John Jarvis

fixing merge conflicts

parents 1280542e bc23df7c
@@ -29,3 +29,5 @@ Ker Ruben Ramos <xdiscent@gmail.com>
Fred Smith <derf@edx.org>
Wang Peifeng <pku9104038@hotmail.com>
Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
- Role: Edxapp
  - Turn on code sandboxing by default and allow the jailed code to write
    files to the tmp directory created for it by codejail.
- Role: Edxapp
  - The repo.txt requirements file is no longer processed in any way. This file was removed from edx-platform
    via pull request #3487 (https://github.com/edx/edx-platform/pull/3487).
- Update CMS_HOSTNAME default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards-incompatible changes and deprecations.
@@ -26,9 +26,9 @@ Private Cloud with hosts for the core edX services. This template
will build quite a number of AWS resources that cost money, so please
consider this before you start.

-The configuration phase is manged by [Ansible](http://ansible.cc/).
+The configuration phase is managed by [Ansible](http://ansible.com/).
We have provided a number of playbooks that will configure each of
-the edX service.
+the edX services.

This project is a re-write of the current edX provisioning and
configuration tools, we will be migrating features to this project
@@ -36,3 +36,5 @@ over time, so expect frequent changes.
For more information including installation instruction please see the [Configuration Wiki](https://github.com/edx/configuration/wiki).
+
+For info on any large recent changes please see the [change log](https://github.com/edx/configuration/blob/master/CHANGELOG.md).
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-xqwatcher-edge-stage.yml \
#     -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible, /e0d/add-instance-profile, from
#   https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
#   with ASGs. This can be worked around by deleting the ASG and re-running
#   the play
# - The instance_profile_name will need to be created in advance as there
#   isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
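# As a sketch only (role and profile names below are illustrative, not from
# this repo), the IAM role and instance profile from the caveat above could be
# pre-created with the AWS CLI:
#
#   aws iam create-role --role-name xqwatcher-role \
#       --assume-role-policy-document file://ec2-trust-policy.json
#   aws iam create-instance-profile --instance-profile-name xqwatcher-profile
#   aws iam add-role-to-instance-profile \
#       --instance-profile-name xqwatcher-profile --role-name xqwatcher-role
#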
- name: Add resources for the XQWatcher
  hosts: localhost
  connection: local
  gather_facts: False
  tasks:
    # ignore_errors is used here because this module is not idempotent.
    # If tags already exist, the task will fail with the following message:
    # Tags already exists in subnet
    - name: Update subnet tags
      ec2_tag:
        resource: "{{ item }}"
        region: "{{ ec2_region }}"
        state: present
        tags:
          Name: "{{ edp }}-subnet"
          play: xqwatcher
          immutable_metadata: "{'purpose':'{{ environment }}-{{ deployment }}-internal-{{ play }}','target':'ec2'}"
      with_items: subnets
      ignore_errors: True
    # Fails intermittently with the following error:
    # The specified rule does not exist in this security group
    - name: Create security group
      ec2_group:
        profile: "{{ profile }}"
        description: "Open up SSH access"
        name: "{{ security_group }}"
        vpc_id: "{{ vpc_id }}"
        region: "{{ ec2_region }}"
        rules:
          - proto: tcp
            from_port: "{{ sec_group_ingress_from_port }}"
            to_port: "{{ sec_group_ingress_to_port }}"
            cidr_ip: "{{ item }}"
      with_items: sec_group_ingress_cidrs
      register: created_sec_group
      ignore_errors: True
    - name: debug
      debug:
        msg: "Registered created_sec_group: {{ created_sec_group }}"
    # instance_profile_name was added by me in my fork
    - name: Create the launch configuration
      ec2_lc:
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ lc_name }}"
        image_id: "{{ lc_ami }}"
        key_name: "{{ key_name }}"
        security_groups: "{{ created_sec_group.results[0].group_id }}"
        instance_type: "{{ instance_type }}"
        instance_profile_name: "{{ instance_profile_name }}"
        volumes:
          - device_name: "/dev/sda1"
            volume_size: "{{ instance_volume_size }}"
    - name: Create ASG
      ec2_asg:
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ asg_name }}"
        launch_config_name: "{{ lc_name }}"
        min_size: 0
        max_size: 0
        desired_capacity: 0
        vpc_zone_identifier: "{{ subnets|join(',') }}"
        instance_tags:
          Name: "{{ env }}-{{ deployment }}-{{ play }}"
          autostack: "true"
          environment: "{{ env }}"
          deployment: "{{ deployment }}"
          play: "{{ play }}"
          services: "{{ play }}"
      register: asg
    - name: debug
      debug:
        msg: "DEBUG: {{ asg }}"
    - name: Create scale up policy
      ec2_scaling_policy:
        state: present
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ edp }}-ScaleUpPolicy"
        adjustment_type: "ChangeInCapacity"
        asg_name: "{{ asg_name }}"
        scaling_adjustment: 1
        min_adjustment_step: 1
        cooldown: 60
      register: scale_up_policy
      tags:
        - foo
    - name: debug
      debug:
        msg: "Registered scale_up_policy: {{ scale_up_policy }}"
    - name: Create scale down policy
      ec2_scaling_policy:
        state: present
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ edp }}-ScaleDownPolicy"
        adjustment_type: "ChangeInCapacity"
        asg_name: "{{ asg_name }}"
        scaling_adjustment: -1
        min_adjustment_step: 1
        cooldown: 60
      register: scale_down_policy
    - name: debug
      debug:
        msg: "Registered scale_down_policy: {{ scale_down_policy }}"
    #
    # Sometimes the scaling policy reports itself changed, but
    # does not return data about the policy. It's bad enough
    # that consistent data isn't returned when things
    # have and have not changed; this makes writing idempotent
    # tasks difficult, hence the "is defined" guards on the
    # alarm tasks below.
    - name: create high-cpu alarm
      ec2_metric_alarm:
        state: present
        region: "{{ ec2_region }}"
        name: "cpu-high"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: ">="
        threshold: 90.0
        period: 300
        evaluation_periods: 2
        unit: "Percent"
        description: "Scale-up if CPU > 90% for 10 minutes"
        dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
        alarm_actions: ["{{ scale_up_policy.arn }}"]
      when: scale_up_policy.arn is defined
    - name: create low-cpu alarm
      ec2_metric_alarm:
        state: present
        region: "{{ ec2_region }}"
        name: "cpu-low"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: "<="
        threshold: 50.0
        period: 300
        evaluation_periods: 2
        unit: "Percent"
        description: "Scale-down if CPU < 50% for 10 minutes"
        dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
        alarm_actions: ["{{ scale_down_policy.arn }}"]
      when: scale_down_policy.arn is defined
\ No newline at end of file
@@ -24,13 +24,24 @@
  "m2.xlarge",
  "m2.2xlarge",
  "m2.4xlarge",
-  "m3.xlarge",
-  "m3.2xlarge",
+  "cr1.8xlarge",
+  "cc2.8xlarge",
  "c1.medium",
  "c1.xlarge",
-  "cc1.4xlarge",
-  "cc2.8xlarge",
-  "cg1.4xlarge"
+  "m3.medium",
+  "m3.large",
+  "m3.xlarge",
+  "m3.2xlarge",
+  "c3.large",
+  "c3.xlarge",
+  "c3.2xlarge",
+  "c3.4xlarge",
+  "c3.8xlarge",
+  "r3.large",
+  "r3.xlarge",
+  "r3.2xlarge",
+  "r3.4xlarge",
+  "r3.8xlarge"
  ],
  "ConstraintDescription":"must be a valid EC2 instance type."
},
@@ -56,13 +67,24 @@
  "m2.xlarge",
  "m2.2xlarge",
  "m2.4xlarge",
-  "m3.xlarge",
-  "m3.2xlarge",
+  "cr1.8xlarge",
+  "cc2.8xlarge",
  "c1.medium",
  "c1.xlarge",
-  "cc1.4xlarge",
-  "cc2.8xlarge",
-  "cg1.4xlarge"
+  "m3.medium",
+  "m3.large",
+  "m3.xlarge",
+  "m3.2xlarge",
+  "c3.large",
+  "c3.xlarge",
+  "c3.2xlarge",
+  "c3.4xlarge",
+  "c3.8xlarge",
+  "r3.large",
+  "r3.xlarge",
+  "r3.2xlarge",
+  "r3.4xlarge",
+  "r3.8xlarge"
  ],
  "ConstraintDescription":"must be a valid EC2 instance type."
},
@@ -79,13 +101,24 @@
  "m2.xlarge",
  "m2.2xlarge",
  "m2.4xlarge",
-  "m3.xlarge",
-  "m3.2xlarge",
+  "cr1.8xlarge",
+  "cc2.8xlarge",
  "c1.medium",
  "c1.xlarge",
-  "cc1.4xlarge",
-  "cc2.8xlarge",
-  "cg1.4xlarge"
+  "m3.medium",
+  "m3.large",
+  "m3.xlarge",
+  "m3.2xlarge",
+  "c3.large",
+  "c3.xlarge",
+  "c3.2xlarge",
+  "c3.4xlarge",
+  "c3.8xlarge",
+  "r3.large",
+  "r3.xlarge",
+  "r3.2xlarge",
+  "r3.4xlarge",
+  "r3.8xlarge"
  ],
  "ConstraintDescription":"must be a valid EC2 instance type."
},
@@ -117,25 +150,39 @@
},
"Mappings":{
"AWSInstanceType2Arch":{
-  "t1.micro": { "Arch":"64" },
-  "m1.small": { "Arch":"64" },
-  "m1.medium": { "Arch":"64" },
-  "m1.large": { "Arch":"64" },
-  "m1.xlarge": { "Arch":"64" },
-  "m2.xlarge": { "Arch":"64" },
-  "m2.2xlarge": { "Arch":"64" },
-  "m2.4xlarge": { "Arch":"64" },
-  "m3.xlarge": { "Arch":"64" },
-  "m3.2xlarge": { "Arch":"64" },
-  "c1.medium": { "Arch":"64" },
-  "c1.xlarge": { "Arch":"64" },
-  "cg1.4xlarge": { "Arch":"64HVM" }
+  "t1.micro" : { "Arch" : "64" },
+  "m1.small" : { "Arch" : "64" },
+  "m1.medium" : { "Arch" : "64" },
+  "m1.large" : { "Arch" : "64" },
+  "m1.xlarge" : { "Arch" : "64" },
+  "m2.xlarge" : { "Arch" : "64" },
+  "m2.2xlarge" : { "Arch" : "64" },
+  "m2.4xlarge" : { "Arch" : "64" },
+  "cr1.8xlarge" : { "Arch" : "64" },
+  "cc2.8xlarge" : { "Arch" : "64" },
+  "c1.medium" : { "Arch" : "64" },
+  "c1.xlarge" : { "Arch" : "64" },
+  "m3.medium" : { "Arch" : "64" },
+  "m3.large" : { "Arch" : "64" },
+  "m3.xlarge" : { "Arch" : "64" },
+  "m3.2xlarge" : { "Arch" : "64" },
+  "m3.4xlarge" : { "Arch" : "64" },
+  "c3.large" : { "Arch" : "64" },
+  "c3.xlarge" : { "Arch" : "64" },
+  "c3.2xlarge" : { "Arch" : "64" },
+  "c3.4xlarge" : { "Arch" : "64" },
+  "c3.8xlarge" : { "Arch" : "64" },
+  "r3.large" : { "Arch" : "64" },
+  "r3.xlarge" : { "Arch" : "64" },
+  "r3.2xlarge" : { "Arch" : "64" },
+  "r3.4xlarge" : { "Arch" : "64" },
+  "r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI":{
-  "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
+  "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
  "us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
-  "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
+  "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
-  "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
+  "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
  "sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
  "ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
  "ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
@@ -183,7 +230,7 @@
"Fn::FindInMap":[
  "MapRegionsToAvailZones",
  { "Ref":"AWS::Region" },
-  "AZone0"
+  "AZone1"
]
},
"Tags":[
@@ -205,7 +252,7 @@
"Fn::FindInMap":[
  "MapRegionsToAvailZones",
  { "Ref":"AWS::Region" },
-  "AZone0"
+  "AZone1"
]
},
"Tags":[
@@ -364,7 +411,7 @@
}
}
},
-"InboundEmphemeralPublicNetworkAclEntry":{
+"InboundSMTPPublicNetworkAclEntry":{
  "Type":"AWS::EC2::NetworkAclEntry",
  "Properties":{
    "NetworkAclId":{
@@ -376,6 +423,23 @@
    "Egress":"false",
    "CidrBlock":"0.0.0.0/0",
    "PortRange":{
+      "From":"587",
+      "To":"587"
+    }
+  }
+},
+"InboundEmphemeralPublicNetworkAclEntry":{
+  "Type":"AWS::EC2::NetworkAclEntry",
+  "Properties":{
+    "NetworkAclId":{
+      "Ref":"PublicNetworkAcl"
+    },
+    "RuleNumber":"104",
+    "Protocol":"6",
+    "RuleAction":"allow",
+    "Egress":"false",
+    "CidrBlock":"0.0.0.0/0",
+    "PortRange":{
      "From":"1024",
      "To":"65535"
    }
@@ -582,6 +646,12 @@
  "FromPort":"443",
  "ToPort":"443",
  "CidrIp":"0.0.0.0/0"
+},
+{
+  "IpProtocol":"tcp",
+  "FromPort":"587",
+  "ToPort":"587",
+  "CidrIp":"0.0.0.0/0"
}
],
"SecurityGroupEgress":[
@@ -604,6 +674,12 @@
  "FromPort":"443",
  "ToPort":"443",
  "CidrIp":"0.0.0.0/0"
+},
+{
+  "IpProtocol":"tcp",
+  "FromPort":"587",
+  "ToPort":"587",
+  "CidrIp":"0.0.0.0/0"
}
]
}
@@ -688,10 +764,11 @@
  "IpProtocol":"tcp",
  "FromPort":"22",
  "ToPort":"22",
-  "CidrIp":"10.0.0.0/16"
+  "CidrIp":"10.254.0.0/16"
},
{
  "IpProtocol":"tcp",
+  "FromPort":"80",
  "ToPort":"80",
  "CidrIp":"0.0.0.0/0"
},
@@ -827,7 +904,23 @@
      ]
    ]
  }
+},
+"BlockDeviceMappings": [
+  {
+    "DeviceName": "/dev/sda1",
+    "Ebs":{
+      "VolumeSize": 100
+    }
+  },
+  {
+    "DeviceName": "/dev/sdb",
+    "VirtualName": "ephemeral0"
+  },
+  {
+    "DeviceName": "/dev/sdc",
+    "VirtualName": "ephemeral1"
+  }
+]
}
},
"AdminSecurityGroup":{
@@ -47,10 +47,25 @@
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
+"cr1.8xlarge" : { "Arch" : "64" },
+"cc2.8xlarge" : { "Arch" : "64" },
+"c1.medium" : { "Arch" : "64" },
+"c1.xlarge" : { "Arch" : "64" },
+"m3.medium" : { "Arch" : "64" },
+"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
-"c1.medium" : { "Arch" : "64" },
-"c1.xlarge" : { "Arch" : "64" }
+"m3.4xlarge" : { "Arch" : "64" },
+"c3.large" : { "Arch" : "64" },
+"c3.xlarge" : { "Arch" : "64" },
+"c3.2xlarge" : { "Arch" : "64" },
+"c3.4xlarge" : { "Arch" : "64" },
+"c3.8xlarge" : { "Arch" : "64" },
+"r3.large" : { "Arch" : "64" },
+"r3.xlarge" : { "Arch" : "64" },
+"r3.2xlarge" : { "Arch" : "64" },
+"r3.4xlarge" : { "Arch" : "64" },
+"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI" : {
@@ -46,10 +46,25 @@
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
+"cr1.8xlarge" : { "Arch" : "64" },
+"cc2.8xlarge" : { "Arch" : "64" },
+"c1.medium" : { "Arch" : "64" },
+"c1.xlarge" : { "Arch" : "64" },
+"m3.medium" : { "Arch" : "64" },
+"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
-"c1.medium" : { "Arch" : "64" },
-"c1.xlarge" : { "Arch" : "64" }
+"m3.4xlarge" : { "Arch" : "64" },
+"c3.large" : { "Arch" : "64" },
+"c3.xlarge" : { "Arch" : "64" },
+"c3.2xlarge" : { "Arch" : "64" },
+"c3.4xlarge" : { "Arch" : "64" },
+"c3.8xlarge" : { "Arch" : "64" },
+"r3.large" : { "Arch" : "64" },
+"r3.xlarge" : { "Arch" : "64" },
+"r3.2xlarge" : { "Arch" : "64" },
+"r3.4xlarge" : { "Arch" : "64" },
+"r3.8xlarge" : { "Arch" : "64" }
},
"AWSRegionArch2AMI" : {
@@ -11,7 +11,7 @@
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
-regions = all
+regions = us-east-1
regions_exclude = us-gov-west-1

# When generating inventory, Ansible needs to know how to address a server.
@@ -217,7 +217,14 @@ class Ec2Inventory(object):
            config.get('ec2', 'route53_excluded_zones', '').split(','))

        # Cache related
-        cache_path = config.get('ec2', 'cache_path')
+        if 'EC2_CACHE_PATH' in os.environ:
+            cache_path = os.environ['EC2_CACHE_PATH']
+        elif self.args.cache_path:
+            cache_path = self.args.cache_path
+        else:
+            cache_path = config.get('ec2', 'cache_path')
+        if not os.path.exists(cache_path):
+            os.makedirs(cache_path)
        self.cache_path_cache = cache_path + "/ansible-ec2.cache"
        self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
        self.cache_path_index = cache_path + "/ansible-ec2.index"
@@ -241,6 +248,10 @@ class Ec2Inventory(object):
        default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')

        parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
+        parser.add_argument(
+            '--cache-path',
+            help='Override the cache path set in ini file',
+            required=False)
        self.args = parser.parse_args()
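With the change above, the inventory cache location can be overridden per
invocation; a minimal sketch (paths are illustrative):

    EC2_CACHE_PATH=/var/tmp/ec2_cache ./ec2.py --list
    ./ec2.py --cache-path ec2_cache/stage --list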
@@ -2,16 +2,12 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - certs
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -2,15 +2,11 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - common
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -4,9 +4,26 @@
  sudo: True
  serial: 1
  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
+    # By default take instances in and out of the elb(s) they
+    # are attached to.
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
  roles:
    - aws
    - role: nginx
@@ -14,17 +31,29 @@
        - xqueue
    - role: xqueue
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
    - oraclejdk
    - elasticsearch
    - rabbitmq
    - datadog
    - splunkforwarder
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
#
# In order to reconfigure the host resolution we are issuing a
# reboot.
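With these hooks in place, the ELB handling and batch size can be tuned from
the command line; for example (playbook name illustrative):

    ansible-playbook -i ec2.py edx-stage-xqueue.yml -e "elb_pre_post=false serial_count=2"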
- name: connect a sandbox to production data
  hosts: all
  gather_facts: False
  sudo: True
  tasks:
    - name: Switch the mongo db to use ephemeral
      file: >
        name=/mnt/mongodb
        state=directory
        owner=mongodb
        group=mongodb
      tags: update_mongo_data
    - name: update the mongo config to use the new mongo dir
      shell: >
        sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
      tags: update_mongo_data
    - name: restart mongodb
      service: >
        name=mongodb
        state=restarted
      tags: update_mongo_data
    - name: list the most recent backup from s3 for forums
      shell: >
        /edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
      register: s3cmd_out_forum
      tags: update_mongo_data
    - name: download the most recent backup from s3 for forums
      shell: >
        /edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
        chdir=/mnt
      tags: update_mongo_data
    - name: untar the forums s3 backup
      shell: >
        tar zxf {{ s3cmd_out_forum.stdout|basename }}
        chdir=/mnt
      tags: update_mongo_data
    - name: list the most recent backup from s3 for prod-edx
      shell: >
        /edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
      register: s3cmd_out_modulestore
      tags: update_mongo_data
    - name: download the most recent backup from s3 for prod-edx
      shell: >
        /edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
        chdir=/mnt
      tags: update_mongo_data
    - name: untar the prod-edx s3 backup
      shell: >
        tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
        chdir=/mnt
      tags: update_mongo_data
    - name: Restore the mongo data for the forums
      shell: >
        mongorestore --drop -d cs_comments_service /mnt/comments-prod
      tags: update_mongo_data
    - name: Restore the mongo data for the modulestore
      shell: >
        mongorestore --drop -d edxapp /mnt/prod-edx
      tags: update_mongo_data
    # recreate users after the restore
    - name: create mongodb users
      mongodb_user: >
        database={{ item.database }}
        name={{ item.user }}
        password={{ item.password }}
        state=present
      with_items:
        - user: cs_comments_service
          password: password
          database: cs_comments_service
        - user: edxapp
          password: password
          database: edxapp
    # WARNING - calling lineinfile on a symlink
    # will convert the symlink to a file!
    # don't use /edx/etc/server-vars.yml here
    #
    # What we are doing here is updating the sandbox
    # server-vars config file so that when update
    # is called it will use the new MYSQL connection
    # info.
    - name: Update RDS to point to the sandbox clone
      lineinfile: >
        dest=/edx/app/edx_ansible/server-vars.yml
        line="{{ item }}"
      with_items:
        - "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
        - "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
        - "EDXAPP_MYSQL_USER: {{ EDXAPP_MYSQL_USER }}"
        - "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
      tags: update_edxapp_mysql_host
    - name: call update on edx-platform
      shell: >
        /edx/bin/update edx-platform master
      tags: update_edxapp_mysql_host
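A typical run of this play targets one tagged phase at a time; for example
(playbook filename and host are illustrative):

    ansible-playbook -i 'sandbox-host,' connect_sandbox.yml -e@/path/to/secrets.yml --tags update_mongo_data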
@@ -2,15 +2,11 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - demo
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -2,10 +2,6 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - role: nginx
@@ -13,8 +9,8 @@
        - discern
    - discern
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -6,10 +6,6 @@
  vars:
    migrate_db: "yes"
    openid_workaround: True
-    ENABLE_DATADOG: True
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
-    edx_internal: False
  roles:
    - aws
    - role: nginx
@@ -33,14 +29,14 @@
    - elasticsearch
    - forum
    - { role: "xqueue", update_users: True }
-    - { role: xserver, when: edx_internal }
+    - xserver
    - ora
-    - discern
    - certs
    - edx_ansible
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
+    - flower
@@ -2,10 +2,6 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - role: nginx
@@ -16,8 +12,8 @@
        - lms
    - edxapp
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -3,7 +3,41 @@
  vars_files:
    - "{{ secure_dir }}/vars/common/common.yml"
    - "{{ secure_dir }}/vars/stage/stage-edx.yml"
+  vars:
+    # By default take instances in and out of the elb(s) they
+    # are attached to.
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
  roles:
    - common
    - oraclejdk
    - elasticsearch
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
- name: Deploy celery flower (monitoring tool)
  hosts: all
  sudo: True
  gather_facts: True
  roles:
    - flower
@@ -2,10 +2,6 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - role: nginx
@@ -13,10 +9,8 @@
        - forum
    - forum
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
-    - role: newrelic
-      when: ENABLE_NEWRELIC
@@ -3,19 +3,16 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - edx_ansible
    - user
    - jenkins_admin
    - hotg
+    - alton
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -28,6 +28,7 @@
  sudo_user: "{{ edxapp_user }}"
  notify:
    - "restart edxapp"
+    - "restart workers"

- name: syncdb and migrate
  shell: >
@@ -38,10 +39,11 @@
    DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}"
  notify:
    - "restart edxapp"
+    - "restart workers"

handlers:
  - name: restart edxapp
-    shell: "{{ supervisorctl_path }} restart edxapp:{{ item }}"
-    with_items:
-      - lms
-      - cms
+    shell: "{{ supervisorctl_path }} restart edxapp:"
+  - name: restart workers
+    shell: "{{ supervisorctl_path }} restart edxapp_worker:"
@@ -5,6 +5,40 @@
  # ansible_default_ipv4 so
  # gather_facts must be set to True
  gather_facts: True
+  vars:
+    # By default take instances in and out of the elb(s) they
+    # are attached to.
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
  roles:
    - aws
    - rabbitmq
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
- hosts: all
  sudo: true
  vars:
    # By default take instances in and out of the elb(s) they
    # are attached to.
    # To skip elb operations use "-e elb_pre_post=false"
    elb_pre_post: true
    # Number of instances to operate on at a time
    serial_count: 1
  serial: "{{ serial_count }}"
  pre_tasks:
    - action: ec2_facts
      when: elb_pre_post
    - debug: var="{{ ansible_ec2_instance_id }}"
      when: elb_pre_post
    - name: Instance De-register
      local_action: ec2_elb_local_1.6.2
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        region: us-east-1
        state: absent
      sudo: False
      when: elb_pre_post
  tasks:
    - shell: echo "test"
  post_tasks:
    - debug: var="{{ ansible_ec2_instance_id }}"
      when: elb_pre_post
    - name: Register instance in the elb
      local_action: ec2_elb_local_1.6.2
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        ec2_elbs: "{{ item }}"
        region: us-east-1
        state: present
      with_items: ec2_elbs
      sudo: False
      when: elb_pre_post
# This is a utility play to set up the db users on the edxapp db.
#
# The mysql root user MUST be passed in as an extra var.
#
# The environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack.
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
  hosts: all
  gather_facts: False
  vars:
    edxapp_db_root_user: 'None'
    edxapp_db_root_pass: 'None'
  tasks:
    - fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
      when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
    - name: assign mysql user permissions for read_only user
      mysql_user:
        name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
        priv: "*.*:SELECT"
        password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
        login_host: "{{ item.db_host }}"
        login_user: "{{ item.db_user }}"
        login_password: "{{ item.db_pass }}"
        append_privs: yes
        host: '%'
      when: item.db_user != 'None'
      with_items:
        - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
          db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
          db_user: "{{ edxapp_db_root_user }}"
          db_pass: "{{ edxapp_db_root_pass }}"
    - name: assign mysql user permissions for migrate user
      mysql_user:
        name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
        priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
        password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
        login_host: "{{ item.db_host }}"
        login_user: "{{ item.db_user }}"
        login_password: "{{ item.db_pass }}"
        append_privs: yes
        host: '%'
      when: item.db_user != 'None'
      with_items:
        - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
          db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
          db_user: "{{ edxapp_db_root_user }}"
          db_pass: "{{ edxapp_db_root_pass }}"
    - name: assign mysql user permissions for admin user
      mysql_user:
        name: "{{ COMMON_MYSQL_ADMIN_USER }}"
        priv: "*.*:CREATE USER"
        password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
        login_host: "{{ item.db_host }}"
        login_user: "{{ item.db_user }}"
        login_password: "{{ item.db_pass }}"
        append_privs: yes
        host: '%'
      when: item.db_user != 'None'
      with_items:
        - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
          db_user: "{{ edxapp_db_root_user }}"
          db_pass: "{{ edxapp_db_root_pass }}"
    - name: assign mysql user permissions for db users
      mysql_user:
        name: "{{ item.db_user_to_modify }}"
        priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
        password: "{{ item.db_user_to_modify_pass }}"
        login_host: "{{ item.db_host }}"
        login_user: "{{ item.db_user }}"
        login_password: "{{ item.db_pass }}"
        host: '%'
      when: item.db_user != 'None'
      with_items:
        # These defaults are needed, otherwise ansible will throw
        # variable undefined errors when they are not defined
        # in secret vars
        - db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
          db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
          db_user: "{{ edxapp_db_root_user|default('None') }}"
          db_pass: "{{ edxapp_db_root_pass|default('None') }}"
          db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
          db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
    # The second call to mysql_user needs to have append_privs set to
    # yes, otherwise it will overwrite the previous run.
    # This means that both tasks will report changed on every ansible
    # run.
    - name: assign mysql user permissions for db test user
      mysql_user:
        append_privs: yes
        name: "{{ item.db_user_to_modify }}"
        priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
        password: "{{ item.db_user_to_modify_pass }}"
        login_host: "{{ item.db_host }}"
        login_user: "{{ item.db_user }}"
        login_password: "{{ item.db_pass }}"
        host: '%'
      when: item.db_user != 'None'
      with_items:
        # These defaults are needed, otherwise ansible will throw
        # variable undefined errors when they are not defined
        # in secret vars
        - db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
          db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
          db_user: "{{ edxapp_db_root_user|default('None') }}"
          db_pass: "{{ edxapp_db_root_pass|default('None') }}"
          db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
          db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
@@ -2,17 +2,13 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - role: edxapp
      celery_worker: True
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -3,9 +3,26 @@
  sudo: True
  gather_facts: True
  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
+    # By default take instances in and out of the elb(s) they
+    # are attached to.
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
  roles:
    - aws
    - role: nginx
@@ -13,8 +30,21 @@
        - xqueue
    - role: xqueue
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
- name: Deploy xqueue-watcher
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    COMMON_APP_DIR: "/edx/app"
    common_web_group: "www-data"
    ENABLE_DATADOG: False
    ENABLE_SPLUNKFORWARDER: False
    ENABLE_NEWRELIC: False
  roles:
    - aws
    - xqwatcher
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
\ No newline at end of file
@@ -2,10 +2,6 @@
  hosts: all
  sudo: True
  gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
  roles:
    - aws
    - role: nginx
@@ -13,8 +9,8 @@
        - xserver
    - role: xserver
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
../ansible.cfg
\ No newline at end of file
*
!prod
!stage
!data
!.gitignore
This temp directory is created here so that we can make sure it doesn't
collide with other users doing ansible operations on the same machine,
or with concurrent installs to different environments, say to prod and stage.
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# These are environment-specific defaults
forks=10
transport=ssh
hostfile=./ec2.py
extra_vars='key=deployment region=us-west-1'
user=ubuntu
[ssh_connection]
# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
ssh_args= -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
scp_if_ssh=True
@@ -3,6 +3,6 @@ regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
-cache_path=/tmp
+cache_path=ec2_cache/prod
cache_max_age=300
route53=False
[ec2]
regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=ec2_cache/stage
cache_max_age=300
route53=False
@@ -14,9 +14,8 @@
    EDXAPP_LMS_NGINX_PORT: '80'
    edx_platform_version: 'master'
    # These should stay false for the public AMI
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
+    COMMON_ENABLE_DATADOG: False
+    COMMON_ENABLE_SPLUNKFORWARDER: False
  roles:
    - role: nginx
      nginx_sites:
@@ -41,8 +40,8 @@
    - certs
    - edx_ansible
    - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
---
# Build a kibana/logstash/elasticsearch server for capturing and
# analyzing logs.
- name: Configure syslog server
  hosts: all
  sudo: yes
  roles:
    - common
    - oraclejdk
    - elasticsearch
    - logstash
    - kibana
    - role: nginx
      nginx_sites:
        - kibana
---
AIDE_REPORT_EMAIL: 'root'
---
# install and configure aide IDS
#
- name: install aide
  apt: pkg="aide" state="present"

- name: configure aide defaults
  template: >
    src=etc/default/aide.j2 dest=/etc/default/aide
    owner=root group=root mode=0644

- name: aide initial scan (this can take a long time)
  command: >
    aideinit -y -f
    creates=/var/lib/aide/aide.db
  sudo: yes
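Once the initial database exists, a manual integrity check can be run with
something like the following (the config path is Ubuntu's default and is an
assumption here, adjust as needed):

    sudo aide --check --config=/etc/aide/aide.conf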
# These settings are mainly for the wrapper scripts around aide,
# such as aideinit and /etc/cron.daily/aide
# This is used as the host name in the AIDE reports that are sent out
# via e-mail. It defaults to the output of $(hostname --fqdn), but can
# be set to arbitrary values.
# FQDN=
# This is used as the subject for the e-mail reports.
# If your mail system only threads by subject, you might want to add
# some variable content here (for example $(date +%Y-%m-%d)).
MAILSUBJ="Daily AIDE report for $FQDN"
# This is the email address reports get mailed to
# default is root
# This variable is expanded before it is used, so you can use variables
# here. For example, MAILTO=$FQDN-aide@domain.example will send the
# report to host.name.example-aide@domain.example if the local FQDN is
# host.name.example.
MAILTO={{ AIDE_REPORT_EMAIL }}
# Set this to yes to suppress mailings when no changes have been
# detected during the AIDE run and no error output was given.
#QUIETREPORTS=no
# This parameter defines which AIDE command to run from the cron script.
# Sensible values are "update" and "check".
# Default is "check", ensuring backwards compatibility.
# Since "update" does not take any longer, it is recommended to use "update",
# so that a new database is created every day. The new database needs to be
# manually copied over the current one, though.
COMMAND=update
# This parameter defines what to do with a new database created by
# COMMAND=update. It is ignored if COMMAND!=update.
# no: Do not copy new database to old database. This is the default.
# yes: Copy new database to old database. This means that changes to the
# file system are only reported once. Possibly dangerous.
# ifnochange: Copy new database to old database if no changes have
# been reported. This is needed for ANF/ARF to work reliably.
COPYNEWDB=no
# Set this to yes to truncate the detailed changes part in the mail. The full
# output will still be listed in the log file.
TRUNCATEDETAILS=yes
# Set this to yes to suppress file changes by package and security
# updates from appearing in the e-mail report. Filtered file changes will
# still be listed in the log file. This option parses the /var/log/dpkg.log
# file and implies TRUNCATEDETAILS=yes
FILTERUPDATES=yes
# Set this to yes to suppress file changes by package installations
# from appearing in the e-mail report. Filtered file changes will still
# be listed in the log file. This option parses the /var/log/dpkg.log file and
# implies TRUNCATEDETAILS=yes.
FILTERINSTALLATIONS=yes
# This parameter defines how many lines to return per e-mail. Output longer
# than this value will be truncated in the e-mail sent out.
# Set value to "0" to disable this option.
LINES=1000
# This parameter gives a grep regular expression. If given, all output lines
# that _don't_ match the regexp are listed first in the script's output. This
# allows to easily remove noise from the AIDE report.
NOISE=""
# This parameter defines which options are given to aide in the daily
# cron job. The default is "-V4".
AIDEARGS=""
# These parameters control update-aide.conf and give the defaults for
# the --confdir, --confd and --settingsd options
# UPAC_CONFDIR="/etc/aide"
# UPAC_CONFD="$UPAC_CONFDIR/aide.conf.d"
# UPAC_SETTINGSD="$UPAC_CONFDIR/aide.settings.d"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role alton
#
#
# vars are namespaced with the module name.
#
ALTON_USERNAME: '1234_1234@chat.hipchat.com'
ALTON_PASSWORD: 'password'
ALTON_V2_TOKEN: 'HIPCHAT_V2_TOKEN'
ALTON_ROOMS: 'Hammer'
ALTON_NAME: 'Alton W. Daemon'
ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@redis.url:port'
ALTON_HTTPSERVER_PORT: '8081'
alton_role_name: alton
alton_user: alton
alton_app_dir: "{{ COMMON_APP_DIR }}/alton"
alton_code_dir: "{{ alton_app_dir }}/alton"
alton_venvs_dir: "{{ alton_app_dir }}/venvs"
alton_venv_dir: "{{ alton_venvs_dir }}/alton"
alton_venv_bin: "{{ alton_venv_dir }}/bin"
alton_source_repo: "https://github.com/edx/alton.git"
alton_version: "HEAD"
alton_requirements_file: "{{ alton_code_dir }}/requirements.txt"
alton_supervisor_wrapper: "{{ alton_app_dir }}/alton-supervisor.sh"
alton_environment:
  WILL_USERNAME: "{{ ALTON_USERNAME }}"
  WILL_PASSWORD: "{{ ALTON_PASSWORD }}"
  WILL_V2_TOKEN: "{{ ALTON_V2_TOKEN }}"
  WILL_ROOMS: "{{ ALTON_ROOMS }}"
  WILL_NAME: "{{ ALTON_NAME }}"
  WILL_HANDLE: "{{ ALTON_HANDLE }}"
  WILL_REDIS_URL: "{{ ALTON_REDIS_URL }}"
  WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
#
# OS packages
#
alton_debian_pkgs: []
alton_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role alton
#
# Overview:
#
#
- name: restart alton
  supervisorctl_local: >
    name=alton
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=restarted
  when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role alton
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
  - supervisor
- name: checkout the code
  git: >
    dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
    version="{{ alton_version }}" accept_hostkey=yes
  sudo_user: "{{ alton_user }}"

- name: install the requirements
  pip: >
    requirements="{{ alton_requirements_file }}"
    virtualenv="{{ alton_venv_dir }}"
    state=present
    extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
  sudo_user: "{{ alton_user }}"

- name: create the supervisor wrapper
  template: >
    src="{{ alton_supervisor_wrapper|basename }}.j2"
    dest="{{ alton_supervisor_wrapper }}"
    mode=0755
  sudo_user: "{{ alton_user }}"
  notify: restart alton

- name: create a supervisor config
  template: >
    src=alton.conf.j2 dest="{{ supervisor_available_dir }}/alton.conf"
    owner="{{ supervisor_user }}"
    group="{{ supervisor_user }}"
  sudo_user: "{{ supervisor_user }}"
  notify: restart alton

- name: enable the supervisor config
  file: >
    src="{{ supervisor_available_dir }}/alton.conf"
    dest="{{ supervisor_cfg_dir }}/alton.conf"
    state=link
    force=yes
    mode=0644
  sudo_user: "{{ supervisor_user }}"
  when: not disable_edx_services
  notify: restart alton

- name: update supervisor configuration
  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
  register: supervisor_update
  changed_when: supervisor_update.stdout != ""
  when: not disable_edx_services

- name: ensure alton is started
  supervisorctl_local: >
    name=alton
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=started
  when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role alton
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: create application user
  user: >
    name="{{ alton_user }}" home="{{ alton_app_dir }}"
    createhome=no shell=/bin/false

- name: create alton user dirs
  file: >
    path="{{ item }}" state=directory
    owner="{{ alton_user }}" group="{{ common_web_group }}"
  with_items:
    - "{{ alton_app_dir }}"
    - "{{ alton_venvs_dir }}"

- name: setup the alton env
  template: >
    src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
    owner="{{ alton_user }}" group="{{ common_web_user }}"
    mode=0644
  notify: restart alton

- include: deploy.yml tags=deploy
#!/bin/bash
source {{ alton_app_dir }}/alton_env
cd {{ alton_code_dir }}
{{ alton_venv_bin }}/python run_alton.py
[program:alton]
command={{ alton_supervisor_wrapper }}
priority=999
user={{ common_web_user }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
stopsignal=QUIT
# {{ ansible_managed }}
{% for name,value in alton_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
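Rendered against the role defaults above, this template emits one export per
entry in alton_environment, for example (values are the role's placeholder
defaults, not real credentials):

    export WILL_USERNAME="1234_1234@chat.hipchat.com"
    export WILL_ROOMS="Hammer"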
@@ -56,6 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs:
  - https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
  - awscli
+  - boto==2.20.1
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
@@ -47,7 +47,7 @@
    - db_host: "{{ ORA_MYSQL_HOST }}"
      db_name: "{{ ORA_MYSQL_DB_NAME }}"
      script_name: ora-rds.sh
-  when: COMMON_MYSQL_READ_ONLY_PASS
+  when: COMMON_MYSQL_READ_ONLY_PASS is defined

# These templates rely on there being a global
# read_only mongo user, you must override the default
@@ -67,4 +67,4 @@
      db_name: "{{ FORUM_MONGO_DATABASE }}"
      db_port: "{{ FORUM_MONGO_PORT }}"
      script_name: forum-mongo.sh
-  when: COMMON_MONGO_READ_ONLY_PASS
+  when: COMMON_MONGO_READ_ONLY_PASS is defined
@@ -60,7 +60,9 @@
  notify: restart certs

- name: install python requirements
-  pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
+  pip: >
+    requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
+    extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
  sudo_user: "{{ certs_user }}"
  notify: restart certs
@@ -18,6 +18,7 @@ COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
+COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
@@ -46,7 +47,11 @@ COMMON_MYSQL_MIGRATE_PASS: !!null
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
+COMMON_ENABLE_DATADOG: False
+COMMON_DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
+COMMON_ENABLE_SPLUNKFORWARDER: False
+COMMON_ENABLE_NEWRELIC: False
+COMMON_NEWRELIC_LICENSE: "YOUR_NEWRELIC_LICENSE"
common_debian_pkgs:
  - ntp
  - ack-grep
@@ -67,6 +72,7 @@ common_debian_pkgs:
common_pip_pkgs:
  - pip==1.5.4
+  - setuptools==3.6
  - virtualenv==1.11.4
  - virtualenvwrapper
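These new COMMON_* switches replace the per-play ENABLE_* vars removed
throughout this commit, so monitoring roles can be toggled once for a whole
deployment; for example (playbook name illustrative):

    ansible-playbook -i ec2.py edxapp.yml -e "COMMON_ENABLE_DATADOG=True"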
@@ -109,7 +109,7 @@
    path={{ item }}
  with_items:
    - "/etc/update-motd.d/10-help-text"
-    - "/usr/share/landscape/50-landscape-sysinfo"
+    - "/usr/share/landscape/landscape-sysinfo.wrapper"
    - "/etc/update-motd.d/51-cloudguest"
    - "/etc/update-motd.d/91-release-upgrade"
---
-DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_debian_pkgs:
@@ -45,7 +45,7 @@
  lineinfile: >
    dest="/etc/dd-agent/datadog.conf"
    regexp="^api_key:.*"
-    line="api_key:{{ DATADOG_API_KEY }}"
+    line="api_key:{{ COMMON_DATADOG_API_KEY }}"
  notify:
    - restart the datadog service
  tags:
@@ -6,6 +6,7 @@ dependencies:
    rbenv_dir: "{{ edxapp_app_dir }}"
    rbenv_ruby_version: "{{ edxapp_ruby_version }}"
  - devpi
+  - nltk
  - role: user
    user_info:
      - name: "{{ EDXAPP_AUTOMATOR_NAME }}"
@@ -108,7 +108,7 @@
# Set the npm registry
- name: Set the npm registry
  shell:
-    npm config set registry 'http://registry.npmjs.org'
+    npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
    creates="{{ edxapp_app_dir }}/.npmrc"
  sudo_user: "{{ edxapp_user }}"
  environment: "{{ edxapp_environment }}"
@@ -192,7 +192,6 @@
    {{ edxapp_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
    chdir={{ edxapp_code_dir }}
  with_items:
-    - "{{ repo_requirements_file }}"
    - "{{ github_requirements_file }}"
    - "{{ local_requirements_file }}"
  sudo_user: "{{ edxapp_user }}"
@@ -219,7 +218,6 @@
  - "restart edxapp"
  - "restart edxapp_workers"

# If using CAS and you have a function for mapping attributes, install
-# the module here. The next few tasks set up the python code sandbox
+# the module here.
- name: install CAS attribute module
...@@ -250,6 +248,33 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
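For reference, the S3 import path above would be driven by vars along these lines (bucket and key values invented; the key should match the archive that the xml.yml tasks later in this diff upload):

EDXAPP_XML_FROM_GIT: false
EDXAPP_XML_S3_BUCKET: "my-xml-course-bucket"
EDXAPP_XML_S3_KEY: "static_course_content.tar.gz"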
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
...@@ -334,6 +359,9 @@
# service variants configured, runs
# gather_assets and db migrations
- include: service_variant_config.yml
tags:
- service_variant_config
- deploy
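Tagging the include means a deploy can re-run just the asset compilation and migrations on an existing box, e.g. (playbook name and inventory are illustrative):

ansible-playbook -i 'localhost,' -c local edxapp.yml --tags service_variant_config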
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
...
...@@ -25,6 +25,7 @@
- "{{ edxapp_venvs_dir }}"
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- name: create edxapp log dir
file: >
...
- name: clone the xml course repo
git: >
repo="{{ item.repo_url }}"
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
version="{{ item.version }}"
accept_hostkey=True
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
with_items: EDXAPP_XML_COURSES
- name: update course.xml
template: >
src=course.xml.j2
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/course.xml"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
- name: make symlinks for the static data
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: make symlinks so code works
file: >
src="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
dest="{{ edxapp_course_data_dir }}/{{ item.course }}"
state=link
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: import courses with nostatic flag
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import --nostatic {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "no static import"
- name: import courses including static data
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete courses that were fully imported
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode=put
overwrite=True
src="/tmp/static_course_content.tar.gz"
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
- name: remove archive from disk
file: path="/tmp/static_course_content.tar.gz" state=absent
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
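Each entry in EDXAPP_XML_COURSES supplies the fields the tasks above reference: a repo to clone, the org/course/run triple used to render course.xml, and a disposition selecting one of the three handling strategies. A hypothetical entry (all values invented):

EDXAPP_XML_COURSES:
  - repo_url: "git@github.com:example/content-demo.git"
    repo_name: "content-demo"
    version: "master"
    org: "ExampleX"
    course: "Demo101"
    run: "2014_Spring"
    disposition: "on disk"   # or "no static import" or "import"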
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
[program:cms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ COMMON_NEWRELIC_LICENSE }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
...
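When COMMON_ENABLE_NEWRELIC is set, the rendered command wraps gunicorn in the New Relic agent. Assuming the standard /edx/app/edxapp layout, a 4-core host, and worker_core_mult.cms of 2 (illustrative values only), the supervisor entry comes out roughly as:

command=/edx/app/edxapp/venvs/edxapp/bin/newrelic-admin run-program /edx/app/edxapp/venvs/edxapp/bin/gunicorn --preload -b 127.0.0.1:8010 -w 8 --timeout=300 --pythonpath=/edx/app/edxapp/edx-platform cms.wsgi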
#include <tunables/global>
{{ edxapp_sandbox_venv_dir }}/bin/python {
#include <abstractions/base>
{{ edxapp_sandbox_venv_dir }}/** mr,
{{ edxapp_code_dir }}/common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix,
/tmp/codejail-*/** wrix,
#
# Whitelist particular shared objects from the system
...@@ -19,6 +19,7 @@
/usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
#
# Allow access to selections from /proc
...
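Dropping flags=(complain) moves the sandbox profile to enforce mode, and the wrix rule is what lets jailed code write into its per-invocation codejail tmp directory. After deploying, a quick manual check (apparmor-utils package) is:

sudo apparmor_status    # the sandbox python path should be listed under "profiles are in enforce mode"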
<course org="{{ item.org }}" course="{{ item.course }}" url_name="{{ item.run }}" />
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{{ lms_preview_auth_config | to_nice_json }}
[program:lms-preview]
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms-preview"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
stopasgroup=true
{% do lms_preview_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_preview_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ lms_preview_env_config | to_nice_json }}
[program:lms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ COMMON_NEWRELIC_LICENSE }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
...
---
# By default, point to the RabbitMQ broker running locally
FLOWER_BROKER_USERNAME: "celery"
FLOWER_BROKER_PASSWORD: "celery"
FLOWER_BROKER_HOST: "127.0.0.1"
FLOWER_BROKER_PORT: 5672
FLOWER_ADDRESS: "0.0.0.0"
FLOWER_PORT: "5555"
flower_user: "flower"
flower_app_dir: "{{ COMMON_APP_DIR }}/flower"
flower_data_dir: "{{ COMMON_DATA_DIR }}/flower"
flower_log_dir: "{{ COMMON_LOG_DIR }}/flower"
flower_venv_dir: "{{ flower_app_dir }}/venvs/flower"
flower_venv_bin: "{{ flower_venv_dir }}/bin"
flower_python_reqs:
- "flower==0.7.0"
flower_deploy_path: "{{ flower_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/bin:/sbin:/bin"
flower_broker: "amqp://{{ FLOWER_BROKER_USERNAME }}:{{ FLOWER_BROKER_PASSWORD }}@{{ FLOWER_BROKER_HOST }}:{{ FLOWER_BROKER_PORT }}"
flower_environment:
PATH: $flower_deploy_path
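With the defaults above, flower_broker renders to amqp://celery:celery@127.0.0.1:5672. A real deployment would override the FLOWER_BROKER_* vars to point at its RabbitMQ cluster, e.g. (hostname and credentials invented):

FLOWER_BROKER_USERNAME: "flower_monitor"
FLOWER_BROKER_PASSWORD: "a-real-secret"
FLOWER_BROKER_HOST: "rabbit.example.internal"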
---
- name: restart flower
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="flower"
sudo_user: "{{ supervisor_service_user }}"
---
dependencies:
- supervisor
- devpi
---
- name: create application user
user: >
name="{{ flower_user }}" home="{{ flower_app_dir }}"
createhome=no shell=/bin/false
notify:
- "restart flower"
- name: create flower user dirs
file: >
path="{{ item }}" state=directory
owner="{{ flower_user }}" group="{{ common_web_group }}"
notify:
- "restart flower"
with_items:
- "{{ flower_app_dir }}"
- "{{ flower_data_dir }}"
- "{{ flower_venv_dir }}"
- "{{ flower_log_dir }}"
- name: create flower environment script
template: >
src=flower_env.j2 dest={{ flower_app_dir }}/flower_env
owner={{ flower_user }} group={{ common_web_group }}
mode=0644
notify:
- "restart flower"
- name: create virtualenv and install Python requirements
pip: >
name="{{ item }}"
virtualenv="{{ flower_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ flower_user }}"
environment: "{{ flower_environment }}"
with_items: "flower_python_reqs"
notify:
- "restart flower"
- name: create supervisor configuration
template: >
src=flower.conf.j2 dest={{ supervisor_available_dir }}/flower.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
notify:
- "restart flower"
- name: enable supervisor configuration
file: >
src={{ supervisor_available_dir }}/flower.conf
dest={{ supervisor_cfg_dir }}/flower.conf
state=link
force=yes
sudo_user: "{{ supervisor_user }}"
notify:
- "restart flower"
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
notify:
- "restart flower"
[program:flower]
environment=PATH="{{ flower_deploy_path }}"
user={{ common_web_user }}
command={{ flower_venv_bin }}/celery flower --broker {{ flower_broker }} --address={{ FLOWER_ADDRESS }} --port={{ FLOWER_PORT }}
stdout_logfile={{ supervisor_log_dir }}/flower-stdout.log
stderr_logfile={{ supervisor_log_dir }}/flower-stderr.log
# {{ ansible_managed }}
{% for name,value in flower_environment.items() %}
{%- if value %}
export {{ name }}="{{ value }}"
{%- endif %}
{% endfor %}
...@@ -26,8 +26,12 @@ FORUM_API_KEY: "password"
FORUM_ELASTICSEARCH_HOST: "localhost"
FORUM_ELASTICSEARCH_PORT: "9200"
FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}"
# This needs to be a string, set to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
FORUM_LISTEN_PORT: "4567"
...@@ -44,6 +48,7 @@ forum_environment:
SEARCH_SERVER: "{{ FORUM_ELASTICSEARCH_URL }}"
MONGOHQ_URL: "{{ FORUM_MONGO_URL }}"
HOME: "{{ forum_app_dir }}"
NEW_RELIC_ENABLE: "{{ FORUM_NEW_RELIC_ENABLE }}"
NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}"
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
...
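Because the agent reads the flag as a string, disabling New Relic for a sandbox is a one-line override:

FORUM_NEW_RELIC_ENABLE: 'false'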
...@@ -133,5 +133,9 @@
mode=0440 validate='visudo -cf %s'
- name: install global gem dependencies
gem: >
name={{ item.name }}
state=present
version={{ item.version }}
user_install=no
with_items: jenkins_admin_gem_pkgs
...@@ -44,6 +44,7 @@ jenkins_plugins:
- { name: "violations", version: "0.7.11" }
- { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4" }
jenkins_bundled_plugins:
- "credentials"
...
...@@ -126,4 +126,4 @@ jenkins_wheels:
- { pkg: "psutil==1.2.1", wheel: "psutil-1.2.1-cp27-none-linux_x86_64.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "MySQL-python==1.2.5", wheel: "MySQL_python-1.2.5-cp27-none-linux_x86_64.whl" }
...@@ -10,4 +10,5 @@
- include: system.yml
- include: python.yml
- include: ruby.yml
- include: jscover.yml
...@@ -44,3 +44,27 @@
template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# Run the wheel_venv.sh script for the first time
# This was previously done in the Jenkins global
# configuration as part of the AMI Init script.
# Moving here so that we can archive a clean snapshot
# of the virtualenv with only the defined packages
# from jenkins_wheels.
- name: Run the wheel_venv.sh script
command: >
./wheel_venv.sh edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv
sudo_user: "{{ jenkins_user }}"
# Archive the current state of the virtualenv
# as a starting point for new builds.
# The edx-venv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean virtualenv archive
command: >
tar -cpzf edx-venv_clean.tar.gz edx-venv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-venv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
---
# Archive the current state of the rbenv
# as a starting point for new builds.
# The edx-rbenv directory is deleted and then recreated
# cleanly from the archive by the jenkins build scripts.
- name: Create a clean rbenv archive
command: >
tar -cpzf edx-rbenv_clean.tar.gz .rbenv
chdir={{ jenkins_home }}
creates={{ jenkins_home }}/edx-rbenv_clean.tar.gz
sudo_user: "{{ jenkins_user }}"
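The Jenkins build scripts that consume these archives live outside this repo; the restore step they perform at the start of a build would be along these lines (a sketch, not the actual scripts):

cd "$JENKINS_HOME"
rm -rf edx-venv && tar -xpzf edx-venv_clean.tar.gz
rm -rf .rbenv && tar -xpzf edx-rbenv_clean.tar.gz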
---
KIBANA_SERVER_NAME: "192.168.33.10"
KIBANA_NGINX_PORT: 80
KIBANA_SSL_NGINX_PORT: 443
kibana_app_dir: /edx/app/kibana
kibana_file: kibana-3.0.0.tar.gz
kibana_url: "https://download.elasticsearch.org/kibana/kibana/{{ kibana_file }}"
---
- name: restart nginx
service: name=nginx state=restarted
- name: reload nginx
service: name=nginx state=reloaded
---
dependencies:
- common
- nginx
# requires:
# - oraclejdk
# - elasticsearch
# - nginx
---
- name: Ensure app apt dependencies are installed
apt: pkg={{ item }} state=installed
with_items:
- python-software-properties
- git
- nginx
- name: Ensure {{ kibana_app_dir }} exists
file: path={{ kibana_app_dir }} state=directory owner=root group=root mode=0755
- name: Ensure subdirectories exist
file: path={{ kibana_app_dir }}/{{ item }} owner=root group=root mode=0755 state=directory
with_items:
- htdocs
- share
- name: ensure we have the specified kibana release
get_url: url={{ kibana_url }} dest={{ kibana_app_dir }}/share/{{ kibana_file }}
- name: extract
shell: >
chdir={{ kibana_app_dir }}/share
tar -xzvf {{ kibana_app_dir }}/share/{{ kibana_file }}
creates={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
- name: install
shell: >
chdir={{ kibana_app_dir }}/share/{{ kibana_file|replace('.tar.gz','') }}
cp -R * {{ kibana_app_dir }}/htdocs/
- name: copy config
template: src=config.js.j2 dest={{ kibana_app_dir }}/htdocs/config.js
/**
* This is the app's configuration. If you need to configure
* the default dashboard, please see dashboards/default
*/
define(['settings'],
function (Settings) {
return new Settings({
/**
* URL to your elasticsearch server. You almost certainly don't
* want 'http://localhost:9200' here. Even if Kibana and ES are on
* the same host
*
* By default this will attempt to reach ES at the same host you have
* elasticsearch installed on. You probably want to set it to the FQDN of your
* elasticsearch host
* @type {String}
*/
//elasticsearch: "http://"+window.location.hostname+":9200",
{% if NGINX_ENABLE_SSL %}
elasticsearch: "https://{{ KIBANA_SERVER_NAME }}/e",
{% else %}
elasticsearch: "http://{{ KIBANA_SERVER_NAME }}/e",
{% endif %}
/**
* The default ES index to use for storing Kibana specific objects
* such as stored dashboards
* @type {String}
*/
kibana_index: "kibana-int",
/**
* Panel modules available. Panels will only be loaded when they are defined in the
* dashboard, but this list is used in the "add panel" interface.
* @type {Array}
*/
panel_names: [
'histogram',
'map',
'table',
'filtering',
'timepicker',
'text',
'hits',
'column',
'trends',
'bettermap',
'query',
'terms',
'stats',
'sparklines',
'goal',
]
});
});
...@@ -16,7 +16,7 @@
- name: set forum rbenv and gem permissions
file:
path={{ item }} state=directory recurse=yes mode=770
with_items:
- "{{ forum_app_dir }}/.gem"
- "{{ forum_app_dir }}/.rbenv"
...
---
LOGSTASH_DAYS_TO_KEEP: 30
LOGSTASH_ROTATE: true
logstash_app_dir: /edx/app/logstash
logstash_log_dir: /edx/var/log/logstash
logstash_data_dir: /edx/var/logstash/file_logs
logstash_syslog_port: 514
logstash_file: logstash-1.3.3-flatjar.jar
logstash_url: "https://download.elasticsearch.org/logstash/logstash/{{ logstash_file }}"
logstash_python_requirements:
- pyes==0.19.0
logstash_scripts_repo: https://github.com/crashdump/logstash-elasticsearch-scripts
logstash_rotate_cron:
hour: 5
minute: 42
logstash_optimize_cron:
hour: 6
minute: 15
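These schedule vars would drive cron entries built on the cleanup scripts from logstash_scripts_repo; a sketch of such a task (script name and flags are assumptions, verify against that repo before relying on them):

- name: schedule logstash index rotation
  cron: >
    name="logstash index rotation"
    hour={{ logstash_rotate_cron.hour }}
    minute={{ logstash_rotate_cron.minute }}
    job="{{ logstash_app_dir }}/share/logstash-elasticsearch-scripts/logstash_index_cleaner.py -d {{ LOGSTASH_DAYS_TO_KEEP }}"
  when: LOGSTASH_ROTATE|bool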
{
"template": "logstash-*",
"settings" : {
"number_of_shards" : 1,
"number_of_replicas" : 0,
"index" : {
"query" : { "default_field" : "message" },
"store" : { "compress" : { "stored" : true, "tv": true } }
}
},
"mappings": {
"_default_": {
"_all": { "enabled": false },
"_source": { "compress": true },
"dynamic_templates": [
{
"string_template" : {
"match" : "*",
"mapping": { "type": "string", "index": "not_analyzed" },
"match_mapping_type" : "string"
}
}
],
"properties" : {
"@fields": { "type": "object", "dynamic": true, "path": "full" },
"@message" : { "type" : "string", "index" : "analyzed" },
"@source" : { "type" : "string", "index" : "not_analyzed" },
"@source_host" : { "type" : "string", "index" : "not_analyzed" },
"@source_path" : { "type" : "string", "index" : "not_analyzed" },
"@tags": { "type": "string", "index" : "not_analyzed" },
"@timestamp" : { "type" : "date", "index" : "not_analyzed" },
"@type" : { "type" : "string", "index" : "not_analyzed" }
}
}
}
}
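Elasticsearch applies an index template only to indices created after it is registered, so it needs to be pushed before logstash writes its first event. A minimal sketch, assuming the JSON above is saved under the app dir and ES listens locally (file name invented):

curl -XPUT 'http://localhost:9200/_template/logstash' -d @/edx/app/logstash/logstash-template.json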
---
- name: restart logstash
service: name=logstash state=restarted
---
dependencies:
- common
- elasticsearch