Commit e7fae47e by Han Su Kim

Merge pull request #1239 from edx/rc/Johnnycake

Release ... rc/Johnnycake
parents 6298c7b2 b0447bcb
@@ -9,3 +9,4 @@
 vagrant/*/devstack/edx-platform
 vagrant/*/devstack/cs_comments_service
 vagrant/*/devstack/ora
+vagrant_ansible_inventory_default
@@ -29,3 +29,6 @@ Ker Ruben Ramos <xdiscent@gmail.com>
 Fred Smith <derf@edx.org>
 Wang Peifeng <pku9104038@hotmail.com>
 Ray Hooker <ray.hooker@gmail.com>
+David Pollack <david@sologourmand.com>
+Rodolphe Quiedeville <rodolphe@quiedeville.org>
+Matjaz Gregoric <mtyaka@gmail.com>
- Role: xqwatcher, xqueue, nginx, edxapp, common
- Moving nginx basic authorization flag and credentials to the common role
- Role: Edxapp
  - Turn on code sandboxing by default and allow the jailed code to write
    files to the tmp directory created for it by codejail.
- Role: Edxapp
  - The repo.txt requirements file is no longer processed in any way. This file was removed from edx-platform
    via pull #3487 (https://github.com/edx/edx-platform/pull/3487)
- Update CMS_HOSTNAME default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations.
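
  As an illustration of the CMS_HOSTNAME change above (the real value is an nginx `server_name` regex in the edxapp role; the exact pattern here is an assumption), a Python sketch of the intended matching:

      import re
      # accept studio.*, prod-studio.*, and stage-studio.* hostnames
      cms_hostname = re.compile(r'^((stage|prod)-)?studio')
      assert cms_hostname.match('studio.example.org')
      assert cms_hostname.match('prod-studio.example.org')
      assert not cms_hostname.match('lms.example.org')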
@@ -26,9 +26,9 @@ Private Cloud with hosts for the core edX services. This template
 will build quite a number of AWS resources that cost money, so please
 consider this before you start.
-The configuration phase is manged by [Ansible](http://ansible.cc/).
+The configuration phase is managed by [Ansible](http://ansible.com/).
 We have provided a number of playbooks that will configure each of
-the edX service.
+the edX services.
 This project is a re-write of the current edX provisioning and
 configuration tools, we will be migrating features to this project
@@ -36,3 +36,5 @@ over time, so expect frequent changes.
 For more information including installation instruction please see the [Configuration Wiki](https://github.com/edx/configuration/wiki).
+For info on any large recent changes please see the [change log](https://github.com/edx/configuration/blob/master/CHANGELOG.md).
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-analytics_api-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible, e0d/add-instance-profile, from
# https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
# with ASGs. This can be worked around by deleting the ASG and re-running
# the play
# - The instance_profile_name will need to be created in advance as there
# isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
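#
# As a sketch, that role and instance profile can be pre-created with boto 2.x
# (the names below are hypothetical, not taken from this repo):
#
#   import boto
#   iam = boto.connect_iam()
#   iam.create_role("analytics-api-role")
#   iam.create_instance_profile("analytics-api-profile")
#   iam.add_role_to_instance_profile("analytics-api-profile", "analytics-api-role")
#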
- name: Add resources for the Analytics API
hosts: localhost
connection: local
gather_facts: False
tasks:
# Fails intermittently with the following error:
# The specified rule does not exist in this security group
- name: Create instance security group
ec2_group:
profile: "{{ profile }}"
description: "Open up SSH access"
name: "{{ security_group }}"
vpc_id: "{{ vpc_id }}"
region: "{{ ec2_region }}"
rules:
- proto: tcp
from_port: "{{ sec_group_ingress_from_port }}"
to_port: "{{ sec_group_ingress_to_port }}"
cidr_ip: "{{ item }}"
with_items: sec_group_ingress_cidrs
register: created_sec_group
ignore_errors: True
- name: debug
debug:
msg: "Registered created_sec_group: {{ created_sec_group }}"
# Needs ansible 1.7 for vpc support of elbs
# - name: Create elb security group
# ec2_group:
# profile: "{{ profile }}"
# description: "ELB security group"
# name: "ELB-{{ security_group }}"
# vpc_id: "{{ vpc_id }}"
# region: "{{ ec2_region }}"
# rules:
# - proto: tcp
# from_port: "443"
# to_port: "443"
# cidr_ip: "0.0.0.0/0"
# register: created_elb_sec_group
# ignore_errors: True
# Needs 1.7 for VPC support
# - name: "Create ELB"
# ec2_elb_lb:
# profile: "{{ profile }}"
# region: "{{ ec2_region }}"
# zones:
# - us-east-1b
# - us-east-1c
# name: "{{ edp }}"
# state: present
# security_group_ids: "{{ created_elb_sec_group.group_id }}"
# listeners:
# - protocol: https
# load_balancer_port: 443
# instance_protocol: http # optional, defaults to value of protocol setting
# instance_port: 80
# # ssl certificate required for https or ssl
# ssl_certificate_id: "{{ ssl_cert }}"
# instance_profile_name was added by me in my fork
- name: Create the launch configuration
ec2_lc:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ lc_name }}"
image_id: "{{ lc_ami }}"
key_name: "{{ key_name }}"
security_groups: "{{ created_sec_group.results[0].group_id }}"
instance_type: "{{ instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes:
- device_name: "/dev/sda1"
volume_size: "{{ instance_volume_size }}"
- name: Create ASG
ec2_asg:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ asg_name }}"
launch_config_name: "{{ lc_name }}"
load_balancers: "{{ elb_name }}"
availability_zones:
- us-east-1b
- us-east-1c
min_size: 0
max_size: 2
desired_capacity: 1
vpc_zone_identifier: "{{ subnets|join(',') }}"
instance_tags:
Name: "{{ env }}-{{ deployment }}-{{ play }}"
autostack: "true"
environment: "{{ env }}"
deployment: "{{ deployment }}"
play: "{{ play }}"
services: "{{ play }}"
register: asg
- name: debug
debug:
msg: "DEBUG: {{ asg }}"
- name: Create scale up policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleUpPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: 1
min_adjustment_step: 1
cooldown: 60
register: scale_up_policy
- name: debug
debug:
msg: "Registered scale_up_policy: {{ scale_up_policy }}"
- name: Create scale down policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleDownPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 60
register: scale_down_policy
- name: debug
debug:
msg: "Registered scale_down_policy: {{ scale_down_policy }}"
#
# Sometimes the scaling policy reports itself changed, but
# does not return data about the policy. It's bad enough
# that consistent data isn't returned when things
# have and have not changed; this makes writing idempotent
# tasks difficult.
- name: create high-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-high"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: ">="
threshold: 90.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-up if CPU > 90% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_up_policy.arn }}"]
when: scale_up_policy.arn is defined
- name: create low-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 50.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-down if CPU < 50% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_down_policy.arn }}"]
when: scale_down_policy.arn is defined
\ No newline at end of file
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-xqwatcher-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible, e0d/add-instance-profile, from
# https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
# with ASGs. This can be worked around by deleting the ASG and re-running
# the play
# - The instance_profile_name will need to be created in advance as there
# isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
- name: Add resources for the XQWatcher
hosts: localhost
connection: local
gather_facts: False
tasks:
# ignore_errors is used here because this module is not idempotent
# If tags already exist, the task will fail with the following message
# Tags already exists in subnet
- name: Update subnet tags
ec2_tag:
resource: "{{ item }}"
region: "{{ ec2_region }}"
state: present
tags:
Name: "{{ edp }}-subnet"
play: xqwatcher
immutable_metadata: "{'purpose':'{{ environment }}-{{ deployment }}-internal-{{ play }}','target':'ec2'}"
with_items: subnets
ignore_errors: True
# Fails intermittently with the following error:
# The specified rule does not exist in this security group
- name: Create security group
ec2_group:
profile: "{{ profile }}"
description: "Open up SSH access"
name: "{{ security_group }}"
vpc_id: "{{ vpc_id }}"
region: "{{ ec2_region }}"
rules:
- proto: tcp
from_port: "{{ sec_group_ingress_from_port }}"
to_port: "{{ sec_group_ingress_to_port }}"
cidr_ip: "{{ item }}"
with_items: sec_group_ingress_cidrs
register: created_sec_group
ignore_errors: True
- name: debug
debug:
msg: "Registered created_sec_group: {{ created_sec_group }}"
# instance_profile_name was added by me in my fork
- name: Create the launch configuration
ec2_lc:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ lc_name }}"
image_id: "{{ lc_ami }}"
key_name: "{{ key_name }}"
security_groups: "{{ created_sec_group.results[0].group_id }}"
instance_type: "{{ instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes:
- device_name: "/dev/sda1"
volume_size: "{{ instance_volume_size }}"
- name: Create ASG
ec2_asg:
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ asg_name }}"
launch_config_name: "{{ lc_name }}"
min_size: 0
max_size: 0
desired_capacity: 0
vpc_zone_identifier: "{{ subnets|join(',') }}"
instance_tags:
Name: "{{ env }}-{{ deployment }}-{{ play }}"
autostack: "true"
environment: "{{ env }}"
deployment: "{{ deployment }}"
play: "{{ play }}"
services: "{{ play }}"
register: asg
- name: debug
debug:
msg: "DEBUG: {{ asg }}"
- name: Create scale up policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleUpPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: 1
min_adjustment_step: 1
cooldown: 60
register: scale_up_policy
tags:
- foo
- name: debug
debug:
msg: "Registered scale_up_policy: {{ scale_up_policy }}"
- name: Create scale down policy
ec2_scaling_policy:
state: present
profile: "{{ profile }}"
region: "{{ ec2_region }}"
name: "{{ edp }}-ScaleDownPolicy"
adjustment_type: "ChangeInCapacity"
asg_name: "{{ asg_name }}"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 60
register: scale_down_policy
- name: debug
debug:
msg: "Registered scale_down_policy: {{ scale_down_policy }}"
#
# Sometimes the scaling policy reports itself changed, but
# does not return data about the policy. It's bad enough
# that consistent data isn't returned when things
# have and have not changed; this makes writing idempotent
# tasks difficult.
- name: create high-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-high"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: ">="
threshold: 90.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-up if CPU > 90% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_up_policy.arn }}"]
when: scale_up_policy.arn is defined
- name: create low-cpu alarm
ec2_metric_alarm:
state: present
region: "{{ ec2_region }}"
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 50.0
period: 300
evaluation_periods: 2
unit: "Percent"
description: "Scale-down if CPU < 50% for 10 minutes"
dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
alarm_actions: ["{{ scale_down_policy.arn }}"]
when: scale_down_policy.arn is defined
\ No newline at end of file
@@ -24,13 +24,24 @@
          "m2.xlarge",
          "m2.2xlarge",
          "m2.4xlarge",
-         "m3.xlarge",
-         "m3.2xlarge",
+         "cr1.8xlarge",
+         "cc2.8xlarge",
          "c1.medium",
          "c1.xlarge",
-         "cc1.4xlarge",
-         "cc2.8xlarge",
-         "cg1.4xlarge"
+         "m3.medium",
+         "m3.large",
+         "m3.xlarge",
+         "m3.2xlarge",
+         "c3.large",
+         "c3.xlarge",
+         "c3.2xlarge",
+         "c3.4xlarge",
+         "c3.8xlarge",
+         "r3.large",
+         "r3.xlarge",
+         "r3.2xlarge",
+         "r3.4xlarge",
+         "r3.8xlarge"
       ],
       "ConstraintDescription":"must be a valid EC2 instance type."
    },
@@ -56,13 +67,24 @@
          "m2.xlarge",
          "m2.2xlarge",
          "m2.4xlarge",
-         "m3.xlarge",
-         "m3.2xlarge",
+         "cr1.8xlarge",
+         "cc2.8xlarge",
          "c1.medium",
          "c1.xlarge",
-         "cc1.4xlarge",
-         "cc2.8xlarge",
-         "cg1.4xlarge"
+         "m3.medium",
+         "m3.large",
+         "m3.xlarge",
+         "m3.2xlarge",
+         "c3.large",
+         "c3.xlarge",
+         "c3.2xlarge",
+         "c3.4xlarge",
+         "c3.8xlarge",
+         "r3.large",
+         "r3.xlarge",
+         "r3.2xlarge",
+         "r3.4xlarge",
+         "r3.8xlarge"
       ],
       "ConstraintDescription":"must be a valid EC2 instance type."
    },
@@ -79,13 +101,24 @@
          "m2.xlarge",
          "m2.2xlarge",
          "m2.4xlarge",
-         "m3.xlarge",
-         "m3.2xlarge",
+         "cr1.8xlarge",
+         "cc2.8xlarge",
          "c1.medium",
          "c1.xlarge",
-         "cc1.4xlarge",
-         "cc2.8xlarge",
-         "cg1.4xlarge"
+         "m3.medium",
+         "m3.large",
+         "m3.xlarge",
+         "m3.2xlarge",
+         "c3.large",
+         "c3.xlarge",
+         "c3.2xlarge",
+         "c3.4xlarge",
+         "c3.8xlarge",
+         "r3.large",
+         "r3.xlarge",
+         "r3.2xlarge",
+         "r3.4xlarge",
+         "r3.8xlarge"
       ],
       "ConstraintDescription":"must be a valid EC2 instance type."
    },
@@ -117,25 +150,39 @@
    },
    "Mappings":{
       "AWSInstanceType2Arch":{
-         "t1.micro": { "Arch":"64" },
-         "m1.small": { "Arch":"64" },
-         "m1.medium": { "Arch":"64" },
-         "m1.large": { "Arch":"64" },
-         "m1.xlarge": { "Arch":"64" },
-         "m2.xlarge": { "Arch":"64" },
-         "m2.2xlarge": { "Arch":"64" },
-         "m2.4xlarge": { "Arch":"64" },
-         "m3.xlarge": { "Arch":"64" },
-         "m3.2xlarge": { "Arch":"64" },
-         "c1.medium": { "Arch":"64" },
-         "c1.xlarge": { "Arch":"64" },
-         "cg1.4xlarge": { "Arch":"64HVM" }
+         "t1.micro"    : { "Arch" : "64" },
+         "m1.small"    : { "Arch" : "64" },
+         "m1.medium"   : { "Arch" : "64" },
+         "m1.large"    : { "Arch" : "64" },
+         "m1.xlarge"   : { "Arch" : "64" },
+         "m2.xlarge"   : { "Arch" : "64" },
+         "m2.2xlarge"  : { "Arch" : "64" },
+         "m2.4xlarge"  : { "Arch" : "64" },
+         "cr1.8xlarge" : { "Arch" : "64" },
+         "cc2.8xlarge" : { "Arch" : "64" },
+         "c1.medium"   : { "Arch" : "64" },
+         "c1.xlarge"   : { "Arch" : "64" },
+         "m3.medium"   : { "Arch" : "64" },
+         "m3.large"    : { "Arch" : "64" },
+         "m3.xlarge"   : { "Arch" : "64" },
+         "m3.2xlarge"  : { "Arch" : "64" },
+         "m3.4xlarge"  : { "Arch" : "64" },
+         "c3.large"    : { "Arch" : "64" },
+         "c3.xlarge"   : { "Arch" : "64" },
+         "c3.2xlarge"  : { "Arch" : "64" },
+         "c3.4xlarge"  : { "Arch" : "64" },
+         "c3.8xlarge"  : { "Arch" : "64" },
+         "r3.large"    : { "Arch" : "64" },
+         "r3.xlarge"   : { "Arch" : "64" },
+         "r3.2xlarge"  : { "Arch" : "64" },
+         "r3.4xlarge"  : { "Arch" : "64" },
+         "r3.8xlarge"  : { "Arch" : "64" }
       },
       "AWSRegionArch2AMI":{
-         "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
+         "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
          "us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
-         "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
+         "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
-         "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
+         "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
          "sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
          "ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
          "ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
@@ -183,7 +230,7 @@
      "Fn::FindInMap":[
         "MapRegionsToAvailZones",
         { "Ref":"AWS::Region" },
-        "AZone0"
+        "AZone1"
      ]
   },
   "Tags":[
@@ -205,7 +252,7 @@
      "Fn::FindInMap":[
         "MapRegionsToAvailZones",
         { "Ref":"AWS::Region" },
-        "AZone0"
+        "AZone1"
      ]
   },
   "Tags":[
@@ -364,7 +411,7 @@
            }
         }
      },
-     "InboundEmphemeralPublicNetworkAclEntry":{
+     "InboundSMTPPublicNetworkAclEntry":{
         "Type":"AWS::EC2::NetworkAclEntry",
         "Properties":{
            "NetworkAclId":{
@@ -376,6 +423,23 @@
            "Egress":"false",
            "CidrBlock":"0.0.0.0/0",
            "PortRange":{
+              "From":"587",
+              "To":"587"
+           }
+        }
+     },
+     "InboundEmphemeralPublicNetworkAclEntry":{
+        "Type":"AWS::EC2::NetworkAclEntry",
+        "Properties":{
+           "NetworkAclId":{
+              "Ref":"PublicNetworkAcl"
+           },
+           "RuleNumber":"104",
+           "Protocol":"6",
+           "RuleAction":"allow",
+           "Egress":"false",
+           "CidrBlock":"0.0.0.0/0",
+           "PortRange":{
               "From":"1024",
               "To":"65535"
            }
@@ -582,6 +646,18 @@
            "FromPort":"443",
            "ToPort":"443",
            "CidrIp":"0.0.0.0/0"
+        },
+        {
+           "IpProtocol":"tcp",
+           "FromPort":"587",
+           "ToPort":"587",
+           "CidrIp":"0.0.0.0/0"
+        },
+        {
+           "IpProtocol":"tcp",
+           "FromPort":"5222",
+           "ToPort":"5222",
+           "CidrIp":"0.0.0.0/0"
         }
      ],
      "SecurityGroupEgress":[
@@ -604,6 +680,18 @@
            "FromPort":"443",
            "ToPort":"443",
            "CidrIp":"0.0.0.0/0"
+        },
+        {
+           "IpProtocol":"tcp",
+           "FromPort":"587",
+           "ToPort":"587",
+           "CidrIp":"0.0.0.0/0"
+        },
+        {
+           "IpProtocol":"tcp",
+           "FromPort":"5222",
+           "ToPort":"5222",
+           "CidrIp":"0.0.0.0/0"
         }
      ]
   }
@@ -688,10 +776,11 @@
         "IpProtocol":"tcp",
         "FromPort":"22",
         "ToPort":"22",
-        "CidrIp":"10.0.0.0/16"
+        "CidrIp":"10.254.0.0/16"
      },
      {
         "IpProtocol":"tcp",
+        "FromPort":"80",
         "ToPort":"80",
         "CidrIp":"0.0.0.0/0"
      },
@@ -827,7 +916,23 @@
                  ]
               ]
            }
+        },
+        "BlockDeviceMappings": [
+           {
+              "DeviceName": "/dev/sda1",
+              "Ebs":{
+                 "VolumeSize": 100
+              }
+           },
+           {
+              "DeviceName": "/dev/sdb",
+              "VirtualName": "ephemeral0"
+           },
+           {
+              "DeviceName": "/dev/sdc",
+              "VirtualName": "ephemeral1"
+           }
         }
+     ]
      }
   },
   "AdminSecurityGroup":{
......
@@ -47,10 +47,25 @@
    "m2.xlarge"   : { "Arch" : "64" },
    "m2.2xlarge"  : { "Arch" : "64" },
    "m2.4xlarge"  : { "Arch" : "64" },
+   "cr1.8xlarge" : { "Arch" : "64" },
+   "cc2.8xlarge" : { "Arch" : "64" },
+   "c1.medium"   : { "Arch" : "64" },
+   "c1.xlarge"   : { "Arch" : "64" },
+   "m3.medium"   : { "Arch" : "64" },
+   "m3.large"    : { "Arch" : "64" },
    "m3.xlarge"   : { "Arch" : "64" },
    "m3.2xlarge"  : { "Arch" : "64" },
-   "c1.medium"   : { "Arch" : "64" },
-   "c1.xlarge"   : { "Arch" : "64" }
+   "m3.4xlarge"  : { "Arch" : "64" },
+   "c3.large"    : { "Arch" : "64" },
+   "c3.xlarge"   : { "Arch" : "64" },
+   "c3.2xlarge"  : { "Arch" : "64" },
+   "c3.4xlarge"  : { "Arch" : "64" },
+   "c3.8xlarge"  : { "Arch" : "64" },
+   "r3.large"    : { "Arch" : "64" },
+   "r3.xlarge"   : { "Arch" : "64" },
+   "r3.2xlarge"  : { "Arch" : "64" },
+   "r3.4xlarge"  : { "Arch" : "64" },
+   "r3.8xlarge"  : { "Arch" : "64" }
 },
 "AWSRegionArch2AMI" : {
......
@@ -46,10 +46,25 @@
    "m2.xlarge"   : { "Arch" : "64" },
    "m2.2xlarge"  : { "Arch" : "64" },
    "m2.4xlarge"  : { "Arch" : "64" },
+   "cr1.8xlarge" : { "Arch" : "64" },
+   "cc2.8xlarge" : { "Arch" : "64" },
+   "c1.medium"   : { "Arch" : "64" },
+   "c1.xlarge"   : { "Arch" : "64" },
+   "m3.medium"   : { "Arch" : "64" },
+   "m3.large"    : { "Arch" : "64" },
    "m3.xlarge"   : { "Arch" : "64" },
    "m3.2xlarge"  : { "Arch" : "64" },
-   "c1.medium"   : { "Arch" : "64" },
-   "c1.xlarge"   : { "Arch" : "64" }
+   "m3.4xlarge"  : { "Arch" : "64" },
+   "c3.large"    : { "Arch" : "64" },
+   "c3.xlarge"   : { "Arch" : "64" },
+   "c3.2xlarge"  : { "Arch" : "64" },
+   "c3.4xlarge"  : { "Arch" : "64" },
+   "c3.8xlarge"  : { "Arch" : "64" },
+   "r3.large"    : { "Arch" : "64" },
+   "r3.xlarge"   : { "Arch" : "64" },
+   "r3.2xlarge"  : { "Arch" : "64" },
+   "r3.4xlarge"  : { "Arch" : "64" },
+   "r3.8xlarge"  : { "Arch" : "64" }
 },
 "AWSRegionArch2AMI" : {
......
import os
import prettytable
import hipchat
import time
import random

from ansible import utils


class CallbackModule(object):
    """Send status updates to a HipChat channel during playbook execution.

    This plugin makes use of the following environment variables:
        HIPCHAT_TOKEN (required): HipChat API token
        HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible
        HIPCHAT_FROM (optional): Name to post as. Default: ansible
        HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true
        HIPCHAT_MSG_PREFIX (optional): Optional prefix to add to all hipchat messages
        HIPCHAT_MSG_COLOR (optional): Optional color for hipchat messages
        HIPCHAT_CONDENSED (optional): Condense the task summary output

    Requires:
        prettytable
    """

    def __init__(self):
        if 'HIPCHAT_TOKEN' in os.environ:
            self.start_time = time.time()
            self.task_report = []
            self.last_task = None
            self.last_task_changed = False
            self.last_task_count = 0
            self.last_task_delta = 0
            self.last_task_start = time.time()
            self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', True) == True)
            self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
            self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
            self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
            try:
                self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
            except Exception as e:
                utils.warning("Unable to connect to hipchat: {}".format(e))
            self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
            self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
            self.printed_playbook = False
            self.playbook_name = None
            self.enabled = True
        else:
            self.enabled = False

    def _send_hipchat(self, message, room=None, from_name=None, color=None, message_format='text'):
        if not room:
            room = self.room
        if not from_name:
            from_name = self.from_name
        if not color:
            color = self.hipchat_msg_color
        try:
            self.hipchat_conn.message_room(room, from_name, message, color=color, message_format=message_format)
        except Exception as e:
            utils.warning("Could not submit message to hipchat: {}".format(e))

    def _flush_last_task(self):
        if self.last_task:
            delta = time.time() - self.last_task_start
            self.task_report.append(dict(
                changed=self.last_task_changed,
                count=self.last_task_count,
                delta="{:0>.1f}".format(self.last_task_delta),
                task=self.last_task))
            self.last_task_count = 0
            self.last_task_changed = False
            self.last_task = None
            self.last_task_delta = 0

    def _process_message(self, msg, msg_type='STATUS'):
        if msg_type == 'OK' and self.last_task:
            if msg.get('changed', True):
                self.last_task_changed = True
            if msg.get('delta', False):
                # task deltas are reported as h:mm:ss.ssssss strings
                (hour, minute, sec) = msg['delta'].split(':')
                total = float(hour) * 3600 + float(minute) * 60 + float(sec)
                self.last_task_delta += total
            self.last_task_count += 1
        else:
            self._flush_last_task()
            if msg_type == 'TASK_START':
                self.last_task = msg
                self.last_task_start = time.time()
            elif msg_type == 'FAILED':
                self.last_task_start = time.time()
                if 'msg' in msg:
                    self._send_hipchat('/code {}: The ansible run returned the following error:\n\n {}'.format(
                        self.hipchat_msg_prefix, msg['msg']), color='red', message_format='text')
            else:
                # move forward the last task start time
                self.last_task_start = time.time()

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        if self.enabled:
            self._process_message(res, 'FAILED')

    def runner_on_ok(self, host, res):
        if self.enabled:
            # don't send the setup results
            if res['invocation']['module_name'] != "setup":
                self._process_message(res, 'OK')

    def runner_on_error(self, host, msg):
        if self.enabled:
            self._process_message(msg, 'ERROR')

    def runner_on_skipped(self, host, item=None):
        if self.enabled:
            self._process_message(item, 'SKIPPED')

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        if self.enabled:
            self._process_message(res, 'ASYNC_POLL')

    def runner_on_async_ok(self, host, res, jid):
        if self.enabled:
            self._process_message(res, 'ASYNC_OK')

    def runner_on_async_failed(self, host, res, jid):
        if self.enabled:
            self._process_message(res, 'ASYNC_FAILED')

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        if self.enabled:
            self._process_message(name, 'TASK_START')

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                encrypt=None, confirm=False, salt_size=None,
                                salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, pattern):
        """Display Playbook and play start messages"""
        if self.enabled:
            self.start_time = time.time()
            self.playbook_name, _ = os.path.splitext(os.path.basename(self.play.playbook.filename))
            host_list = self.play.playbook.inventory.host_list
            inventory = os.path.basename(os.path.realpath(host_list))
            subset = self.play.playbook.inventory._subset
            msg = "<b>{description}</b>: Starting ansible run for play <b><i>{play}</i></b>".format(description=self.hipchat_msg_prefix, play=self.playbook_name)
            if self.play.playbook.only_tags and 'all' not in self.play.playbook.only_tags:
                msg = msg + " with tags <b><i>{}</i></b>".format(','.join(self.play.playbook.only_tags))
            if subset:
                msg = msg + " on hosts <b><i>{}</i></b>".format(','.join(subset))
            self._send_hipchat(msg, message_format='html')

    def playbook_on_stats(self, stats):
        """Display info about playbook statistics"""
        if self.enabled:
            self._flush_last_task()
            delta = time.time() - self.start_time
            self.start_time = time.time()
            hosts = sorted(stats.processed.keys())
            task_column = '{} - Task'.format(self.hipchat_msg_prefix)
            task_summary = prettytable.PrettyTable([task_column, 'Time', 'Count', 'Changed'])
            task_summary.align[task_column] = "l"
            task_summary.align['Time'] = "r"
            task_summary.align['Count'] = "r"
            task_summary.align['Changed'] = "r"
            for task in self.task_report:
                if self.condensed_task_report:
                    # for the condensed task report skip all tasks
                    # that are not marked as changed and that have
                    # a time delta less than 1
                    if not task['changed'] and float(task['delta']) < 1:
                        continue
                task_summary.add_row([task['task'], task['delta'], str(task['count']), str(task['changed'])])
            summary_table = prettytable.PrettyTable(['Ok', 'Changed', 'Unreachable', 'Failures'])
            self._send_hipchat("/code " + str(task_summary))
            summary_all_host_output = []
            for host in hosts:
                # summarize() returns a per-host dict; don't clobber the stats object
                host_stats = stats.summarize(host)
                summary_output = "<b>{}</b>: <i>{}</i> - ".format(self.hipchat_msg_prefix, host)
                for summary_item in ['ok', 'changed', 'unreachable', 'failures']:
                    if host_stats[summary_item] != 0:
                        summary_output += "<b>{}</b> - {} ".format(summary_item, host_stats[summary_item])
                summary_all_host_output.append(summary_output)
            self._send_hipchat("<br />".join(summary_all_host_output), message_format='html')
            msg = "<b>{description}</b>: Finished Ansible run for <b><i>{play}</i> in {min:02} minutes, {sec:02} seconds</b><br /><br />".format(
                description=self.hipchat_msg_prefix,
                play=self.playbook_name,
                min=int(delta / 60),
                sec=int(delta % 60))
            self._send_hipchat(msg, message_format='html')
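
# Standalone sanity check for the hh:mm:ss parsing in _process_message above
# (a sketch, not part of the plugin): ansible reports task deltas like
# "0:01:30.5", which should come out to 90.5 seconds.
def _parse_delta(delta):
    (hour, minute, sec) = delta.split(':')
    return float(hour) * 3600 + float(minute) * 60 + float(sec)

assert _parse_delta("0:01:30.5") == 90.5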
@@ -128,5 +128,8 @@ class CallbackModule(object):
         if len(payload[msg_type][output]) > 1000:
             payload[msg_type][output] = "(clipping) ... " \
                 + payload[msg_type][output][-1000:]
+        if 'stdout_lines' in payload[msg_type]:
+            # only keep the last 20 or so lines to avoid payload size errors
+            if len(payload[msg_type]['stdout_lines']) > 20:
+                payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:]
         self.sqs.send_message(self.queue, json.dumps(payload))
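
A standalone sketch of the clipping rule added above (the payload contents are invented for the example):

    payload = {'OK': {'stdout_lines': ['line %d' % i for i in range(50)]}}
    for msg_type in payload:
        if 'stdout_lines' in payload[msg_type] and len(payload[msg_type]['stdout_lines']) > 20:
            payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:]
    # 20 retained lines plus the "(clipping) ... " marker
    assert len(payload['OK']['stdout_lines']) == 21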
@@ -11,7 +11,7 @@
 # AWS regions to make calls to. Set this to 'all' to make request to all regions
 # in AWS and merge the results together. Alternatively, set this to a comma
 # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
-regions = all
+regions = us-east-1
 regions_exclude = us-gov-west-1
 # When generating inventory, Ansible needs to know how to address a server.
......
@@ -217,7 +217,14 @@ class Ec2Inventory(object):
             config.get('ec2', 'route53_excluded_zones', '').split(','))

         # Cache related
+        if 'EC2_CACHE_PATH' in os.environ:
+            cache_path = os.environ['EC2_CACHE_PATH']
+        elif self.args.cache_path:
+            cache_path = self.args.cache_path
+        else:
-        cache_path = config.get('ec2', 'cache_path')
+            cache_path = config.get('ec2', 'cache_path')
+        if not os.path.exists(cache_path):
+            os.makedirs(cache_path)
         self.cache_path_cache = cache_path + "/ansible-ec2.cache"
         self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
         self.cache_path_index = cache_path + "/ansible-ec2.index"
@@ -241,6 +248,10 @@ class Ec2Inventory(object):
         default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
         parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
+        parser.add_argument(
+            '--cache-path',
+            help='Override the cache path set in ini file',
+            required=False)
         self.args = parser.parse_args()
......
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
roles:
- alton
- name: Deploy Analytics API
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- role: nginx
nginx_sites:
- analytics-api
- aws
- analytics-api
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
@@ -2,16 +2,12 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - aws
     - certs
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -2,15 +2,11 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - common
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -4,9 +4,26 @@
   sudo: True
   serial: 1
   vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
+    # By default take instances in and out of the elb(s) they
+    # are attached to
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
   roles:
     - aws
     - role: nginx
@@ -14,17 +31,29 @@
       - xqueue
     - role: xqueue
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
    - oraclejdk
    - elasticsearch
    - rabbitmq
    - datadog
    - splunkforwarder
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
 #
 # In order to reconfigure the host resolution we are issuing a
 # reboot.
......
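
The de-register/register pre/post tasks above rely on a forked ec2_elb module; under the hood the equivalent boto 2.x calls are roughly the following (the ELB name and instance id are placeholders):

    import boto.ec2.elb
    elb = boto.ec2.elb.connect_to_region('us-east-1')
    lb = elb.get_all_load_balancers(load_balancer_names=['my-xqueue-elb'])[0]
    lb.deregister_instances(['i-00000000'])   # take the instance out of rotation
    # ... apply the play's roles ...
    lb.register_instances(['i-00000000'])     # put it back in rotation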
- name: connect a sandbox to production data
hosts: all
gather_facts: False
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: download the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: download the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
#
# What we are doing here is updating the sandbox
# server-vars config file so that when update
# is called it will use the new MYSQL connection
# info.
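#
# A sketch of the failure mode (assuming lineinfile writes a temp file and
# renames it over the destination, as ansible's atomic update does):
#
#   import os
#   os.path.islink('/edx/app/edx_ansible/server-vars.yml')  # True before the edit
#   # the module writes a temp copy, then os.rename()s it into place,
#   # which replaces the symlink itself with a regular file:
#   os.path.islink('/edx/app/edx_ansible/server-vars.yml')  # False afterwards
#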
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
- "EDXAPP_MYSQL_USER: {{ EDXAPP_MYSQL_USER }}"
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
tags: update_edxapp_mysql_host
#
# Requires MySQL-python to be installed for system python
# This play will create databases and user for an application.
# It can be run like so:
#
# ansible-playbook -i 'localhost,' create_analytics_reports_dbs.yml -e@./db.yml
#
# where the content of db.yml contains the following dictionaries
#
# database_connection: &default_connection
# login_host: "mysql.example.org"
# login_user: "root"
# login_password: "super-secure-password"
# DEFAULT_ENCODING: "utf8"
# databases:
# reports:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# application:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# database_users:
# migrate:
# state: "present"
# password: "user-with-ddl-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# <<: *default_connection
# runtime:
# state: "present"
# password: "user-with-dml-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE"
# <<: *default_connection
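#
# The &default_connection anchor and <<: *default_connection merge keys above
# are plain YAML, resolved by the loader itself; a minimal PyYAML sketch
# (assuming the example above is saved as db.yml):
#
#   import yaml
#   cfg = yaml.safe_load(open('db.yml'))
#   # each database entry inherits the shared connection settings
#   assert cfg['databases']['reports']['login_host'] == 'mysql.example.org'
#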
- name: Create databases and users
hosts: all
connection: local
gather_facts: False
tasks:
# Install required library, currently this needs to be available
# to system python.
- name: install python mysqldb module
pip: name={{item}} state=present
sudo: yes
with_items:
- MySQL-python
- name: create mysql databases
mysql_db: >
db={{ item.key }}
state={{ item.value.state }}
encoding={{ item.value.encoding }}
login_host={{ item.value.login_host }}
login_user={{ item.value.login_user }}
login_password={{ item.value.login_password }}
with_dict: databases
- name: create mysql users and assign privileges
mysql_user: >
name="{{ item.key }}"
priv="{{ '/'.join(item.value.privileges) }}"
password="{{ item.value.password }}"
host={{ item.value.host }}
login_host={{ item.value.login_host }}
login_user={{ item.value.login_user }}
login_password={{ item.value.login_password }}
append_privs=yes
with_dict: database_users
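
The '/'.join above matters because the mysql_user module expects multiple grants as a single '/'-separated priv string; a small sketch with the privileges from the example header:

    privileges = [
        "reports.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX",
        "wwc.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX",
    ]
    priv = '/'.join(privileges)
    # priv == "reports.*:SELECT,...,INDEX/wwc.*:SELECT,...,INDEX", one grant per segment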
@@ -2,15 +2,11 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - demo
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -2,10 +2,6 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - aws
     - role: nginx
@@ -13,8 +9,8 @@
       - discern
     - discern
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
@@ -6,10 +6,6 @@
   vars:
     migrate_db: "yes"
     openid_workaround: True
-    ENABLE_DATADOG: True
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
-    edx_internal: False
   roles:
     - aws
     - role: nginx
@@ -20,27 +16,30 @@
       - xqueue
       - xserver
       - certs
+      - analytics-api
     nginx_default_sites:
       - lms
-    - edxlocal
+    - role: edxlocal
+      tags: edxlocal
     - mongo
     - { role: 'edxapp', celery_worker: True }
     - edxapp
     - role: demo
-      tags: ['demo']
+      tags: demo
     - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
     - oraclejdk
     - elasticsearch
     - forum
     - { role: "xqueue", update_users: True }
-    - { role: xserver, when: edx_internal }
+    - xserver
     - ora
-    - discern
     - certs
     - edx_ansible
+    - analytics-api
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
+    - flower
@@ -2,10 +2,6 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - aws
     - role: nginx
@@ -16,8 +12,8 @@
       - lms
     - edxapp
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
- name: Run edxapp migrations
hosts: all
sudo: False
gather_facts: False
vars:
db_dry_run: "--db-dry-run"
tasks:
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
with_items:
- lms
- cms
@@ -3,7 +3,41 @@
   vars_files:
     - "{{ secure_dir }}/vars/common/common.yml"
     - "{{ secure_dir }}/vars/stage/stage-edx.yml"
+  vars:
+    # By default take instances in and out of the elb(s) they
+    # are attached to
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
   roles:
     - common
     - oraclejdk
     - elasticsearch
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
- name: Deploy celery flower (monitoring tool)
hosts: all
sudo: True
gather_facts: True
roles:
- flower
@@ -2,10 +2,6 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - aws
     - role: nginx
@@ -13,10 +9,8 @@
       - forum
     - forum
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
-    - role: newrelic
-      when: ENABLE_NEWRELIC
-# Configure an admin instance with jenkins and asgard.
+# Configure an instance with the admin jenkins.
 - name: Configure instance(s)
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
-    - aws
-    - edx_ansible
-    - user
     - jenkins_admin
-    - hotg
-    - role: datadog
-      when: ENABLE_DATADOG
-    - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
-    - role: newrelic
-      when: ENABLE_NEWRELIC
-# Configure a Jenkins master instance
+# Configure a Jenkins master instance for testeng
 # This has the Jenkins Java app, but none of the requirements
 # to run the tests.
@@ -8,6 +8,9 @@
   gather_facts: True
   vars:
     COMMON_DATA_DIR: "/mnt"
+    COMMON_ENABLE_DATADOG: True
   roles:
     - common
+    - role: datadog
+      when: COMMON_ENABLE_DATADOG
     - jenkins_master
@@ -28,6 +28,7 @@
   sudo_user: "{{ edxapp_user }}"
   notify:
     - "restart edxapp"
+    - "restart workers"

 - name: syncdb and migrate
   shell: >
@@ -38,10 +39,11 @@
     DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}"
   notify:
     - "restart edxapp"
+    - "restart workers"

 handlers:
   - name: restart edxapp
-    shell: "{{ supervisorctl_path }} restart edxapp:{{ item }}"
-    with_items:
-      - lms
-      - cms
+    shell: "{{ supervisorctl_path }} restart edxapp:"
+
+  - name: restart workers
+    shell: "{{ supervisorctl_path }} restart edxapp_worker:"
@@ -5,6 +5,40 @@
   # ansible_default_ipv4 so
   # gather_facts must be set to True
   gather_facts: True
+  vars:
+    # By default take instances in and out of the elb(s) they
+    # are attached to
+    # To skip elb operations use "-e elb_pre_post=false"
+    elb_pre_post: true
+    # Number of instances to operate on at a time
+    serial_count: 1
+  serial: "{{ serial_count }}"
+  pre_tasks:
+    - action: ec2_facts
+      when: elb_pre_post
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Instance De-register
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        region: us-east-1
+        state: absent
+      sudo: False
+      when: elb_pre_post
   roles:
     - aws
     - rabbitmq
+  post_tasks:
+    - debug: var="{{ ansible_ec2_instance_id }}"
+      when: elb_pre_post
+    - name: Register instance in the elb
+      local_action: ec2_elb_local_1.6.2
+      args:
+        instance_id: "{{ ansible_ec2_instance_id }}"
+        ec2_elbs: "{{ item }}"
+        region: us-east-1
+        state: present
+      with_items: ec2_elbs
+      sudo: False
+      when: elb_pre_post
- hosts: all
sudo: true
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=fase"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
tasks:
- shell: echo "test"
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to setup the db users on the edxapp db
#
# The mysql root user MUST be passed in as an extra var
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
hosts: all
gather_facts: False
vars:
edxapp_db_root_user: 'None'
edxapp_db_root_pass: 'None'
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes otherwise it will overwrite the previous run.
# This means that both tasks will report changed on every ansible
# run
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
-# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@/path/to/secure/ansible/vars/edx_admin.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
+# ansible-playbook -vvv -c ssh -i admin_url, vpc_admin.yml -e "@path_to_common_overrides" -e "@path_to_deployment_specific_overrides"
+# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
+# Setup user login on the bastion
+- name: Configure Bastion
+  hosts: tag_play_bastion
+  sudo: True
+  gather_facts: False
+  roles:
+    - aws
+
 # Configure an admin instance with jenkins and asgard.
 - name: Configure instance(s)
-  hosts: tag_play_admin
+  hosts: all
   sudo: True
   gather_facts: True
   roles:
     - aws
     - edx_ansible
+    - user
     - jenkins_admin
     - hotg
-    - newrelic
+    - alton
+    - role: datadog
+      when: COMMON_ENABLE_DATADOG
+    - role: splunkforwarder
+      when: COMMON_ENABLE_SPLUNKFORWARDER
+    - role: newrelic
+      when: COMMON_ENABLE_NEWRELIC
@@ -2,17 +2,13 @@
   hosts: all
   sudo: True
   gather_facts: True
-  vars:
-    ENABLE_DATADOG: False
-    ENABLE_SPLUNKFORWARDER: False
-    ENABLE_NEWRELIC: False
   roles:
     - aws
     - role: edxapp
       celery_worker: True
     - role: datadog
-      when: ENABLE_DATADOG
+      when: COMMON_ENABLE_DATADOG
     - role: splunkforwarder
-      when: ENABLE_SPLUNKFORWARDER
+      when: COMMON_ENABLE_SPLUNKFORWARDER
     - role: newrelic
-      when: ENABLE_NEWRELIC
+      when: COMMON_ENABLE_NEWRELIC
...@@ -3,9 +3,26 @@ ...@@ -3,9 +3,26 @@
sudo: True sudo: True
gather_facts: True gather_facts: True
vars: vars:
ENABLE_DATADOG: False # By default take instances in and out of the elb(s) they
ENABLE_SPLUNKFORWARDER: False # are attached to
ENABLE_NEWRELIC: False # To skip elb operations use "-e elb_pre_post=fase"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -13,8 +30,21 @@ ...@@ -13,8 +30,21 @@
- xqueue - xqueue
- role: xqueue - role: xqueue
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
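# Example invocations for the vars defined above (the playbook filename is
# illustrative; pass the file that contains this play):
# ansible-playbook -i ec2.py xqueue.yml -e "elb_pre_post=false"
# ansible-playbook -i ec2.py xqueue.yml -e "serial_count=2"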
- name: Deploy xqueue-watcher
hosts: all
sudo: True
gather_facts: True
vars:
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- xqwatcher
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
...@@ -2,10 +2,6 @@
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- role: nginx
...@@ -13,8 +9,8 @@
- xserver
- role: xserver
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
../ansible.cfg
*
!prod
!stage
!data
!.gitignore
This temp directory is created here so that we can make sure it doesn't
collide with other users doing ansible operations on the same machine,
or with concurrent installs to different environments, say to prod and stage.
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# These are environment-specific defaults
forks=10
transport=ssh
hostfile=./ec2.py
extra_vars='key=deployment region=us-west-1'
user=ubuntu
[ssh_connection]
# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
ssh_args= -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
scp_if_ssh=True
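# With hash_behaviour=merge above, dict vars defined in several places are
# deep-merged instead of replaced. A sketch with an illustrative var name:
#   file A: MY_CONFIG: {host: 'localhost', port: 80}
#   file B: MY_CONFIG: {port: 8080}
# yields MY_CONFIG: {host: 'localhost', port: 8080} rather than {port: 8080}.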
...@@ -3,6 +3,6 @@ regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=ec2_cache/prod
cache_max_age=300
route53=False
[ec2]
regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=ec2_cache/stage
cache_max_age=300
route53=False
...@@ -13,10 +13,11 @@
openid_workaround: True
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master'
# Set to false if deployed behind another proxy/load balancer.
NGINX_SET_X_FORWARDED_HEADERS: True
# These should stay false for the public AMI
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
roles:
- role: nginx
nginx_sites:
...@@ -41,8 +42,8 @@
- certs
- edx_ansible
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
---
# Build a kibana/logstash/elasticsearch server for capturing and
# analyzing logs.
- name: Configure syslog server
hosts: all
sudo: yes
roles:
- common
- oraclejdk
- elasticsearch
- logstash
- kibana
- role: nginx
nginx_sites:
- kibana
---
AIDE_REPORT_EMAIL: 'root'
---
# install and configure aide IDS
#
- name: install aide
apt: pkg="aide" state="present"
- name: configure aide defaults
template: >
src=etc/default/aide.j2 dest=/etc/default/aide
owner=root group=root mode=0644
- name: open read permissions on aide logs
file: >
name="/var/log/aide"
recurse="yes"
state="directory"
mode="755"
- name: aide initial scan (this can take a long time)
command: >
aideinit -y -f
creates=/var/lib/aide/aide.db
sudo: yes
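# Once the initial scan has written /var/lib/aide/aide.db, the tree can be
# checked against it by hand; a sketch (exact flags depend on how the aide
# package wraps the binary):
# sudo aide --check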
# These settings are mainly for the wrapper scripts around aide,
# such as aideinit and /etc/cron.daily/aide
# send reports to syslog
REPORT_URL=syslog:LOG_LOCAL1
# This is used as the host name in the AIDE reports that are sent out
# via e-mail. It defaults to the output of $(hostname --fqdn), but can
# be set to arbitrary values.
# FQDN=
# This is used as the subject for the e-mail reports.
# If your mail system only threads by subject, you might want to add
# some variable content here (for example $(date +%Y-%m-%d)).
MAILSUBJ="Daily AIDE report for $FQDN"
# This is the email address reports get mailed to
# default is root
# This variable is expanded before it is used, so you can use variables
# here. For example, MAILTO=$FQDN-aide@domain.example will send the
# report to host.name.example-aide@domain.example if the local FQDN is
# host.name.example.
MAILTO={{ AIDE_REPORT_EMAIL }}
# Set this to yes to suppress mailings when no changes have been
# detected during the AIDE run and no error output was given.
#QUIETREPORTS=no
# This parameter defines which AIDE command to run from the cron script.
# Sensible values are "update" and "check".
# Default is "check", ensuring backwards compatibility.
# Since "update" does not take any longer, it is recommended to use "update",
# so that a new database is created every day. The new database needs to be
# manually copied over the current one, though.
COMMAND=update
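# A sketch of that manual copy (aide.db.new is the database COMMAND=update
# writes out; the paths are the Debian defaults and may differ):
# cp /var/lib/aide/aide.db.new /var/lib/aide/aide.db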
# This parameter defines what to do with a new database created by
# COMMAND=update. It is ignored if COMMAND!=update.
# no: Do not copy new database to old database. This is the default.
# yes: Copy new database to old database. This means that changes to the
# file system are only reported once. Possibly dangerous.
# ifnochange: Copy new database to old database if no changes have
# been reported. This is needed for ANF/ARF to work reliably.
COPYNEWDB=no
# Set this to yes to truncate the detailed changes part in the mail. The full
# output will still be listed in the log file.
TRUNCATEDETAILS=yes
# Set this to yes to suppress file changes by package and security
# updates from appearing in the e-mail report. Filtered file changes will
# still be listed in the log file. This option parses the /var/log/dpkg.log
# file and implies TRUNCATEDETAILS=yes
FILTERUPDATES=yes
# Set this to yes to suppress file changes by package installations
# from appearing in the e-mail report. Filtered file changes will still
# be listed in the log file. This option parses the /var/log/dpkg.log file and
# implies TRUNCATEDETAILS=yes.
FILTERINSTALLATIONS=yes
# This parameter defines how many lines to return per e-mail. Output longer
# than this value will be truncated in the e-mail sent out.
# Set value to "0" to disable this option.
LINES=1000
# This parameter gives a grep regular expression. If given, all output lines
# that _don't_ match the regexp are listed first in the script's output. This
# allows you to easily remove noise from the AIDE report.
NOISE=""
# This parameter defines which options are given to aide in the daily
# cron job. The default is "-V4".
AIDEARGS=""
# These parameters control update-aide.conf and give the defaults for
# the --confdir, --confd and --settingsd options
# UPAC_CONFDIR="/etc/aide"
# UPAC_CONFD="$UPAC_CONFDIR/aide.conf.d"
# UPAC_SETTINGSD="$UPAC_CONFDIR/aide.settings.d"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role alton
#
#
# vars are namespaced with the module name.
#
ALTON_USERNAME: '1234_1234@chat.hipchat.com'
ALTON_PASSWORD: 'password'
ALTON_V2_TOKEN: 'HIPCHAT_V2_TOKEN'
ALTON_ROOMS: 'Hammer'
ALTON_NAME: 'Alton W. Daemon'
ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null
# Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null
alton_role_name: alton
alton_user: alton
alton_app_dir: "{{ COMMON_APP_DIR }}/alton"
alton_code_dir: "{{ alton_app_dir }}/alton"
alton_venvs_dir: "{{ alton_app_dir }}/venvs"
alton_venv_dir: "{{ alton_venvs_dir }}/alton"
alton_venv_bin: "{{ alton_venv_dir }}/bin"
alton_source_repo: "https://github.com/edx/alton.git"
alton_version: "HEAD"
alton_requirements_file: "{{ alton_code_dir }}/requirements.txt"
alton_supervisor_wrapper: "{{ alton_app_dir }}/alton-supervisor.sh"
alton_environment:
WILL_USERNAME: "{{ ALTON_USERNAME }}"
WILL_PASSWORD: "{{ ALTON_PASSWORD }}"
WILL_V2_TOKEN: "{{ ALTON_V2_TOKEN }}"
WILL_ROOMS: "{{ ALTON_ROOMS }}"
WILL_NAME: "{{ ALTON_NAME }}"
WILL_HANDLE: "{{ ALTON_HANDLE }}"
WILL_REDIS_URL: "{{ ALTON_REDIS_URL }}"
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
#
# OS packages
#
alton_debian_pkgs: []
alton_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role alton
#
# Overview:
#
#
- name: restart alton
supervisorctl_local: >
name=alton
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role alton
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- supervisor
- redis
- name: checkout the code
git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: install the requirements
pip: >
requirements="{{ alton_requirements_file }}"
virtualenv="{{ alton_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: create the supervisor wrapper
template: >
src="{{ alton_supervisor_wrapper|basename }}.j2"
dest="{{ alton_supervisor_wrapper }}"
mode=0755
sudo_user: "{{ alton_user }}"
notify: restart alton
- name: create a supervisor config
template: >
src=alton.conf.j2 dest="{{ supervisor_available_dir }}/alton.conf"
owner="{{ supervisor_user }}"
group="{{ supervisor_user }}"
sudo_user: "{{ supervisor_user }}"
notify: restart alton
- name: enable the supervisor config
file: >
src="{{ supervisor_available_dir }}/alton.conf"
dest="{{ supervisor_cfg_dir }}/alton.conf"
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not disable_edx_services
notify: restart alton
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure alton is started
supervisorctl_local: >
name=alton
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role alton
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: create application user
user: >
name="{{ alton_user }}" home="{{ alton_app_dir }}"
createhome=no shell=/bin/false
- name: create alton user dirs
file: >
path="{{ item }}" state=directory
owner="{{ alton_user }}" group="{{ common_web_group }}"
with_items:
- "{{ alton_app_dir }}"
- "{{ alton_venvs_dir }}"
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- include: deploy.yml tags=deploy
#!/bin/bash
source {{ alton_app_dir }}/alton_env
cd {{ alton_code_dir }}
{{ alton_venv_bin }}/python run_alton.py
[program:alton]
command={{ alton_supervisor_wrapper }}
priority=999
user={{ common_web_user }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
stopsignal=QUIT
# {{ ansible_managed }}
{% for name,value in alton_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
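# A sketch of what this template renders with the defaults above (vars with a
# false-y value, e.g. ALTON_WORLD_WEATHER_KEY, are skipped by the if):
# export WILL_USERNAME="1234_1234@chat.hipchat.com"
# export WILL_ROOMS="Hammer"
# export WILL_HTTPSERVER_PORT="8081"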
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role analytics-api
#
ANALYTICS_API_GIT_IDENTITY: !!null
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
ANALYTICS_API_NEWRELIC_APPNAME: "your Newrelic appname"
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
ANALYTICS_API_VERSION: "master"
# Default dummy user, override this!!
ANALYTICS_API_USERS:
"dummy-api-user": "changeme"
ANALYTICS_API_CONFIG:
ANALYTICS_DATABASE: 'reports'
SECRET_KEY: 'Your secret key here'
TIME_ZONE: 'America/New_York'
LANGUAGE_CODE: 'en-us'
# email config
EMAIL_HOST: 'smtp.example.com'
EMAIL_HOST_PASSWORD: ""
EMAIL_HOST_USER: ""
EMAIL_PORT: 587
API_AUTH_TOKEN: 'put-your-api-token-here'
STATICFILES_DIRS: []
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
# db config
DATABASE_OPTIONS:
connect_timeout: 10
DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
#
# vars are namespaced with the module name.
#
analytics_api_environment:
DJANGO_SETTINGS_MODULE: "analyticsdataserver.settings.production"
ANALYTICS_API_CFG: "{{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml"
analytics_api_role_name: "analytics-api"
analytics_api_service_name: "analytics-api"
analytics_api_user: "analytics-api"
analytics_api_app_dir: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_home: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_venv_base: "{{ analytics_api_home }}/venvs"
analytics_api_venv_dir: "{{ analytics_api_venv_base }}/{{ analytics_api_service_name }}"
analytics_api_venv_bin: "{{ analytics_api_venv_dir }}/bin"
analytics_api_code_dir: "{{ analytics_api_app_dir }}/edx-analytics-data-api"
analytics_api_conf_dir: "{{ analytics_api_home }}"
analytics_api_gunicorn_host: "127.0.0.1"
analytics_api_gunicorn_port: "8100"
analytics_api_gunicorn_workers: "8"
analytics_api_gunicorn_timeout: "300"
analytics_api_django_settings: "production"
analytics_api_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/edx-analytics-data-api"
analytics_api_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ analytics_api_git_identity_file }}"
analytics_api_git_identity_file: "{{ analytics_api_app_dir }}/git-identity"
analytics_api_log_dir: "{{ COMMON_LOG_DIR }}/{{ analytics_api_service_name }}"
analytics_api_requirements_base: "{{ analytics_api_code_dir }}/requirements"
analytics_api_requirements:
- base.txt
- production.txt
- optional.txt
#
# OS packages
#
analytics_api_debian_pkgs:
- 'libmysqlclient-dev'
analytics_api_redhat_pkgs: []
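# An illustrative secure-vars override for this role (all values here are
# placeholders; keys under ANALYTICS_API_CONFIG layer over the defaults above):
# ANALYTICS_API_USERS:
#   "my-api-user": "a-real-api-key"
# ANALYTICS_API_CONFIG:
#   SECRET_KEY: 'a-real-secret-key'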
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics-api
#
# Overview:
#
#
- name: "restart the analytics service"
supervisorctl_local: >
name={{ analytics_api_service_name }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Role includes for role analytics-api
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- role: edx_service
edx_role_name: "{{ analytics_api_role_name }}"
edx_service_name: "{{ analytics_api_service_name }}"
- supervisor
---
- name: install read-only ssh key
copy: >
content="{{ ANALYTICS_API_GIT_IDENTITY }}" dest={{ analytics_api_git_identity_file }}
owner={{ analytics_api_user }} group={{ analytics_api_user }} mode=0600
- name: setup the analytics-api env file
template: >
src="edx/app/analytics-api/analytics_api_env.j2"
dest="{{ analytics_api_app_dir }}/analytics_api_env"
owner={{ analytics_api_user }}
group={{ analytics_api_user }}
mode=0644
- name: checkout code
git: >
dest={{ analytics_api_code_dir }} repo={{ analytics_api_source_repo }} version={{ ANALYTICS_API_VERSION }}
accept_hostkey=yes
ssh_opts="{{ analytics_api_git_ssh_opts }}"
register: analytics_api_code_checkout
notify: "restart the analytics service"
sudo_user: "{{ analytics_api_user }}"
- name: write out app config file
template: >
src=edx/app/analytics-api/analytics-api.yaml.j2
dest={{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml
mode=0644 owner={{ analytics_api_user }} group={{ analytics_api_user }}
notify: restart the analytics service
- name: install application requirements
pip: >
requirements="{{ analytics_api_requirements_base }}/{{ item }}"
virtualenv="{{ analytics_api_venv_dir }}" state=present
sudo_user: "{{ analytics_api_user }}"
notify: restart the analytics service
with_items: analytics_api_requirements
- name: syncdb and migrate
shell: >
chdir={{ analytics_api_code_dir }}
DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
{{ analytics_api_venv_bin }}/python ./manage.py syncdb --migrate --noinput
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
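# Migrations only run when migrate_db is passed in, e.g.:
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e 'migrate_db=yes'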
- name: run collectstatic
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py collectstatic --noinput
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
- name: create api users
shell: >
chdir={{ analytics_api_code_dir }}
{{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }} --create-user
sudo_user: "{{ analytics_api_user }}"
environment: "{{ analytics_api_environment }}"
with_dict: ANALYTICS_API_USERS
- name: write out the supervisor wrapper
template: >
src=edx/app/analytics-api/analytics-api.sh.j2
dest={{ analytics_api_app_dir }}/{{ analytics_api_service_name }}.sh
mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
notify: restart the analytics service
- name: write supervisord config
template: >
src=edx/app/supervisor/conf.d.available/analytics-api.conf.j2
dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
notify: restart the analytics service
- name: enable supervisor script
file: >
src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
state=link
force=yes
notify: restart the analytics service
when: not disable_edx_services
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
when: not disable_edx_services
- name: create symlinks from the venv bin dir
file: >
src="{{ analytics_api_venv_bin }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
state=link
with_items:
- python
- pip
- django-admin.py
- name: create symlinks from the repo dir
file: >
src="{{ analytics_api_code_dir }}/{{ item }}"
dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
state=link
with_items:
- manage.py
- name: remove read-only ssh key for the content repo
file: path={{ analytics_api_git_identity_file }} state=absent
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role analytics-api
#
# Overview:
#
# Install the Analytics Data API server, a python
# django application that runs under gunicorn
#
# Dependencies:
#
# Example play:
# - name: Deploy Analytics API
# hosts: all
# sudo: True
# gather_facts: True
# vars:
# ENABLE_DATADOG: False
# ENABLE_SPLUNKFORWARDER: False
# ENABLE_NEWRELIC: False
# roles:
# - aws
# - analytics-api
#
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
- fail: msg="You must provide a private key for the analytics repo"
when: not ANALYTICS_API_GIT_IDENTITY
- include: deploy.yml tags=deploy
#!/usr/bin/env bash
# {{ ansible_managed }}
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = analytics_api_venv_bin + '/newrelic-admin run-program ' + analytics_api_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = analytics_api_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC %}
export NEW_RELIC_APP_NAME="{{ ANALYTICS_API_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ analytics_api_gunicorn_workers }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
---
# {{ ansible_managed }}
{{ ANALYTICS_API_CONFIG | to_nice_yaml }}
# {{ ansible_managed }}
{% for name,value in analytics_api_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
# {{ ansible_managed }}
[program:{{ analytics_api_service_name }}]
command={{ analytics_api_app_dir }}/analytics-api.sh
user={{ common_web_user }}
directory={{ analytics_api_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
---
apache_ports:
- 80
apache_sites:
- lms
apache_template_dir: '.'
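# An illustrative override serving more than one port (443 is just an example
# value; any port Apache should listen on works):
# apache_ports:
#   - 80
#   - 443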
---
- name: restart apache
service: name=apache2 state=restarted
---
dependencies:
- common
# Requires nginx package
---
- name: Copying apache config {{ site_name }}
template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
first_available_file:
- "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir
- "roles/apache/templates/{{ site_name }}.j2"
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
- name: Creating apache2 config link {{ site_name }}
file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: restart apache
when: apache_role_run is defined
tags:
- apache
- update
# Installs apache and runs the lms wsgi by default
---
- name: Installs apache and mod_wsgi from apt
apt: pkg={{ item }} install_recommends=no state=present update_cache=yes
with_items:
- apache2
- libapache2-mod-wsgi
notify: restart apache
- name: disables default site
command: a2dissite 000-default
notify: restart apache
- name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: restart apache
- debug: msg={{ apache_sites }}
- name: Copying apache configs for {{ apache_sites }}
template: >
src={{ apache_template_dir }}/{{ item }}.j2
dest=/etc/apache2/sites-available/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: restart apache
with_items: apache_sites
- name: Creating apache2 config links for {{ apache_sites }}
file: >
src=/etc/apache2/sites-available/{{ item }}
dest=/etc/apache2/sites-enabled/{{ item }}
state=link owner=root group=root
notify: restart apache
with_items: apache_sites
WSGIPythonHome {{ edxapp_venv_dir }}
WSGIRestrictEmbedded On
<VirtualHost *:{{ apache_port }}>
ServerName https://{{ lms_env_config.SITE_NAME }}
ServerAlias *.{{ lms_env_config.SITE_NAME }}
UseCanonicalName On
...@@ -46,6 +46,6 @@ WSGIRestrictEmbedded On
ErrorLog ${APACHE_LOG_DIR}/apache-edx-error.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %D" apache-edx
CustomLog {{ APACHE_LOG_DIR }}/apache-edx-access.log apache-edx
</VirtualHost>
{%- for port in apache_ports -%}
NameVirtualHost *:{{ port }}
Listen {{ port }}
{% endfor %}
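# A sketch of the rendered result with the default apache_ports of [80]:
# NameVirtualHost *:80
# Listen 80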
...@@ -56,6 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
- boto==2.20.1
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
...@@ -47,7 +47,7 @@
- db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh
when: COMMON_MYSQL_READ_ONLY_PASS is defined
# These templates rely on there being a global
# read_only mongo user, you must override the default
...@@ -67,4 +67,4 @@
db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh
when: COMMON_MONGO_READ_ONLY_PASS is defined
...@@ -60,7 +60,9 @@
notify: restart certs
- name: install python requirements
pip: >
requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ certs_user }}"
notify: restart certs
...@@ -73,7 +75,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure certs has started
...@@ -3,6 +3,11 @@
# to change the base directory
# where edX is installed
# Set global htpasswd credentials
COMMON_ENABLE_BASIC_AUTH: True
COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx
COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
...@@ -18,6 +23,7 @@ COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
...@@ -38,15 +44,17 @@ COMMON_SSH_PASSWORD_AUTH: "no"
# the migrate user is granted table alter privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: 'password'
COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: 'password'
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
common_debian_pkgs:
- ntp
- ack-grep
...@@ -66,8 +74,9 @@ common_debian_pkgs:
- curl
common_pip_pkgs:
- pip==1.5.6
- setuptools==3.6
- virtualenv==1.11.6
- virtualenvwrapper
common_web_user: www-data
...@@ -15,6 +15,12 @@
- "{{ COMMON_BIN_DIR }}"
- "{{ COMMON_CFG_DIR }}"
# Determine if machine is provisioned via vagrant
# Some EC2-specific steps would need to be skipped
- name: check if instance is vagrant
stat: path=/home/vagrant
register: vagrant_home_dir
# Need to install python-pycurl to use Ansible's apt_repository module
- name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
...@@ -56,10 +62,14 @@
- name: Install logrotate configuration for edX
template: >
dest=/etc/logrotate.d/hourly/edx-services
src=etc/logrotate.d/hourly/edx_logrotate.j2
owner=root group=root mode=644
# This can be removed after new release of edX
- name: Remove old edx-services config from /etc/logrotate.d
file: path=/etc/logrotate.d/edx-services state=absent
# This is in common to keep all logrotation config
# in the same role
- name: Create hourly subdirectory in logrotate.d
...@@ -103,13 +113,15 @@
# Remove some of the default motd display on ubuntu
# and add a custom motd. These do not require an
# ssh restart
# Only needed for EC2 instances.
- name: update the ssh motd on Ubuntu
file: >
mode=0644
path={{ item }}
when: vagrant_home_dir.stat.exists == false
with_items:
- "/etc/update-motd.d/10-help-text"
- "/usr/share/landscape/landscape-sysinfo.wrapper"
- "/etc/update-motd.d/51-cloudguest"
- "/etc/update-motd.d/91-release-upgrade"
...@@ -4,9 +4,13 @@
copytruncate
delaycompress
dateext
dateformat -%Y%m%d-%s
missingok
notifempty
daily
rotate 90
size 1M
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
}
---
DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_debian_pkgs:
- apparmor-utils
- build-essential
...@@ -86,7 +86,7 @@
- name: create a symlink for venv supervisor
file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/supervisorctl.devpi
state=link
- name: create a symlink for supervisor config
...@@ -103,7 +103,7 @@
- name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
- name: ensure devpi is started
supervisorctl_local: >
...@@ -123,7 +123,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure discern, discern_celery has started
...@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code, edx-analytics-data-api
<version> - can be a commit or tag
EO
...@@ -43,13 +43,13 @@ edx_ansible_cmd="{{ edx_ansible_venv_bin }}/ansible-playbook -i localhost, -c lo
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
if [[ -z $1 || -z $2 ]]; then
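# An example invocation of this update wrapper (the /edx/bin install path is
# an assumption based on COMMON_BIN_DIR; any valid <repo> <version> pair works):
# /edx/bin/update edx-platform master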
...@@ -44,12 +44,34 @@
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- name: create edx_service data and staticfiles dir
file: >
path="{{ item }}"
state=directory
owner="{{ edx_service_name }}"
group="{{ common_web_group }}"
with_items:
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/data"
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/staticfiles"
- name: create edx_service log dir
file: >
path="{{ item }}"
state=directory
owner="syslog"
group="syslog"
with_items:
- "{{ COMMON_LOG_DIR }}/{{ edx_service_name }}"
# Replace dashes with underscores to support roles that use
# dashes (the role vars will contain underscores)
- name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present
with_items: "{{ edx_service_name.replace('-', '_') }}_debian_pkgs"
when: ansible_distribution in common_debian_variants
- name: install a bunch of system packages on which edx_service relies
yum: pkg={{ item }} state=present
with_items: "{{ edx_service_name.replace('-', '_') }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants
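# For example, with edx_service_name set to "analytics-api" the two lookups
# above resolve to analytics_api_debian_pkgs and analytics_api_redhat_pkgs,
# matching the underscored var names defined in that role's defaults.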
...@@ -6,6 +6,7 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
- nltk
- role: user
user_info:
- name: "{{ EDXAPP_AUTOMATOR_NAME }}"
...@@ -108,7 +108,7 @@
# Set the npm registry
- name: Set the npm registry
shell:
npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
...@@ -218,7 +218,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# If using CAS and you have a function for mapping attributes, install
# the module here.
- name: install CAS attribute module
...@@ -249,6 +248,33 @@
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
...@@ -283,7 +309,7 @@
sudo_user: "{{ edxapp_sandbox_user }}"
when: EDXAPP_PYTHON_SANDBOX
register: sandbox_install_output
changed_when: sandbox_install_output.stdout is defined and 'installed' in sandbox_install_output.stdout
notify:
- "restart edxapp"
- "restart edxapp_workers"
...@@ -333,6 +359,9 @@
# service variants configured, runs
# gather_assets and db migrations
- include: service_variant_config.yml
tags:
- service_variant_config
- deploy
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
...@@ -343,7 +372,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure edxapp has started
...@@ -25,6 +25,7 @@
- "{{ edxapp_venvs_dir }}"
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- name: create edxapp log dir
file: >
...@@ -75,24 +75,6 @@
when: celery_worker is defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
...@@ -107,54 +89,6 @@
- "restart edxapp"
- "restart edxapp_workers"
# Gather assets using rake if possible
- name: gather {{ item }} static assets with rake