Commit 1e9b3ebe by Jason Zhu

Merge pull request #1 from edx/master

up to date
parents a97f6ee1 5079f51d
...@@ -9,3 +9,4 @@ ...@@ -9,3 +9,4 @@
vagrant/*/devstack/edx-platform vagrant/*/devstack/edx-platform
vagrant/*/devstack/cs_comments_service vagrant/*/devstack/cs_comments_service
vagrant/*/devstack/ora vagrant/*/devstack/ora
vagrant_ansible_inventory_default
...@@ -6,11 +6,10 @@ install: ...@@ -6,11 +6,10 @@ install:
- "sudo apt-get install -y npm python-demjson" - "sudo apt-get install -y npm python-demjson"
- "pip install --allow-all-external -r requirements.txt" - "pip install --allow-all-external -r requirements.txt"
- "pip install --allow-all-external demjson" - "pip install --allow-all-external demjson"
- "sudo npm install -g js-yaml"
script: script:
- | - |
for yml in $(find . -name "*.yml"); do for yml in $(find . -name "*.yml"); do
js-yaml $yml >/dev/null python -c "import sys,yaml; yaml.load(open(sys.argv[1]))" $yml >/dev/null
if [[ $? -ne 0 ]]; then if [[ $? -ne 0 ]]; then
echo "ERROR parsing $yml" echo "ERROR parsing $yml"
exit 1 exit 1
......
...@@ -28,3 +28,7 @@ Valera Rozuvan <valera.rozuvan@gmail.com> ...@@ -28,3 +28,7 @@ Valera Rozuvan <valera.rozuvan@gmail.com>
Ker Ruben Ramos <xdiscent@gmail.com> Ker Ruben Ramos <xdiscent@gmail.com>
Fred Smith <derf@edx.org> Fred Smith <derf@edx.org>
Wang Peifeng <pku9104038@hotmail.com> Wang Peifeng <pku9104038@hotmail.com>
Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org>
Matjaz Gregoric <mtyaka@gmail.com>
- Role: xqwatcher, xqueue, nginx, edxapp, common
- Moving nginx basic authorization flag and credentials to the common role
- Role: Edxapp
- Turn on code sandboxing by default and allow the jailed code to be able to write
files to the tmp directory created for it by codejail.
- Role: Edxapp
- The repo.txt requirements file is no longer being processed in any way. This file was removed from edx-platform
via pull #3487(https://github.com/edx/edx-platform/pull/3487)
- Update CMS_HOSTNAME default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations.
...@@ -15,7 +15,7 @@ Building the platform takes place in two phases: ...@@ -15,7 +15,7 @@ Building the platform takes place in two phases:
As much as possible, we have tried to keep a clean distinction between As much as possible, we have tried to keep a clean distinction between
provisioning and configuration. You are not obliged to use our tools provisioning and configuration. You are not obliged to use our tools
and are free to use one, but not the other. The provisioning phase and are free to use one, but not the other. The provisioning phase
stands-up the required resources and tags them with role identifiers stands-up the required resources and tags them with role identifiers
so that the configuration tool can come in and complete the job. so that the configuration tool can come in and complete the job.
...@@ -26,9 +26,9 @@ Private Cloud with hosts for the core edX services. This template ...@@ -26,9 +26,9 @@ Private Cloud with hosts for the core edX services. This template
will build quite a number of AWS resources that cost money, so please will build quite a number of AWS resources that cost money, so please
consider this before you start. consider this before you start.
The configuration phase is manged by [Ansible](http://ansible.cc/). The configuration phase is managed by [Ansible](http://ansible.com/).
We have provided a number of playbooks that will configure each of We have provided a number of playbooks that will configure each of
the edX service. the edX services.
This project is a re-write of the current edX provisioning and This project is a re-write of the current edX provisioning and
configuration tools, we will be migrating features to this project configuration tools, we will be migrating features to this project
...@@ -36,3 +36,5 @@ over time, so expect frequent changes. ...@@ -36,3 +36,5 @@ over time, so expect frequent changes.
For more information including installation instruction please see the [Configuration Wiki](https://github.com/edx/configuration/wiki). For more information including installation instruction please see the [Configuration Wiki](https://github.com/edx/configuration/wiki).
For info on any large recent changes please see the [change log](https://github.com/edx/configuration/blob/master/CHANGELOG.md).
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-xqwatcher-edge-stage.yml \
#    -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible, e0d/add-instance-profile, from
#   https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
#   with ASGs. This can be worked around by deleting the ASG and re-running
#   the play
# - The instance_profile_name will need to be created in advance as there
#   isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
- name: Add resources for the XQWatcher
  hosts: localhost
  connection: local
  gather_facts: False
  tasks:
    # ignore_errors is used here because this module is not idempotent.
    # If the tags already exist, the task fails with the message
    # "Tags already exists in subnet".
    - name: Update subnet tags
      ec2_tag:
        resource: "{{ item }}"
        region: "{{ ec2_region }}"
        state: present
        tags:
          Name: "{{ edp }}-subnet"
          play: xqwatcher
          immutable_metadata: "{'purpose':'{{ environment }}-{{ deployment }}-internal-{{ play }}','target':'ec2'}"
      with_items: subnets
      ignore_errors: True
    # Fails intermittently with the following error:
    # "The specified rule does not exist in this security group"
    - name: Create security group
      ec2_group:
        profile: "{{ profile }}"
        description: "Open up SSH access"
        name: "{{ security_group }}"
        vpc_id: "{{ vpc_id }}"
        region: "{{ ec2_region }}"
        rules:
          - proto: tcp
            from_port: "{{ sec_group_ingress_from_port }}"
            to_port: "{{ sec_group_ingress_to_port }}"
            cidr_ip: "{{ item }}"
      with_items: sec_group_ingress_cidrs
      register: created_sec_group
      ignore_errors: True
    - name: debug
      debug:
        msg: "Registered created_sec_group: {{ created_sec_group }}"
    # instance_profile_name was added by me in my fork
    - name: Create the launch configuration
      ec2_lc:
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ lc_name }}"
        image_id: "{{ lc_ami }}"
        key_name: "{{ key_name }}"
        # NOTE(review): only the first registered result's group_id is used;
        # presumably a single ingress CIDR created one group — confirm.
        security_groups: "{{ created_sec_group.results[0].group_id }}"
        instance_type: "{{ instance_type }}"
        instance_profile_name: "{{ instance_profile_name }}"
        volumes:
          - device_name: "/dev/sda1"
            volume_size: "{{ instance_volume_size }}"
    # The ASG is created with zero capacity (min/max/desired all 0);
    # presumably capacity is raised after migration — TODO confirm.
    - name: Create ASG
      ec2_asg:
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ asg_name }}"
        launch_config_name: "{{ lc_name }}"
        min_size: 0
        max_size: 0
        desired_capacity: 0
        vpc_zone_identifier: "{{ subnets|join(',') }}"
        instance_tags:
          Name: "{{ env }}-{{ deployment }}-{{ play }}"
          autostack: "true"
          environment: "{{ env }}"
          deployment: "{{ deployment }}"
          play: "{{ play }}"
          services: "{{ play }}"
      register: asg
    - name: debug
      debug:
        msg: "DEBUG: {{ asg }}"
    - name: Create scale up policy
      ec2_scaling_policy:
        state: present
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ edp }}-ScaleUpPolicy"
        adjustment_type: "ChangeInCapacity"
        asg_name: "{{ asg_name }}"
        scaling_adjustment: 1
        min_adjustment_step: 1
        cooldown: 60
      register: scale_up_policy
      # NOTE(review): this task-level tag ("foo") looks like a leftover
      # placeholder; confirm it is intentional before using --tags runs.
      tags:
        - foo
    - name: debug
      debug:
        msg: "Registered scale_up_policy: {{ scale_up_policy }}"
    - name: Create scale down policy
      ec2_scaling_policy:
        state: present
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ edp }}-ScaleDownPolicy"
        adjustment_type: "ChangeInCapacity"
        asg_name: "{{ asg_name }}"
        scaling_adjustment: -1
        min_adjustment_step: 1
        cooldown: 60
      register: scale_down_policy
    - name: debug
      debug:
        msg: "Registered scale_down_policy: {{ scale_down_policy }}"
    #
    # Sometimes the scaling policy reports itself changed, but
    # does not return data about the policy. It's bad enough
    # that consistent data isn't returned when things
    # have and have not changed; this makes writing idempotent
    # tasks difficult.
    - name: create high-cpu alarm
      ec2_metric_alarm:
        state: present
        region: "{{ ec2_region }}"
        name: "cpu-high"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: ">="
        threshold: 90.0
        period: 300
        evaluation_periods: 2
        unit: "Percent"
        description: "Scale-up if CPU > 90% for 10 minutes"
        dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
        alarm_actions: ["{{ scale_up_policy.arn }}"]
      # Guard against the inconsistent scaling-policy return described above.
      when: scale_up_policy.arn is defined
    - name: create low-cpu alarm
      ec2_metric_alarm:
        state: present
        region: "{{ ec2_region }}"
        name: "cpu-low"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: "<="
        threshold: 50.0
        period: 300
        evaluation_periods: 2
        unit: "Percent"
        description: "Scale-down if CPU < 50% for 10 minutes"
        dimensions: {"AutoScalingGroupName":"{{ asg_name }}"}
        alarm_actions: ["{{ scale_down_policy.arn }}"]
      when: scale_down_policy.arn is defined
\ No newline at end of file
...@@ -24,13 +24,24 @@ ...@@ -24,13 +24,24 @@
"m2.xlarge", "m2.xlarge",
"m2.2xlarge", "m2.2xlarge",
"m2.4xlarge", "m2.4xlarge",
"m3.xlarge", "cr1.8xlarge",
"m3.2xlarge", "cc2.8xlarge",
"c1.medium", "c1.medium",
"c1.xlarge", "c1.xlarge",
"cc1.4xlarge", "m3.medium",
"cc2.8xlarge", "m3.large",
"cg1.4xlarge" "m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
], ],
"ConstraintDescription":"must be a valid EC2 instance type." "ConstraintDescription":"must be a valid EC2 instance type."
}, },
...@@ -56,13 +67,24 @@ ...@@ -56,13 +67,24 @@
"m2.xlarge", "m2.xlarge",
"m2.2xlarge", "m2.2xlarge",
"m2.4xlarge", "m2.4xlarge",
"m3.xlarge", "cr1.8xlarge",
"m3.2xlarge", "cc2.8xlarge",
"c1.medium", "c1.medium",
"c1.xlarge", "c1.xlarge",
"cc1.4xlarge", "m3.medium",
"cc2.8xlarge", "m3.large",
"cg1.4xlarge" "m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
], ],
"ConstraintDescription":"must be a valid EC2 instance type." "ConstraintDescription":"must be a valid EC2 instance type."
}, },
...@@ -79,13 +101,24 @@ ...@@ -79,13 +101,24 @@
"m2.xlarge", "m2.xlarge",
"m2.2xlarge", "m2.2xlarge",
"m2.4xlarge", "m2.4xlarge",
"m3.xlarge", "cr1.8xlarge",
"m3.2xlarge", "cc2.8xlarge",
"c1.medium", "c1.medium",
"c1.xlarge", "c1.xlarge",
"cc1.4xlarge", "m3.medium",
"cc2.8xlarge", "m3.large",
"cg1.4xlarge" "m3.xlarge",
"m3.2xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge"
], ],
"ConstraintDescription":"must be a valid EC2 instance type." "ConstraintDescription":"must be a valid EC2 instance type."
}, },
...@@ -117,25 +150,39 @@ ...@@ -117,25 +150,39 @@
}, },
"Mappings":{ "Mappings":{
"AWSInstanceType2Arch":{ "AWSInstanceType2Arch":{
"t1.micro": { "Arch":"64" }, "t1.micro" : { "Arch" : "64" },
"m1.small": { "Arch":"64" }, "m1.small" : { "Arch" : "64" },
"m1.medium": { "Arch":"64" }, "m1.medium" : { "Arch" : "64" },
"m1.large": { "Arch":"64" }, "m1.large" : { "Arch" : "64" },
"m1.xlarge": { "Arch":"64" }, "m1.xlarge" : { "Arch" : "64" },
"m2.xlarge": { "Arch":"64" }, "m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge": { "Arch":"64" }, "m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge": { "Arch":"64" }, "m2.4xlarge" : { "Arch" : "64" },
"m3.xlarge": { "Arch":"64" }, "cr1.8xlarge" : { "Arch" : "64" },
"m3.2xlarge": { "Arch":"64" }, "cc2.8xlarge" : { "Arch" : "64" },
"c1.medium": { "Arch":"64" }, "c1.medium" : { "Arch" : "64" },
"c1.xlarge": { "Arch":"64" }, "c1.xlarge" : { "Arch" : "64" },
"cg1.4xlarge": { "Arch":"64HVM" } "m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" },
"m3.4xlarge" : { "Arch" : "64" },
"c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
}, },
"AWSRegionArch2AMI":{ "AWSRegionArch2AMI":{
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" }, "us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9" },
"us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" }, "us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" }, "us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" }, "eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba" },
"sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" }, "sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
"ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" }, "ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
"ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" }, "ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
...@@ -183,14 +230,14 @@ ...@@ -183,14 +230,14 @@
"Fn::FindInMap":[ "Fn::FindInMap":[
"MapRegionsToAvailZones", "MapRegionsToAvailZones",
{ "Ref":"AWS::Region" }, { "Ref":"AWS::Region" },
"AZone0" "AZone1"
] ]
}, },
"Tags":[ "Tags":[
{ {
"Key":"immutable_metadata", "Key":"immutable_metadata",
"Value":"{'purpose':'external','target':'ec2'}" "Value":"{'purpose':'external','target':'ec2'}"
} }
] ]
} }
}, },
...@@ -205,7 +252,7 @@ ...@@ -205,7 +252,7 @@
"Fn::FindInMap":[ "Fn::FindInMap":[
"MapRegionsToAvailZones", "MapRegionsToAvailZones",
{ "Ref":"AWS::Region" }, { "Ref":"AWS::Region" },
"AZone0" "AZone1"
] ]
}, },
"Tags":[ "Tags":[
...@@ -364,7 +411,7 @@ ...@@ -364,7 +411,7 @@
} }
} }
}, },
"InboundEmphemeralPublicNetworkAclEntry":{ "InboundSMTPPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry", "Type":"AWS::EC2::NetworkAclEntry",
"Properties":{ "Properties":{
"NetworkAclId":{ "NetworkAclId":{
...@@ -376,6 +423,23 @@ ...@@ -376,6 +423,23 @@
"Egress":"false", "Egress":"false",
"CidrBlock":"0.0.0.0/0", "CidrBlock":"0.0.0.0/0",
"PortRange":{ "PortRange":{
"From":"587",
"To":"587"
}
}
},
"InboundEmphemeralPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"104",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"1024", "From":"1024",
"To":"65535" "To":"65535"
} }
...@@ -582,6 +646,18 @@ ...@@ -582,6 +646,18 @@
"FromPort":"443", "FromPort":"443",
"ToPort":"443", "ToPort":"443",
"CidrIp":"0.0.0.0/0" "CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"587",
"ToPort":"587",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"5222",
"ToPort":"5222",
"CidrIp":"0.0.0.0/0"
} }
], ],
"SecurityGroupEgress":[ "SecurityGroupEgress":[
...@@ -604,6 +680,18 @@ ...@@ -604,6 +680,18 @@
"FromPort":"443", "FromPort":"443",
"ToPort":"443", "ToPort":"443",
"CidrIp":"0.0.0.0/0" "CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"587",
"ToPort":"587",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"5222",
"ToPort":"5222",
"CidrIp":"0.0.0.0/0"
} }
] ]
} }
...@@ -688,10 +776,11 @@ ...@@ -688,10 +776,11 @@
"IpProtocol":"tcp", "IpProtocol":"tcp",
"FromPort":"22", "FromPort":"22",
"ToPort":"22", "ToPort":"22",
"CidrIp":"10.0.0.0/16" "CidrIp":"10.254.0.0/16"
}, },
{ {
"IpProtocol":"tcp", "IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80", "ToPort":"80",
"CidrIp":"0.0.0.0/0" "CidrIp":"0.0.0.0/0"
}, },
...@@ -755,7 +844,7 @@ ...@@ -755,7 +844,7 @@
"KeyName":{ "KeyName":{
"Ref":"KeyName" "Ref":"KeyName"
}, },
"IamInstanceProfile" : { "IamInstanceProfile" : {
"Ref" : "AdminInstanceProfile" "Ref" : "AdminInstanceProfile"
}, },
"SubnetId":{ "SubnetId":{
...@@ -827,7 +916,23 @@ ...@@ -827,7 +916,23 @@
] ]
] ]
} }
} },
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs":{
"VolumeSize": 100
}
},
{
"DeviceName": "/dev/sdb",
"VirtualName": "ephemeral0"
},
{
"DeviceName": "/dev/sdc",
"VirtualName": "ephemeral1"
}
]
} }
}, },
"AdminSecurityGroup":{ "AdminSecurityGroup":{
......
...@@ -47,10 +47,25 @@ ...@@ -47,10 +47,25 @@
"m2.xlarge" : { "Arch" : "64" }, "m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" }, "m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" }, "m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" }, "m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" }, "m3.2xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" }, "m3.4xlarge" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" } "c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
}, },
"AWSRegionArch2AMI" : { "AWSRegionArch2AMI" : {
...@@ -236,7 +251,7 @@ ...@@ -236,7 +251,7 @@
"UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [
"#!/bin/bash\n", "#!/bin/bash\n",
"exec >> /home/ubuntu/cflog.log\n", "exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n", "exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n", "function error_exit\n",
"{\n", "{\n",
" cfn-signal -e 1 -r \"$1\" '", { "Ref" : "EdxServerWaitHandle" }, "'\n", " cfn-signal -e 1 -r \"$1\" '", { "Ref" : "EdxServerWaitHandle" }, "'\n",
......
...@@ -46,10 +46,25 @@ ...@@ -46,10 +46,25 @@
"m2.xlarge" : { "Arch" : "64" }, "m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" }, "m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" }, "m2.4xlarge" : { "Arch" : "64" },
"cr1.8xlarge" : { "Arch" : "64" },
"cc2.8xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"m3.medium" : { "Arch" : "64" },
"m3.large" : { "Arch" : "64" },
"m3.xlarge" : { "Arch" : "64" }, "m3.xlarge" : { "Arch" : "64" },
"m3.2xlarge" : { "Arch" : "64" }, "m3.2xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" }, "m3.4xlarge" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" } "c3.large" : { "Arch" : "64" },
"c3.xlarge" : { "Arch" : "64" },
"c3.2xlarge" : { "Arch" : "64" },
"c3.4xlarge" : { "Arch" : "64" },
"c3.8xlarge" : { "Arch" : "64" },
"r3.large" : { "Arch" : "64" },
"r3.xlarge" : { "Arch" : "64" },
"r3.2xlarge" : { "Arch" : "64" },
"r3.4xlarge" : { "Arch" : "64" },
"r3.8xlarge" : { "Arch" : "64" }
}, },
"AWSRegionArch2AMI" : { "AWSRegionArch2AMI" : {
......
...@@ -6,4 +6,4 @@ ...@@ -6,4 +6,4 @@
jinja2_extensions=jinja2.ext.do jinja2_extensions=jinja2.ext.do
host_key_checking = False host_key_checking = False
roles_path=../../../ansible-roles roles_path=../../ansible-roles/roles:../../ansible-private/roles:../../ansible-roles/
import os
import prettytable
import hipchat
import time
import random
from ansible import utils
class CallbackModule(object):
"""Send status updates to a HipChat channel during playbook execution.
This plugin makes use of the following environment variables:
HIPCHAT_TOKEN (required): HipChat API token
HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible
HIPCHAT_FROM (optional): Name to post as. Default: ansible
HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true
HIPCHAT_MSG_PREFIX (option): Optional prefix to add to all hipchat messages
HIPCHAT_MSG_COLOR (option): Optional color for hipchat messages
HIPCHAT_CONDENSED (option): Condense the task summary output
Requires:
prettytable
"""
def __init__(self):
if 'HIPCHAT_TOKEN' in os.environ:
self.start_time = time.time()
self.task_report = []
self.last_task = None
self.last_task_changed = False
self.last_task_count = 0
self.last_task_delta = 0
self.last_task_start = time.time()
self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', True) == True)
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
try:
self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
except Exception as e:
utils.warning("Unable to connect to hipchat: {}".format(e))
self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
self.printed_playbook = False
self.playbook_name = None
self.enabled = True
else:
self.enabled = False
def _send_hipchat(self, message, room=None, from_name=None, color=None, message_format='text'):
if not room:
room = self.room
if not from_name:
from_name = self.from_name
if not color:
color = self.hipchat_msg_color
try:
self.hipchat_conn.message_room(room, from_name, message, color=color, message_format=message_format)
except Exception as e:
utils.warning("Could not submit message to hipchat: {}".format(e))
def _flush_last_task(self):
if self.last_task:
delta = time.time() - self.last_task_start
self.task_report.append(dict(
changed=self.last_task_changed,
count=self.last_task_count,
delta="{:0>.1f}".format(self.last_task_delta),
task=self.last_task))
self.last_task_count = 0
self.last_task_changed = False
self.last_task = None
self.last_task_delta = 0
def _process_message(self, msg, msg_type='STATUS'):
if msg_type == 'OK' and self.last_task:
if msg.get('changed', True):
self.last_task_changed = True
if msg.get('delta', False):
(hour, minute, sec) = msg['delta'].split(':')
total = float(hour) * 1200 + float(minute) * 60 + float(sec)
self.last_task_delta += total
self.last_task_count += 1
else:
self._flush_last_task()
if msg_type == 'TASK_START':
self.last_task = msg
self.last_task_start = time.time()
elif msg_type == 'FAILED':
self.last_task_start = time.time()
if 'msg' in msg:
self._send_hipchat('/code {}: The ansible run returned the following error:\n\n {}'.format(
self.hipchat_msg_prefix, msg['msg']), color='red', message_format='text')
else:
# move forward the last task start time
self.last_task_start = time.time()
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
if self.enabled:
self._process_message(res, 'FAILED')
def runner_on_ok(self, host, res):
if self.enabled:
# don't send the setup results
if res['invocation']['module_name'] != "setup":
self._process_message(res, 'OK')
def runner_on_error(self, host, msg):
if self.enabled:
self._process_message(msg, 'ERROR')
def runner_on_skipped(self, host, item=None):
if self.enabled:
self._process_message(item, 'SKIPPED')
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
if self.enabled:
self._process_message(res, 'ASYNC_POLL')
def runner_on_async_ok(self, host, res, jid):
if self.enabled:
self._process_message(res, 'ASYNC_OK')
def runner_on_async_failed(self, host, res, jid):
if self.enabled:
self._process_message(res, 'ASYNC_FAILED')
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
if self.enabled:
self._process_message(name, 'TASK_START')
def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
encrypt=None, confirm=False, salt_size=None,
salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, pattern):
if self.enabled:
"""Display Playbook and play start messages"""
self.start_time = time.time()
self.playbook_name, _ = os.path.splitext(os.path.basename(self.play.playbook.filename))
host_list = self.play.playbook.inventory.host_list
inventory = os.path.basename(os.path.realpath(host_list))
subset = self.play.playbook.inventory._subset
msg = "<b>{description}</b>: Starting ansible run for play <b><i>{play}</i></b>".format(description=self.hipchat_msg_prefix, play=self.playbook_name)
if self.play.playbook.only_tags and 'all' not in self.play.playbook.only_tags:
msg = msg + " with tags <b><i>{}</i></b>".format(','.join(self.play.playbook.only_tags))
if subset:
msg = msg + " on hosts <b><i>{}</i></b>".format(','.join(subset))
self._send_hipchat(msg, message_format='html')
def playbook_on_stats(self, stats):
    """Send end-of-run statistics to HipChat.

    Sends two messages: a per-task timing table (PrettyTable, as a /code
    block) and a per-host ok/changed/unreachable/failures summary followed
    by the total run duration.

    :param stats: Ansible AggregateStats for the finished run.
    """
    if not self.enabled:
        return
    self._flush_last_task()
    delta = time.time() - self.start_time
    self.start_time = time.time()
    hosts = sorted(stats.processed.keys())
    # Per-task timing table.
    task_column = '{} - Task'.format(self.hipchat_msg_prefix)
    task_summary = prettytable.PrettyTable([task_column, 'Time', 'Count', 'Changed'])
    task_summary.align[task_column] = "l"
    task_summary.align['Time'] = "r"
    task_summary.align['Count'] = "r"
    task_summary.align['Changed'] = "r"
    for task in self.task_report:
        if self.condensed_task_report:
            # For the condensed report, skip tasks that made no change
            # and ran in under one second.
            if not task['changed'] and float(task['delta']) < 1:
                continue
        task_summary.add_row([task['task'], task['delta'], str(task['count']), str(task['changed'])])
    self._send_hipchat("/code " + str(task_summary))
    # Per-host summary.
    summary_all_host_output = []
    for host in hosts:
        # BUG FIX: the original rebound 'stats' to the summarize() dict,
        # so with more than one host the second iteration raised
        # AttributeError ('dict' has no 'summarize').
        host_stats = stats.summarize(host)
        summary_output = "<b>{}</b>: <i>{}</i> - ".format(self.hipchat_msg_prefix, host)
        for summary_item in ['ok', 'changed', 'unreachable', 'failures']:
            if host_stats[summary_item] != 0:
                summary_output += "<b>{}</b> - {} ".format(summary_item, host_stats[summary_item])
        summary_all_host_output.append(summary_output)
    self._send_hipchat("<br />".join(summary_all_host_output), message_format='html')
    msg = "<b>{description}</b>: Finished Ansible run for <b><i>{play}</i> in {min:02} minutes, {sec:02} seconds</b><br /><br />".format(
        description=self.hipchat_msg_prefix,
        play=self.playbook_name,
        min=int(delta / 60),
        sec=int(delta % 60))
    self._send_hipchat(msg, message_format='html')
...@@ -128,5 +128,8 @@ class CallbackModule(object): ...@@ -128,5 +128,8 @@ class CallbackModule(object):
if len(payload[msg_type][output]) > 1000: if len(payload[msg_type][output]) > 1000:
payload[msg_type][output] = "(clipping) ... " \ payload[msg_type][output] = "(clipping) ... " \
+ payload[msg_type][output][-1000:] + payload[msg_type][output][-1000:]
if 'stdout_lines' in payload[msg_type]:
# only keep the last 20 or so lines to avoid payload size errors
if len(payload[msg_type]['stdout_lines']) > 20:
payload[msg_type]['stdout_lines'] = ['(clipping) ... '] + payload[msg_type]['stdout_lines'][-20:]
self.sqs.send_message(self.queue, json.dumps(payload)) self.sqs.send_message(self.queue, json.dumps(payload))
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
# AWS regions to make calls to. Set this to 'all' to make request to all regions # AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma # in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all regions = us-east-1
regions_exclude = us-gov-west-1 regions_exclude = us-gov-west-1
# When generating inventory, Ansible needs to know how to address a server. # When generating inventory, Ansible needs to know how to address a server.
......
...@@ -217,7 +217,14 @@ class Ec2Inventory(object): ...@@ -217,7 +217,14 @@ class Ec2Inventory(object):
config.get('ec2', 'route53_excluded_zones', '').split(',')) config.get('ec2', 'route53_excluded_zones', '').split(','))
# Cache related # Cache related
cache_path = config.get('ec2', 'cache_path') if 'EC2_CACHE_PATH' in os.environ:
cache_path = os.environ['EC2_CACHE_PATH']
elif self.args.cache_path:
cache_path = self.args.cache_path
else:
cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache" self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache" self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index" self.cache_path_index = cache_path + "/ansible-ec2.index"
...@@ -241,6 +248,10 @@ class Ec2Inventory(object): ...@@ -241,6 +248,10 @@ class Ec2Inventory(object):
default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini') default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile) parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
parser.add_argument(
'--cache-path',
help='Override the cache path set in ini file',
required=False)
self.args = parser.parse_args() self.args = parser.parse_args()
......
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
roles:
- alton
...@@ -6,4 +6,4 @@ ...@@ -6,4 +6,4 @@
jinja2_extensions=jinja2.ext.do jinja2_extensions=jinja2.ext.do
host_key_checking=False host_key_checking=False
roles_path=../../../ansible-roles roles_path=../../../ansible-roles/roles:../../../ansible-private/roles:../../../ansible-roles/
...@@ -2,16 +2,12 @@ ...@@ -2,16 +2,12 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- certs - certs
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
...@@ -2,15 +2,11 @@ ...@@ -2,15 +2,11 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- common - common
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
...@@ -4,9 +4,26 @@ ...@@ -4,9 +4,26 @@
sudo: True sudo: True
serial: 1 serial: 1
vars: vars:
ENABLE_DATADOG: False # By default take instances in and out of the elb(s) they
ENABLE_SPLUNKFORWARDER: False # are attached to
ENABLE_NEWRELIC: False # To skip elb operations use "-e elb_pre_post=fase"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -14,17 +31,29 @@ ...@@ -14,17 +31,29 @@
- xqueue - xqueue
- role: xqueue - role: xqueue
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
- rabbitmq - rabbitmq
- datadog - datadog
- splunkforwarder - splunkforwarder
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# #
# In order to reconfigure the host resolution we are issuing a # In order to reconfigure the host resolution we are issuing a
# reboot. # reboot.
...@@ -38,4 +67,5 @@ ...@@ -38,4 +67,5 @@
tasks: tasks:
- name: reboot - name: reboot
command: /sbin/shutdown -r now "Reboot is triggered by Ansible" command: /sbin/shutdown -r now "Reboot is triggered by Ansible"
when: reboot
tags: reboot tags: reboot
- name: connect a sandbox to production data
hosts: all
gather_facts: False
sudo: True
tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell: >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create a mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: edxapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink
# will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here
#
# What we are doing here is updating the sandbox
# server-vars config file so that when update
# is called it will use the new MYSQL connection
# info.
- name: Update RDS to point to the sandbox clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MYSQL_HOST: {{ EDXAPP_MYSQL_HOST }}"
- "EDXAPP_MYSQL_DB_NAME: {{ EDXAPP_MYSQL_DB_NAME }}"
- "EDXAPP_MYSQL_USER: {{ EDXAPP_MYSQL_USER }}"
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
shell: >
/edx/bin/update edx-platform master
tags: update_edxapp_mysql_host
...@@ -2,15 +2,11 @@ ...@@ -2,15 +2,11 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- demo - demo
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
...@@ -2,10 +2,6 @@ ...@@ -2,10 +2,6 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -13,8 +9,8 @@ ...@@ -13,8 +9,8 @@
- discern - discern
- discern - discern
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
...@@ -6,9 +6,6 @@ ...@@ -6,9 +6,6 @@
vars: vars:
migrate_db: "yes" migrate_db: "yes"
openid_workaround: True openid_workaround: True
ENABLE_DATADOG: True
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -18,6 +15,7 @@ ...@@ -18,6 +15,7 @@
- ora - ora
- xqueue - xqueue
- xserver - xserver
- certs
nginx_default_sites: nginx_default_sites:
- lms - lms
- edxlocal - edxlocal
...@@ -33,12 +31,12 @@ ...@@ -33,12 +31,12 @@
- { role: "xqueue", update_users: True } - { role: "xqueue", update_users: True }
- xserver - xserver
- ora - ora
- discern
- certs - certs
- edx_ansible - edx_ansible
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
- flower
...@@ -2,10 +2,6 @@ ...@@ -2,10 +2,6 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -16,8 +12,8 @@ ...@@ -16,8 +12,8 @@
- lms - lms
- edxapp - edxapp
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
- name: Run edxapp migrations
hosts: all
sudo: False
gather_facts: False
vars:
db_dry_run: "--db-dry-run"
tasks:
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
with_items:
- lms
- cms
...@@ -3,7 +3,41 @@ ...@@ -3,7 +3,41 @@
vars_files: vars_files:
- "{{ secure_dir }}/vars/common/common.yml" - "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/stage/stage-edx.yml" - "{{ secure_dir }}/vars/stage/stage-edx.yml"
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles: roles:
- common - common
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- name: Deploy celery flower (monitoring tool)
hosts: all
sudo: True
gather_facts: True
roles:
- flower
...@@ -2,10 +2,6 @@ ...@@ -2,10 +2,6 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -13,10 +9,8 @@ ...@@ -13,10 +9,8 @@
- forum - forum
- forum - forum
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
- role: newrelic
when: ENABLE_NEWRELIC
...@@ -3,19 +3,16 @@ ...@@ -3,19 +3,16 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- edx_ansible - edx_ansible
- user - user
- jenkins_admin - jenkins_admin
- hotg - hotg
- alton
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
# Deploy a specific version of edx-ora2 and re-run migrations
# edx-ora2 is already included in the requirements for edx-platform,
# but we need to override that version when deploying to
# the continuous integration server for testing ora2 changes.
- name: Update edx-ora2
hosts: all
sudo: True
gather_facts: True
vars:
- edxapp_venv_dir: "/edx/app/edxapp/venvs/edxapp"
- edxapp_code_dir: "/edx/app/edxapp/edx-platform"
- edxapp_deploy_path: "{{ edxapp_venv_dir }}/bin:{{ edxapp_code_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- edxapp_user: "edxapp"
- edxapp_mysql_user: "migrate"
- edxapp_mysql_password: "password"
- supervisorctl_path: "/edx/bin/supervisorctl"
- ora2_version: "master"
- ora2_pip_req: "git+https://github.com/edx/edx-ora2.git@{{ ora2_version }}#egg=edx-ora2"
tasks:
- name: install edx-ora2
shell: >
{{ edxapp_venv_dir }}/bin/pip install -e {{ ora2_pip_req }}
chdir={{ edxapp_code_dir }}
environment:
PATH: "{{ edxapp_deploy_path }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart workers"
- name: syncdb and migrate
shell: >
{{ edxapp_venv_dir }}/bin/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
chdir={{ edxapp_code_dir }}
environment:
DB_MIGRATION_USER: "{{ edxapp_mysql_user }}"
DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}"
notify:
- "restart edxapp"
- "restart workers"
handlers:
- name: restart edxapp
shell: "{{ supervisorctl_path }} restart edxapp:"
- name: restart workers
shell: "{{ supervisorctl_path }} restart edxapp_worker:"
- name: Deploy rabbitmq - name: Deploy rabbitmq
hosts: all hosts: all
sudo: True sudo: True
gather_facts: False # The rabbitmq role depends on
# ansible_default_ipv4 so
# gather_facts must be set to True
gather_facts: True
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles: roles:
- aws - aws
- rabbitmq - rabbitmq
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to set a hostname
# on a server
- name: Set hostname
hosts: all
sudo: True
gather_facts: False
tasks:
- fail: msg="you must pass a hostname_fqdn var into this play"
when: hostname_fqdn is not defined
- name: Set hostname
hostname: name={{ hostname_fqdn.split('.')[0] }}
- name: Update /etc/hosts
lineinfile: >
dest=/etc/hosts
regexp="^127\.0\.1\.1"
line="127.0.1.1{{'\t'}}{{ hostname_fqdn.split('.')[0] }}{{'\t'}}{{ hostname_fqdn }}{{'\t'}}localhost"
state=present
- hosts: all
sudo: true
vars:
# By default take instances in and out of the elb(s) they
# are attached to
# To skip elb operations use "-e elb_pre_post=false"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
tasks:
- shell: echo "test"
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
# This is a utility play to setup the db users on the edxapp db
#
# The mysql root user MUST be passed in as an extra var
#
# the environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding vpc stack
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root edxapp_db_root_pass=password"
#
- name: Update db users on the edxapp db
hosts: all
gather_facts: False
vars:
edxapp_db_root_user: 'None'
edxapp_db_root_pass: 'None'
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes otherwise it will overwrite the previous run.
# This means that both tasks will report changed on every ansible
# run
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors for when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
...@@ -2,17 +2,13 @@ ...@@ -2,17 +2,13 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- role: edxapp - role: edxapp
celery_worker: True celery_worker: True
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
...@@ -3,9 +3,26 @@ ...@@ -3,9 +3,26 @@
sudo: True sudo: True
gather_facts: True gather_facts: True
vars: vars:
ENABLE_DATADOG: False # By default take instances in and out of the elb(s) they
ENABLE_SPLUNKFORWARDER: False # are attached to
ENABLE_NEWRELIC: False # To skip elb operations use "-e elb_pre_post=fase"
elb_pre_post: true
# Number of instances to operate on at a time
serial_count: 1
serial: "{{ serial_count }}"
pre_tasks:
- action: ec2_facts
when: elb_pre_post
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Instance De-register
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1
state: absent
sudo: False
when: elb_pre_post
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -13,8 +30,21 @@ ...@@ -13,8 +30,21 @@
- xqueue - xqueue
- role: xqueue - role: xqueue
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post
- name: Register instance in the elb
local_action: ec2_elb_local_1.6.2
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
region: us-east-1
state: present
with_items: ec2_elbs
sudo: False
when: elb_pre_post
- name: Deploy xqueue-watcher
hosts: all
sudo: True
gather_facts: True
vars:
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- xqwatcher
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
\ No newline at end of file
...@@ -2,10 +2,6 @@ ...@@ -2,10 +2,6 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- role: nginx - role: nginx
...@@ -13,8 +9,8 @@ ...@@ -13,8 +9,8 @@
- xserver - xserver
- role: xserver - role: xserver
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
../ansible.cfg
\ No newline at end of file
*
!prod
!stage
!data
!.gitignore
This temp directory is created here so that we can make sure it doesn't
collide with other users doing ansible operations on the same machine,
or with concurrent installs to different environments, say to prod and stage.
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
host_key_checking = False
# These are environment-specific defaults
forks=10
transport=ssh
hostfile=./ec2.py
extra_vars='key=deployment region=us-west-1'
user=ubuntu
[ssh_connection]
# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
ssh_args= -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
scp_if_ssh=True
...@@ -3,6 +3,6 @@ regions=us-west-1 ...@@ -3,6 +3,6 @@ regions=us-west-1
regions_exclude = us-gov-west-1 regions_exclude = us-gov-west-1
destination_variable=public_dns_name destination_variable=public_dns_name
vpc_destination_variable=private_dns_name vpc_destination_variable=private_dns_name
cache_path=/tmp cache_path=ec2_cache/prod
cache_max_age=300 cache_max_age=300
route53=False route53=False
[ec2]
regions=us-west-1
regions_exclude = us-gov-west-1
destination_variable=public_dns_name
vpc_destination_variable=private_dns_name
cache_path=ec2_cache/stage
cache_max_age=300
route53=False
...@@ -13,10 +13,11 @@ ...@@ -13,10 +13,11 @@
openid_workaround: True openid_workaround: True
EDXAPP_LMS_NGINX_PORT: '80' EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master' edx_platform_version: 'master'
# Set to false if deployed behind another proxy/load balancer.
NGINX_SET_X_FORWARDED_HEADERS: True
# These should stay false for the public AMI # These should stay false for the public AMI
ENABLE_DATADOG: False COMMON_ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False COMMON_ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- role: nginx - role: nginx
nginx_sites: nginx_sites:
...@@ -38,11 +39,11 @@ ...@@ -38,11 +39,11 @@
- forum - forum
- { role: "xqueue", update_users: True } - { role: "xqueue", update_users: True }
- ora - ora
- discern - certs
- edx_ansible - edx_ansible
- role: datadog - role: datadog
when: ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
---
# Build a kibana/logstash/elasticsearch server for capturing and
# analyzing logs.
- name: Configure syslog server
hosts: all
sudo: yes
roles:
- common
- oraclejdk
- elasticsearch
- logstash
- kibana
- role: nginx
nginx_sites:
- kibana
---
AIDE_REPORT_EMAIL: 'root'
---
# install and configure aide IDS
#
- name: install aide
apt: pkg="aide" state="present"
- name: configure aide defaults
template: >
src=etc/default/aide.j2 dest=/etc/default/aide
owner=root group=root mode=0644
- name: open read permissions on aide logs
file: >
name="/var/log/aide"
recurse="yes"
state="directory"
mode="755"
- name: aide initial scan (this can take a long time)
command: >
aideinit -y -f
creates=/var/lib/aide/aide.db
sudo: yes
# These settings are mainly for the wrapper scripts around aide,
# such as aideinit and /etc/cron.daily/aide
# send reports to syslog
REPORT_URL=syslog:LOG_LOCAL1
# This is used as the host name in the AIDE reports that are sent out
# via e-mail. It defaults to the output of $(hostname --fqdn), but can
# be set to arbitrary values.
# FQDN=
# This is used as the subject for the e-mail reports.
# If your mail system only threads by subject, you might want to add
# some variable content here (for example $(date +%Y-%m-%d)).
MAILSUBJ="Daily AIDE report for $FQDN"
# This is the email address reports get mailed to
# default is root
# This variable is expanded before it is used, so you can use variables
# here. For example, MAILTO=$FQDN-aide@domain.example will send the
# report to host.name.example-aide@domain.example is the local FQDN is
# host.name.example.
MAILTO={{ AIDE_REPORT_EMAIL }}
# Set this to yes to suppress mailings when no changes have been
# detected during the AIDE run and no error output was given.
#QUIETREPORTS=no
# This parameter defines which AIDE command to run from the cron script.
# Sensible values are "update" and "check".
# Default is "check", ensuring backwards compatibility.
# Since "update" does not take any longer, it is recommended to use "update",
# so that a new database is created every day. The new database needs to be
# manually copied over the current one, though.
COMMAND=update
# This parameter defines what to do with a new database created by
# COMMAND=update. It is ignored if COMMAND!=update.
# no: Do not copy new database to old database. This is the default.
# yes: Copy new database to old database. This means that changes to the
# file system are only reported once. Possibly dangerous.
# ifnochange: Copy new database to old database if no changes have
# been reported. This is needed for ANF/ARF to work reliably.
COPYNEWDB=no
# Set this to yes to truncate the detailed changes part in the mail. The full
# output will still be listed in the log file.
TRUNCATEDETAILS=yes
# Set this to yes to suppress file changes by package and security
# updates from appearing in the e-mail report. Filtered file changes will
# still be listed in the log file. This option parses the /var/log/dpkg.log
# file and implies TRUNCATEDETAILS=yes
FILTERUPDATES=yes
# Set this to yes to suppress file changes by package installations
# from appearing in the e-mail report. Filtered file changes will still
# be listed in the log file. This option parses the /var/log/dpkg.log file and
# implies TRUNCATEDETAILS=yes.
FILTERINSTALLATIONS=yes
# This parameter defines how many lines to return per e-mail. Output longer
# than this value will be truncated in the e-mail sent out.
# Set value to "0" to disable this option.
LINES=1000
# This parameter gives a grep regular expression. If given, all output lines
# that _don't_ match the regexp are listed first in the script's output. This
# allows to easily remove noise from the AIDE report.
NOISE=""
# This parameter defines which options are given to aide in the daily
# cron job. The default is "-V4".
AIDEARGS=""
# These parameters control update-aide.conf and give the defaults for
# the --confdir, --confd and --settingsd options
# UPAC_CONFDIR="/etc/aide"
# UPAC_CONFD="$UPAC_CONFDIR/aide.conf.d"
# UPAC_SETTINGSD="$UPAC_CONFDIR/aide.settings.d"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role alton
#
#
# Vars are namespaced with the module name.  The upper-case
# ALTON_* vars are the public knobs meant to be overridden per
# deployment; they are handed to the app via alton_environment
# below.
#
# HipChat bot account credentials -- the defaults are
# placeholders and must be overridden for a real deployment.
ALTON_USERNAME: '1234_1234@chat.hipchat.com'
ALTON_PASSWORD: 'password'
ALTON_V2_TOKEN: 'HIPCHAT_V2_TOKEN'
# Room(s) the bot joins (exported as WILL_ROOMS).
ALTON_ROOMS: 'Hammer'
ALTON_NAME: 'Alton W. Daemon'
ALTON_HANDLE: 'alton'
# Backing redis instance (exported as WILL_REDIS_URL).
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null
# Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null
# Internal role vars below: installation layout under
# COMMON_APP_DIR and deployment sources.
alton_role_name: alton
alton_user: alton
alton_app_dir: "{{ COMMON_APP_DIR }}/alton"
alton_code_dir: "{{ alton_app_dir }}/alton"
alton_venvs_dir: "{{ alton_app_dir }}/venvs"
alton_venv_dir: "{{ alton_venvs_dir }}/alton"
alton_venv_bin: "{{ alton_venv_dir }}/bin"
alton_source_repo: "https://github.com/edx/alton.git"
alton_version: "HEAD"
alton_requirements_file: "{{ alton_code_dir }}/requirements.txt"
alton_supervisor_wrapper: "{{ alton_app_dir }}/alton-supervisor.sh"
# Environment exported to the bot process; the alton_env.j2
# template skips entries whose value is falsy (e.g. the !!null
# defaults above).
alton_environment:
  WILL_USERNAME: "{{ ALTON_USERNAME }}"
  WILL_PASSWORD: "{{ ALTON_PASSWORD }}"
  WILL_V2_TOKEN: "{{ ALTON_V2_TOKEN }}"
  WILL_ROOMS: "{{ ALTON_ROOMS }}"
  WILL_NAME: "{{ ALTON_NAME }}"
  WILL_HANDLE: "{{ ALTON_HANDLE }}"
  WILL_REDIS_URL: "{{ ALTON_REDIS_URL }}"
  WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
  WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
  JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
#
# OS packages
#
alton_debian_pkgs: []
alton_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role alton
#
# Overview: notification handler used by the alton tasks to
# bounce the service after code or configuration changes.
#
#
# Restart the alton program through the deployment's local
# supervisord; skipped entirely when disable_edx_services is set.
- name: restart alton
  supervisorctl_local: >
    name=alton
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=restarted
  when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role alton
#
# Example:
#
# dependencies:
#   - {
#     role: my_role
#     my_role_var0: "foo"
#     my_role_var1: "bar"
#   }
# alton runs under supervisor and uses redis as its backing
# store (ALTON_REDIS_URL in defaults), so both roles are applied
# first.
dependencies:
  - supervisor
  - redis
# Deploy tasks for the alton role: fetch the code, install its
# python requirements into the role's virtualenv, and wire the
# bot into supervisor.
- name: checkout the code
  git: >
    dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
    version="{{ alton_version }}" accept_hostkey=yes
  sudo_user: "{{ alton_user }}"
  notify: restart alton
# Requirements are installed from the configured pypi mirror.
- name: install the requirements
  pip: >
    requirements="{{ alton_requirements_file }}"
    virtualenv="{{ alton_venv_dir }}"
    state=present
    extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
  sudo_user: "{{ alton_user }}"
  notify: restart alton
# Shell script supervisor uses to launch the bot.
- name: create the supervisor wrapper
  template: >
    src="{{ alton_supervisor_wrapper|basename }}.j2"
    dest="{{ alton_supervisor_wrapper }}"
    mode=0755
  sudo_user: "{{ alton_user }}"
  notify: restart alton
# Drop the program config into supervisor's "available" dir...
- name: create a supervisor config
  template: >
    src=alton.conf.j2 dest="{{ supervisor_available_dir }}/alton.conf"
    owner="{{ supervisor_user }}"
    group="{{ supervisor_user }}"
  sudo_user: "{{ supervisor_user }}"
  notify: restart alton
# ...and enable it by symlinking into the active config dir.
- name: enable the supervisor config
  file: >
    src="{{ supervisor_available_dir }}/alton.conf"
    dest="{{ supervisor_cfg_dir }}/alton.conf"
    state=link
    force=yes
    mode=0644
  sudo_user: "{{ supervisor_user }}"
  when: not disable_edx_services
  notify: restart alton
# Have supervisord re-read its config; only reports "changed"
# when supervisorctl actually printed output.
- name: update supervisor configuration
  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
  register: supervisor_update
  changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
  when: not disable_edx_services
- name: ensure alton is started
  supervisorctl_local: >
    name=alton
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=started
  when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role alton
#
# Overview: creates the alton service account and directory
# layout, writes the environment file consumed by the supervisor
# wrapper, then hands off to deploy.yml for code checkout and
# service registration.
#
# Dependencies: supervisor and redis (declared in this role's
# meta/main.yml).
#
#
# Example play:
#
#   roles:
#     - alton
#
# System account the bot runs as: no login shell, and no home dir
# creation (alton_app_dir is created explicitly below).
- name: create application user
  user: >
    name="{{ alton_user }}" home="{{ alton_app_dir }}"
    createhome=no shell=/bin/false
- name: create alton user dirs
  file: >
    path="{{ item }}" state=directory
    owner="{{ alton_user }}" group="{{ common_web_group }}"
  with_items:
    - "{{ alton_app_dir }}"
    - "{{ alton_venvs_dir }}"
# Environment file sourced by the supervisor wrapper script.
- name: setup the alton env
  template: >
    src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
    owner="{{ alton_user }}" group="{{ common_web_user }}"
    mode=0644
  notify: restart alton
- include: deploy.yml tags=deploy
#!/bin/bash
# Supervisor wrapper for the alton bot: loads the environment
# exported by alton_env and runs the bot from its virtualenv.

# Abort rather than starting the bot with a partial environment
# if sourcing the env file or changing directory fails.
set -e

source {{ alton_app_dir }}/alton_env
cd {{ alton_code_dir }}

# exec replaces this shell so that signals sent by supervisor
# (e.g. the configured stop signal) reach the python process
# directly instead of the wrapper.
exec {{ alton_venv_bin }}/python run_alton.py
; Supervisor program definition for the alton bot; launched via
; the wrapper script so its environment file is sourced first.
[program:alton]
command={{ alton_supervisor_wrapper }}
priority=999
user={{ common_web_user }}
; The trailing "s" in %(program_name)s is required: supervisor
; expands these paths with python %-interpolation, and without a
; conversion specifier the "s" of "stdout"/"stderr" is consumed
; as the format character, yielding mangled filenames such as
; "altontdout.log".
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
; Stop/kill the whole process group so the wrapper's children
; are not orphaned.
killasgroup=true
stopasgroup=true
stopsignal=QUIT
# {{ ansible_managed }}
# Environment file for the alton bot, sourced by its supervisor
# wrapper.  Emits one "export NAME=value" line per entry in
# alton_environment; entries with a falsy value (e.g. the !!null
# defaults) are skipped so empty variables are never exported.
{% for name,value in alton_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
- name: checkout code - name: checkout code
git: git:
dest={{ as_code_dir }} repo={{ as_source_repo }} dest={{ as_code_dir }} repo={{ as_source_repo }}
accept_hostkey=yes
version={{ as_version }} force=true version={{ as_version }} force=true
environment: environment:
GIT_SSH: $as_git_ssh GIT_SSH: $as_git_ssh
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
- name: checkout code - name: checkout code
git: git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }} dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
accept_hostkey=yes
version={{ analytics_version }} force=true version={{ analytics_version }} force=true
environment: environment:
GIT_SSH: $analytics_git_ssh GIT_SSH: $analytics_git_ssh
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
--- ---
- name: Installs apache and mod_wsgi from apt - name: Installs apache and mod_wsgi from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes apt: pkg={{ item }} install_recommends=no state=present update_cache=yes
with_items: with_items:
- apache2 - apache2
- libapache2-mod-wsgi - libapache2-mod-wsgi
......
WSGIPythonHome {{ edxapp_venv_dir }} WSGIPythonHome {{ edxapp_venv_dir }}
WSGIRestrictEmbedded On WSGIRestrictEmbedded On
<VirtualHost *:{{apache_port}}> <VirtualHost *:{{ apache_port }}>
ServerName https://{{ lms_env_config.SITE_NAME }} ServerName https://{{ lms_env_config.SITE_NAME }}
ServerAlias *.{{ lms_env_config.SITE_NAME }} ServerAlias *.{{ lms_env_config.SITE_NAME }}
UseCanonicalName On UseCanonicalName On
Alias /static/ /opt/wwc/staticfiles/ Alias /static/ /opt/wwc/staticfiles/
<Directory /opt/wwc/staticfiles> <Directory /opt/wwc/staticfiles>
Order deny,allow Order deny,allow
Allow from all Allow from all
</Directory> </Directory>
SetEnv SERVICE_VARIANT lms SetEnv SERVICE_VARIANT lms
WSGIScriptAlias / {{ edxapp_code_dir }}/lms/wsgi_apache_lms.py WSGIScriptAlias / {{ edxapp_code_dir }}/lms/wsgi_apache_lms.py
<Directory {{ edxapp_code_dir }}/lms> <Directory {{ edxapp_code_dir }}/lms>
...@@ -42,10 +42,10 @@ WSGIRestrictEmbedded On ...@@ -42,10 +42,10 @@ WSGIRestrictEmbedded On
WSGIDaemonProcess lms user=www-data group=adm processes=1 python-path={{ edxapp_code_dir }}:{{ edxapp_venv_dir }}/lib/python2.7/site-packages display-name=%{GROUP} WSGIDaemonProcess lms user=www-data group=adm processes=1 python-path={{ edxapp_code_dir }}:{{ edxapp_venv_dir }}/lib/python2.7/site-packages display-name=%{GROUP}
WSGIProcessGroup lms WSGIProcessGroup lms
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}
ErrorLog ${APACHE_LOG_DIR}/apache-edx-error.log ErrorLog ${APACHE_LOG_DIR}/apache-edx-error.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %D" apache-edx LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\" %D" apache-edx
CustomLog ${APACHE_LOG_DIR}/apache-edx-access.log apache-edx CustomLog {{ APACHE_LOG_DIR }}/apache-edx-access.log apache-edx
</VirtualHost> </VirtualHost>
NameVirtualHost *:{{apache_port}} NameVirtualHost *:{{ apache_port }}
Listen {{apache_port}} Listen {{ apache_port }}
...@@ -56,6 +56,7 @@ aws_debian_pkgs: ...@@ -56,6 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs: aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz - https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli - awscli
- boto==2.20.1
aws_redhat_pkgs: [] aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1 aws_s3cmd_version: s3cmd-1.5.0-beta1
......
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
- db_host: "{{ ORA_MYSQL_HOST }}" - db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}" db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh script_name: ora-rds.sh
when: COMMON_MYSQL_READ_ONLY_PASS when: COMMON_MYSQL_READ_ONLY_PASS is defined
# These templates rely on there being a global # These templates rely on there being a global
# read_only mongo user, you must override the default # read_only mongo user, you must override the default
...@@ -67,4 +67,4 @@ ...@@ -67,4 +67,4 @@
db_name: "{{ FORUM_MONGO_DATABASE }}" db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}" db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh script_name: forum-mongo.sh
when: COMMON_MONGO_READ_ONLY_PASS when: COMMON_MONGO_READ_ONLY_PASS is defined
...@@ -25,18 +25,39 @@ CERTS_AWS_KEY: "" ...@@ -25,18 +25,39 @@ CERTS_AWS_KEY: ""
CERTS_AWS_ID: "" CERTS_AWS_ID: ""
# GPG key ID, defaults to the dummy key # GPG key ID, defaults to the dummy key
CERTS_KEY_ID: "FEF8D954" CERTS_KEY_ID: "FEF8D954"
# Path to git identity file for pull access to # Contents of the identity for a private
# the edX certificates repo - REQUIRED # repo. Leave set to "none" if using the public
# Example - {{ secure_dir }}/files/git-identity # certificate repo
CERTS_GIT_IDENTITY: !!null CERTS_GIT_IDENTITY: "none"
# Path to public and private gpg key for signing # Path to public and private gpg key for signing
# the edX certificate. Default is a dummy key # the edX certificate. Default is a dummy key
CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt" CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt"
# This defaults to the public certificates repo which is
# used for open-edx
CERTS_REPO: "https://github.com/edx/read-only-certificate-code"
CERTS_NGINX_PORT: 18090
CERTS_WEB_ROOT: "{{ certs_data_dir }}/www-data"
CERTS_URL: "http://localhost:{{ CERTS_NGINX_PORT }}"
CERTS_DOWNLOAD_URL: "http://localhost:{{ CERTS_NGINX_PORT }}"
CERTS_VERIFY_URL: "http://localhost:{{ CERTS_NGINX_PORT }}"
# Set to false if using s3 or if you don't want certificates
# copied to the web root
CERTS_COPY_TO_WEB_ROOT: true
CERTS_S3_UPLOAD: false
# Can be set to a different repo for private
# templates, fonts, etc.
CERTS_TEMPLATE_DATA_DIR: 'template_data'
# this is the trust export, output of
# gpg --export-ownertrust
CERTS_OWNER_TRUST: "A9F9EAD11A0A6E7E5A037BDC044089B6FEF8D954:6:\n"
########## Internal role vars below ########## Internal role vars below
certs_user: certs certs_user: certs
certs_app_dir: "{{ COMMON_APP_DIR }}/certs" certs_app_dir: "{{ COMMON_APP_DIR }}/certs"
certs_data_dir: "{{ COMMON_DATA_DIR }}/certs"
certs_code_dir: "{{ certs_app_dir }}/certificates" certs_code_dir: "{{ certs_app_dir }}/certificates"
certs_venvs_dir: "{{ certs_app_dir }}/venvs" certs_venvs_dir: "{{ certs_app_dir }}/venvs"
certs_venv_dir: "{{ certs_venvs_dir }}/certs" certs_venv_dir: "{{ certs_venvs_dir }}/certs"
...@@ -44,7 +65,6 @@ certs_venv_bin: "{{ certs_venv_dir }}/bin" ...@@ -44,7 +65,6 @@ certs_venv_bin: "{{ certs_venv_dir }}/bin"
certs_git_ssh: /tmp/git_ssh.sh certs_git_ssh: /tmp/git_ssh.sh
certs_git_identity: "{{ certs_app_dir }}/certs-git-identity" certs_git_identity: "{{ certs_app_dir }}/certs-git-identity"
certs_requirements_file: "{{ certs_code_dir }}/requirements.txt" certs_requirements_file: "{{ certs_code_dir }}/requirements.txt"
certs_repo: "git@github.com:/edx/certificates"
certs_version: 'master' certs_version: 'master'
certs_gpg_dir: "{{ certs_app_dir }}/gnupg" certs_gpg_dir: "{{ certs_app_dir }}/gnupg"
certs_env_config: certs_env_config:
...@@ -57,6 +77,13 @@ certs_env_config: ...@@ -57,6 +77,13 @@ certs_env_config:
CERT_KEY_ID: $CERTS_KEY_ID CERT_KEY_ID: $CERTS_KEY_ID
LOGGING_ENV: "" LOGGING_ENV: ""
CERT_GPG_DIR: $certs_gpg_dir CERT_GPG_DIR: $certs_gpg_dir
CERT_URL: $CERTS_URL
CERT_DOWNLOAD_URL: $CERTS_DOWNLOAD_URL
CERT_WEB_ROOT: $CERTS_WEB_ROOT
COPY_TO_WEB_ROOT: $CERTS_COPY_TO_WEB_ROOT
S3_UPLOAD: $CERTS_S3_UPLOAD
CERT_VERIFY_URL: $CERTS_VERIFY_URL
TEMPLATE_DATA_DIR: $CERTS_TEMPLATE_DATA_DIR
certs_auth_config: certs_auth_config:
QUEUE_USER: $CERTS_QUEUE_USER QUEUE_USER: $CERTS_QUEUE_USER
......
A9F9EAD11A0A6E7E5A037BDC044089B6FEF8D954:6:
...@@ -36,14 +36,19 @@ ...@@ -36,14 +36,19 @@
owner={{ certs_user }} mode=750 owner={{ certs_user }} mode=750
notify: restart certs notify: restart certs
# This key is only needed if you are pulling down a private
# certificates repo
- name: install read-only ssh key for the certs repo - name: install read-only ssh key for the certs repo
copy: > copy: >
content="{{ CERTS_GIT_IDENTITY }}" dest={{ certs_git_identity }} content="{{ CERTS_GIT_IDENTITY }}" dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600 force=yes owner={{ certs_user }} mode=0600
when: CERTS_GIT_IDENTITY != "none"
notify: restart certs notify: restart certs
- name: checkout certificates repo into {{ certs_code_dir }} - name: checkout certificates repo into {{ certs_code_dir }}
git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }} git: >
dest={{ certs_code_dir }} repo={{ CERTS_REPO }} version={{ certs_version }}
accept_hostkey=yes
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
environment: environment:
GIT_SSH: "{{ certs_git_ssh }}" GIT_SSH: "{{ certs_git_ssh }}"
...@@ -51,10 +56,13 @@ ...@@ -51,10 +56,13 @@
- name: remove read-only ssh key for the certs repo - name: remove read-only ssh key for the certs repo
file: path={{ certs_git_identity }} state=absent file: path={{ certs_git_identity }} state=absent
when: CERTS_GIT_IDENTITY != "none"
notify: restart certs notify: restart certs
- name : install python requirements - name : install python requirements
pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present pip: >
requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
notify: restart certs notify: restart certs
...@@ -67,7 +75,7 @@ ...@@ -67,7 +75,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
sudo_user: "{{ supervisor_service_user }}" sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services when: not disable_edx_services
- name: ensure certs has started - name: ensure certs has started
......
...@@ -31,10 +31,6 @@ ...@@ -31,10 +31,6 @@
# - supervisor # - supervisor
# - certs # - certs
# #
- name: Checking to see if git identity is set
fail: msg="You must set CERTS_GIT_IDENTITY var for this role!"
when: not CERTS_GIT_IDENTITY
- name: create application user - name: create application user
user: > user: >
name="{{ certs_user }}" name="{{ certs_user }}"
...@@ -43,7 +39,7 @@ ...@@ -43,7 +39,7 @@
shell=/bin/false shell=/bin/false
notify: restart certs notify: restart certs
- name: create certs app and data dirs - name: create certs app dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -52,7 +48,20 @@ ...@@ -52,7 +48,20 @@
notify: restart certs notify: restart certs
with_items: with_items:
- "{{ certs_app_dir }}" - "{{ certs_app_dir }}"
# needed for the ansible 1.5 git module
- "{{ certs_app_dir }}/.ssh"
- "{{ certs_venvs_dir }}" - "{{ certs_venvs_dir }}"
- "{{ certs_data_dir }}"
# The certs web root must be owned
# by the web user so the certs service
# can write files there.
- name: create certs web root
file: >
path="{{ CERTS_WEB_ROOT }}"
state=directory
owner="{{ common_web_group }}"
group="{{ certs_user }}"
- name: create certs gpg dir - name: create certs gpg dir
file: > file: >
...@@ -69,6 +78,12 @@ ...@@ -69,6 +78,12 @@
notify: restart certs notify: restart certs
register: certs_gpg_key register: certs_gpg_key
- name: copy the pgp trust export
copy: >
content="{{ CERTS_OWNER_TRUST }}"
dest={{ certs_app_dir }}/trust.export
owner={{ common_web_user }} mode=0600
notify: restart certs
- name: load the gpg key - name: load the gpg key
shell: > shell: >
...@@ -77,4 +92,11 @@ ...@@ -77,4 +92,11 @@
when: certs_gpg_key.changed when: certs_gpg_key.changed
notify: restart certs notify: restart certs
- name: import the trust export
shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import-ownertrust {{ certs_app_dir }}/trust.export
sudo_user: "{{ common_web_user }}"
when: certs_gpg_key.changed
notify: restart certs
- include: deploy.yml tags=deploy - include: deploy.yml tags=deploy
...@@ -3,6 +3,11 @@ ...@@ -3,6 +3,11 @@
# to change the base directory # to change the base directory
# where edX is installed # where edX is installed
# Set global htpasswd credentials
COMMON_ENABLE_BASIC_AUTH: True
COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx
COMMON_BASE_DIR: /edx COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var" COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app" COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
...@@ -18,6 +23,7 @@ COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc" ...@@ -18,6 +23,7 @@ COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENVIRONMENT: 'default_env' COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment' COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple' COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
# do not include http/https # do not include http/https
COMMON_GIT_MIRROR: 'github.com' COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname # override this var to set a different hostname
...@@ -30,6 +36,7 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false ...@@ -30,6 +36,7 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false
COMMON_MOTD_TEMPLATE: "motd.tail.j2" COMMON_MOTD_TEMPLATE: "motd.tail.j2"
COMMON_SSH_PASSWORD_AUTH: "no"
# These are three maintenance accounts across all databases # These are three maintenance accounts across all databases
# the read only user is is granted select privs on all dbs # the read only user is is granted select privs on all dbs
...@@ -45,7 +52,9 @@ COMMON_MYSQL_MIGRATE_PASS: !!null ...@@ -45,7 +52,9 @@ COMMON_MYSQL_MIGRATE_PASS: !!null
COMMON_MONGO_READ_ONLY_USER: 'read_only' COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False
common_debian_pkgs: common_debian_pkgs:
- ntp - ntp
- ack-grep - ack-grep
...@@ -65,8 +74,9 @@ common_debian_pkgs: ...@@ -65,8 +74,9 @@ common_debian_pkgs:
- curl - curl
common_pip_pkgs: common_pip_pkgs:
- pip==1.5.4 - pip==1.5.6
- virtualenv==1.11.4 - setuptools==3.6
- virtualenv==1.11.6
- virtualenvwrapper - virtualenvwrapper
common_web_user: www-data common_web_user: www-data
......
...@@ -15,6 +15,12 @@ ...@@ -15,6 +15,12 @@
- "{{ COMMON_BIN_DIR }}" - "{{ COMMON_BIN_DIR }}"
- "{{ COMMON_CFG_DIR }}" - "{{ COMMON_CFG_DIR }}"
# Determine if machine is provisioned via vagrant
# Some EC2-specific steps would need to be skipped
- name: check if instance is vagrant
stat: path=/home/vagrant
register: vagrant_home_dir
# Need to install python-pycurl to use Ansible's apt_repository module # Need to install python-pycurl to use Ansible's apt_repository module
- name: Install python-pycurl - name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes apt: pkg=python-pycurl state=present update_cache=yes
...@@ -103,13 +109,15 @@ ...@@ -103,13 +109,15 @@
# Remove some of the default motd display on ubuntu # Remove some of the default motd display on ubuntu
# and add a custom motd. These do not require an # and add a custom motd. These do not require an
# ssh restart # ssh restart
# Only needed for EC2 instances.
- name: update the ssh motd on Ubuntu - name: update the ssh motd on Ubuntu
file: > file: >
mode=0644 mode=0644
path={{ item }} path={{ item }}
when: vagrant_home_dir.stat.exists == false
with_items: with_items:
- "/etc/update-motd.d/10-help-text" - "/etc/update-motd.d/10-help-text"
- "/usr/share/landscape/50-landscape-sysinfo" - "/usr/share/landscape/landscape-sysinfo.wrapper"
- "/etc/update-motd.d/51-cloudguest" - "/etc/update-motd.d/51-cloudguest"
- "/etc/update-motd.d/91-release-upgrade" - "/etc/update-motd.d/91-release-upgrade"
......
...@@ -51,7 +51,7 @@ PermitEmptyPasswords no ...@@ -51,7 +51,7 @@ PermitEmptyPasswords no
ChallengeResponseAuthentication no ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords # Change to no to disable tunnelled clear text passwords
PasswordAuthentication no PasswordAuthentication {{ COMMON_SSH_PASSWORD_AUTH }}
# Kerberos options # Kerberos options
#KerberosAuthentication no #KerberosAuthentication no
......
--- ---
DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_api_key: "PUT_YOUR_API_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52" datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_debian_pkgs: datadog_debian_pkgs:
- apparmor-utils - apparmor-utils
- build-essential - build-essential
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
lineinfile: > lineinfile: >
dest="/etc/dd-agent/datadog.conf" dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*" regexp="^api_key:.*"
line="api_key:{{ datadog_api_key }}" line="api_key:{{ DATADOG_API_KEY }}"
notify: notify:
- restart the datadog service - restart the datadog service
tags: tags:
......
--- ---
- name: check out the demo course - name: check out the demo course
git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }} git: >
dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
accept_hostkey=yes
sudo_user: "{{ demo_edxapp_user }}" sudo_user: "{{ demo_edxapp_user }}"
register: demo_checkout register: demo_checkout
......
...@@ -86,7 +86,7 @@ ...@@ -86,7 +86,7 @@
- name: create a symlink for venv supervisor - name: create a symlink for venv supervisor
file: > file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl" src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi dest={{ COMMON_BIN_DIR }}/supervisorctl.devpi
state=link state=link
- name: create a symlink for supervisor config - name: create a symlink for supervisor config
...@@ -103,7 +103,7 @@ ...@@ -103,7 +103,7 @@
- name: update devpi supervisor configuration - name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update" shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
- name: ensure devpi is started - name: ensure devpi is started
supervisorctl_local: > supervisorctl_local: >
......
...@@ -33,13 +33,17 @@ ...@@ -33,13 +33,17 @@
- restart discern - restart discern
- name: git checkout discern repo into discern_code_dir - name: git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }} git: >
dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
accept_hostkey=yes
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
- name: git checkout ease repo into discern_ease_code_dir - name: git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }} git: >
dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
accept_hostkey=yes
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
...@@ -48,7 +52,7 @@ ...@@ -48,7 +52,7 @@
- name : install python pre-requirements for discern and ease - name : install python pre-requirements for discern and ease
pip: > pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
...@@ -119,7 +123,7 @@ ...@@ -119,7 +123,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
sudo_user: "{{ supervisor_service_user }}" sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services when: not disable_edx_services
- name: ensure discern, discern_celery has started - name: ensure discern, discern_celery has started
......
...@@ -29,6 +29,7 @@ edx_ansible_debian_pkgs: ...@@ -29,6 +29,7 @@ edx_ansible_debian_pkgs:
- libxml2-dev - libxml2-dev
- libxslt1-dev - libxslt1-dev
- curl - curl
- python-yaml
edx_ansible_app_dir: "{{ COMMON_APP_DIR }}/edx_ansible" edx_ansible_app_dir: "{{ COMMON_APP_DIR }}/edx_ansible"
edx_ansible_code_dir: "{{ edx_ansible_app_dir }}/edx_ansible" edx_ansible_code_dir: "{{ edx_ansible_app_dir }}/edx_ansible"
edx_ansible_data_dir: "{{ COMMON_DATA_DIR }}/edx_ansible" edx_ansible_data_dir: "{{ COMMON_DATA_DIR }}/edx_ansible"
......
--- ---
- name: git checkout edx_ansible repo into edx_ansible_code_dir - name: git checkout edx_ansible repo into edx_ansible_code_dir
git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }} git: >
dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
accept_hostkey=yes
sudo_user: "{{ edx_ansible_user }}" sudo_user: "{{ edx_ansible_user }}"
- name : install edx_ansible venv requirements - name : install edx_ansible venv requirements
...@@ -31,7 +33,7 @@ ...@@ -31,7 +33,7 @@
src={{ edx_ansible_code_dir }}/playbooks src={{ edx_ansible_code_dir }}/playbooks
dest={{ COMMON_CFG_DIR }}/playbooks dest={{ COMMON_CFG_DIR }}/playbooks
state=link state=link
- name: dump all vars to yaml - name: dump all vars to yaml
template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600 template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
when: EDX_ANSIBLE_DUMP_VARS when: EDX_ANSIBLE_DUMP_VARS
......
...@@ -12,7 +12,7 @@ IFS="," ...@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run -v add verbosity to edx_ansible run
-h this -h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, discern, edx-ora, configuration <repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code
<version> - can be a commit or tag <version> - can be a commit or tag
EO EO
...@@ -44,11 +44,11 @@ edx_ansible_cmd="{{ edx_ansible_venv_bin }}/ansible-playbook -i localhost, -c lo ...@@ -44,11 +44,11 @@ edx_ansible_cmd="{{ edx_ansible_venv_bin }}/ansible-playbook -i localhost, -c lo
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'" repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'" repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'" repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'" repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'" repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'" repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'" repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
if [[ -z $1 || -z $2 ]]; then if [[ -z $1 || -z $2 ]]; then
......
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
with_items: with_items:
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}" - "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs" - "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/data"
- name: install a bunch of system packages on which edx_service relies - name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present apt: pkg={{ item }} state=present
......
...@@ -6,6 +6,7 @@ dependencies: ...@@ -6,6 +6,7 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}" rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}" rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi - devpi
- nltk
- role: user - role: user
user_info: user_info:
- name: "{{ EDXAPP_AUTOMATOR_NAME }}" - name: "{{ EDXAPP_AUTOMATOR_NAME }}"
......
...@@ -28,7 +28,9 @@ ...@@ -28,7 +28,9 @@
# Do A Checkout # Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}} - name: checkout edx-platform repo into {{edxapp_code_dir}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}} git: >
dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
accept_hostkey=yes
register: chkout register: chkout
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: environment:
...@@ -45,7 +47,9 @@ ...@@ -45,7 +47,9 @@
- "restart edxapp_workers" - "restart edxapp_workers"
- name: checkout theme - name: checkout theme
git: dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}} git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
accept_hostkey=yes
when: edxapp_theme_name != '' when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: environment:
...@@ -104,7 +108,7 @@ ...@@ -104,7 +108,7 @@
# Set the npm registry # Set the npm registry
- name: Set the npm registry - name: Set the npm registry
shell: shell:
npm config set registry 'http://registry.npmjs.org' npm config set registry '{{ COMMON_NPM_MIRROR_URL }}'
creates="{{ edxapp_app_dir }}/.npmrc" creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
...@@ -128,7 +132,7 @@ ...@@ -128,7 +132,7 @@
requirements="{{pre_requirements_file}}" requirements="{{pre_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}" virtualenv="{{edxapp_venv_dir}}"
state=present state=present
extra_args="-i {{ edxapp_pypi_local_mirror }}" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
notify: notify:
...@@ -142,7 +146,7 @@ ...@@ -142,7 +146,7 @@
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: > shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ base_requirements_file }} {{ edxapp_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ base_requirements_file }}
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
...@@ -157,7 +161,7 @@ ...@@ -157,7 +161,7 @@
requirements="{{post_requirements_file}}" requirements="{{post_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}" virtualenv="{{edxapp_venv_dir}}"
state=present state=present
extra_args="-i {{ edxapp_pypi_local_mirror }}" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
notify: notify:
...@@ -171,7 +175,7 @@ ...@@ -171,7 +175,7 @@
requirements="{{paver_requirements_file}}" requirements="{{paver_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}" virtualenv="{{edxapp_venv_dir}}"
state=present state=present
extra_args="-i {{ edxapp_pypi_local_mirror }}" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}" environment: "{{ edxapp_environment }}"
notify: notify:
...@@ -185,10 +189,9 @@ ...@@ -185,10 +189,9 @@
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: > shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} {{ edxapp_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
with_items: with_items:
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}" - "{{ github_requirements_file }}"
- "{{ local_requirements_file }}" - "{{ local_requirements_file }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
...@@ -203,7 +206,7 @@ ...@@ -203,7 +206,7 @@
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: > shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} {{ edxapp_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
with_items: with_items:
- "{{ private_requirements_file }}" - "{{ private_requirements_file }}"
...@@ -215,7 +218,6 @@ ...@@ -215,7 +218,6 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# If using CAS and you have a function for mapping attributes, install # If using CAS and you have a function for mapping attributes, install
# the module here. The next few tasks set up the python code sandbox # the module here. The next few tasks set up the python code sandbox
- name: install CAS attribute module - name: install CAS attribute module
...@@ -223,7 +225,7 @@ ...@@ -223,7 +225,7 @@
name="{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}" name="{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}"
virtualenv="{{edxapp_venv_dir}}" virtualenv="{{edxapp_venv_dir}}"
state=present state=present
extra_args="-i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
when: EDXAPP_CAS_ATTRIBUTE_PACKAGE|length > 0 when: EDXAPP_CAS_ATTRIBUTE_PACKAGE|length > 0
notify: "restart edxapp" notify: "restart edxapp"
...@@ -234,7 +236,7 @@ ...@@ -234,7 +236,7 @@
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly # requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment. # installs everything into that virtual environment.
shell: > shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} {{ edxapp_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
with_items: with_items:
- "{{ sandbox_base_requirements }}" - "{{ sandbox_base_requirements }}"
...@@ -246,6 +248,33 @@ ...@@ -246,6 +248,33 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox # The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions # need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
...@@ -261,7 +290,7 @@ ...@@ -261,7 +290,7 @@
requirements="{{sandbox_base_requirements}}" requirements="{{sandbox_base_requirements}}"
virtualenv="{{edxapp_sandbox_venv_dir}}" virtualenv="{{edxapp_sandbox_venv_dir}}"
state=present state=present
extra_args="-i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_sandbox_user }}" sudo_user: "{{ edxapp_sandbox_user }}"
when: EDXAPP_PYTHON_SANDBOX when: EDXAPP_PYTHON_SANDBOX
notify: notify:
...@@ -272,7 +301,7 @@ ...@@ -272,7 +301,7 @@
- name: code sandbox | Install sandbox requirements into sandbox venv - name: code sandbox | Install sandbox requirements into sandbox venv
shell: > shell: >
{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }} {{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
with_items: with_items:
- "{{ sandbox_local_requirements }}" - "{{ sandbox_local_requirements }}"
...@@ -280,7 +309,7 @@ ...@@ -280,7 +309,7 @@
sudo_user: "{{ edxapp_sandbox_user }}" sudo_user: "{{ edxapp_sandbox_user }}"
when: EDXAPP_PYTHON_SANDBOX when: EDXAPP_PYTHON_SANDBOX
register: sandbox_install_output register: sandbox_install_output
changed_when: "'installed' in sandbox_install_output" changed_when: sandbox_install_output.stdout is defined and 'installed' in sandbox_install_output.stdout
notify: notify:
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
...@@ -330,6 +359,9 @@ ...@@ -330,6 +359,9 @@
# service variants configured, runs # service variants configured, runs
# gather_assets and db migrations # gather_assets and db migrations
- include: service_variant_config.yml - include: service_variant_config.yml
tags:
- service_variant_config
- deploy
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
...@@ -340,7 +372,7 @@ ...@@ -340,7 +372,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
sudo_user: "{{ supervisor_service_user }}" sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout is defined and supervisor_update.stdout != ""
when: not disable_edx_services when: not disable_edx_services
- name: ensure edxapp has started - name: ensure edxapp has started
......
...@@ -19,10 +19,13 @@ ...@@ -19,10 +19,13 @@
- "restart edxapp_workers" - "restart edxapp_workers"
with_items: with_items:
- "{{ edxapp_app_dir }}" - "{{ edxapp_app_dir }}"
# needed for the ansible 1.5 git module
- "{{ edxapp_app_dir }}/.ssh"
- "{{ edxapp_data_dir }}" - "{{ edxapp_data_dir }}"
- "{{ edxapp_venvs_dir }}" - "{{ edxapp_venvs_dir }}"
- "{{ edxapp_theme_dir }}" - "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}" - "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- name: create edxapp log dir - name: create edxapp log dir
file: > file: >
......
- name: clone the xml course repo
git: >
repo="{{ item.repo_url }}"
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
version="{{ item.version }}"
accept_hostkey=True
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
with_items: EDXAPP_XML_COURSES
- name: update course.xml
template: >
src=course.xml.j2
dest="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/course.xml"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
- name: make symlinks for the static data
shell: >
executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }}
else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }}
fi
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: make symlinks so code works
file: >
src="{{ edxapp_course_data_dir }}/{{ item.repo_name }}"
dest="{{ edxapp_course_data_dir }}/{{ item.course }}"
state=link
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: import courses with nostatic flag
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import --nostatic {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "no static import"
- name: import courses including static data
shell: >
{{ edxapp_venv_bin }}/python manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ item.repo_name }}
chdir="{{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete courses that were fully imported
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode=put
overwrite=True
src="/tmp/static_course_content.tar.gz"
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
- name: remove archive from disk
file: path="/tmp/static_course_content.tar.gz" state=absent
when: EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python {{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill {{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill {{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
[program:cms] [program:cms]
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = edxapp_venv_dir + '/bin/newrelic-admin run-program ' + edxapp_venv_dir + '/bin/gunicorn' %}
{% else %}
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %}
{% if ansible_processor|length > 0 %} {% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %} {% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %} {% endif %}
user={{ common_web_user }} user={{ common_web_user }}
directory={{ edxapp_code_dir }} directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms" environment={% if COMMON_ENABLE_NEWRELIC %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true killasgroup=true
......
#include <tunables/global> #include <tunables/global>
{{ edxapp_sandbox_venv_dir }}/bin/python flags=(complain) { {{ edxapp_sandbox_venv_dir }}/bin/python {
#include <abstractions/base> #include <abstractions/base>
{{ edxapp_sandbox_venv_dir }}/** mr, {{ edxapp_sandbox_venv_dir }}/** mr,
{{ edxapp_code_dir }}/common/lib/sandbox-packages/** r, {{ edxapp_code_dir }}/common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix, /tmp/codejail-*/ rix,
/tmp/codejail-*/** rix, /tmp/codejail-*/** wrix,
# #
# Whitelist particiclar shared objects from the system # Whitelist particiclar shared objects from the system
...@@ -19,10 +19,11 @@ ...@@ -19,10 +19,11 @@
/usr/lib/python2.7/lib-dynload/_csv.so mr, /usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr, /usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr, /usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
# #
# Allow access to selections from /proc # Allow access to selections from /proc
# #
/proc/*/mounts r, /proc/*/mounts r,
} }
<course org="{{ item.org }}" course="{{ item.course }}" url_name="{{ item.run }}" />
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{{ lms_preview_auth_config | to_nice_json }}
[program:lms-preview]
{% if ansible_processor|length > 0 %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms-preview"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
stopasgroup=true
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment