Commit 0ba07131 by Feanil Patel

Merge pull request #923 from edx/feanil/release_himbasha

Feanil/release himbasha
parents 7bc2c211 4e1593eb
# Travis CI configuration file for running tests
#
# Validates all YAML/JSON in the repo and syntax-checks the ansible
# playbooks on every build. (Indentation restored: YAML block scalars
# under "- |" must be indented relative to the sequence indicator.)
language: python
python:
  - "2.7"
install:
  # npm is needed for js-yaml; python-demjson provides jsonlint.
  - "sudo apt-get install -y npm python-demjson"
  - "pip install --allow-all-external -r requirements.txt"
  - "pip install --allow-all-external demjson"
  - "sudo npm install -g js-yaml"
script:
  # Fail the build if any YAML file in the repo does not parse.
  - |
    for yml in $(find . -name "*.yml"); do
      js-yaml $yml >/dev/null
      if [[ $? -ne 0 ]]; then
        echo "ERROR parsing $yml"
        exit 1
      fi
    done
  # Fail the build if any JSON file in the repo does not parse.
  - |
    for json in $(find . -name "*.json"); do
      jsonlint -v $json
      if [[ $? -ne 0 ]]; then
        echo "ERROR parsing $json"
        exit 1
      fi
    done
  # Syntax-check each known play; set -e aborts on the first failure.
  - |
    plays="aws bastion certs commoncluster common demo devpi discern edx_ansible edxapp elasticsearch forum ora rabbitmq worker xqueue xserver"
    set -e
    cd playbooks/edx-east
    for play in $plays; do
      ansible-playbook -i localhost, --syntax-check ${play}.yml
    done
......@@ -711,6 +711,11 @@
},
{
"IpProtocol":"tcp",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "JenkinsServerPort" },
"ToPort":{ "Ref": "JenkinsServerPort" },
"CidrIp":"0.0.0.0/0"
......
......@@ -417,6 +417,14 @@
"MinValue":"5",
"MaxValue":"3072",
"ConstraintDescription":"must be between 5 and 3072Gb."
},
"ClassB":{
"Default":"1",
"Description":"The second octet of the Class B to be allocated for this VPC. 10.?.xxx.xxx",
"Type":"Number",
"MinValue":"0",
"MaxValue":"255",
"ConstraintDescription":"ClassB value must be between 0 and 255."
}
},
"Mappings":{
......@@ -456,31 +464,31 @@
"sa-east-1": { "AMI":"ami-0439e619" }
},
"SubnetConfig":{
"VPC": { "CIDR":"10.0.0.0/16" },
"Public01": { "CIDR":"10.0.0.0/24" },
"Public02": { "CIDR":"10.0.1.0/24" },
"Edxapp01": { "CIDR":"10.0.10.0/24" },
"Edxapp02": { "CIDR":"10.0.11.0/24" },
"XServerJail01": { "CIDR":"10.0.20.0/24" },
"XServerJail02": { "CIDR":"10.0.21.0/24" },
"Xqueue01": { "CIDR":"10.0.30.0/24" },
"Xqueue02": { "CIDR":"10.0.31.0/24" },
"CommonCluster01": { "CIDR":"10.0.46.0/24"},
"CommonCluster02": { "CIDR":"10.0.47.0/24"},
"CommonCluster03": { "CIDR":"10.0.48.0/24"},
"Data01": { "CIDR":"10.0.50.0/24" },
"Data02": { "CIDR":"10.0.51.0/24" },
"Cache01": { "CIDR":"10.0.60.0/24" },
"Cache02": { "CIDR":"10.0.61.0/24" },
"Worker01": { "CIDR":"10.0.70.0/24" },
"Worker02": { "CIDR":"10.0.71.0/24" },
"Forum01": { "CIDR":"10.0.80.0/24" },
"Forum02": { "CIDR":"10.0.81.0/24" },
"Mongo01": { "CIDR":"10.0.90.0/24" },
"Mongo02": { "CIDR":"10.0.91.0/24" },
"Mongo03": { "CIDR":"10.0.92.0/24" },
"Notifier01": { "CIDR":"10.0.100.0/24" },
"Admin": { "CIDR":"10.0.200.0/24" }
"VPC": { "CIDR":".0.0/16" },
"Public01": { "CIDR":".0.0/24" },
"Public02": { "CIDR":".1.0/24" },
"Edxapp01": { "CIDR":".10.0/24" },
"Edxapp02": { "CIDR":".11.0/24" },
"XServerJail01": { "CIDR":".20.0/24" },
"XServerJail02": { "CIDR":".21.0/24" },
"Xqueue01": { "CIDR":".30.0/24" },
"Xqueue02": { "CIDR":".31.0/24" },
"CommonCluster01": { "CIDR":".46.0/24"},
"CommonCluster02": { "CIDR":".47.0/24"},
"CommonCluster03": { "CIDR":".48.0/24"},
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" },
"Cache01": { "CIDR":".60.0/24" },
"Cache02": { "CIDR":".61.0/24" },
"Worker01": { "CIDR":".70.0/24" },
"Worker02": { "CIDR":".71.0/24" },
"Forum01": { "CIDR":".80.0/24" },
"Forum02": { "CIDR":".81.0/24" },
"Mongo01": { "CIDR":".90.0/24" },
"Mongo02": { "CIDR":".91.0/24" },
"Mongo03": { "CIDR":".92.0/24" },
"Notifier01": { "CIDR":".100.0/24" },
"Admin": { "CIDR":".200.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
......@@ -499,7 +507,7 @@
"Properties":{
"EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true",
"CidrBlock":"10.0.0.0/16",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"InstanceTenancy":"default"
}
},
......@@ -510,11 +518,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Public01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Public01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -523,9 +534,9 @@
"AZone0"
]
},
"Tags":[
{
"Key":"immutable_metadata",
"Tags":[
{
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -535,7 +546,7 @@
"external','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -547,11 +558,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Public02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Public02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -560,8 +574,8 @@
"AZone1"
]
},
"Tags":[
{
"Tags":[
{
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
......@@ -572,8 +586,8 @@
"external','target':'ec2'}"
]
]
}
}
}
}
]
}
},
......@@ -584,11 +598,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Admin",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Admin",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -616,11 +633,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Edxapp01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Edxapp01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -639,7 +659,7 @@
"Value":"Private"
},
{
"Key":"immutable_metadata",
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -649,7 +669,7 @@
"internal-edxapp','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -661,11 +681,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Edxapp02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Edxapp02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -684,7 +707,7 @@
"Value":"Private"
},
{
"Key":"immutable_metadata",
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -694,7 +717,7 @@
"internal-edxapp','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -706,11 +729,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Xqueue01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Xqueue01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -739,7 +765,7 @@
"internal-xqueue','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -751,11 +777,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Xqueue02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Xqueue02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -774,7 +803,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -796,11 +825,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"CommonCluster01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"CommonCluster01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -819,7 +851,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -841,11 +873,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"CommonCluster02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"CommonCluster02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -874,7 +909,7 @@
"internal-commoncluster','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -886,11 +921,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"CommonCluster03",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"CommonCluster03",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -919,7 +957,7 @@
"internal-commoncluster','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -931,11 +969,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"XServerJail01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"XServerJail01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -954,7 +995,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -964,7 +1005,7 @@
"internal-xserver','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -976,11 +1017,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"XServerJail02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"XServerJail02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -999,7 +1043,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1009,7 +1053,7 @@
"internal-xserver','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -1021,11 +1065,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Data01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1053,11 +1100,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Data02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1085,11 +1135,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Cache01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Cache01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1117,11 +1170,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Cache02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Cache02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1149,11 +1205,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Worker01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Worker01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1172,7 +1231,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1182,7 +1241,7 @@
"internal-worker','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -1194,11 +1253,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Worker02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Worker02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1217,7 +1279,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1227,7 +1289,7 @@
"internal-worker','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -1239,11 +1301,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Forum01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Forum01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1262,7 +1327,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1272,7 +1337,7 @@
"internal-forum','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -1284,11 +1349,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Forum02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Forum02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1307,7 +1375,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1329,11 +1397,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1352,7 +1423,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1374,11 +1445,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1397,7 +1471,7 @@
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
......@@ -1419,11 +1493,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo03",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo03",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1452,7 +1529,7 @@
"internal-mongo','target':'ec2'}"
]
]
}
}
}
]
}
......@@ -1464,11 +1541,14 @@
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Notifier01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Notifier01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
......@@ -1684,7 +1764,7 @@
"Protocol":"1",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"10.0.0.0/16",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"Icmp": {
"Code": "0",
"Type": "0"
......@@ -1701,7 +1781,7 @@
"Protocol":"1",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"10.0.0.0/16",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"Icmp": {
"Code": "0",
"Type": "8"
......@@ -1735,7 +1815,7 @@
"Protocol":"1",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"10.0.0.0/16",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"Icmp": {
"Code": "0",
"Type": "0"
......@@ -1752,7 +1832,7 @@
"Protocol":"1",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"10.0.0.0/16",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"Icmp": {
"Code": "0",
"Type": "8"
......@@ -2593,7 +2673,7 @@
"KeyName":{
"Ref":"KeyName"
},
"IamInstanceProfile" : {
"IamInstanceProfile" : {
"Ref" : "NATMonitorRoleProfile"
},
"SubnetId":{
......@@ -2791,7 +2871,7 @@
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":"10.0.0.0/16"
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
......@@ -2838,7 +2918,9 @@
"s3:Put",
"ses:SendEmail",
"ses:SendRawEmail",
"ses:GetSendQuota"
"ses:GetSendQuota",
"ec2:DescribeInstances",
"ec2:DescribeTags"
],
"Resource":"*"
}
......@@ -2868,7 +2950,26 @@
"Action": [ "sts:AssumeRole" ]
} ]
},
"Path": "/"
"Path": "/",
"Policies": [ {
"PolicyName": "XqueueBasePolicy",
"PolicyDocument": {
"Statement":[
{
"Effect":"Allow",
"Action":[
"cloudformation:DescribeStackResource",
"ses:SendEmail",
"ses:SendRawEmail",
"ses:GetSendQuota",
"ec2:DescribeInstances",
"ec2:DescribeTags"
],
"Resource":"*"
}
]
}
} ]
}
},
"XqueueInstanceProfile": {
......@@ -2892,7 +2993,26 @@
"Action": [ "sts:AssumeRole" ]
} ]
},
"Path": "/"
"Path": "/",
"Policies": [ {
"PolicyName": "XServerBasePolicy",
"PolicyDocument": {
"Statement":[
{
"Effect":"Allow",
"Action":[
"cloudformation:DescribeStackResource",
"ses:SendEmail",
"ses:SendRawEmail",
"ses:GetSendQuota",
"ec2:DescribeInstances",
"ec2:DescribeTags"
],
"Resource":"*"
}
]
}
} ]
}
},
"XServerInstanceProfile": {
......@@ -2904,6 +3024,49 @@
} ]
}
},
"ForumRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [ {
"Effect": "Allow",
"Principal": {
"Service": [ "ec2.amazonaws.com" ]
},
"Action": [ "sts:AssumeRole" ]
} ]
},
"Path": "/",
"Policies": [ {
"PolicyName": "ForumBasePolicy",
"PolicyDocument": {
"Statement":[
{
"Effect":"Allow",
"Action":[
"cloudformation:DescribeStackResource",
"ses:SendEmail",
"ses:SendRawEmail",
"ses:GetSendQuota",
"ec2:DescribeInstances",
"ec2:DescribeTags"
],
"Resource":"*"
}
]
}
} ]
}
},
"ForumInstanceProfile": {
"Type": "AWS::IAM::InstanceProfile",
"Properties": {
"Path": "/",
"Roles": [ {
"Ref": "ForumRole"
} ]
}
},
"AdminSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
......@@ -2949,42 +3112,6 @@
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"EdxServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"for dev in /dev/xvdc /dev/xvdd; do sudo echo w | fdisk $dev; sudo mkfs -t ext4 $dev;done;\n",
"sudo mkdir /mnt/logs\n",
"sudo mount /dev/xvdc /mnt/logs\n",
"sudo mount /dev/xvdd /opt\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date`\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date`\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"EdxServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
......@@ -2993,21 +3120,15 @@
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"50"
"VolumeSize":"100"
}
}
]
}
},
"EdxappServerASGroup":{
"EdxappServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -3034,11 +3155,21 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"edxapp"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"edxapp",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"edxapp,lms,cms",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
......@@ -3077,7 +3208,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"EdxappServerASGroup"
"Ref":"EdxappServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
......@@ -3088,7 +3219,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"EdxappServerASGroup"
"Ref":"EdxappServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
......@@ -3113,7 +3244,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"EdxappServerASGroup"
"Ref":"EdxappServerAsGroup"
}
}
],
......@@ -3139,7 +3270,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"EdxappServerASGroup"
"Ref":"EdxappServerAsGroup"
}
}
],
......@@ -3283,19 +3414,6 @@
]
}
},
"EdxServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"EdxServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"EdxappServer",
"Properties":{
"Handle":{
"Ref":"EdxServerWaitHandle"
},
"Timeout":"1200"
}
},
"XqueueServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
......@@ -3322,38 +3440,6 @@
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"XqueueServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date`\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date`\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"XqueueServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
......@@ -3362,21 +3448,15 @@
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"50"
"VolumeSize":"100"
}
}
]
}
},
"XqueueServerASGroup":{
"XqueueServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -3403,11 +3483,21 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"xqueue"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"xqueue",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"xqueue",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
......@@ -3446,7 +3536,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XqueueServerASGroup"
"Ref":"XqueueServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
......@@ -3457,7 +3547,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XqueueServerASGroup"
"Ref":"XqueueServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
......@@ -3482,7 +3572,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XqueueServerASGroup"
"Ref":"XqueueServerAsGroup"
}
}
],
......@@ -3508,7 +3598,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XqueueServerASGroup"
"Ref":"XqueueServerAsGroup"
}
}
],
......@@ -3636,19 +3726,6 @@
]
}
},
"XqueueServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"XqueueServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"XqueueServer",
"Properties":{
"Handle":{
"Ref":"XqueueServerWaitHandle"
},
"Timeout":"1200"
}
},
"CommonClusterServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
......@@ -3677,38 +3754,6 @@
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"CommonClusterServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"CommonClusterServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
......@@ -3717,21 +3762,15 @@
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"50"
"VolumeSize":"100"
}
}
]
}
},
"CommonClusterServerASGroup":{
"CommonClusterServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -3767,6 +3806,11 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"commoncluster"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"commoncluster",
"PropagateAtLaunch":true
......@@ -3823,7 +3867,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"CommonClusterServerASGroup"
"Ref":"CommonClusterServerAsGroup"
}
}
],
......@@ -3845,7 +3889,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"CommonClusterServerASGroup"
"Ref":"CommonClusterServerAsGroup"
}
}
],
......@@ -3905,27 +3949,51 @@
"IpProtocol":"tcp",
"FromPort":"9200",
"ToPort":"9200",
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort":"9300",
"ToPort":"9300",
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" }
}
],
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"9200",
"ToPort":"9200",
"CidrIp":"10.0.0.0/16"
"FromPort": 9200,
"ToPort": 9200,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"9300",
"ToPort":"9300",
"CidrIp":"10.0.0.0/16"
"FromPort": 9300,
"ToPort": 9300,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
}
]
}
......@@ -3950,13 +4018,61 @@
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "ElasticSearchELBSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "ElasticSearchELBSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"SourceSecurityGroupId": { "Ref": "ForumServerSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
}
]
}
......@@ -4014,13 +4130,25 @@
"IpProtocol":"tcp",
"FromPort":"5672",
"ToPort":"5672",
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "EdxappServerSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort":"6163",
"ToPort":"6163",
"SourceSecurityGroupId": { "Ref": "EdxappServerSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort":"5672",
"ToPort":"5672",
"SourceSecurityGroupId": { "Ref": "XqueueServerSecurityGroup" }
},
{
"IpProtocol":"tcp",
"FromPort":"6163",
"ToPort":"6163",
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "XqueueServerSecurityGroup" }
}
],
"SecurityGroupEgress":[
......@@ -4028,13 +4156,37 @@
"IpProtocol":"tcp",
"FromPort":"5672",
"ToPort":"5672",
"CidrIp":"10.0.0.0/16"
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"6163",
"ToPort":"6163",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"5672",
"ToPort":"5672",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"6163",
"ToPort":"6163",
"CidrIp":"10.0.0.0/16"
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"5672",
"ToPort":"5672",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"6163",
"ToPort":"6163",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
}
]
}
......@@ -4051,7 +4203,7 @@
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":"10.0.0.0/16"
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
......@@ -4066,7 +4218,7 @@
"FromPort":"6163",
"ToPort":"6163",
"SourceSecurityGroupId" : {
"Ref" : "RabbitMQELBSecurityGroup"
"Ref" : "RabbitMQELBSecurityGroup"
}
},
{
......@@ -4087,9 +4239,37 @@
},
{
"IpProtocol":"tcp",
"FromPort":"5672",
"ToPort":"5672",
"SourceSecurityGroupId" : {
"Ref" : "EdxappServerSecurityGroup"
}
},
{
"IpProtocol":"tcp",
"FromPort":"6163",
"ToPort":"6163",
"SourceSecurityGroupId" : {
"Ref" : "EdxappServerSecurityGroup"
}
},
{
"IpProtocol":"tcp",
"FromPort":"0",
"ToPort":"65535",
"CidrIp":"10.0.0.0/16"
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"0",
"ToPort":"65535",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster02", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort":"0",
"ToPort":"65535",
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "CommonCluster03", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
......@@ -4120,26 +4300,13 @@
]
}
},
"CommonClusterServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"CommonClusterServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"CommonClusterServer",
"Properties":{
"Handle":{
"Ref":"CommonClusterServerWaitHandle"
},
"Timeout":"1200"
}
},
"XServer":{
"XserverServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
"IamInstanceProfile":{ "Ref":"XServerInstanceProfile" },
"SecurityGroups":[
{
"Ref":"XServerSecurityGroup"
"Ref":"XserverServerSecurityGroup"
}
],
"ImageId":{
......@@ -4159,38 +4326,6 @@
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"XServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"XServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
......@@ -4199,21 +4334,15 @@
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"Ebs":{
"VolumeSize":"50"
"VolumeSize":"100"
}
}
]
}
},
"XServerASGroup":{
"XserverServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -4240,11 +4369,21 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"xserver"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"xserver",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"xserver",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
......@@ -4260,7 +4399,7 @@
}
],
"LaunchConfigurationName":{
"Ref":"XServer"
"Ref":"XserverServer"
},
"MinSize":{
"Ref":"XServerDesiredCapacity"
......@@ -4278,23 +4417,23 @@
]
}
},
"XServerScaleUpPolicy":{
"XserverServerScaleUpPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XServerASGroup"
"Ref":"XserverServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
}
},
"XServerScaleDownPolicy":{
"XserverServerScaleDownPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"XServerASGroup"
"Ref":"XserverServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
......@@ -4312,14 +4451,14 @@
"Threshold":"90",
"AlarmActions":[
{
"Ref":"XServerScaleUpPolicy"
"Ref":"XserverServerScaleUpPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XServerASGroup"
"Ref":"XserverServerAsGroup"
}
}
],
......@@ -4338,14 +4477,14 @@
"Threshold":"70",
"AlarmActions":[
{
"Ref":"XServerScaleDownPolicy"
"Ref":"XserverServerScaleDownPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"XServerASGroup"
"Ref":"XserverServerAsGroup"
}
}
],
......@@ -4403,20 +4542,26 @@
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"10.0.0.0/16"
"SourceSecurityGroupId": { "Ref": "XqueueServerSecurityGroup" }
}
],
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"10.0.0.0/16"
"FromPort": {"Ref": "XserverServerPort"},
"ToPort": {"Ref": "XserverServerPort"},
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "XServerJail01", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": {"Ref": "XserverServerPort"},
"ToPort": {"Ref": "XserverServerPort"},
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "XServerJail02", "CIDR"]}]]}
}
]
}
},
"XServerSecurityGroup":{
"XserverServerSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up SSH access plus XServer required ports",
......@@ -4428,21 +4573,13 @@
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":"10.0.0.0/16"
"CidrIp": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]}
},
{
"IpProtocol":"tcp",
"FromPort": { "Ref": "XserverServerPort" },
"ToPort": { "Ref": "XserverServerPort" },
"CidrIp":"10.0.0.0/16"
},
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"SourceSecurityGroupId" : {
"Ref" : "XServerELBSecurityGroup"
}
"SourceSecurityGroupId": { "Ref": "XServerELBSecurityGroup" }
}
],
"Tags":[
......@@ -4465,19 +4602,6 @@
]
}
},
"XServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"XServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"XServer",
"Properties":{
"Handle":{
"Ref":"XServerWaitHandle"
},
"Timeout":"1200"
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
......@@ -4614,6 +4738,7 @@
"WorkerServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
"IamInstanceProfile":{ "Ref":"EdxappInstanceProfile" },
"SecurityGroups":[
{
"Ref":"WorkerServerSecurityGroup"
......@@ -4636,42 +4761,6 @@
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"WorkerServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"for dev in /dev/xvdc /dev/xvdd; do sudo echo w | fdisk $dev; sudo mkfs -t ext4 $dev;done;\n",
"sudo mkdir /mnt/logs\n",
"sudo mount /dev/xvdc /mnt/logs\n",
"sudo mount /dev/xvdd /opt\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"WorkerServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
......@@ -4680,21 +4769,15 @@
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"50"
"VolumeSize":"100"
}
}
]
}
},
"WorkerServerASGroup":{
"WorkerServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -4721,11 +4804,21 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"worker"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"worker",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"workers",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
......@@ -4759,7 +4852,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"WorkerServerASGroup"
"Ref":"WorkerServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
......@@ -4770,7 +4863,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"WorkerServerASGroup"
"Ref":"WorkerServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
......@@ -4795,7 +4888,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"WorkerServerASGroup"
"Ref":"WorkerServerAsGroup"
}
}
],
......@@ -4821,7 +4914,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"WorkerServerASGroup"
"Ref":"WorkerServerAsGroup"
}
}
],
......@@ -4865,19 +4958,6 @@
]
}
},
"WorkerServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"WorkerServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"WorkerServer",
"Properties":{
"Handle":{
"Ref":"WorkerServerWaitHandle"
},
"Timeout":"1200"
}
},
"ForumServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
......@@ -4903,42 +4983,6 @@
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"ForumServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"for dev in /dev/xvdc /dev/xvdd; do sudo echo w | fdisk $dev; sudo mkfs -t ext4 $dev;done;\n",
"sudo mkdir /mnt/logs\n",
"sudo mount /dev/xvdc /mnt/logs\n",
"sudo mount /dev/xvdd /opt\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"ForumServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
......@@ -4947,21 +4991,15 @@
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"50"
"VolumeSize":"100"
}
}
]
}
},
"ForumServerASGroup":{
"ForumServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -4988,11 +5026,21 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"forum"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"forum",
"PropagateAtLaunch":true
},
{
"Key":"services",
"Value":"forum",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
......@@ -5031,7 +5079,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"ForumServerASGroup"
"Ref":"ForumServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
......@@ -5042,7 +5090,7 @@
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"ForumServerASGroup"
"Ref":"ForumServerAsGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
......@@ -5067,7 +5115,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"ForumServerASGroup"
"Ref":"ForumServerAsGroup"
}
}
],
......@@ -5093,7 +5141,7 @@
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"ForumServerASGroup"
"Ref":"ForumServerAsGroup"
}
}
],
......@@ -5213,19 +5261,6 @@
]
}
},
"ForumServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"ForumServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"ForumServer",
"Properties":{
"Handle":{
"Ref":"ForumServerWaitHandle"
},
"Timeout":"1200"
}
},
"MongoServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
......@@ -5360,7 +5395,7 @@
]
}
},
"MongoServerASGroup":{
"MongoServerAsGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
......@@ -5396,6 +5431,11 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"mongo"]]},
"PropagateAtLaunch":true
},
{
"Key":"play",
"Value":"mongo",
"PropagateAtLaunch":true
......@@ -5486,11 +5526,14 @@
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo01",
"CIDR"
]}
]]
}
},
{
......@@ -5498,11 +5541,14 @@
"FromPort":"28017",
"ToPort":"28017",
"CidrIp":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo01",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo01",
"CIDR"
]}
]]
}
},
{
......@@ -5510,11 +5556,14 @@
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo02",
"CIDR"
]}
]]
}
},
{
......@@ -5522,11 +5571,14 @@
"FromPort":"28017",
"ToPort":"28017",
"CidrIp":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo02",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo02",
"CIDR"
]}
]]
}
},
{
......@@ -5534,11 +5586,14 @@
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo03",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo03",
"CIDR"
]}
]]
}
},
{
......@@ -5546,11 +5601,14 @@
"FromPort":"28017",
"ToPort":"28017",
"CidrIp":{
"Fn::FindInMap":[
"SubnetConfig",
"Mongo03",
"CIDR"
]
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Mongo03",
"CIDR"
]}
]]
}
}
],
......@@ -5695,49 +5753,34 @@
],
"Tags":[
{
"Key":"Name",
"Value": {"Fn::Join": ["-",[{"Ref": "EnvironmentTag"},{"Ref": "DeploymentTag"},"notifier"]]}
},
{
"Key":"play",
"Value":"notifier"
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
}
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
}
],
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"NotifierServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date`\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date`\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"NotifierServerWaitHandle"
},
"'\n"
]
]
"BlockDeviceMappings":[
{
"DeviceName":"/dev/sda1",
"Ebs":{
"VolumeSize":"100"
}
}
}
]
}
},
"NotifierSecurityGroup":{
......@@ -5758,19 +5801,6 @@
}
]
}
},
"NotifierServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"NotifierServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"NotifierHost",
"Properties":{
"Handle":{
"Ref":"NotifierServerWaitHandle"
},
"Timeout":"1200"
}
}
},
"Outputs":{
......
......@@ -156,15 +156,19 @@ class Ec2Inventory(object):
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print data_to_print
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
if self.args.tags_only:
to_check = self.cache_path_tags
else:
to_check = self.cache_path_cache
if os.path.isfile(to_check):
mod_time = os.path.getmtime(to_check)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
......@@ -215,15 +219,18 @@ class Ec2Inventory(object):
# Cache related
cache_path = config.get('ec2', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--tags-only', action='store_true', default=False,
help='only return tags (default: False)')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
......@@ -247,9 +254,12 @@ class Ec2Inventory(object):
self.get_instances_by_region(region)
self.get_rds_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
if self.args.tags_only:
self.write_to_cache(self.inventory, self.cache_path_tags)
else:
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
......@@ -266,13 +276,13 @@ class Ec2Inventory(object):
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances()
for reservation in reservations:
instances = sorted(reservation.instances)
for instance in instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if not self.eucalyptus:
print "Looks like AWS is down again:"
......@@ -349,7 +359,7 @@ class Ec2Inventory(object):
# Inventory: Group by key pair
if instance.key_name:
self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)
# Inventory: Group by security group
try:
for group in instance.groups:
......@@ -403,10 +413,10 @@ class Ec2Inventory(object):
# Inventory: Group by availability zone
self.push(self.inventory, instance.availability_zone, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)
# Inventory: Group by security group
try:
if instance.security_group:
......@@ -541,8 +551,10 @@ class Ec2Inventory(object):
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
if self.args.tags_only:
cache = open(self.cache_path_tags, 'r')
else:
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
......@@ -556,7 +568,9 @@ class Ec2Inventory(object):
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
'''
Writes data in JSON format to a file
'''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
......@@ -574,7 +588,8 @@ class Ec2Inventory(object):
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if self.args.tags_only:
data = [key for key in data.keys() if 'tag_' in key]
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
......
- name: Deploy aws
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- name: Deploy bastion
hosts: all
sudo: True
gather_facts: True
roles:
- bastion
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- certs
- role: datadog
when: enable_datadog
......
......@@ -5,8 +5,6 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- role: datadog
......
# ansible-playbook -i ec2.py commoncluster.yml --extra-vars "deployment=edge env=stage" -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: tag_play_commoncluster:&tag_environment_{{ env }}:&tag_deployment_{{ deployment }}
sudo: True
serial: 1
roles:
- oraclejdk
- elasticsearch
- rabbitmq
- datadog
- splunkforwarder
#
# In order to reconfigure the host resolution we are issuing a
# reboot.
- hosts: tag_play_commoncluster:&tag_environment_{{ env }}:&tag_deployment_{{ deployment }}
sudo: True
serial: 1
vars:
reboot: False
tasks:
- name: reboot
command: /sbin/shutdown -r now "Reboot is triggered by Ansible"
# This is a test play that creates all supported user
# types using the user role. Example only, not meant
# to be run on a real system
- name: Create all user types (test play)
hosts: all
sudo: True
gather_facts: False
vars_files:
- 'roles/edxapp/defaults/main.yml'
- 'roles/common/defaults/main.yml'
- 'roles/analytics-server/defaults/main.yml'
- 'roles/analytics/defaults/main.yml'
pre_tasks:
- fail: msg="You must pass a user into this play"
when: user is not defined
- name: give access with no sudo
set_fact:
CUSTOM_USER_INFO:
- name: "{{ user }}"
github: true
- name: test-admin-user
type: admin
- name: test-normal-user
- name: test-restricted-user-edxapp
type: restricted
sudoers_template: 99-edxapp-manage-cmds.j2
- name: test-restricted-user-anayltics
type: restricted
sudoers_template: 99-analytics-manage-cmds.j2
roles:
- role: user
user_info: "{{ CUSTOM_USER_INFO }}"
......@@ -33,27 +33,19 @@
vars_prompt:
# passwords use vars_prompt so they aren't in the
# bash history
- name: "edxapp_db_root_pass"
prompt: "Password for edxapp root mysql user (enter to skip)"
default: "None"
private: True
- name: "xqueue_db_root_pass"
prompt: "Password for xqueue root mysql user (enter to skip)"
default: "None"
private: True
- name: "ora_db_root_pass"
prompt: "Password for ora root mysql user (enter to skip)"
default: "None"
private: True
- name: "discern_db_root_pass"
prompt: "Password for discern root mysql user (enter to skip)"
default: "None"
- name: "db_root_pass"
prompt: "Password for root mysql user"
private: True
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: install python mysqldb module
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
sudo: yes
with_items:
- python-mysqldb
- name: create mysql databases for the edX stack
mysql_db: >
db={{ item[0] }}{{ item[1].db_name }}
......@@ -72,17 +64,58 @@
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
db_pass: "{{ db_root_pass }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ xqueue_db_root_pass }}"
db_pass: "{{ db_root_pass }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ ora_db_root_pass }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for read_only user
mysql_user:
name: "{{ COMMON_MYSQL_READ_ONLY_USER }}"
priv: "*.*:SELECT"
password: "{{ COMMON_MYSQL_READ_ONLY_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for admin user
mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}"
priv: "*.*:CREATE USER"
password: "{{ COMMON_MYSQL_ADMIN_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for db user
- name: assign mysql user permissions for db users
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
......@@ -99,19 +132,19 @@
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
......@@ -139,18 +172,18 @@
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_pass: "{{ db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
# Creates a single user on a server
# By default no super-user privileges
# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv"
# Create a user with sudo privileges
# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv" -e "give_sudo=true"
- name: Create a single user
hosts: all
sudo: True
gather_facts: False
pre_tasks:
- fail: msg="You must pass a user into this play"
when: not user
- set_fact:
gh_users:
- "{{ user }}"
when: user is not defined
- name: give access with no sudo
set_fact:
CUSTOM_USER_INFO:
- name: "{{ user }}"
github: true
when: give_sudo is not defined
- name: give access with sudo
set_fact:
CUSTOM_USER_INFO:
- name: "{{ user }}"
type: admin
github: true
when: give_sudo is defined
roles:
- gh_users
- role: user
user_info: "{{ CUSTOM_USER_INFO }}"
......@@ -5,10 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- demo
- demo
- role: datadog
when: enable_datadog
- role: splunkforwarder
......
......@@ -2,7 +2,5 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- devpi
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- discern
......
---
# dummy var file
# This file is needed as a fall through
# for vars_files
dummy_var: True
......@@ -7,6 +7,7 @@
migrate_db: "yes"
openid_workaround: True
roles:
- aws
- role: nginx
nginx_sites:
- cms
......
---
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_bastion
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_edxapp
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_version: 'sarina/install-datadog'
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/dev2.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_version: 'sarina/install-datadog'
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xserver
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - nginx
# - xserver
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_rabbitmq
# serial: 1
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - rabbitmq
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xqueue
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - nginx
# - xqueue
---
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_edxapp
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_version: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- datadog
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
nginx_default_sites:
- lms
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_version: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xserver
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- role: nginx
nginx_sites:
- xserver
- xserver
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_rabbitmq
serial: 1
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- rabbitmq
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- role: nginx
nginx_sites:
- xqueue
- xqueue
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_mongo
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- role: 'mongo'
mongo_clustered: true
......@@ -67,9 +67,6 @@
- forum
nginx_default_sites:
- lms
# gh_users hash must be passed
# in as a -e variable
- gh_users
post_tasks:
- name: get instance id for elb registration
local_action:
......
......@@ -5,7 +5,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: 'mongo'
mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo
......@@ -14,7 +14,7 @@
# - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
# - "{{ secure_dir }}/vars/common/common.yml"
# roles:
# - gh_users
# - user
# - mongo
- hosts: first_in_tag_role_edxapp
sudo: True
......@@ -23,7 +23,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- datadog
- role: nginx
nginx_sites:
......@@ -44,7 +44,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- datadog
- role: nginx
nginx_sites:
......@@ -62,7 +62,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- datadog
- role: nginx
nginx_sites:
......@@ -81,7 +81,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: nginx
nginx_sites:
- xserver
......@@ -94,7 +94,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- rabbitmq
- splunkforwarder
- hosts: first_in_tag_role_xqueue
......@@ -103,7 +103,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: nginx
nginx_sites:
- xqueue
......@@ -116,7 +116,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- role: nginx
nginx_sites:
- xqueue
......@@ -128,7 +128,7 @@
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- gh_users
- user
- oraclejdk
- elasticsearch
- forum
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- lms
......
......@@ -5,6 +5,5 @@
- "{{ secure_dir }}/vars/stage/stage-edx.yml"
roles:
- common
- gh_users
- oraclejdk
- elasticsearch
......@@ -5,9 +5,9 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
enable_newrelic: True
roles:
- aws
- role: nginx
nginx_sites:
- forum
......@@ -16,3 +16,5 @@
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
- role: newrelic
when: enable_newrelic
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
roles:
- aws
- edx_ansible
- user
- jenkins_admin
- hotg
- newrelic
......@@ -10,5 +10,4 @@
COMMON_DATA_DIR: "/mnt"
roles:
- common
- gh_users
- jenkins_master
# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENV_TYPE=stage secure_dir=/path/to/secure/dir"
# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENVIRONMENT=stage COMMON_DEPLOYMENT=edx secure_dir=/path/to/secure/dir"
- name: Deploy legacy_ora
hosts: all
sudo: True
gather_facts: True
vars:
ora_app_dir: '/opt/wwc'
ora_user: 'www-data'
serial: 1
vars_files:
- "{{secure_dir}}/vars/{{COMMON_ENVIRONMENT}}/legacy-ora.yml"
roles:
- legacy_ora
- splunkforwarder
- newrelic
......@@ -2,8 +2,6 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: nginx
nginx_sites:
......
......@@ -2,7 +2,6 @@
hosts: all
sudo: True
gather_facts: False
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- rabbitmq
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@gh_users.yml" -e "@/path/to/secure/ansible/vars/hotg.yml" -e "@/path/to/configuration-secure/ansible/vars/common/common.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@/path/to/secure/ansible/vars/edx_admin.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_role_bastion
hosts: tag_play_bastion
sudo: True
gather_facts: False
roles:
- gh_users
- aws
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_role_admin
hosts: tag_play_admin
sudo: True
gather_facts: True
roles:
- common
- gh_users
- jenkins_master
- aws
- edx_ansible
- jenkins_admin
- hotg
- newrelic
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: edxapp
celery_worker: True
- role: datadog
......
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- xqueue
......
......@@ -5,9 +5,8 @@
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- aws
- role: nginx
nginx_sites:
- xserver
......
Readme
------
# Stanford Ansible Configuration Files
This directory has the live playbooks that we use here at Stanford to
maintain our instance of OpenEdX at [class.stanford.edu][c]. We check
......@@ -23,25 +22,34 @@ Other install docs:
[1]: https://docs.google.com/document/d/1ZDx51Jxa-zffyeKvHmTp_tIskLW9D9NRg9NytPTbnrA/edit#heading=h.iggugvghbcpf
Ansible Commands - Prod
-----------------------
## Ansible Commands - Prod
Generally we do installs as the "ubuntu" user. You want to make
sure that the stanford-deploy-20130415 ssh key is in your ssh agent.
ANSIBLE_EC2_INI=ec2.ini ansible-playbook prod-log.yml -u ubuntu -c ssh -i ./ec2.py
ANSIBLE_CONFIG=prod-ansible.cfg ANSIBLE_EC2_INI=prod-ec2.ini ansible-playbook prod-app.yml -e "machine=app4" -u ubuntu -c ssh -i ./ec2.py
Some specifics:
Ansible Commands - Stage
------------------------
* To do database migrations, include this: ```-e "migrate_db=yes"```. The default
behavior is to not do migrations.
Verify that you're doing something reasonable:
* To hit multiple machines, use this: ```-e "machine=app(1|2|4)"```.
Use multiple separate "-e" options to specify multiple vars on the
command line.
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py --list-hosts
* Usually I run with the ```--list-hosts``` option first to verify that I'm
doing something sane before actually running.
Verify that you're doing something reasonable:
* To install the utility machines, substitute ```prod-worker.yml```. Those
are also parameterized to take the machine variable (util1, util(1|2),
and so forth).
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py
## Ansible Commands - Stage
Command is:
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=stage-ec2.ini ansible-playbook stage-app.yml -e "machine=app1" -u ubuntu -c ssh -i ./ec2.py
- hosts: ~tag_Name_app(10|20)_carn
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{machine}}_carn
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
secure_dir: '../../../configuration-secure/ansible'
# this indicates the path to site-specific (with precedence)
......
# this gets all running prod webservers
- hosts: tag_environment_prod_carn:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(10)_carn
- name: Basic util setup on carnegie workers
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_carn
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
gather_facts: True
vars:
secure_dir: '../../../edx-secret/ansible'
# this indicates the path to site-specific (with precedence)
......
......@@ -9,17 +9,13 @@
# - apt: pkg=libzmq-dev,python-zmq state=present
# - action: fireball
# this gets all running prod webservers
#- hosts: tag_environment_prod:&tag_function_webserver
# or we can get subsets of them by name
- hosts: ~tag_Name_app(10|20)_cme
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{machine}}_cme
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
secure_dir: '../../../edx-secret/ansible'
# this indicates the path to site-specific (with precedence)
......
# this gets all running prod webservers
- hosts: tag_environment_prod_cme:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(10)_cme
- name: Basic util setup on cme hosts
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_cme
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
vars:
secure_dir: '../../../edx-secret/ansible'
......
# this gets all running prod webservers
#- hosts: tag_environment_prod:&tag_function_webserver
# or we can get subsets of them by name
#- hosts: ~tag_Name_app(10|20)_prod
- hosts: ~tag_Name_app(11|21)_prod
## this is the test box
#- hosts: ~tag_Name_app4_prod
## you can also do security group, but don't do that
#- hosts: security_group_edx-prod-EdxappServerSecurityGroup-NSKCQTMZIPQB
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{machine}}_prod
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
secure_dir: '../../../configuration-secure/ansible'
# this indicates the path to site-specific (with precedence)
......
......@@ -8,3 +8,35 @@
roles:
- common
- supervisor
- role: user
USER_INFO:
- name: sefk
github: true
type: admin
- name: jbau
github: true
type: admin
- name: jrbl
github: true
type: admin
- name: ali123
github: true
type: admin
- name: caesar2164
github: true
type: admin
- name: dcadams
github: true
type: admin
- name: nparlante
github: true
type: admin
- name: jinpa
github: true
- name: gbruhns
github: true
- name: paepcke
github: true
- name: akshayak
github: true
tags: users
# For all util machines
- hosts: tag_environment_prod:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(1|2)_prod
- name: Basic util setup on all hosts
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_prod
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
vars:
secure_dir: '../../../configuration-secure/ansible'
......
- hosts: tag_environment_stage:&tag_function_webserver
#- hosts: tag_Name_app1_stage
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
- hosts: ~tag_Name_{{ machine }}_stage
pre_tasks:
- fail: msg="This playbook only runnable on 'app' machines"
when: "'app' not in machine"
sudo: True
vars_prompt:
- name: "migrate_db"
prompt: "Should this playbook run database migrations? (Type 'yes' to run, anything else to skip migrations)"
default: "no"
private: no
vars:
not_prod: true
secure_dir: ../../../edx-secret/ansible
......
# this gets all running stage util machiens
- hosts: tag_environment_stage:&tag_function_util
# or we can get subsets of them by name
#- hosts: ~tag_Name_util(1|2)_stage
---
- name: Basic util setup on all hosts
# This uses variable expansion so you can select machine(s) from the command line
# using the -e flag. See README for instructions on how to use.
hosts: ~tag_Name_{{machine}}_stage
pre_tasks:
- fail: msg="This playbook only runnable on 'util' machines"
when: "'util' not in machine"
sudo: True
vars:
secure_dir: ../../../edx-secret/ansible
......
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation consumed by `ansible-doc`.
# Fix: removed the engine `choices:` list that had been copy-pasted under the
# `params` option -- `params` is a free-form map, not an enumerated value.
DOCUMENTATION = '''
---
module: rds_param_group
version_added: "1.5"
short_description: manage RDS parameter groups
description:
     - Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5.
options:
  state:
    description:
      - Specifies whether the group should be present or absent.
    required: true
    default: present
    aliases: []
    choices: [ 'present' , 'absent' ]
  name:
    description:
      - Database parameter group identifier.
    required: true
    default: null
    aliases: []
  description:
    description:
      - Database parameter group description. Only set when a new group is added.
    required: false
    default: null
    aliases: []
  engine:
    description:
      - The type of database for this group. Required for state=present.
    required: false
    default: null
    aliases: []
    choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0']
  immediate:
    description:
      - Whether to apply the changes immediately, or after the next reboot of any associated instances.
    required: false
    default: null
    aliases: []
  params:
    description:
      - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
    required: false
    default: null
    aliases: []
  region:
    description:
      - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
    required: true
    default: null
    aliases: [ 'aws_region', 'ec2_region' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
requirements: [ "boto" ]
author: Scott Anderson
'''
EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group: >
state=present
name=norwegian_blue
description=My Fancy Ex Parrot Group
engine=mysql5.6
params='{"auto_increment_increment": "42K"}'
# Remove a parameter group
- rds_param_group: >
state=absent
name=norwegian_blue
'''
import sys
import time

# Engines accepted by the RDS API for parameter groups; validates the
# module's `engine` argument (see argument_spec in main()).
VALID_ENGINES = [
    'mysql5.1',
    'mysql5.5',
    'mysql5.6',
    'oracle-ee-11.2',
    'oracle-se-11.2',
    'oracle-se1-11.2',
    'postgres9.3',
    'sqlserver-ee-10.5',
    'sqlserver-ee-11.0',
    'sqlserver-ex-10.5',
    'sqlserver-ex-11.0',
    'sqlserver-se-10.5',
    'sqlserver-se-11.0',
    'sqlserver-web-10.5',
    'sqlserver-web-11.0',
]

# boto is an external dependency; Ansible modules report a missing
# requirement by printing a flat key=value failure and exiting non-zero.
# NOTE(review): Python 2 print statement -- this module predates Python 3.
try:
    import boto.rds
    from boto.exception import BotoServerError
except ImportError:
    print "failed=True msg='boto required for this module'"
    sys.exit(1)
# modify_group() below returns a tuple: (dict of parameters that were changed, the remaining requested parameters that weren't found in this parameter group)
class NotModifiableError(StandardError):
    """Raised when a request attempts to change a read-only RDS parameter."""

    def __init__(self, error_message, *args):
        super(NotModifiableError, self).__init__(error_message, *args)
        self.error_message = error_message

    def _describe(self):
        # Single formatting point shared by repr() and str().
        return 'NotModifiableError: %s' % self.error_message

    def __repr__(self):
        return self._describe()

    def __str__(self):
        return self._describe()
# Multipliers for numeric parameter values written with a size suffix,
# e.g. "42K" -> 42 * 1024. Consumed by set_parameter().
INT_MODIFIERS = {
    'K': 1024,
    'M': pow(1024, 2),
    'G': pow(1024, 3),
    'T': pow(1024, 4),
}

# String spellings coerced to boolean true for 'boolean'-typed parameters.
TRUE_VALUES = ('on', 'true', 'yes', '1',)
def set_parameter(param, value, immediate):
    """Coerce ``value`` to the parameter's declared type and apply it.

    Integer parameters may be written as strings with a size suffix,
    e.g. "10M" = 10 * 1024 * 1024 (see INT_MODIFIERS).

    :param param: boto RDS parameter object (has ``type``, ``value``, ``apply``)
    :param value: requested value, possibly a suffixed string
    :param immediate: apply now (True) or at the next instance reboot (False)
    """
    coerced = value
    if param.type == 'string':
        coerced = str(value)
    elif param.type == 'integer':
        if isinstance(value, basestring):
            # Expand a trailing K/M/G/T multiplier before the final int().
            for suffix in INT_MODIFIERS.keys():
                if value.endswith(suffix):
                    coerced = int(value[:-1]) * INT_MODIFIERS[suffix]
            coerced = int(coerced)
        elif type(value) == bool:
            coerced = 1 if value else 0
        else:
            coerced = int(value)
    elif param.type == 'boolean':
        if isinstance(value, basestring):
            # Only the spellings listed in TRUE_VALUES count as true.
            coerced = value in TRUE_VALUES
        else:
            coerced = bool(value)

    param.value = coerced
    param.apply(immediate)
def modify_group(group, params, immediate=False):
    """Set all of the params in a group to the provided new params.

    Raises NotModifiableError if any of the params to be changed are read
    only.

    Returns a tuple ``(changed, remaining)`` where ``changed`` maps each
    modified parameter name to its {'old': ..., 'new': ...} values, and
    ``remaining`` holds the requested params that weren't found in this
    parameter group (they may live in a later page of the group; see the
    Marker loop in main()).
    """
    changed = {}
    new_params = dict(params)  # copy so the caller's dict is never mutated

    # Snapshot the keys: entries are deleted from new_params as they are
    # consumed, and deleting while iterating a dict is unsafe.
    for key in list(new_params.keys()):
        # `in` replaces the deprecated dict.has_key() (removed in Python 3).
        if key in group:
            param = group[key]
            new_value = new_params[key]
            if param.value != new_value:
                if not param.is_modifiable:
                    raise NotModifiableError('Parameter %s is not modifiable.' % key)
                changed[key] = {'old': param.value, 'new': new_value}
                set_parameter(param, new_value, immediate)
            del new_params[key]

    return changed, new_params
def main():
    """Entry point: create, update, or delete an RDS parameter group.

    Reads module arguments, connects to RDS in the resolved region, then
    either deletes the named group (state=absent) or ensures it exists and
    applies the requested parameter values page by page (state=present).
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            state = dict(required=True, choices=['present', 'absent']),
            name = dict(required=True),
            engine = dict(required=False, choices=VALID_ENGINES),
            description = dict(required=False),
            params = dict(required=False, aliases=['parameters'], type='dict'),
            immediate = dict(required=False, type='bool'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    state = module.params.get('state')
    # RDS group identifiers are matched lowercase.
    group_name = module.params.get('name').lower()
    group_engine = module.params.get('engine')
    group_description = module.params.get('description')
    group_params = module.params.get('params') or {}
    immediate = module.params.get('immediate') or False

    # Cross-field validation: present requires the creation fields; absent
    # rejects them.
    if state == 'present':
        for required in ['name', 'description', 'engine', 'params']:
            if not module.params.get(required):
                module.fail_json(msg = str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'engine', 'params']:
            if module.params.get(not_allowed):
                module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)

    if not region:
        module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))

    try:
        conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
    except boto.exception.BotoServerError, e:
        module.fail_json(msg = e.error_message)

    # Tracked so a NotModifiableError can report that the group was created
    # before the failing modification.
    group_was_added = False

    try:
        # NOTE(review): `changed` is a bool on the absent path but becomes a
        # dict on the present path (see below) -- confirm consumers of
        # exit_json(changed=...) tolerate a dict.
        changed = False

        try:
            all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100)
            exists = len(all_groups) > 0
        except BotoServerError, e:
            # "Not found" just means we need to create it; anything else is fatal.
            if e.error_code != 'DBParameterGroupNotFound':
                module.fail_json(msg = e.error_message)
            exists = False

        if state == 'absent':
            if exists:
                conn.delete_parameter_group(group_name)
                changed = True
        else:
            changed = {}
            if not exists:
                new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description)
                group_was_added = True

            # If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only
            # if there are parameters left to set.
            marker = None
            while len(group_params):
                next_group = conn.get_all_dbparameters(group_name, marker=marker)

                changed_params, group_params = modify_group(next_group, group_params, immediate)
                changed.update(changed_params)

                if hasattr(next_group, 'Marker'):
                    marker = next_group.Marker
                else:
                    break

    except BotoServerError, e:
        module.fail_json(msg = e.error_message)

    except NotModifiableError, e:
        msg = e.error_message
        if group_was_added:
            msg = '%s The group "%s" was added first.' % (msg, group_name)
        module.fail_json(msg=msg)

    module.exit_json(changed=changed)
# import module snippets
# At packaging time Ansible replaces these star imports with the snippet
# source; they provide AnsibleModule, ec2_argument_spec, get_ec2_creds, etc.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_subnet_group
version_added: "1.5"
short_description: manage RDS database subnet groups
description:
- Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
default: null
aliases: []
description:
description:
- Database subnet group description. Only set when a new group is added.
required: false
default: null
aliases: []
subnets:
description:
- List of subnet IDs that make up the database subnet group.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
requirements: [ "boto" ]
author: Scott Anderson
'''
# Usage examples rendered by `ansible-doc rds_subnet_group`.
# Fix: the removal example was copy-pasted from rds_param_group; this module
# manages subnet groups, so the example now uses rds_subnet_group.
EXAMPLES = '''
# Add or change a subnet group
- local_action:
    module: rds_subnet_group
    state: present
    name: norwegian-blue
    description: My Fancy Ex Parrot Subnet Group
    subnets:
      - subnet-aaaaaaaa
      - subnet-bbbbbbbb

# Remove a subnet group
- rds_subnet_group: >
      state=absent
      name=norwegian-blue
'''
import sys
import time

# boto is an external dependency; report its absence in the flat key=value
# failure format Ansible expects and exit non-zero.
# NOTE(review): Python 2 print statement -- this module predates Python 3.
try:
    import boto.rds
    from boto.exception import BotoServerError
except ImportError:
    print "failed=True msg='boto required for this module'"
    sys.exit(1)
def main():
    """Entry point: create, update, or delete an RDS database subnet group.

    Reads module arguments, connects to RDS in the resolved region, then
    deletes the named subnet group (state=absent) or creates/modifies it
    with the requested subnets (state=present).
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            state = dict(required=True, choices=['present', 'absent']),
            name = dict(required=True),
            description = dict(required=False),
            subnets = dict(required=False, type='list'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    state = module.params.get('state')
    # RDS group identifiers are matched lowercase.
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    group_subnets = module.params.get('subnets') or {}

    # Cross-field validation: present requires the creation fields; absent
    # rejects them.
    if state == 'present':
        for required in ['name', 'description', 'subnets']:
            if not module.params.get(required):
                module.fail_json(msg = str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'subnets']:
            if module.params.get(not_allowed):
                module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)

    if not region:
        module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))

    try:
        conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
    except boto.exception.BotoServerError, e:
        module.fail_json(msg = e.error_message)

    try:
        # NOTE(review): `changed` is only ever set True on the delete path.
        # Creating or modifying a subnet group leaves it False, so
        # state=present always reports changed=false -- looks like a bug;
        # confirm before relying on change reporting.
        changed = False
        exists = False

        try:
            matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except BotoServerError, e:
            # "Not found" just means we need to create it; anything else is fatal.
            if e.error_code != 'DBSubnetGroupNotFoundFault':
                module.fail_json(msg = e.error_message)

        if state == 'absent':
            if exists:
                conn.delete_db_subnet_group(group_name)
                changed = True
        else:
            if not exists:
                new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
            else:
                changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)

    except BotoServerError, e:
        module.fail_json(msg = e.error_message)

    module.exit_json(changed=changed)
# import module snippets
# At packaging time Ansible replaces these star imports with the snippet
# source; they provide AnsibleModule, ec2_argument_spec, get_ec2_creds, etc.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
......@@ -19,6 +19,13 @@ AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4'
# add public keys to enable the automator user
# for running manage.py commands
AS_AUTOMATOR_NAME: automator
AS_AUTOMATOR_AUTHORIZED_KEYS: []
AS_AUTOMATOR_SUDO_CMDS:
- "ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries *"
DATABASES:
default: &databases_default
......@@ -43,7 +50,7 @@ analytics_auth_config:
MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION
as_role_name: "analytics-server"
as_user: "analytics-server"
as_user: "analytics-server"
as_home: "/opt/wwc/analytics-server"
as_venv_dir: "{{ as_home }}/virtualenvs/analytics-server"
as_source_repo: "git@github.com:edx/analytics-server.git"
......@@ -63,14 +70,6 @@ as_env_vars:
ANALYTICS_SERVER_LOG_LEVEL: "{{ AS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# See meta/main.yml
#
as_automated_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
#
# OS packages
#
......
automator ALL=(www-data) NOPASSWD:SETENV:/opt/wwc/analytics-server/virtualenvs/analytics-server/bin/django-admin.py run_all_queries *
---
dependencies:
- {
role: automated,
automated_rbash_links: $as_automated_rbash_links,
autmoated_sudoers_dest: '99-automator-analytics-server',
automated_sudoers_template: 'roles/analytics-server/templates/etc/sudoers.d/99-automator-analytics-server.j2'
}
- role: user
user_info:
- name: "{{ AS_AUTOMATOR_NAME }}"
type: restricted
sudo_cmds: "{{ AS_AUTOMATOR_SUDO_CMDS }}"
authorized_keys: "{{ AS_AUTOMATOR_AUTHORIZED_KEYS }}"
user_rbash_links:
- /usr/bin/sudo
- /usr/bin/scp
when: AS_AUTOMATOR_AUTHORIZED_KEYS|length != 0
......@@ -21,7 +21,7 @@
#
# common role
#
# Depends upon the automated role
# Depends upon the user role
#
# Example play:
#
......
......@@ -43,7 +43,7 @@ analytics_auth_config:
MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION
analytics_role_name: "analytics"
analytics_user: "analytics"
analytics_user: "analytics"
analytics_home: "/opt/wwc/analytics"
analytics_venv_dir: "{{ analytics_home }}/virtualenvs/analytics"
analytics_source_repo: "git@github.com:edx/analytics-server.git"
......@@ -63,7 +63,7 @@ analytics_env_vars:
ANALYTICS_LOG_LEVEL: "{{ ANALYTICS_LOG_LEVEL }}"
#
# Used by the included role, automated.
# Used by the included role, user.
# See meta/main.yml
#
analytics_automated_rbash_links:
......
......@@ -21,7 +21,7 @@
#
# common role
#
# Depends upon the automated role
# user role to set up a restricted user
#
# Example play:
#
......
automator ALL=({{ analytics_web_user }}) NOPASSWD:SETENV:{{ analytics_venv_dir }}/bin/django-admin.py run_all_queries *
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role automated
#
# Overview:
#
# This role is included as a dependency by other roles which provide
# automated jobs. Automation occurs over ssh. The automator user
# is assigned to a managed rbash shell and is, potentially, allowed to run
# explicitly listed commands via sudo. Both the commands that are
# allowed via rbash and the sudoers file are provided by the
# including role.
#
# Dependencies:
#
# This role depends upon variables provided by an including role
# via the my_role/meta/main.yml file. Includes take the following forms:
#
# dependencies:
#   - {
#     role: automated,
#     automated_rbash_links: $as_automated_rbash_links,
#     automated_sudoers_dest: '99-my_role'
#     automated_sudoers_file: 'roles/my_role/files/etc/sudoers.d/99-my_role'
#   }
#
# or
#
# dependencies:
#   - {
#     role: automated,
#     automated_rbash_links: $as_automated_rbash_links,
#     automated_sudoers_dest: '99-my_role'
#     automated_sudoers_template: 'roles/my_role/templates/etc/sudoers.d/99-my_role.j2'
#   }
#
# The sudoers file is optional. Note that for sudo to work it must be
# included in the rbash links list.
#
# That list should be provided via my_role's defaults
#
# role_automated_rbash_links:
#   - /usr/bin/sudo
#   - /usr/bin/scp
#

# Guard clauses: abort early when the including role failed to supply the
# required variables.
# NOTE(review): `fail` is given free-form text rather than `msg=...` --
# confirm the targeted Ansible version renders this as the failure message.
- fail: automated_rbash_links required for role
  when: automated_rbash_links is not defined

- fail: automated_sudoers_dest required for role
  when: automated_sudoers_dest is not defined

# rbash restricts the automator user to the commands linked under
# ~automated/bin (created at the bottom of this file).
- name: create automated user
  user:
    name={{ automated_user }} state=present shell=/bin/rbash
    home={{ automated_home }} createhome=yes

# The including role supplies either a ready-made sudoers file or a
# template; `validate` runs visudo so a bad file cannot break sudo.
# NOTE(review): `when: automated_sudoers_file` errors if the variable is
# undefined -- callers appear to always define one of the two; confirm.
- name: create sudoers file from file
  copy:
    dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
    src={{ automated_sudoers_file }} owner="root"
    group="root" mode=0440 validate='visudo -cf %s'
  when: automated_sudoers_file

- name: create sudoers file from template
  template:
    dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
    src={{ automated_sudoers_template }} owner="root"
    group="root" mode=0440 validate='visudo -cf %s'
  when: automated_sudoers_template

#
# Prevent user from updating their PATH and
# environment.
#
- name: update shell file mode
  file:
    path={{ automated_home }}/{{ item }} mode=0640
    state=file owner="root" group={{ automated_user }}
  with_items:
    - .bashrc
    - .profile
    - .bash_logout

- name: change ~automated ownership
  file:
    path={{ automated_home }} mode=0750 state=directory
    owner="root" group={{ automated_user }}

#
# This ensures that the links are updated with each run
# and that links that were remove from the role are
# removed.
#
- name: remove ~automated/bin directory
  file:
    path={{ automated_home }}/bin state=absent
  ignore_errors: yes

- name: create ~automated/bin directory
  file:
    path={{ automated_home }}/bin state=directory mode=0750
    owner="root" group={{ automated_user }}

# Root-owned dotfiles keep the restricted user from escaping rbash by
# editing PATH or shell startup behavior.
- name: re-write .profile
  copy:
    src=home/automator/.profile
    dest={{ automated_home }}/.profile
    owner="root"
    group={{ automated_user }}
    mode="0744"

- name: re-write .bashrc
  copy:
    src=home/automator/.bashrc
    dest={{ automated_home }}/.bashrc
    owner="root"
    group={{ automated_user }}
    mode="0744"

- name: create .ssh directory
  file:
    path={{ automated_home }}/.ssh state=directory mode=0700
    owner={{ automated_user }} group={{ automated_user }}

- name: build authorized_keys file
  template:
    src=home/automator/.ssh/authorized_keys.j2
    dest={{ automated_home }}/.ssh/authorized_keys mode=0600
    owner={{ automated_user }} group={{ automated_user }}

# Only the listed binaries become reachable from the rbash session.
- name: create allowed command links
  file:
    src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
    state=link
  with_items: automated_rbash_links
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role aws
#
#
# Rotate logs to S3
# Only applicable when edX is running in AWS, since logs
# are organized by security group.
# !! The buckets defined below MUST exist prior to enabling !!
# this feature and the instance IAM role must have write permissions
# to the buckets
AWS_S3_LOGS: false
# If there are any issues with the s3 sync an error
# log will be sent to the following address.
# This relies on your server being able to send mail
AWS_S3_LOGS_NOTIFY_EMAIL: dummy@example.com
AWS_S3_LOGS_FROM_EMAIL: dummy@example.com
# Separate buckets for tracking logs and everything else
# You should be overriding the environment and deployment vars
# Order of precedence is left to right for exclude and include options
AWS_S3_LOG_PATHS:
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
path: "{{ COMMON_LOG_DIR }}/!(*tracking*)"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-app-logs"
path: "/var/log/*"
- bucket: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-tracking-logs"
path: "{{ COMMON_LOG_DIR }}/*tracking*"
#
# vars are namespaced with the module name.
#
aws_role_name: aws
aws_data_dir: "{{ COMMON_DATA_DIR }}/aws"
aws_app_dir: "{{ COMMON_APP_DIR }}/aws"
aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
# default path to the aws binary
aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
#
# OS packages
#
aws_debian_pkgs:
- python-setuptools
aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli
aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1
aws_s3cmd_url: "http://files.edx.org/s3cmd/{{ aws_s3cmd_version }}.tar.gz"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role aws
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- common
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role aws
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: create data directories
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0700
with_items:
- "{{ aws_data_dir }}"
- "{{ aws_log_dir }}"
- name: create app directory
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0755
with_items:
- "{{ aws_app_dir }}"
- name: install system packages
apt: >
pkg={{','.join(aws_debian_pkgs)}}
state=present
update_cache=yes
- name: install aws python packages
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: aws_pip_pkgs
- name: get s3cmd
get_url: >
url={{ aws_s3cmd_url }}
dest={{ aws_data_dir }}/
- name: untar s3cmd
shell: >
tar xf {{ aws_data_dir }}/{{ aws_s3cmd_version }}.tar.gz
creates={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
chdir={{ aws_app_dir }}
- name: create symlink for s3cmd
file: >
src={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
dest={{ COMMON_BIN_DIR }}/s3cmd
state=link
- name: create s3 log sync script
template: >
dest={{ aws_s3_sync_script }}
src=send-logs-to-s3.j2 mode=0755 owner=root group=root
when: AWS_S3_LOGS
- name: create symlink for s3 log sync script
file: >
state=link
src={{ aws_s3_sync_script }}
dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }}
when: AWS_S3_LOGS
- name: run s3 log sync script on shutdown
file: >
state=link
src={{ COMMON_BIN_DIR }}/send-logs-to-s3
path=/etc/rc0.d/S00send-logs-to-s3
when: AWS_S3_LOGS
# cron job runs the aws s3 sync script
- name: cronjob for s3 log sync
cron: >
name="cronjob for s3 log sync"
user=root
minute=0
job={{ aws_s3_sync_script }}
when: AWS_S3_LOGS
{% set lb = '{' %}
{% set rb = '}' %}
#!/bin/bash
#
# This script can be called from logrotate
# to sync logs to s3
#
# NOTE(review): the two jinja 'set' lines above render as blank lines, so
# the shebang is not on line 1 of the generated file -- confirm the script
# is always invoked via an explicit shell (e.g. from cron/logrotate).
if (( $EUID != 0 )); then
echo "Please run as the root user"
exit 1
fi
S3_LOGFILE="{{ aws_s3_logfile }}"
NOTIFY_EMAIL={{ AWS_S3_LOGS_NOTIFY_EMAIL }}
FROM_EMAIL={{ AWS_S3_LOGS_FROM_EMAIL }}
AWS_CMD={{ aws_cmd }}
# Mirror stdout into the sync logfile (and stderr into stdout) so the
# captured output can be mailed out by onerror on failure.
exec > >(tee $S3_LOGFILE)
exec 2>&1
# extglob enables patterns like !(*tracking*) used in AWS_S3_LOG_PATHS.
shopt -s extglob
# NOTE(review): $PROG is never assigned, so the usage line prints empty.
usage() {
cat<<EO
A wrapper of s3cmd sync that will sync files to
an s3 bucket, will send mail to {{ AWS_S3_LOGS_NOTIFY_EMAIL }}
on failures.
Usage: $PROG
-v add verbosity (set -x)
-n echo what will be done
-h this
EO
}
while getopts "vhn" opt; do
case $opt in
v)
set -x
shift
;;
h)
usage
exit 0
;;
n)
# Dry-run: prefix the sync command with an echo instead of executing it.
noop="echo Would have run: "
shift
;;
esac
done
# grab the first security group for the instance
# which will be used as a directory name in the s3
# bucket
# If there are any errors from this point
# send mail to $NOTIFY_EMAIL
set -e
sec_grp=unset
instance_id=unset
s3_path=unset
# Build an SES message (subject + the captured logfile as the body) and
# send it to $NOTIFY_EMAIL; in dry-run mode just report the failure.
onerror() {
if [[ -z $noop ]]; then
message_file=/var/tmp/message-$$.json
message_string="Error syncing $s3_path: inst_id=$instance_id ip=$ip region=$region"
if [[ -r $S3_LOGFILE ]]; then
python -c "import json; d={'Subject':{'Data':'$message_string'},'Body':{'Text':{'Data':open('$S3_LOGFILE').read()}}};print json.dumps(d)" > $message_file
else
cat << EOF > $message_file
{"Subject": { "Data": "$message_string" }, "Body": { "Text": { "Data": "!! ERROR !! no logfile" } } }
EOF
fi
echo "ERROR: syncing $s3_path on $instance_id"
$AWS_CMD ses send-email --from $FROM_EMAIL --to $NOTIFY_EMAIL --message file://$message_file --region $region
else
echo "Error syncing $s3_path on $instance_id"
fi
}
# Any command failure or interrupt from here on triggers the notification.
trap onerror ERR SIGHUP SIGINT SIGTERM
# first security group is used as the directory name in the bucket
sec_grp=$(ec2metadata --security-groups | head -1)
instance_id=$(ec2metadata --instance-id)
ip=$(ec2metadata --local-ipv4)
availability_zone=$(ec2metadata --availability-zone)
# region isn't available via the metadata service
# The lb/rb vars emit literal braces so the rendered line is a bash length
# expansion that strips the trailing AZ letter (e.g. us-east-1a -> us-east-1).
region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
# NOTE(review): $2 is never supplied here (getopts consumes the flags), so
# s3_path only ever appears in the error messages above -- confirm intent.
s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%}
$noop $AWS_CMD sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
#
# Defaults for role bastion
#
# These users are given access
# to the databases from the bastion
# box, it needs to be a subset of the
# users created on the box which is
# COMMON_USER_INFO + BASTION_USER_INFO
BASTION_REPLICA_USERS: []
# These users are created on the bastion
# server.
BASTION_USER_INFO: []
#
# vars are namespaced with the module name.
#
bastion_role_name: bastion
#
# OS packages
#
bastion_debian_pkgs:
# for running ansible mysql module
- mysql-client-core-5.5
- libmysqlclient-dev
# for connecting to mongo
- mongodb-clients
bastion_pip_pkgs:
# for running ansible mysql
- mysql-python
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role bastion
#
dependencies:
- role: user
user_info: "{{ BASTION_USER_INFO }}"
- aws
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role bastion
#
# Overview:
#
#
# Dependencies:
# - common
#
- name: install system packages
apt: >
pkg={{','.join(bastion_debian_pkgs)}}
state=present
- name: install bastion python packages
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: bastion_pip_pkgs
- template: >
src=mysql.sh.j2
dest=/home/{{ item[0] }}/{{ item[1].script_name }}
mode=0700 owner={{ item[0] }} group=root
with_nested:
- "{{ BASTION_REPLICA_USERS }}"
-
- db_host: "{{ EDXAPP_MYSQL_HOST }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME }}"
script_name: edxapp-rds.sh
- db_host: "{{ XQUEUE_MYSQL_HOST }}"
db_name: "{{ XQUEUE_MYSQL_DB_NAME }}"
script_name: xqueue-rds.sh
- db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh
- template: >
src=mongo.sh.j2
dest=/home/{{ item[0] }}/{{ item[1].script_name }}
mode=0700 owner={{ item[0] }} group=root
with_nested:
- "{{ BASTION_REPLICA_USERS }}"
-
- db_host: "{{ EDXAPP_MONGO_HOSTS[1] }}"
db_name: "{{ EDXAPP_MONGO_DB_NAME }}"
db_port: "{{ EDXAPP_MONGO_PORT }}"
script_name: edxapp-mongo.sh
- db_host: "{{ FORUM_MONGO_HOSTS[1] }}"
db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh
#!/usr/bin/env bash
mongo {{ item[1].db_host }}:{{ item[1].db_port }}/{{ item[1].db_name }} -u {{ COMMON_MONGO_READ_ONLY_USER }} -p"{{ COMMON_MONGO_READ_ONLY_PASS }}"
#!/usr/bin/env bash
mysql -u {{ COMMON_MYSQL_READ_ONLY_USER }} -h {{ item[1].db_host }} -p"{{ COMMON_MYSQL_READ_ONLY_PASS }}" {{ item[1].db_name }}
......@@ -28,7 +28,7 @@ CERTS_KEY_ID: "FEF8D954"
# Path to git identity file for pull access to
# the edX certificates repo - REQUIRED
# Example - {{ secure_dir }}/files/git-identity
CERTS_LOCAL_GIT_IDENTITY: !!null
CERTS_GIT_IDENTITY: !!null
# Path to public and private gpg key for signing
# the edX certificate. Default is a dummy key
CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt"
......@@ -42,7 +42,7 @@ certs_venvs_dir: "{{ certs_app_dir }}/venvs"
certs_venv_dir: "{{ certs_venvs_dir }}/certs"
certs_venv_bin: "{{ certs_venv_dir }}/bin"
certs_git_ssh: /tmp/git_ssh.sh
certs_git_identity: "{{ certs_app_dir }}/git-identity"
certs_git_identity: "{{ certs_app_dir }}/certs-git-identity"
certs_requirements_file: "{{ certs_code_dir }}/requirements.txt"
certs_repo: "git@github.com:/edx/certificates"
certs_version: 'master'
......
......@@ -20,4 +20,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: certs_installed is defined
when: certs_installed is defined and not disable_edx_services
......@@ -16,9 +16,19 @@
- name: writing supervisor script for certificates
template: >
src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
src=certs.conf.j2 dest={{ supervisor_available_dir }}/certs.conf
owner={{ supervisor_user }} mode=0644
- name: enable supervisor script for certificates
file: >
src={{ supervisor_available_dir }}/certs.conf
dest={{ supervisor_cfg_dir }}/certs.conf
owner={{ supervisor_user }}
state=link
force=yes
mode=0644
notify: restart certs
when: not disable_edx_services
- name: create ssh script for git
template: >
......@@ -28,7 +38,7 @@
- name: install read-only ssh key for the certs repo
copy: >
src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
content="{{ CERTS_GIT_IDENTITY }}" dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600
notify: restart certs
......@@ -58,6 +68,7 @@
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure certs has started
supervisorctl_local: >
......@@ -66,6 +77,7 @@
config={{ supervisor_cfg }}
state=started
sudo_user: "{{ supervisor_service_user }}"
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
......@@ -32,8 +32,8 @@
# - certs
#
- name: Checking to see if git identity is set
fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
when: not CERTS_LOCAL_GIT_IDENTITY
fail: msg="You must set CERTS_GIT_IDENTITY var for this role!"
when: not CERTS_GIT_IDENTITY
- name: create application user
user: >
......
......@@ -7,7 +7,9 @@ COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
COMMON_LOG_DIR: "{{ COMMON_DATA_DIR }}/log"
# Override this to create
# common users in all roles
COMMON_USER_INFO: []
# these directories contain
# symlinks for convenience
COMMON_BIN_DIR: "{{ COMMON_BASE_DIR }}/bin"
......@@ -25,7 +27,22 @@ COMMON_HOSTNAME: !!null
COMMON_CUSTOM_DHCLIENT_CONFIG: false
# uncomment and specify your domains.
# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal","example.com"]
COMMON_MOTD_TEMPLATE: "motd.tail.j2"
# These are two special accounts across all databases
# the read only user is granted select privs on all dbs
# the admin user is granted create user privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: 'password'
COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: 'password'
common_debian_pkgs:
- ntp
- ack-grep
......@@ -40,9 +57,13 @@ common_debian_pkgs:
- python2.7
- python-pip
- python2.7-dev
# Not installed by default on vagrant ubuntu
# boxes
- curl
common_pip_pkgs:
- virtualenv==1.10.1
- pip==1.5.4
- virtualenv==1.11.4
- virtualenvwrapper
common_web_user: www-data
......@@ -52,7 +73,14 @@ common_log_user: syslog
common_git_ppa: "ppa:git-core/ppa"
# Skip supervisor tasks
# Useful when supervisor is not installed (local dev)
# When set to true this flag will allow you to install everything but keep
# supervisor from starting any of the services.
# Service files will be placed in supervisor's conf.available.d but not linked
# to supervisors 'conf.d' directory.
disable_edx_services: False
# Some apps run differently in dev mode(forums)
# so different start scripts are generated in dev mode.
devstack: False
common_debian_variants:
......
#!/bin/sh
test -x /usr/sbin/logrotate || exit 0
/usr/sbin/logrotate /etc/logrotate.d/hourly
......@@ -2,3 +2,6 @@
- name: restart rsyslogd
service: name=rsyslog state=restarted
sudo: True
- name: restart ssh
service: name=ssh state=restarted
sudo: True
---
dependencies:
- role: user
user_info: "{{ COMMON_USER_INFO }}"
......@@ -48,13 +48,38 @@
with_items: common_pip_pkgs
- name: Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
template: >
dest=/etc/rsyslog.d/99-edx.conf
src=edx_rsyslog.j2
owner=root group=root mode=644
notify: restart rsyslogd
- name: Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
template: >
dest=/etc/logrotate.d/edx-services
src=etc/logrotate.d/edx_logrotate.j2
owner=root group=root mode=644
# This is in common to keep all logrotation config
# in the same role
- name: Create hourly subdirectory in logrotate.d
file: path=/etc/logrotate.d/hourly state=directory
- name: Install logrotate configuration for tracking file
template: >
dest=/etc/logrotate.d/hourly/tracking.log
src=etc/logrotate.d/hourly/edx_logrotate_tracking_log.j2
owner=root group=root mode=644
- name: Add logrotate for tracking.log to cron.hourly
copy: >
dest=/etc/cron.hourly/logrotate
src=etc/cron.hourly/logrotate
owner=root group=root mode=555
# This can be removed after new release of edX
- name: Remove old tracking.log config from /etc/logrotate.d
file: path=/etc/logrotate.d/tracking.log state=absent
- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
......@@ -73,4 +98,28 @@
- name: update /etc/dhcp/dhclient.conf
template: src=etc/dhcp/dhclient.conf.j2 dest=/etc/dhcp/dhclient.conf
when: COMMON_CUSTOM_DHCLIENT_CONFIG
\ No newline at end of file
when: COMMON_CUSTOM_DHCLIENT_CONFIG
# Remove some of the default motd display on ubuntu
# and add a custom motd. These do not require an
# ssh restart
- name: update the ssh motd on Ubuntu
file: >
mode=0644
path={{ item }}
with_items:
- "/etc/update-motd.d/10-help-text"
- "/usr/share/landscape/50-landscape-sysinfo"
- "/etc/update-motd.d/51-cloudguest"
- "/etc/update-motd.d/91-release-upgrade"
- name: add ssh-warning banner motd
template: >
dest=/etc/motd.tail
src={{ COMMON_MOTD_TEMPLATE }} mode=0755 owner=root group=root
- name: update ssh config
template: >
dest=/etc/ssh/sshd_config
src=sshd_config.j2 mode=0644 owner=root group=root
notify: restart ssh
*******************************************************************
* *
* _ _| |\ \/ / *
* / -_) _` | > < *
* \___\__,_|/_/\_\ *
* *
* Instructions and troubleshooting: *
* https://github.com/edx/configuration/wiki/edX-Developer-Stack *
*******************************************************************
{{ COMMON_LOG_DIR }}/tracking.log {
create
compress
nodelaycompress
dateext
dateformat -%Y%m%d-%s
missingok
notifempty
daily
rotate 16000
size 1M
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
}
*******************************************************************
* _ __ __ *
* _ _| |\ \/ / This system is for the use of authorized *
* / -_) _` | > < users only. Usage of this system may be *
* \___\__,_|/_/\_\ monitored and recorded by system personnel. *
* *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide the *
* evidence from such monitoring to law enforcement officials. *
* *
*******************************************************************
# {{ ansible_managed }}
#
# Changes from the default Ubuntu ssh config:
# - LogLevel set to VERBOSE
#
# What ports, IPs and protocols we listen for
Port 22
# Use these options to restrict which interfaces/protocols sshd will bind to
#ListenAddress ::
#ListenAddress 0.0.0.0
Protocol 2
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
#Privilege Separation is turned on for security
UsePrivilegeSeparation yes
# Lifetime and size of ephemeral version 1 server key
KeyRegenerationInterval 3600
ServerKeyBits 768
# Logging
SyslogFacility AUTH
LogLevel VERBOSE
# Authentication:
LoginGraceTime 120
PermitRootLogin yes
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
#AuthorizedKeysFile %h/.ssh/authorized_keys
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# For this to work you will also need host keys in /etc/ssh_known_hosts
RhostsRSAAuthentication no
# similar for protocol version 2
HostbasedAuthentication no
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
#IgnoreUserKnownHosts yes
# To enable empty passwords, change to yes (NOT RECOMMENDED)
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no
#MaxStartups 10:30:60
#Banner /etc/issue
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
......@@ -5,7 +5,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: discern_installed is defined
when: discern_installed is defined and not disable_edx_services
with_items:
- discern
- discern_celery
......@@ -2,11 +2,23 @@
- name: create supervisor scripts - discern, discern_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
src={{ item }}.conf.j2 dest={{ supervisor_available_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644
sudo_user: "{{ supervisor_user }}"
with_items: ['discern', 'discern_celery']
- name: enable supervisor scripts - discern, discern_celery
file: >
src={{ supervisor_available_dir }}/{{ item }}.conf
dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }}
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
with_items: ['discern', 'discern_celery']
when: not disable_edx_services
#Upload config files for django (auth and env)
- name: create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
......@@ -104,6 +116,7 @@
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure discern, discern_celery has started
supervisorctl_local: >
......@@ -114,6 +127,7 @@
with_items:
- discern
- discern_celery
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
......@@ -13,6 +13,12 @@
#
# OS packages
#
# set this to true to dump all extra vars
# this is currently broken when extra vars
# contains references to vars that are not
# included in the play.
EDX_ANSIBLE_DUMP_VARS: false
edx_ansible_debian_pkgs:
- python-pip
......@@ -32,6 +38,6 @@ edx_ansible_venv_bin: "{{ edx_ansible_venv_dir }}/bin"
edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
edx_ansible_var_file: "{{ edx_ansible_data_dir }}/server-vars.yml"
# edX configuration repo
configuration_version: master
edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml"
......@@ -11,3 +11,4 @@
# Role includes for role edx_ansible
dependencies:
- common
- aws
......@@ -18,31 +18,29 @@
dest={{ COMMON_BIN_DIR }}/update
state=link
- name: create a symlink for ansible-playbook
file: >
src={{ edx_ansible_venv_bin }}/ansible-playbook
dest={{ COMMON_BIN_DIR }}/ansible-playbook
state=link
- name: create a symlink for the playbooks dir
file: >
src={{ edx_ansible_code_dir }}/playbooks
dest={{ COMMON_CFG_DIR }}/playbooks
state=link
- name: dump all vars to yaml
template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
when: EDX_ANSIBLE_DUMP_VARS
- name: clean up var file, removing all version vars
shell: sed -i -e "/{{item}}/d" {{ edx_ansible_var_file }}
with_items:
# deploy versions
- "^edx_platform_version:"
- "^edx_platform_commit:"
- "^xqueue_version:"
- "^forum_version:"
- "^xserver_version:"
- "^discern_ease_version:"
- "^ora_ease_version:"
- "^discern_version:"
- "^ora_version:"
- "^configuration_version:"
- "^ease_version:"
- "^certs_version:"
# other misc vars
- "^tags:"
- "^_original_file:"
- name: create a symlink for var file
- name: create symlink for config file
file: >
src={{ edx_ansible_var_file }}
dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }}
state=link
when: EDX_ANSIBLE_DUMP_VARS
- name: clean up var file, removing all version vars and internal ansible vars (anything not caps)
shell: python -c "import yaml; y=yaml.load(open('{{ edx_ansible_var_file }}')); f=open('{{ edx_ansible_var_file }}', 'wb'); f.write(yaml.safe_dump({key:value for key,value in y.iteritems() if key.isupper()}, default_flow_style=False)); f.close();"
when: EDX_ANSIBLE_DUMP_VARS
......@@ -31,7 +31,13 @@ EDXAPP_MONGO_DB_NAME: 'edxapp'
EDXAPP_MYSQL_DB_NAME: 'edxapp'
EDXAPP_MYSQL_USER: 'edxapp001'
EDXAPP_MYSQL_USER_ADMIN: 'root'
EDXAPP_MYSQL_USER_MIGRATE: 'migrate'
EDXAPP_MYSQL_PASSWORD: 'password'
EDXAPP_MYSQL_PASSWORD_READ_ONLY: 'password'
EDXAPP_MYSQL_PASSWORD_ADMIN: 'password'
EDXAPP_MYSQL_PASSWORD_MIGRATE: 'password'
EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
......@@ -57,6 +63,11 @@ EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_PLATFORM_NAME: 'edX'
EDXAPP_CAS_SERVER_URL: ''
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ''
EDXAPP_CAS_ATTRIBUTE_CALLBACK: ''
EDXAPP_CAS_ATTRIBUTE_PACKAGE: ''
EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true
CERTIFICATES_ENABLED: true
......@@ -104,6 +115,7 @@ EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL: 'no-reply@example.com'
EDXAPP_ENV_EXTRA: {}
EDXAPP_AUTH_EXTRA: {}
EDXAPP_MKTG_URL_LINK_MAP: {}
EDXAPP_MKTG_URLS: {}
# This sets the url for static files
# Override this var to use a CDN
# Example: xxxxx.cloudfront.net/static/
......@@ -123,21 +135,39 @@ EDXAPP_PYTHON_SANDBOX: false
# it puts the sandbox in 'complain' mode, for reporting but not enforcement
EDXAPP_SANDBOX_ENFORCE: true
# Supply authorized keys used for remote management via the automated
# role, see meta/main.yml. Ensure you know what this does before
# enabling. The boolean flag determines whether the role is included.
# This is done to make it possible to disable remote access easily by
# setting the flag to true and providing an empty array.
EDXAPP_INCLUDE_AUTOMATOR_ROLE: false
# Supply authorized keys used for remote management via the user
# role.
EDXAPP_AUTOMATOR_NAME: automator
EDXAPP_AUTOMATOR_AUTHORIZED_KEYS: []
# These are the commands allowed by the automator role.
# The --settings parameter must be set at the end so that
# is caught by the glob.
# Example: sudo -u www-data /edx/bin/python.edxapp /edx/bin/manage.edxapp lms migrate --settings=aws
EDXAPP_AUTOMATOR_SUDO_CMDS:
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms migrate *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp cms migrate *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms syncdb *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp cms syncdb *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms seed_permissions_roles *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms set_staff *"
- "ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ COMMON_BIN_DIR }}/python.edxapp {{ COMMON_BIN_DIR }}/manage.edxapp lms transfer_students *"
EDXAPP_USE_GIT_IDENTITY: false
# Example: "{{ secure_dir }}/files/git-identity"
EDXAPP_LOCAL_GIT_IDENTITY: !!null
# Paste the contents of the git identity
# into this var
EDXAPP_GIT_IDENTITY: !!null
# Configuration for database migration
EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ EDXAPP_MYSQL_DB_NAME }}"
EDXAPP_UPDATE_STATIC_FILES_KEY: false
# Set this to true if you want to install the private pip
# requirements in the edx-platform repo.
# This will use EDXAPP_GIT_IDENTITY, EDXAPP_USE_GIT_IDENTITY
# must be set to true if EDXAPP_INSTALL_PRIVATE_REQUIREMENTS is
# set to true
EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
#-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -161,7 +191,7 @@ edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_course_data_dir: "{{ edxapp_data_dir }}/data"
edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads"
edxapp_theme_dir: "{{ edxapp_data_dir }}/themes"
edxapp_git_identity: "{{ edxapp_app_dir }}/{{ EDXAPP_LOCAL_GIT_IDENTITY|basename }}"
edxapp_git_identity: "{{ edxapp_app_dir }}/edxapp-git-identity"
edxapp_git_ssh: "/tmp/edxapp_git_ssh.sh"
edxapp_pypi_local_mirror: "http://localhost:{{ devpi_port }}/root/pypi/+simple"
edxapp_workers:
......@@ -322,6 +352,7 @@ generic_env_config: &edxapp_generic_env
FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
TIME_ZONE: $EDXAPP_TIME_ZONE
MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP
MKTG_URLS: $EDXAPP_MKTG_URLS
# repo root for courses
GITHUB_REPO_ROOT: $edxapp_course_data_dir
CACHES:
......@@ -361,7 +392,9 @@ generic_env_config: &edxapp_generic_env
DEFAULT_FEEDBACK_EMAIL: $EDXAPP_DEFAULT_FEEDBACK_EMAIL
SERVER_EMAIL: $EDXAPP_DEFAULT_SERVER_EMAIL
BULK_EMAIL_DEFAULT_FROM_EMAIL: $EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL
CAS_SERVER_URL: $EDXAPP_CAS_SERVER_URL
CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
lms_auth_config:
<<: *edxapp_generic_auth
......@@ -467,6 +500,7 @@ post_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/post.txt"
base_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/base.txt"
github_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/github.txt"
repo_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/repo.txt"
private_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/edx-private.txt"
sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base.txt"
sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt"
......@@ -516,10 +550,3 @@ edxapp_cms_variant: cms
# Worker Settings
worker_django_settings_module: 'aws'
# This array is used by the automator role to provide
# access to a limited set of commands via rbash. The
# commands listed here will be symlinked to ~/bin/ for
# the automator user.
edxapp_automated_rbash_links:
- /usr/bin/sudo
......@@ -5,7 +5,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
when: edxapp_installed is defined and celery_worker is not defined and not devstack
when: edxapp_installed is defined and celery_worker is not defined and not disable_edx_services
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
......@@ -15,6 +15,6 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: edxapp_installed is defined and celery_worker is defined and not devstack
when: edxapp_installed is defined and celery_worker is defined and not disable_edx_services
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
......@@ -6,9 +6,10 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
- role: automated
automated_rbash_links: "{{ edxapp_automated_rbash_links }}"
automated_sudoers_dest: '99-automator-edxapp-server'
automated_sudoers_template: 'roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2'
automated_authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}"
when: EDXAPP_INCLUDE_AUTOMATOR_ROLE
- role: user
user_info:
- name: "{{ EDXAPP_AUTOMATOR_NAME }}"
sudo_cmds: "{{ EDXAPP_AUTOMATOR_SUDO_CMDS }}"
type: restricted
authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}"
when: EDXAPP_AUTOMATOR_AUTHORIZED_KEYS|length != 0
......@@ -22,7 +22,7 @@
- name: install read-only ssh key
copy: >
src={{ EDXAPP_LOCAL_GIT_IDENTITY }} dest={{ edxapp_git_identity }}
content="{{ EDXAPP_GIT_IDENTITY }}" dest={{ edxapp_git_identity }}
force=yes owner={{ edxapp_user }} mode=0600
when: EDXAPP_USE_GIT_IDENTITY
......@@ -54,10 +54,6 @@
- "restart edxapp"
- "restart edxapp_workers"
- name: remove read-only ssh key
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- name: create checksum for requirements, package.json and Gemfile
shell: >
/usr/bin/md5sum {{ " ".join(edxapp_chksum_req_files) }} 2>/dev/null > /var/tmp/edxapp.req.new
......@@ -186,6 +182,37 @@
- "restart edxapp"
- "restart edxapp_workers"
# Private requriements require a ssh key to install, use the same key as the private key for edx-platform
# If EDXAPP_INSTALL_PRIVATE_REQUIREMENTS is set to true EDXAPP_USE_GIT_IDENTITY must also be true
- name : install python private requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }}
with_items:
- "{{ private_requirements_file }}"
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
when: EDXAPP_INSTALL_PRIVATE_REQUIREMENTS
notify:
- "restart edxapp"
- "restart edxapp_workers"
# If using CAS and you have a function for mapping attributes, install
# the module here. The next few tasks set up the python code sandbox
- name: install CAS attribute module
pip: >
name="{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}"
virtualenv="{{edxapp_venv_dir}}"
state=present
extra_args="-i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_user }}"
when: EDXAPP_CAS_ATTRIBUTE_PACKAGE|length > 0
notify: "restart edxapp"
# Install the sandbox python modules into {{ edxapp_venv_dir }}
- name : install sandbox requirements into regular venv
......@@ -253,7 +280,7 @@
- edxapp-sandbox
- name: compiling all py files in the edx-platform repo
shell: "{{ edxapp_venv_bin }}/python -m compileall {{ edxapp_code_dir }}"
shell: "{{ edxapp_venv_bin }}/python -m compileall -x .git/.* {{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......@@ -300,7 +327,7 @@
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not devstack
when: not disable_edx_services
- name: ensure edxapp has started
supervisorctl_local: >
......@@ -309,7 +336,7 @@
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
sudo_user: "{{ supervisor_service_user }}"
when: celery_worker is not defined and not devstack
when: celery_worker is not defined and not disable_edx_services
with_items: service_variants_enabled
- name: ensure edxapp_workers has started
......@@ -318,18 +345,30 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: celery_worker is defined and not devstack
when: celery_worker is defined and not disable_edx_services
with_items: edxapp_workers
sudo_user: "{{ supervisor_service_user }}"
- name: create a symlink for venv python
- name: create symlinks from the venv bin dir
file: >
src="{{ edxapp_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.edxapp
dest={{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.edxapp
state=link
with_items:
- python
- pip
- django-admin.py
- name: create symlinks from the repo dir
file: >
src="{{ edxapp_code_dir }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.edxapp
state=link
with_items:
- manage.py
- name: remove read-only ssh key
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- set_fact: edxapp_installed=true
......@@ -2,14 +2,6 @@
# - group_vars/all
# - common/tasks/main.yml
---
- name: Install logrotate configuration for tracking file
template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: create application user
user: >
name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
......
......@@ -3,6 +3,7 @@
src={{ item }}.env.json.j2
dest={{ edxapp_app_dir }}/{{ item }}.env.json
sudo_user: "{{ edxapp_user }}"
tags: edxapp_cfg
with_items: service_variants_enabled
notify:
- "restart edxapp"
......@@ -13,6 +14,7 @@
src={{ item }}.auth.json.j2
dest={{ edxapp_app_dir }}/{{ item }}.auth.json
sudo_user: "{{ edxapp_user }}"
tags: edxapp_cfg
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -22,26 +24,55 @@
- name: "writing {{ item }} supervisor script"
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
src={{ item }}.conf.j2 dest={{ supervisor_available_dir }}/{{ item }}.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
with_items: service_variants_enabled
when: celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_user }}"
- name: writing edxapp supervisor script
template: >
src=edxapp.conf.j2 dest={{ supervisor_cfg_dir }}/edxapp.conf
src=edxapp.conf.j2 dest={{ supervisor_available_dir }}/edxapp.conf
owner={{ supervisor_user }}
when: celery_worker is not defined and not devstack
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
# write the supervisor script for celery workers
- name: writing celery worker supervisor script
template: >
src=workers.conf.j2 dest={{ supervisor_cfg_dir }}/workers.conf
src=workers.conf.j2 dest={{ supervisor_available_dir }}/workers.conf
owner={{ supervisor_user }}
when: celery_worker is defined and not devstack
group={{ supervisor_user }}
sudo_user: "{{ supervisor_user }}"
# Enable the supervisor jobs
- name: "enable {{ item }} supervisor script"
file: >
src={{ supervisor_available_dir }}/{{ item }}.conf
dest={{ supervisor_cfg_dir }}/{{ item }}.conf
state=link
force=yes
with_items: service_variants_enabled
when: celery_worker is not defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
- name: "enable edxapp supervisor script"
file: >
src={{ supervisor_available_dir }}/edxapp.conf
dest={{ supervisor_cfg_dir }}/edxapp.conf
state=link
force=yes
when: celery_worker is not defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
- name: "enable celery worker supervisor script"
file: >
src={{ supervisor_available_dir }}/workers.conf
dest={{ supervisor_cfg_dir }}/workers.conf
state=link
force=yes
when: celery_worker is defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}"
# Fake syncdb with migrate, only when fake_migrations is defined
......@@ -66,7 +97,8 @@
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes"
environment:
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD }}"
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......@@ -95,6 +127,9 @@
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes"
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......
{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do cms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ cms_env_config | to_nice_json }}
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py migrate *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py seed_permissions_roles *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py set_staff *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py transfer_students *
{% do lms_preview_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_preview_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ lms_preview_env_config | to_nice_json }}
{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %}
{%- do lms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %}
{% endif %}
{{ lms_env_config | to_nice_json }}
......@@ -13,6 +13,12 @@
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: setup the migration db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER_MIGRATE }}
password={{ EDXAPP_MYSQL_PASSWORD_MIGRATE}}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
- name: setup the edxapp db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
......@@ -31,14 +37,14 @@
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
when: XQUEUE_MYSQL_USER is defined and not devstack
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
when: XQUEUE_MYSQL_USER is defined and not devstack
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services
- name: setup the ora db user
mysql_user: >
......@@ -58,7 +64,7 @@
name={{ DISCERN_MYSQL_USER }}
password={{ DISCERN_MYSQL_PASSWORD }}
priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL'
when: DISCERN_MYSQL_USER is defined and not devstack
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: create a database for discern
......@@ -66,7 +72,7 @@
db=discern
state=present
encoding=utf8
when: DISCERN_MYSQL_USER is defined and not devstack
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: install memcached
......
......@@ -12,5 +12,5 @@ elasticsearch_group: "elasticsearch"
#
# Defaults for a single server installation.
ELASTICSEARCH_CLUSTERED: true
ELASTICSEARCH_HEAP_SIZE: "512m"
\ No newline at end of file
ELASTICSEARCH_CLUSTERED: false
ELASTICSEARCH_HEAP_SIZE: "512m"
......@@ -83,4 +83,8 @@
when: bigdesk.stat.isdir is not defined
- name: Ensure elasticsearch is enabled and started
service: name=elasticsearch state=restarted enabled=yes
\ No newline at end of file
service: name=elasticsearch state=started enabled=yes
- name: Restart elastic when there has been an upgrade
service: name=elasticsearch state=restarted enabled=yes
when: elasticsearch_reinstall.changed
......@@ -18,7 +18,7 @@ FORUM_MONGO_HOSTS:
FORUM_MONGO_TAGS: !!null
FORUM_MONGO_PORT: "27017"
FORUM_MONGO_DATABASE: "cs_comments_service"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?{{ FORUM_MONGO_TAGS }}{%- endif -%}"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?tags={{ FORUM_MONGO_TAGS }}{%- endif -%}"
FORUM_SINATRA_ENV: "development"
FORUM_RACK_ENV: "development"
FORUM_NGINX_PORT: "18080"
......@@ -29,6 +29,9 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0"
FORUM_LISTEN_PORT: "4567"
FORUM_USE_TCP: false
forum_environment:
RBENV_ROOT: "{{ forum_rbenv_root }}"
......@@ -45,20 +48,13 @@ forum_environment:
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
DATA_DIR: "{{ forum_data_dir }}"
FORUM_LISTEN_HOST: "{{ FORUM_LISTEN_HOST }}"
FORUM_LISTEN_PORT: "{{ FORUM_LISTEN_PORT }}"
forum_user: "forum"
forum_ruby_version: "1.9.3-p448"
forum_source_repo: "https://github.com/edx/cs_comments_service.git"
# Currently we are installing a branch of the comments service
# that configures unicorn to listen on a unix socket and get the
# worker count configuration from the environment. We are not
# merging to master of the comments service yet as this will have
# some incompatibilities with our Heroku deployments.
#
# https://github.com/edx/cs_comments_service/pull/83
#
forum_version: "e0d/unicorn-config"
forum_unicorn_port: "4567"
forum_version: "master"
#
# test config
......
......@@ -5,4 +5,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: forum_installed is defined and not devstack
when: forum_installed is defined and not disable_edx_services
......@@ -2,11 +2,23 @@
- name: create the supervisor config
template: >
src=forum.conf.j2 dest={{ supervisor_cfg_dir }}/forum.conf
src=forum.conf.j2 dest={{ supervisor_available_dir }}/forum.conf
owner={{ supervisor_user }}
group={{ supervisor_user }}
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not devstack
register: forum_supervisor
- name: enable the supervisor config
file: >
src={{ supervisor_available_dir }}/forum.conf
dest={{ supervisor_cfg_dir }}/forum.conf
owner={{ supervisor_user }}
state=link
force=yes
mode=0644
sudo_user: "{{ supervisor_user }}"
when: not disable_edx_services
register: forum_supervisor
- name: create the supervisor wrapper
......@@ -15,7 +27,6 @@
dest={{ forum_supervisor_wrapper }}
mode=0755
sudo_user: "{{ forum_user }}"
when: not devstack
notify: restart the forum service
- name: git checkout forum repo into {{ forum_code_dir }}
......@@ -41,7 +52,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: not devstack
when: not disable_edx_services
- name: ensure forum is started
supervisorctl_local: >
......@@ -49,7 +60,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
when: not disable_edx_services
- include: test.yml tags=deploy
......
......@@ -3,9 +3,9 @@
- name: test that the required service are listening
wait_for: port={{ item.port }} host={{ item.host }} timeout=30
with_items: forum_services
when: not devstack
when: not disable_edx_services
- name: test that mongo replica set members are listing
wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30
with_items: FORUM_MONGO_HOSTS
when: not devstack
\ No newline at end of file
when: not disable_edx_services
......@@ -5,6 +5,8 @@ cd {{ forum_code_dir }}
{% if devstack %}
{{ forum_rbenv_shims }}/ruby app.rb
{% elif FORUM_USE_TCP %}
{{ forum_gem_bin }}/unicorn -c config/unicorn_tcp.rb
{% else %}
{{ forum_gem_bin }}/unicorn -c config/unicorn.rb
{% endif %}
......@@ -6,3 +6,4 @@ stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
stopasgroup=true
stopsignal=QUIT
......@@ -3,7 +3,6 @@
[gerrit]
basePath = {{ gerrit_data_dir }}
canonicalWebUrl = http://{{ gerrit_hostname }}/
changeScreen = CHANGE_SCREEN2
[database]
type = MYSQL
hostname = {{ gerrit_db_hostname }}
......
# override this var to add a prefix to the prompt
# also need to set commont_update_bashrc for to
# update the system bashrc default
GH_USERS_PROMPT: ""
gh_users: []
---
# gh_users
#
# Creates OS accounts for users based on their github credential.
# Takes a list gh_users as a parameter which is a list of users
#
# roles:
# - role: gh_users
# gh_users:
# - joe
# - mark
- name: creating default .bashrc
template: >
src=default.bashrc.j2 dest=/etc/skel/.bashrc
mode=0644 owner=root group=root
- name: create gh group
group: name=gh state=present
# TODO: give limited sudo access to this group
- name: grant full sudo access to gh group
copy: >
content="%gh ALL=(ALL) NOPASSWD:ALL"
dest=/etc/sudoers.d/gh owner=root group=root
mode=0440 validate='visudo -cf %s'
- name: create github users
user:
name={{ item }} groups=gh
shell=/bin/bash
with_items: gh_users
- name: create .ssh directory
file:
path=/home/{{ item }}/.ssh state=directory mode=0700
owner={{ item }}
with_items: gh_users
- name: copy github key[s] to .ssh/authorized_keys
get_url:
url=https://github.com/{{ item }}.keys
dest=/home/{{ item }}/.ssh/authorized_keys mode=0600
owner={{ item }}
with_items: gh_users
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role jenkins_admin
#
#
# vars are namespace with the module name.
#
JENKINS_ADMIN_NAME: 'default_jenkins_name'
# jenkins_admin also requires other variables that are not defined by default.
# JENKINS_ADMIN_S3_PROFILE: !!null
# JENKINS_ADMIN_CONFIGURATION_REPO: !!null
# JENKINS_ADMIN_CONFIGURATION_SECURE_REPO: !!null
#
# # git key to use to checkout secure repos on jenkins and in abbey
# JENKINS_ADMIN_GIT_KEY: !!null
#
# # EC2 Key to use when bringing up the abbey instance in ec2 (aws key-pair)
# JENKINS_ADMIN_EC2_KEY: !!null
jenkins_admin_role_name: jenkins_admin
#
# OS packages
#
jenkins_admin_debian_pkgs:
# These are copied from the edxapp
# role so that we can create virtualenvs
# on the jenkins server for edxapp
- npm
# for compiling the virtualenv
# (only needed if wheel files aren't available)
- build-essential
- s3cmd
- pkg-config
- graphviz-dev
- graphviz
- libmysqlclient-dev
# for scipy, do not install
# libopenblas-base, it will cause
# problems for numpy
- gfortran
- libatlas3gf-base
- liblapack-dev
- g++
- libxml2-dev
- libxslt1-dev
# apparmor
- apparmor-utils
# misc
- curl
- ipython
- npm
- ntp
# for shapely
- libgeos-dev
# i18n
- gettext
# Pillow (PIL Fork) Dependencies
# Needed by the CMS to manipulate images.
- libjpeg8-dev
- libpng12-dev
# for status.edx.org
- ruby
- ruby1.9.1
jenkins_admin_gem_pkgs:
# for generating status.edx.org
- { name: sass, version: "3.2.4" }
jenkins_admin_redhat_pkgs: []
jenkins_admin_plugins:
- { name: "greenballs", version: "1.14" }
- { name: "rebuild", version: "1.21" }
- { name: "build-user-vars-plugin", version: "1.1" }
- { name: "build-token-root", version: "1.1" }
jenkins_admin_jobs:
- 'backup-jenkins'
- 'build-ami'
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role jenkins_admin
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- common
- jenkins_master
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role jenkins_admin
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- fail: "JENKINS_ADMIN_S3_PROFILE is not defined."
when: JENKINS_ADMIN_S3_PROFILE is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.name is not defined."
when: JENKINS_ADMIN_S3_PROFILE.name is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.access_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.access_key is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_REPO is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined
- fail: "JENKINS_ADMIN_GIT_KEY is not defined."
when: JENKINS_ADMIN_GIT_KEY is not defined
- fail: "JENKINS_ADMIN_EC2_KEY is not defined."
when: JENKINS_ADMIN_EC2_KEY is not defined
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
- name: download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: jenkins_admin_plugins
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: jenkins_admin_plugins
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_admin_plugins
notify:
- restart Jenkins
- name: configure s3 plugin
template: >
src="./{{jenkins_home}}/hudson.plugins.s3.S3BucketPublisher.xml.j2"
dest="{{jenkins_home}}/hudson.plugins.s3.S3BucketPublisher.xml"
owner={{jenkins_user}}
group={{jenkins_group}}
mode=0644
- name: create the ssh directory
file: >
path={{jenkins_home}}/.ssh
owner={{jenkins_user}}
group={{jenkins_group}}
mode=0700
state=directory
# Need to add Github to known_hosts to avoid
# being prompted when using git through ssh
- name: Add github.com to known_hosts if it does not exist
shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
- name: drop the secure credentials
copy: >
content="{{ JENKINS_ADMIN_GIT_KEY }}"
dest={{jenkins_home}}/.ssh/id_rsa
owner={{jenkins_user}}
group={{jenkins_group}}
mode=0600
- name: create job directory
file: >
path="{{jenkins_home}}/jobs"
owner="{{jenkins_user}}"
group="{{jenkins_group}}"
mode=0755
state=directory
- name: create admin job directories
file: >
path="{{jenkins_home}}/jobs/{{item}}"
owner={{jenkins_user}}
group={{jenkins_group}}
mode=0755
state=directory
with_items: jenkins_admin_jobs
- name: create admin job config files
template: >
src="./{{jenkins_home}}/jobs/{{item}}/config.xml.j2"
dest="{{jenkins_home}}/jobs/{{item}}/config.xml"
owner={{jenkins_user}}
group={{jenkins_group}}
mode=0644
with_items: jenkins_admin_jobs
- name: install system packages for edxapp virtualenvs
apt: pkg={{ item }} state=present
with_items: jenkins_admin_debian_pkgs
# This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins
- name: grant sudo access to the jenkins user
copy: >
content="{{ jenkins_user }} ALL=({{ jenkins_user }}) NOPASSWD:ALL"
dest=/etc/sudoers.d/99-jenkins owner=root group=root
mode=0440 validate='visudo -cf %s'
- name: install global gem dependencies
gem: name={{ item.name }} state=present version={{ item.version }}
with_items: jenkins_admin_gem_pkgs
<?xml version='1.0' encoding='UTF-8'?>
<hudson.plugins.s3.S3BucketPublisher_-DescriptorImpl plugin="s3@0.5">
<profiles>
<hudson.plugins.s3.S3Profile>
<name>{{JENKINS_ADMIN_S3_PROFILE.name}}</name>
<accessKey>{{JENKINS_ADMIN_S3_PROFILE.access_key}}</accessKey>
<secretKey>{{JENKINS_ADMIN_S3_PROFILE.secret_key}}</secretKey>
</hudson.plugins.s3.S3Profile>
</profiles>
</hudson.plugins.s3.S3BucketPublisher_-DescriptorImpl>
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties/>
<scm class="hudson.scm.NullSCM"/>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers>
<hudson.triggers.TimerTrigger>
<spec>@daily</spec>
</hudson.triggers.TimerTrigger>
</triggers>
<concurrentBuild>false</concurrentBuild>
<builders>
<hudson.tasks.Shell>
<command>
#!/bin/bash -x
# Delete all files in the workspace
rm -rf *
# Create a directory for the job definitions
mkdir -p $BUILD_ID/jobs
# Copy global configuration files into the workspace
cp $JENKINS_HOME/*.xml $BUILD_ID/
# Copy keys and secrets into the workspace
cp $JENKINS_HOME/identity.key $BUILD_ID/
cp $JENKINS_HOME/secret.key $BUILD_ID/
cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/
cp -r $JENKINS_HOME/secrets $BUILD_ID/
# Copy user configuration files into the workspace
#cp -r $JENKINS_HOME/users $BUILD_ID/
# Copy job definitions into the workspace
rsync -am --include=&apos;config.xml&apos; --include=&apos;*/&apos; --prune-empty-dirs --exclude=&apos;*&apos; $JENKINS_HOME/jobs/ $BUILD_ID/jobs/
# Create an archive from all copied files (since the S3 plugin cannot copy folders recursively)
tar czf $BUILD_ID.tar.gz $BUILD_ID/
# Remove the directory so only the archive gets copied to S3
rm -rf $BUILD_ID
</command>
</hudson.tasks.Shell>
</builders>
<publishers>
<hudson.plugins.s3.S3BucketPublisher plugin="s3@0.5">
<profileName>{{JENKINS_ADMIN_S3_PROFILE.name}}</profileName>
<entries>
<hudson.plugins.s3.Entry>
<bucket>edx-jenkins-backups/{{JENKINS_ADMIN_NAME}}</bucket>
<sourceFile>${BUILD_ID}.tar.gz</sourceFile>
<storageClass>STANDARD</storageClass>
<selectedRegion>US_EAST_1</selectedRegion>
</hudson.plugins.s3.Entry>
</entries>
<userMetadata/>
</hudson.plugins.s3.S3BucketPublisher>
</publishers>
<buildWrappers/>
</project>
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties>
<hudson.model.ParametersDefinitionProperty>
<parameterDefinitions>
<hudson.model.StringParameterDefinition>
<name>play</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>deployment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>environment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>refs</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>vars</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration</name>
<description>The GITREF of configuration to use.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration_secure</name>
<description>The GITREF of configuration-secure repository to use.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>base_ami</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
</parameterDefinitions>
</hudson.model.ParametersDefinitionProperty>
<com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.20">
<autoRebuild>false</autoRebuild>
</com.sonyericsson.rebuild.RebuildSettings>
</properties>
<scm class="org.jenkinsci.plugins.multiplescms.MultiSCM" plugin="multiple-scms@0.2">
<scms>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{JENKINS_ADMIN_CONFIGURATION_REPO}}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration</scmName>
</hudson.plugins.git.GitSCM>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{JENKINS_ADMIN_CONFIGURATION_SECURE_REPO}}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration-secure</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration-secure</scmName>
</hudson.plugins.git.GitSCM>
</scms>
</scm>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<authToken>MULTIPASS</authToken>
<triggers/>
<concurrentBuild>true</concurrentBuild>
<builders>
<jenkins.plugins.shiningpanda.builders.VirtualenvBuilder plugin="shiningpanda@0.20">
<pythonName>System-CPython-2.7</pythonName>
<home></home>
<clear>false</clear>
<useDistribute>true</useDistribute>
<systemSitePackages>false</systemSitePackages>
<nature>shell</nature>
<command>
#!/bin/bash -x
if [[ "\$play" == "" ]]; then
echo "No Play Specified. Nothing to Do."
exit 0
fi
export PYTHONUNBUFFERED=1
export PIP_DOWNLOAD_CACHE=\$WORKSPACE/pip-cache
cd configuration
pip install -r requirements.txt
cd util/vpc-tools/
echo "\$refs" > /var/tmp/$BUILD_ID-refs.yml
cat /var/tmp/$BUILD_ID-refs.yml
echo "\$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p \$play -t c1.medium -d \$deployment -e \$environment -i /edx/var/jenkins/.ssh/id_rsa -b \$base_ami --vars /var/tmp/\$BUILD_ID-extra-vars.yml --refs /var/tmp/\$BUILD_ID-refs.yml -c \$BUILD_NUMBER --configuration-version \$configuration --configuration-secure-version \$configuration_secure -k deployment --configuration-secure-repo "git@github.com:edx-ops/prod-secure.git"
</command>
<ignoreExitCode>false</ignoreExitCode>
</jenkins.plugins.shiningpanda.builders.VirtualenvBuilder>
<hudson.tasks.Shell>
<command>#!/bin/bash -x
if [[ &quot;\$play&quot; == &quot;&quot; ]]; then
echo &quot;No Play Specified. Nothing to Do.&quot;
exit 0
fi
rm /var/tmp/\$BUILD_ID-extra-vars.yml
rm /var/tmp/\$BUILD_ID-refs.yml</command>
</hudson.tasks.Shell>
</builders>
<publishers/>
</project>
......@@ -51,6 +51,11 @@
notify:
- restart Jenkins
# Using this instead of the user module because the user module
# fails if the directory exists.
- name: set home directory for jenkins user
shell: usermod -d {{jenkins_home}} {{jenkins_user}}
- name: make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
......
......@@ -24,7 +24,7 @@ jenkins_rbenv_root: "{{ jenkins_home }}/.rbenv"
jenkins_ruby_version: "1.9.3-p374"
# JSCover direct download URL
jscover_url: "http://superb-dca2.dl.sourceforge.net/project/jscover/JSCover-1.0.2.zip"
jscover_url: "http://files.edx.org/testeng/JSCover-1.0.2.zip"
jscover_version: "1.0.2"
# Python
......
......@@ -10,8 +10,8 @@
- fail: msg="ora_user not defined. eg. ora, www-data"
when: ora_user is not defined
- fail: msg="COMMON_ENV_TYPE not defined. eg. stage, prod"
when: COMMON_ENV_TYPE is not defined
- fail: msg="COMMON_ENVIRONMENT not defined. eg. stage, prod"
when: COMMON_ENVIRONMENT is not defined
- fail: msg="secure_dir not defined. This is a path to the secure ora config file."
when: secure_dir is not defined
......
......@@ -8,23 +8,24 @@
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Vars for role automated
# Defaults for role newrelic
#
#
# vars are namespaced with the module name.
#
automated_role_name: automated
automated_user: "automator"
automated_home: "/home/automator"
automated_rbash_links: !!null
automated_sudoers_template: !!null
automated_sudoers_file: !!null
#
newrelic_role_name: newrelic
NEWRELIC_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_KEY_ID: '548C16BF'
NEWRELIC_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_KEY_ID }}.gpg'
NEWRELIC_LICENSE_KEY: 'NEW-RELIC-KEY'
#
# OS packages
#
automated_debian_pkgs: []
newrelic_debian_pkgs:
- newrelic-sysmond
automated_redhat_pkgs: []
newrelic_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role newrelic
#
# Overview:
#
# Installs and configures the newrelic system monitoring agent. The server
# will be identified in Newrelic by hostname and this cannot be changed
# in configuration. Also configuration does not allow hosts to be
# associated with an application or tagged.
#
# Example play:
#
# - name: Install Newrelic system agent
# hosts: all
# sudo: True
# gather_facts: True
# roles:
# - newrelic
- name: add apt key
apt_key: >
id="{{ NEWRELIC_KEY_ID }}" url="{{ NEWRELIC_KEY_URL }}"
state=present
- name: install apt repository
apt_repository: repo="{{ NEWRELIC_REPO }}" update_cache=yes
- name: install newrelic agent
apt: pkg="newrelic-sysmond"
- name: Install newrelic related system packages.
apt: pkg={{ item }} install_recommends=yes state=present
with_items: newrelic_debian_pkgs
- name: configure the agent with the license key
shell: >
nrsysmond-config --set license_key="{{ NEWRELIC_LICENSE_KEY }}"
ssl=true
- name: ensure started and enabled
service: name=newrelic-sysmond state=restarted enabled=yes
\ No newline at end of file
......@@ -75,20 +75,46 @@
path={{ nginx_log_dir}} state=directory
owner={{ common_web_user }} group={{ common_web_user }}
# Check to see if the ssl cert/key exists before copying.
# This extra check is done to prevent failures when
# ansible-playbook is run locally
- local_action:
module: stat
path: "{{ NGINX_SSL_CERTIFICATE }}"
sudo: False
register: ssl_cert
- local_action:
module: stat
path: "{{ NGINX_SSL_KEY }}"
sudo: False
register: ssl_key
- name: copy ssl cert
copy: >
src={{ NGINX_SSL_CERTIFICATE }}
dest=/etc/ssl/certs/{{ item|basename }}
dest=/etc/ssl/certs/
owner=root group=root mode=0644
when: NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem'
when: ssl_cert.stat.exists and NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem'
- name: copy ssl key
copy: >
src={{ NGINX_SSL_KEY }}
dest=/etc/ssl/private/{{ item|basename }}
dest=/etc/ssl/private/
owner=root group=root mode=0640
when: NGINX_ENABLE_SSL and NGINX_SSL_KEY != 'ssl-cert-snakeoil.key'
when: ssl_key.stat.exists and NGINX_ENABLE_SSL and NGINX_SSL_KEY != 'ssl-cert-snakeoil.key'
# removing default link
- name: Removing default nginx config and restart (enabled)
file: path={{ nginx_sites_enabled_dir }}/default state=absent
notify: reload nginx
# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
- name: Set up nginx access log rotation
template: >
dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2
owner=root group=root mode=644
# removing default link
- name: Removing default nginx config and restart (enabled)
......
......@@ -20,6 +20,8 @@ server {
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
# request the browser to use SSL for all connections
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
{% else %}
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
......
......@@ -49,4 +49,4 @@ location @proxy_to_app {
proxy_redirect off;
proxy_pass http://forum_app_server;
}
}
\ No newline at end of file
}
......@@ -15,6 +15,8 @@ server {
# CMS requires larger value for course assets, values provided
# via hiera.
client_max_body_size 4M;
# request the browser to use SSL for all connections
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
......
......@@ -20,6 +20,8 @@ server {
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
# request the browser to use SSL for all connections
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
{% else %}
listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}};
......
......@@ -6,6 +6,7 @@
state=restarted
config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }}
when: not disable_edx_services
- name: restart notifier-celery-workers
supervisorctl_local: >
......@@ -13,3 +14,4 @@
state=restarted
config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }}
when: not disable_edx_services
......@@ -85,18 +85,36 @@
file:
path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: supervisord config for celery workers
- name: write supervisord config for celery workers
template: >
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
dest="{{ supervisor_available_dir }}/notifier-celery-workers.conf"
sudo_user: "{{ supervisor_user }}"
notify: restart notifier-celery-workers
- name: supervisord config for scheduler
- name: write supervisord config for scheduler
template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_available_dir }}/notifier-scheduler.conf"
sudo_user: "{{ supervisor_user }}"
- name: enable supervisord config for celery workers
file: >
src="{{ supervisor_available_dir }}/notifier-celery-workers.conf"
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
state=link
force=yes
sudo_user: "{{ supervisor_user }}"
notify: restart notifier-celery-workers
when: not disable_edx_services
- name: enable supervisord config for scheduler
file: >
src="{{ supervisor_available_dir }}/notifier-scheduler.conf"
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
state=link
force=yes
sudo_user: "{{ supervisor_user }}"
notify: restart notifier-scheduler
when: not disable_edx_services
- include: deploy.yml tags=deploy
......@@ -5,7 +5,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: ora_installed is defined and not devstack
when: ora_installed is defined and not disable_edx_services
- name: restart ora_celery
supervisorctl_local: >
......@@ -13,4 +13,4 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: ora_installed is defined and not devstack
when: ora_installed is defined and not disable_edx_services
- name: create supervisor scripts - ora, ora_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
src={{ item }}.conf.j2 dest={{ supervisor_available_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
with_items: ['ora', 'ora_celery']
- name: enable supervisor scripts - ora, ora_celery
file: >
src={{ supervisor_available_dir }}/{{ item }}.conf
dest={{ supervisor_cfg_dir }}/{{ item }}.conf
state=link
force=yes
owner={{ supervisor_user }}
group={{ common_web_user }}
mode=0644
notify:
- restart ora
- restart ora_celery
with_items: ['ora', 'ora_celery']
when: not devstack
when: not disable_edx_services
- include: ease.yml
......@@ -86,7 +97,7 @@
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
when: not devstack
when: not disable_edx_services
changed_when: supervisor_update.stdout != ""
- name: ensure ora is started
......@@ -95,7 +106,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
when: not disable_edx_services
- name: ensure ora_celery is started
supervisorctl_local: >
......@@ -103,7 +114,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
---
oraclejdk_version: "7u25"
oraclejdk_version: "7u51"
# what the archive unpacks to
oraclejdk_base: "jdk1.7.0_25"
oraclejdk_build: "b15"
oraclejdk_base: "jdk1.7.0_51"
oraclejdk_build: "b13"
oraclejdk_platform: "linux"
oraclejdk_arch: "x64"
oraclejdk_file: "jdk-{{ oraclejdk_version }}-{{ oraclejdk_platform }}-{{ oraclejdk_arch }}.tar.gz"
......
......@@ -19,7 +19,7 @@
- name: download Oracle Java
shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }}
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}
executable=/bin/bash
chdir=/var/tmp
creates=/var/tmp/{{ oraclejdk_file }}
......
......@@ -8,3 +8,6 @@ shib:
YOU NEED TO GENERATE A REAL KEY HERE USING OPENSSL
sp_pem: |
THE CORRESPONDING CERTIFICATE PEM GOES HERE
shib_template_dir: '.'
shib_metadata_backup_url: "https://idp.stanford.edu/Stanford-metadata.xml"
shib_download_metadata: true
......@@ -10,21 +10,18 @@
- libapache2-mod-shib2
- opensaml2-tools
notify: restart shibd
tags:
- shib
- install
- name: Creates /etc/shibboleth/metadata directory
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
tags:
- shib
- install
- name: Downloads metadata into metadata directory as backup
get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd
tags:
- shib
- install
get_url: >
url={{ shib_metadata_backup_url }}
dest=/etc/shibboleth/metadata/idp-metadata.xml
mode=0640
group=_shibd
owner=_shibd
when: shib_download_metadata
- name: writes out key and pem file
template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600
......@@ -32,24 +29,15 @@
- key
- pem
notify: restart shibd
tags:
- shib
- install
- name: writes out configuration files
template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
template: src={{ shib_template_dir }}/{{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
with_items:
- attribute-map.xml
- shibboleth2.xml
notify: restart shibd
tags:
- shib
- install
- name: enables shib
command: a2enmod shib2
notify: restart shibd
tags:
- shib
- install
......@@ -19,6 +19,7 @@ SUPERVISOR_HTTP_BIND_IP: '127.0.0.1'
supervisor_http_bind_port: '9001'
supervisor_app_dir: "{{ COMMON_APP_DIR }}/supervisor"
supervisor_cfg_dir: "{{ supervisor_app_dir }}/conf.d"
supervisor_available_dir: "{{ supervisor_app_dir }}/conf.available.d"
supervisor_data_dir: "{{ COMMON_DATA_DIR }}/supervisor"
supervisor_venvs_dir: "{{ supervisor_app_dir }}/venvs"
supervisor_venv_dir: "{{ supervisor_venvs_dir }}/supervisor"
......
# Get the tags for this instance
import argparse
import boto
import boto.utils
import os
import subprocess
if __name__ == '__main__':
    # Parse the locations of the available/enabled supervisor config dirs.
    parser = argparse.ArgumentParser(
        description="Enable all services that are in the services tag of this ec2 instance.")
    parser.add_argument("-a", "--available",
                        help="The location of the available services.")
    parser.add_argument("-e", "--enabled",
                        help="The location of the enabled services.")
    args = parser.parse_args()

    # Identify this instance via the EC2 metadata service, then look it up
    # through the EC2 API so we can read its tags.
    ec2 = boto.connect_ec2()
    instance_id = boto.utils.get_instance_metadata()['instance-id']
    reservations = ec2.get_all_instances(instance_ids=[instance_id])

    report = []
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.id != instance_id:
                continue
            # The 'services' tag holds a comma-separated list of service
            # names that should be enabled on this instance.
            for service in instance.tags['services'].split(','):
                # Link to available service.
                available_file = "{}/{}.conf".format(args.available, service)
                link_location = "{}/{}.conf".format(args.enabled, service)
                if os.path.exists(available_file):
                    subprocess.call("ln -sf {} {}".format(available_file, link_location), shell=True)
                    report.append("Linking service: {}".format(service))
                else:
                    report.append("No conf available for service: {}".format(link_location))
    print("\n".join(report))
......@@ -68,10 +68,12 @@
state=directory
owner={{ supervisor_user }}
group={{ supervisor_service_user }}
mode="755"
with_items:
- "{{ supervisor_app_dir }}"
- "{{ supervisor_venv_dir }}"
- "{{ supervisor_cfg_dir }}"
- "{{ supervisor_available_dir }}"
- name: create supervisor directories
file: >
......@@ -88,14 +90,37 @@
pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
sudo_user: "{{ supervisor_user }}"
- name: install supervisor in its venv
pip: name=boto virtualenv="{{supervisor_venv_dir}}" state=present
sudo_user: "{{ supervisor_user }}"
when: supervisor_service == "supervisor" and disable_edx_services and not devstack
- name: create supervisor upstart job
template: >
src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf
src=etc/init/supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf
owner=root group=root
# This script is aws specific and looks up instances
# tags and enables services based on the 'services' tag
# on instance startup.
- name: create pre_supervisor upstart job
template: >
src=etc/init/pre_supervisor.conf.j2 dest=/etc/init/pre_supervisor.conf
owner=root group=root
when: supervisor_service == "supervisor" and disable_edx_services and not devstack
- name: write the pre_supervisor python script
copy: >
src=pre_supervisor_checks.py
dest={{ supervisor_app_dir }}/pre_supervisor_checks.py
mode=0750
owner={{ supervisor_user }}
group={{ supervisor_service_user }}
when: disable_edx_services
- name: create supervisor master config
template: >
src=supervisord.conf.j2 dest={{ supervisor_cfg }}
src=edx/app/supervisor/supervisord.conf.j2 dest={{ supervisor_cfg }}
owner={{ supervisor_user }} group={{ supervisor_service_user }}
mode=0644
......
description "Tasks before supervisord"
start on runlevel [2345]
task
setuid {{ supervisor_user }}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{supervisor_available_dir}} --enabled={{supervisor_cfg_dir}}
description "supervisord"
{% if disable_edx_services -%}
start on stopped pre_supervisor
{% else %}
start on runlevel [2345]
{% endif %}
stop on runlevel [!2345]
setuid {{ supervisor_service_user }}
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Vars for role user
#
#
# vars are namespaced with the module name.
#
user_role_name: user
# override this var to add a prefix to the prompt
# also need to set common_update_bashrc to
# update the system bashrc default
USER_CMD_PROMPT: ""
# these are the default links to create in the
# restricted user's ~/bin directory
# defaults to sudo, more can be added by overriding
# this var
user_rbash_links:
- /usr/bin/sudo
# parameter for this role,
# must be set when called and should NOT
# be set in extra vars since it
# will take precedence over the parameter
user_info: []
---
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role user
#
# Overview:
#
# This role is included as a dependency by other roles or as a standalone
# parameterized role to create users.
#
# There are generally three classes of users:
# (1) normal login users without any special permissions
# (2) admin users with full sudo permissions
# (3) restricted users that use rbash and are locked down to specific sudo commands
#
# The parameter "type" sets the user in one of these three categories:
# (1) type not set
# (2) type=admin
# (3) type=restricted
#
# Dependencies:
#
# This role has no dependencies but requires parameters
#
# Example:
#
# # Create a few users, one restricted
# # one admin with a github key and one with
# # a regular key.
# #
# # All user types can use a key from github
# # and also have additional authorized keys defined
# #
#
# - role: user
# user_info:
# # This restricted user is defined in meta/
# # for edxapp, it creates a user that can only
# # run manage.py commands
# - name: automator
# type: restricted
# # The sudoers file is optional.
# sudoers_template: '99-edxapp-manage-cmds.j2'
# authorized_keys:
# - ssh-rsa abcdef...
# - ssh-rsa ghiklm...
#
# # More users passed to the role, this one is a user
# # with full sudo, key fetched from github
# - name: frank
# github: true
# type: admin
#
# # This user is a normal login user without sudo, with
# # a couple keys passed in as parameters
# - name: sally
# authorized_keys:
# - ssh-rsa abcdef...
# - ssh-rsa ghiklm...
#
# By default for restricted users we only allow sudo, if you
# want to provide more binaries add them to user_rbash_links
# which can be passed in as a parameter to the role.
#
- debug: var=user_info
- name: create the edxadmin group
group: name=edxadmin state=present
# give full sudo admin access to the edxadmin group
- name: grant full sudo access to the edxadmin group
copy: >
content="%edxadmin ALL=(ALL) NOPASSWD:ALL"
dest=/etc/sudoers.d/edxadmin owner=root group=root
mode=0440 validate='visudo -cf %s'
- name: create the users
user:
name={{ item.name }}
shell=/bin/bash
with_items: user_info
- name: create .ssh directory
file:
path=/home/{{ item.name }}/.ssh state=directory mode=0750
owner={{ item.name }}
with_items: user_info
- name: assign admin role to admin users
user:
name={{ item.name }}
groups=edxadmin
when: item.type is defined and item.type == 'admin'
with_items: user_info
# authorized_keys2 used here so that personal
# keys can be copied to authorized_keys
# force is set to yes here, otherwise the keys
# won't update if they haven't changed on the github
# side
- name: copy github key[s] to .ssh/authorized_keys2
get_url:
url=https://github.com/{{ item.name }}.keys
force=yes
dest=/home/{{ item.name }}/.ssh/authorized_keys2 mode=0640
owner={{ item.name }}
when: item.github is defined
with_items: user_info
- name: copy additional authorized keys
copy: >
content="{{ "\n".join(item.authorized_keys) }}"
dest=/home/{{ item.name }}/.ssh/authorized_keys mode=0640
owner={{ item.name }}
mode=0440
when: item.authorized_keys is defined
with_items: user_info
- name: create bashrc file for normal users
template: >
src=default.bashrc.j2
dest=/home/{{ item.name }}/.bashrc mode=0640
owner={{ item.name }}
when: not (item.type is defined and item.type == 'restricted')
with_items: user_info
- name: create .profile for all users
template: >
src=default.profile.j2
dest=/home/{{ item.name }}/.profile mode=0640
owner={{ item.name }}
with_items: user_info
########################################################
# All tasks below this line are for restricted users
- name: modify shell for restricted users
user:
name={{ item.name }}
shell=/bin/rbash
when: item.type is defined and item.type == 'restricted'
with_items: user_info
- name: create bashrc file for restricted users
template: >
src=restricted.bashrc.j2
dest=/home/{{ item.name }}/.bashrc mode=0640
owner={{ item.name }}
when: item.type is defined and item.type == 'restricted'
with_items: user_info
- name: create sudoers file from template
template:
dest=/etc/sudoers.d/99-restricted
src=restricted.sudoers.conf.j2 owner="root"
group="root" mode=0440 validate='visudo -cf %s'
# Prevent restricted user from updating their PATH and
# environment by ensuring root ownership
- name: change home directory ownership to root for restricted users
shell: "chown -R root:{{ item.name }} /home/{{ item.name }}"
when: item.type is defined and item.type == 'restricted'
with_items: user_info
- name: create ~/bin directory
file:
path=/home/{{ item.name }}/bin state=directory mode=0750
owner="root" group={{ item.name }}
when: item.type is defined and item.type == 'restricted'
with_items: user_info
- name: create allowed command links
file:
src: "{{ item[1] }}"
dest: "/home/{{ item[0].name }}/bin/{{ item[1]|basename }}"
state: link
when: item[0].type is defined and item[0].type == 'restricted'
with_nested:
- user_info
- user_rbash_links
......@@ -54,9 +54,9 @@ if [ -n "$force_color_prompt" ]; then
fi
if [ "$color_prompt" = yes ]; then
PS1='{{ GH_USERS_PROMPT }}${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
PS1='{{ USER_CMD_PROMPT }}${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='{{ GH_USERS_PROMPT}}${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
PS1='{{ USER_CMD_PROMPT}}${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
......@@ -73,9 +73,6 @@ esac
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
......@@ -85,6 +82,7 @@ fi
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias h='ls ~/.bash_histories/*/* | sort | xargs grep -i '
# better bash history
......
umask 022
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
{% for user in user_info -%}
{% if 'sudo_cmds' in user -%}
{% for cmd in user['sudo_cmds'] -%}
{{ user['name'] }} {{ cmd }}
{% endfor %}
{% endif %}
{% endfor %}
......@@ -4,7 +4,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: xqueue_installed is defined
when: xqueue_installed is defined and not disable_edx_services
with_items:
- xqueue
- xqueue_consumer
- name: "writing supervisor scripts - xqueue, xqueue consumer"
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
src={{ item }}.conf.j2 dest={{ supervisor_available_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
with_items: ['xqueue', 'xqueue_consumer']
- name: "enabling supervisor scripts - xqueue, xqueue consumer"
file: >
src={{ supervisor_available_dir }}/{{ item }}.conf
dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }}
mode=0644 state=link force=yes
with_items: ['xqueue', 'xqueue_consumer']
when: not disable_edx_services
- name: create xqueue application config
template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json mode=0644
......@@ -63,6 +72,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure xqueue, consumer is running
supervisorctl_local: >
......@@ -73,6 +83,7 @@
with_items:
- xqueue
- xqueue_consumer
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
......@@ -11,8 +11,7 @@ XSERVER_GRADER_DIR: !!null
# For 6.00x use "git@github.com:/MITx/6.00x.git"
XSERVER_GRADER_SOURCE: !!null
# This must be set to run this role
# Example: "{{ secure_dir }}/files/git-identity"
XSERVER_LOCAL_GIT_IDENTITY: !!null
XSERVER_GIT_IDENTITY: !!null
XSERVER_LANG: "en_US.UTF-8"
......@@ -29,7 +28,7 @@ xserver_user: "xserver"
xserver_sandbox_user: "sandbox"
xserver_log_dir: "{{ COMMON_LOG_DIR }}/xserver"
xserver_grader_root: "{{ XSERVER_GRADER_DIR }}/graders"
xserver_git_identity: "{{ xserver_app_dir }}/{{ XSERVER_LOCAL_GIT_IDENTITY|basename }}"
xserver_git_identity: "{{ xserver_app_dir }}/xserver-identity"
xserver_env_config:
RUN_URL: $XSERVER_RUN_URL
......
......@@ -20,4 +20,5 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
- name: "writing supervisor script"
template: >
src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf
src=xserver.conf.j2 dest={{ supervisor_available_dir }}/xserver.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
- name: "enable supervisor script"
file: >
src={{ supervisor_available_dir }}/xserver.conf
dest={{ supervisor_cfg_dir }}/xserver.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
state=link force=yes
when: not disable_edx_services
- name: checkout code
git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
sudo_user: "{{ xserver_user }}"
......@@ -25,7 +33,7 @@
- name: install read-only ssh key for the content repo that is required for grading
copy: >
src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }}
content="{{ XSERVER_GIT_IDENTITY }}" dest={{ xserver_git_identity }}
owner={{ xserver_user }} group={{ xserver_user }} mode=0600
notify: restart xserver
......@@ -55,6 +63,7 @@
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: not disable_edx_services
- name: ensure xserver is started
supervisorctl_local: >
......@@ -62,6 +71,7 @@
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not disable_edx_services
- name: create a symlink for venv python
file: >
......
......@@ -8,8 +8,8 @@
when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE
- name: checking for git identity
fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role"
when: not XSERVER_LOCAL_GIT_IDENTITY
fail: msg="You must define XSERVER_GIT_IDENTITY to use this role"
when: not XSERVER_GIT_IDENTITY
- name: create application user
user: >
......
---
# Creates a new ansible role
# Usage:
# ansible-playbook ./run_role.yml -i "hostname," -e role=my_awesome_role
#
- hosts: all
sudo: True
gather_facts: False
roles:
- "{{role}}"
......@@ -6,9 +6,11 @@
migrate_db: "yes"
openid_workaround: True
devstack: True
disable_edx_services: True
edx_platform_version: 'master'
mongo_enable_journal: False
EDXAPP_NO_PREREQ_INSTALL: 0
COMMON_MOTD_TEMPLATE: "devstack_motd.tail.j2"
vars_files:
- "group_vars/all"
roles:
......
......@@ -11,7 +11,7 @@
- "group_vars/all"
roles:
- edx_ansible
- gh_users
- user
- role: nginx
nginx_sites:
- cms
......
......@@ -8,5 +8,4 @@ ecdsa==0.10
paramiko==1.12.0
pycrypto==2.6.1
wsgiref==0.1.2
GitPython==0.3.2.RC1
pymongo==2.4.1
docopt==0.6.1
......@@ -21,21 +21,11 @@
export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -n $WORKSPACE ]]; then
# setup a virtualenv in jenkins
if [[ ! -d ".venv" ]]; then
virtualenv .venv
fi
source .venv/bin/activate
pip install -r requirements.txt
fi
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/util/jenkins/ascii-convert.sh"
source "$WORKSPACE/configuration/util/jenkins/ascii-convert.sh"
fi
if [[ -z $static_url_base ]]; then
......@@ -75,9 +65,9 @@ fi
if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-bd6b6ed4"
elif [[ $server_type == "ubuntu_12.04" ]]; then
ami="ami-a73264ce"
ami="ami-ad1a0dc4"
elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then
ami="ami-59a4a230"
fi
fi
......@@ -92,31 +82,11 @@ cd playbooks/edx-east
cat << EOF > $extra_vars
---
enable_datadog: False
enable_splunkforwarder: False
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
NGINX_ENABLE_SSL: True
NGINX_SSL_CERTIFICATE: '/var/lib/jenkins/star.sandbox.edx.org.crt'
NGINX_SSL_KEY: '/var/lib/jenkins/star.sandbox.edx.org.key'
EDXAPP_LMS_SSL_NGINX_PORT: 443
EDXAPP_CMS_SSL_NGINX_PORT: 443
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_CMS_BASE: studio.${deploy_host}
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
EDXAPP_SITE_NAME: ${deploy_host}
COMMON_PYPI_MIRROR_URL: 'https://pypi.edx.org/root/pypi/+simple/'
XSERVER_GRADER_DIR: "/edx/var/xserver/data/content-mit-600x~2012_Fall"
XSERVER_GRADER_SOURCE: "git@github.com:/MITx/6.00x.git"
XSERVER_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
CERTS_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
CERTS_AWS_KEY: $(cat /var/lib/jenkins/certs-aws-key)
CERTS_AWS_ID: $(cat /var/lib/jenkins/certs-aws-id)
CERTS_BUCKET: "verify-test.edx.org"
migrate_db: "yes"
openid_workaround: True
edx_platform_version: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
......@@ -125,17 +95,7 @@ ora_version: $ora_version
ease_version: $ease_version
certs_version: $certs_version
discern_version: $discern_version
rabbitmq_ip: "127.0.0.1"
rabbitmq_refresh: True
COMMON_HOSTNAME: edx-server
EDXAPP_STATIC_URL_BASE: $static_url_base
# Settings for Grade downloads
EDXAPP_GRADE_STORAGE_TYPE: 's3'
EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: 'sandbox'
EOF
if [[ $basic_auth == "true" ]]; then
......@@ -165,23 +125,25 @@ instance_tags:
owner: $BUILD_USER
root_ebs_size: $root_ebs_size
name_tag: $name_tag
gh_users:
- ${github_username}
COMMON_USER_INFO:
- name: ${github_username}
github: true
type: admin
dns_zone: $dns_zone
rabbitmq_refresh: True
GH_USERS_PROMPT: '[$name_tag] '
USER_CMD_PROMPT: '[$name_tag] '
elb: $elb
EOF
# run the tasks to launch an ec2 instance from AMI
cat $extra_vars
ansible-playbook edx_provision.yml -i inventory.ini -e "@${extra_vars}" --user ubuntu
ansible-playbook edx_provision.yml -i inventory.ini -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu -v
if [[ $server_type == "full_edx_installation" ]]; then
# additional tasks that need to be run if the
# entire edx stack is brought up from an AMI
ansible-playbook rabbitmq.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
ansible-playbook restart_supervisor.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
ansible-playbook rabbitmq.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
ansible-playbook restart_supervisor.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
fi
fi
......@@ -193,20 +155,22 @@ done
# If reconfigure was selected or if starting from an ubuntu 12.04 AMI
# run non-deploy tasks for all roles
if [[ $reconfigure == "true" || $server_type == "ubuntu_12.04" ]]; then
if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scratch" ]]; then
cat $extra_vars
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --skip-tags deploy
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu --skip-tags deploy
fi
# Run deploy tasks for the roles selected
for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars
ansible-playbook ${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy
fi
done
if [[ $server_type == "full_edx_installation" || $server_type == "full_edx_installation_from_scratch" ]]; then
# Run deploy tasks for the roles selected
for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars
ansible-playbook ${i}.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu --tags deploy
fi
done
fi
# deploy the edx_ansible role
ansible-playbook edx_ansible.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
ansible-playbook edx_ansible.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
rm -f "$extra_vars"
import argparse
import json
import logging as log
import pickle
import requests
import yaml
from datetime import datetime
from git import Repo
from os import path
from pprint import pformat
from pymongo import MongoClient, DESCENDING
from stage_release import uri_from
def releases(repo):
    """
    Yield every release-candidate ref on the origin remote.

    A release candidate is any ref whose name starts with 'origin/rc/'.
    """
    rc_prefix = 'origin/rc/'
    for candidate in repo.refs:
        if not candidate.name.startswith(rc_prefix):
            continue
        yield candidate
def candidates_since(repo, time):
    """
    Yield the release-candidate refs in `repo` whose tip commit is newer
    than `time` (a naive UTC datetime).
    """
    for candidate in releases(repo):
        tip_time = datetime.utcfromtimestamp(candidate.commit.committed_date)
        # Strictly newer only: an RC whose last commit is exactly at
        # `time` is considered already seen.
        if tip_time > time:
            yield candidate
def stage_release(url, token, repo, rc):
    """
    Submit a Jenkins job to stage a new release for the new rc of the repo.

    Posts to `url` with the Jenkins build token as form data and a JSON
    parameter payload identifying the repo and the rc's commit sha.
    Raises Exception if Jenkins does not answer 201 (created).
    """
    # Setup the Jenkins params.
    params = []
    params.append({'name': "{}_REF".format(repo), 'value': True})
    params.append({'name': repo, 'value': rc.commit.hexsha})
    build_params = {'parameter': params}
    log.info("New rc found{}, staging new release.".format(rc.name))
    # BUG FIX: the token must be form-encoded as a key/value pair.  The
    # original passed the SET literal {"token", token}, which requests
    # cannot encode as form data, so the build token was never sent.
    r = requests.post(url,
                      data={"token": token},
                      params={"json": json.dumps(build_params)})
    if r.status_code != 201:
        msg = "Failed to submit request with params: {}"
        raise Exception(msg.format(pformat(build_params)))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Monitor git repos for new rc branches.")
    parser.add_argument('-c', '--config', required=True,
                        help="Config file.")
    # BUG FIX: typo in user-facing help text ("presistent" -> "persistent").
    parser.add_argument('-p', '--pickle', default="data.pickle",
                        help="Pickle of persistent data.")
    args = parser.parse_args()

    config = yaml.safe_load(open(args.config))

    # Load the state persisted by the previous run, if any.
    # NOTE(review): the pickle is opened in text mode; binary mode
    # ('rb'/'wb') is required on Python 3 -- confirm target interpreter.
    if path.exists(args.pickle):
        data = pickle.load(open(args.pickle))
    else:
        data = {}

    # Persist the last time we made this check.  On a first run (no prior
    # state) we start from "now", so pre-existing RCs are not re-staged.
    if 'last_check' not in data:
        last_check = datetime.utcnow()
    else:
        last_check = data['last_check']
    data['last_check'] = datetime.utcnow()

    # For every configured repo, stage a release for each new or updated
    # release candidate.  (Removed the unused `repos_with_changes` local.)
    for repo in config['repos']:
        # Check for new rc candidates.
        for rc in candidates_since(Repo(repo), last_check):
            # Notify stage-release to build for the new repo.
            stage_release(config['abbey_url'], config['abbey_token'], repo, rc)
    pickle.dump(data, open(args.pickle, 'w'))
"""
Take in a YAML file with the basic data of all the things we could
deploy and command line hashes for the repos that we want to deploy
right now.
Example Config YAML file:
---
DOC_STORE_CONFIG:
hosts: [ list, of, mongo, hosts]
port: #
db: 'db'
user: 'jenkins'
password: 'password'
configuration_repo: "/path/to/configuration/repo"
configuration_secure_repo: "/path/to/configuration-secure"
repos:
edxapp:
plays:
- edxapp
- worker
xqueue:
plays:
- xqueue
6.00x:
plays:
- xserver
xserver:
plays:
- xserver
deployments:
edx:
- stage
- prod
edge:
- stage
- prod
loadtest:
- stage
# A jenkins URL to post requests for building AMIs
abbey_url: "http://...."
# A mapping of plays to base AMIs
base_amis: {}
# The default AMI to use if there isn't one specific to your plays.
default_base_ami: ''
---
"""
import argparse
import json
import yaml
import logging as log
import requests
from datetime import datetime
from git import Repo
from pprint import pformat
from pymongo import MongoClient, DESCENDING
log.basicConfig(level=log.DEBUG)
def uri_from(doc_store_config):
    """
    Build a mongodb connection URI from a DOC_STORE_CONFIG mapping.

    Expected structure:

    DOC_STORE_CONFIG:
      hosts:
        - 'host1.com'
        - 'host2.com'
      port: 10012
      db: 'devops'
      user: 'username'
      password: 'password'
    """
    cfg = doc_store_config
    # Every host shares the single configured port.
    joined_hosts = ",".join(
        "{host}:{port}".format(host=h, port=cfg['port'])
        for h in cfg['hosts'])
    return "mongodb://{user}:{password}@{hosts}/{db}".format(
        user=cfg['user'],
        password=cfg['password'],
        hosts=joined_hosts,
        db=cfg['db'])
# Create (or dry-run with --noop) a new release document for the given
# deployment: seed per-play vars from the last successful release, overlay
# the repo=sha pairs given on the command line, record which envs already
# have a matching AMI, then ask abbey to build the missing AMIs.
def prepare_release(args):
config = yaml.safe_load(open(args.config))
mongo_uri = uri_from(config['DOC_STORE_CONFIG'])
client = MongoClient(mongo_uri)
db = client[config['DOC_STORE_CONFIG']['db']]
# Get configuration repo versions
config_repo_ver = Repo(config['configuration_repo']).commit().hexsha
config_secure_ver = Repo(config['configuration_secure_repo']).commit().hexsha
# Parse the vars.
# Each REPOS entry is "name=sha"; build a {name: sha} mapping.
var_array = map(lambda key_value: key_value.split('='), args.REPOS)
update_repos = { item[0]:item[1] for item in var_array }
log.info("Update repos: {}".format(pformat(update_repos)))
release = {}
now = datetime.utcnow()
release['_id'] = args.release_id
release['date_created'] = now
release['date_modified'] = now
release['build_status'] = 'Unknown'
release['build_user'] = args.user
# One collection per deployment; latest successful release first.
release_coll = db[args.deployment]
releases = release_coll.find({'build_status': 'Succeeded'}).sort('_id', DESCENDING)
all_plays = {}
try:
last_successful = releases.next()
all_plays = last_successful['plays']
except StopIteration:
# No successful builds.
log.warn("No Previously successful builds.")
# For all repos that were updated
for repo, ref in update_repos.items():
var_name = "{}_version".format(repo.replace('-','_'))
if repo not in config['repos']:
raise Exception("No info for repo with name '{}'".format(repo))
# For any play that uses the updated repo
# NOTE(review): per the module docstring the config nests plays under a
# 'plays' key, which would make this iterate that dict's keys rather
# than the play names -- confirm whether config['repos'][repo] is a
# list of plays or a {'plays': [...]} mapping.
for play in config['repos'][repo]:
if play not in all_plays:
all_plays[play] = {}
if 'vars' not in all_plays[play]:
all_plays[play]['vars'] = {}
all_plays[play]['vars'][var_name] = ref
# Configuration to use to build these AMIs
all_plays[play]['configuration_ref'] = config_repo_ver
all_plays[play]['configuration_secure_ref'] = config_secure_ver
# Set amis to None for all envs of this deployment
# (None marks an env as needing a fresh AMI build.)
all_plays[play]['amis'] = {}
for env in config['deployments'][args.deployment]:
# Check the AMIs collection to see if an ami already exist
# for this configuration.
potential_ami = ami_for(db, env,
args.deployment,
play, config_repo_ver,
config_secure_ver,
ref)
if potential_ami:
all_plays[play]['amis'][env] = potential_ami['_id']
else:
all_plays[play]['amis'][env] = None
release['plays'] = all_plays
if args.noop:
print("Would insert into release collection: {}".format(pformat(release)))
else:
release_coll.insert(release)
# All plays that need new AMIs have been updated.
notify_abbey(config, args.deployment,
all_plays, args.release_id, mongo_uri, config_repo_ver,
config_secure_ver, args.noop)
def ami_for(db, env, deployment, play, configuration,
            configuration_secure, ansible_vars):
    """
    Look up an existing AMI document matching this exact build signature.

    The signature is the full identity of a build: environment,
    deployment, play, both configuration repo shas, and the ansible
    vars baked into the image.  Returns the matching document from the
    `amis` collection, or None when no such AMI has been built.
    """
    signature = dict(
        env=env,
        deployment=deployment,
        play=play,
        configuration_ref=configuration,
        configuration_secure_ref=configuration_secure,
        vars=ansible_vars,
    )
    return db.amis.find_one(signature)
def notify_abbey(config, deployment, all_plays, release_id,
                 mongo_uri, configuration_ref, configuration_secure_ref, noop=False):
    """
    Ask the abbey AMI-building Jenkins job to build an AMI for every
    play/environment pair whose ami entry is None (i.e. no existing
    AMI matched the release signature).

    With noop=True the request URL is printed instead of sent.  Raises
    Exception when abbey answers anything other than 200.
    """
    abbey_url = config['abbey_url']
    base_amis = config['base_amis']
    default_base = config['default_base_ami']
    for play_name, play in all_plays.items():
        for env, ami in play['amis'].items():
            if ami is not None:
                # An AMI already exists for this env; nothing to build.
                continue
            params = {
                'play': play_name,
                'deployment': deployment,
                'environment': env,
                'vars': yaml.safe_dump(play['vars'], default_flow_style=False),
                'release_id': release_id,
                'mongo_uri': mongo_uri,
                'configuration': configuration_ref,
                'configuration_secure': configuration_secure_ref,
                # Fall back to the global default when no play-specific
                # base AMI is configured.
                'base_ami': base_amis.get(play_name, default_base),
            }
            log.info("Need ami for {}".format(pformat(params)))
            if noop:
                r = requests.Request('POST', abbey_url, params=params)
                url = r.prepare().url
                print("Would have posted: {}".format(url))
            else:
                r = requests.post(abbey_url, params=params)
                log.info("Sent request got {}".format(r))
                if r.status_code != 200:
                    # Something went wrong.
                    msg = "Failed to submit request with params: {}"
                    raise Exception(msg.format(pformat(params)))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Prepare a new release.")
    parser.add_argument('-c', '--config', required=True, help="Configuration for deploys")
    parser.add_argument('-u', '--user', required=True, help="User staging the release.")
    msg = "The deployment to build for eg. edx, edge, loadtest"
    parser.add_argument('-d', '--deployment', required=True, help=msg)
    parser.add_argument('-r', '--release-id', required=True, help="Id of Release.")
    parser.add_argument('-n', '--noop', action='store_true',
                        help="Run without sending requests to abbey.")
    # BUG FIX: user-facing help typo "spcae" -> "space" and the missing
    # closing parenthesis after "'='".
    parser.add_argument('REPOS', nargs='+',
                        help="Any number of var=value (no space around '=')" + \
                             " e.g. 'edxapp=3233bac xqueue=92832ab'")
    args = parser.parse_args()
    log.debug(args)
    prepare_release(args)
......@@ -15,8 +15,6 @@ except ImportError:
print "boto required for script"
sys.exit(1)
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, DuplicateKeyError
from pprint import pprint
AMI_TIMEOUT = 600 # time to wait for AMIs to complete
......@@ -26,77 +24,6 @@ NUM_TASKS = 5 # number of tasks for time summary report
NUM_PLAYBOOKS = 2
# Thin wrapper around the AMI and deployment Mongo collections.
# NOTE(review): relies on the module-level `args` namespace and the
# module-level `git_refs` mapping; must be constructed after arg parsing.
class MongoConnection:
def __init__(self):
try:
mongo = MongoClient(host=args.mongo_uri)
except ConnectionFailure:
print "Unable to connect to the mongo database specified"
sys.exit(1)
mongo_db = getattr(mongo, args.mongo_db)
# Create the collections up front if this is a fresh database.
if args.mongo_ami_collection not in mongo_db.collection_names():
mongo_db.create_collection(args.mongo_ami_collection)
if args.mongo_deployment_collection not in mongo_db.collection_names():
mongo_db.create_collection(args.mongo_deployment_collection)
self.mongo_ami = getattr(mongo_db, args.mongo_ami_collection)
self.mongo_deployment = getattr(
mongo_db, args.mongo_deployment_collection)
def update_ami(self, ami):
"""
Creates a new document in the AMI
collection with the ami id as the
id
"""
# The document is the full build signature keyed by the AMI id, so a
# duplicate insert means this exact AMI was already recorded.
query = {
'_id': ami,
'play': args.play,
'env': args.environment,
'deployment': args.deployment,
'configuration_ref': args.configuration_version,
'configuration_secure_ref': args.configuration_secure_version,
'vars': git_refs,
}
try:
self.mongo_ami.insert(query)
except DuplicateKeyError:
# In noop mode a pre-existing entry is expected, so only re-raise
# on a real run.
if not args.noop:
print "Entry already exists for {}".format(ami)
raise
def update_deployment(self, ami):
"""
Adds the built AMI to the deployment
collection
"""
query = {'_id': args.jenkins_build}
deployment = self.mongo_deployment.find_one(query)
try:
deployment['plays'][args.play]['amis'][args.environment] = ami
except KeyError:
msg = "Unexpected document structure, couldn't write " +\
"to path deployment['plays']['{}']['amis']['{}']"
print msg.format(args.play, args.environment)
pprint(deployment)
if args.noop:
# Fabricate a minimal document so the dry run can proceed.
deployment = {
'plays': {
args.play: {
'amis': {
args.environment: ami,
},
},
},
}
else:
raise
self.mongo_deployment.save(deployment)
class Unbuffered:
"""
For unbuffered output, not
......@@ -124,7 +51,7 @@ def parse_args():
metavar="SECURE_VAR_FILE",
help="path to secure-vars from the root of "
"the secure repo (defaults to ansible/"
"vars/DEPLOYMENT/ENVIRONMENT-DEPLOYMENT.yml)")
"vars/ENVIRONMENT-DEPLOYMENT.yml)")
parser.add_argument('--stack-name',
help="defaults to ENVIRONMENT-DEPLOYMENT",
metavar="STACK_NAME",
......@@ -156,8 +83,8 @@ def parse_args():
parser.add_argument('--configuration-secure-repo', required=False,
default="git@github.com:edx-ops/prod-secure",
help="repo to use for the secure files")
parser.add_argument('-j', '--jenkins-build', required=False,
help="jenkins build number to update")
parser.add_argument('-c', '--cache-id', required=True,
help="unique id to use as part of cache prefix")
parser.add_argument('-b', '--base-ami', required=False,
help="ami to use as a base ami",
default="ami-0568456c")
......@@ -181,30 +108,14 @@ def parse_args():
default=5,
help="How long to delay message display from sqs "
"to ensure ordering")
parser.add_argument("--mongo-uri", required=False,
default=None,
help="Mongo uri for the host that contains"
"the AMI collection")
parser.add_argument("--mongo-db", required=False,
default="test",
help="Mongo database")
parser.add_argument("--mongo-ami-collection", required=False,
default="amis",
help="Mongo ami collection")
parser.add_argument("--mongo-deployment-collection", required=False,
default="deployment",
help="Mongo deployment collection")
return parser.parse_args()
def get_instance_sec_group(vpc_id):
security_group_id = None
grp_details = ec2.get_all_security_groups(
filters={
'vpc_id':vpc_id,
'vpc_id': vpc_id,
'tag:play': args.play
}
)
......@@ -242,10 +153,10 @@ def create_instance_args():
if args.identity:
config_secure = 'true'
with open(args.identity) as f:
identity_file = f.read()
identity_contents = f.read()
else:
config_secure = 'false'
identity_file = "dummy"
identity_contents = "dummy"
user_data = """#!/bin/bash
set -x
......@@ -311,7 +222,7 @@ chmod 755 $git_ssh
if $config_secure; then
cat << EOF > $secure_identity
{identity_file}
{identity_contents}
EOF
fi
......@@ -324,23 +235,16 @@ cat << EOF >> $extra_vars
{git_refs_yml}
# path to local checkout of
# the secure repo
secure_vars: $secure_vars_file
# The private key used for pulling down
# private edx-platform repos is the same
# identity of the github huser that has
# access to the secure vars repo.
# EDXAPP_USE_GIT_IDENTITY needs to be set
# to true in the extra vars for this
# variable to be used.
EDXAPP_LOCAL_GIT_IDENTITY: $secure_identity
# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true
# Use the build number an the dynamic cache key.
EDXAPP_UPDATE_STATIC_FILES_KEY: true
edxapp_dynamic_cache_key: {deployment}-{environment}-{play}-{cache_id}
disable_edx_services: true
EOF
chmod 400 $secure_identity
......@@ -362,8 +266,8 @@ sudo pip install -r requirements.txt
cd $playbook_dir
ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$secure_vars_file -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$secure_vars_file -e@$extra_vars
rm -rf $base_dir
......@@ -377,11 +281,12 @@ rm -rf $base_dir
deployment=args.deployment,
play=args.play,
config_secure=config_secure,
identity_file=identity_file,
identity_contents=identity_contents,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
git_refs_yml=git_refs_yml,
secure_vars=secure_vars)
secure_vars=secure_vars,
cache_id=args.cache_id)
ec2_args = {
'security_group_ids': [security_group_id],
......@@ -520,12 +425,32 @@ def create_ami(instance_id, name, description):
'description': description,
'no_reboot': True}
AWS_API_WAIT_TIME = 1
image_id = ec2.create_image(**params)
print("Checking if image is ready.")
for _ in xrange(AMI_TIMEOUT):
try:
img = ec2.get_image(image_id)
if img.state == 'available':
print("Tagging image.")
img.add_tag("environment", args.environment)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("deployment", args.deployment)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("play", args.play)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_ref", args.configuration_version)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_secure_ref", args.configuration_secure_version)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_secure_repo", args.configuration_secure_repo)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("cache_id", args.cache_id)
time.sleep(AWS_API_WAIT_TIME)
for repo, ref in git_refs.items():
key = "vars:{}".format(repo)
img.add_tag(key, ref)
time.sleep(AWS_API_WAIT_TIME)
break
else:
time.sleep(1)
......@@ -658,8 +583,8 @@ if __name__ == '__main__':
if args.secure_vars:
secure_vars = args.secure_vars
else:
secure_vars = "ansible/vars/{}/{}-{}.yml".format(
args.environment, args.environment, args.deployment)
secure_vars = "ansible/vars/{}-{}.yml".format(
args.environment, args.deployment)
if args.stack_name:
stack_name = args.stack_name
else:
......@@ -672,15 +597,12 @@ if __name__ == '__main__':
print 'You must be able to connect to sqs and ec2 to use this script'
sys.exit(1)
if args.mongo_uri:
mongo_con = MongoConnection()
try:
sqs_queue = None
instance_id = None
run_id = "abbey-{}-{}-{}".format(
args.environment, args.deployment, int(time.time() * 100))
run_id = "{}-abbey-{}-{}-{}".format(
int(time.time() * 100), args.environment, args.deployment, args.play)
ec2_args = create_instance_args()
......@@ -698,9 +620,6 @@ if __name__ == '__main__':
print "{:<30} {:0>2.0f}:{:0>5.2f}".format(
run[0], run[1] / 60, run[1] % 60)
print "AMI: {}".format(ami)
if args.mongo_uri:
mongo_con.update_ami(ami)
mongo_con.update_deployment(ami)
finally:
print
if not args.no_cleanup and not args.noop:
......
"""VPC Tools.
Usage:
vpc-tools.py ssh-config (vpc <vpc_id> | stack-name <stack_name>) identity-file <identity_file> user <user> [(config-file <config_file>)] [(strict-host-check <strict_host_check>)]
vpc-tools.py ssh-config (vpc <vpc_id> | stack-name <stack_name>) [(identity-file <identity_file>)] user <user> [(config-file <config_file>)] [(strict-host-check <strict_host_check>)]
vpc-tools.py (-h --help)
vpc-tools.py (-v --version)
......@@ -21,26 +21,34 @@ VERSION="vpc tools 0.1"
DEFAULT_USER="ubuntu"
DEFAULT_HOST_CHECK="ask"
JUMPBOX_CONFIG = """
Host {jump_box}
HostName {ip}
IdentityFile {identity_file}
ForwardAgent yes
User {user}
StrictHostKeyChecking {strict_host_check}
BASTION_CONFIG = """Host {jump_box}
HostName {ip}
ForwardAgent yes
User {user}
StrictHostKeyChecking {strict_host_check}
{identity_line}
"""
HOST_CONFIG = """
# Instance ID: {instance_id}
Host {name}
ProxyCommand ssh {config_file} -W %h:%p {jump_box}
HostName {ip}
IdentityFile {identity_file}
ForwardAgent yes
User {user}
StrictHostKeyChecking {strict_host_check}
HOST_CONFIG = """# Instance ID: {instance_id}
Host {name}
ProxyCommand ssh {config_file} -W %h:%p {jump_box}
HostName {ip}
ForwardAgent yes
User {user}
StrictHostKeyChecking {strict_host_check}
{identity_line}
"""
BASTION_HOST_CONFIG = """# Instance ID: {instance_id}
Host {name}
HostName {ip}
ForwardAgent yes
User {user}
StrictHostKeyChecking {strict_host_check}
{identity_line}
"""
def dispatch(args):
......@@ -59,7 +67,12 @@ def _ssh_config(args):
vpc = boto.connect_vpc()
identity_file = args.get("<identity_file>")
identity_file = args.get("<identity_file>", None)
if identity_file:
identity_line = "IdentityFile {}".format(identity_file)
else:
identity_line = ""
user = args.get("<user>")
config_file = args.get("<config_file>")
strict_host_check = args.get("<strict_host_check>")
......@@ -75,7 +88,7 @@ def _ssh_config(args):
else:
config_file = ""
jump_box = "{stack_name}-jumpbox".format(stack_name=stack_name)
jump_box = "{stack_name}-bastion".format(stack_name=stack_name)
friendly = "{stack_name}-{logical_id}-{instance_number}"
id_type_counter = defaultdict(int)
......@@ -100,39 +113,61 @@ def _ssh_config(args):
if logical_id == "BastionHost" or logical_id == 'bastion':
print JUMPBOX_CONFIG.format(
print BASTION_CONFIG.format(
jump_box=jump_box,
ip=instance.ip_address,
user=user,
identity_file=identity_file,
strict_host_check=strict_host_check)
# Print host config even for the bastion box because that is how
# ansible accesses it.
print HOST_CONFIG.format(
name=instance.private_ip_address,
jump_box=jump_box,
ip=instance.private_ip_address,
user=user,
identity_file=identity_file,
config_file=config_file,
strict_host_check=strict_host_check,
instance_id=instance.id)
#duplicating for convenience with ansible
name = friendly.format(stack_name=stack_name,
logical_id=logical_id,
instance_number=instance_number)
print HOST_CONFIG.format(
name=name,
jump_box=jump_box,
ip=instance.private_ip_address,
user=user,
identity_file=identity_file,
config_file=config_file,
strict_host_check=strict_host_check,
instance_id=instance.id)
strict_host_check=strict_host_check,
identity_line=identity_line)
print BASTION_HOST_CONFIG.format(
name=instance.private_ip_address,
ip=instance.ip_address,
user=user,
instance_id=instance.id,
strict_host_check=strict_host_check,
identity_line=identity_line)
#duplicating for convenience with ansible
name = friendly.format(stack_name=stack_name,
logical_id=logical_id,
instance_number=instance_number)
print BASTION_HOST_CONFIG.format(
name=name,
ip=instance.ip_address,
user=user,
strict_host_check=strict_host_check,
instance_id=instance.id,
identity_line=identity_line)
else:
# Print host config even for the bastion box because that is how
# ansible accesses it.
print HOST_CONFIG.format(
name=instance.private_ip_address,
jump_box=jump_box,
ip=instance.private_ip_address,
user=user,
config_file=config_file,
strict_host_check=strict_host_check,
instance_id=instance.id,
identity_line=identity_line)
#duplicating for convenience with ansible
name = friendly.format(stack_name=stack_name,
logical_id=logical_id,
instance_number=instance_number)
print HOST_CONFIG.format(
name=name,
jump_box=jump_box,
ip=instance.private_ip_address,
user=user,
config_file=config_file,
strict_host_check=strict_host_check,
instance_id=instance.id,
identity_line=identity_line)
if __name__ == '__main__':
args = docopt(__doc__, version=VERSION)
......
......@@ -26,61 +26,84 @@ import argparse
import boto
import datetime
from vpcutil import vpc_for_stack_name
import xml.dom.minidom
import re
r53 = boto.connect_route53()
def add_or_update_record(zone, record_name, record_type,
extra_play_dns = {"edxapp":["courses","studio"]}
class DNSRecord():
    """
    Value object describing a single route53 record to create or update.

    Fields mirror a route53 change-batch entry: the hosted zone, the
    record's name, its type (e.g. 'CNAME'), the TTL in seconds, and the
    list of values the record resolves to.
    """

    def __init__(self, zone, record_name, record_type,
                 record_ttl, record_values):
        self.record_name = record_name
        self.record_type = record_type
        self.record_ttl = record_ttl
        self.record_values = record_values
        self.zone = zone
def add_or_update_record(dns_records):
"""
Creates or updates a DNS record in a hosted route53
zone
"""
change_set = boto.route53.record.ResourceRecordSets()
status_msg = """
record_name: {}
record_type: {}
record_ttl: {}
record_values: {}
""".format(record_name, record_type,
record_ttl, record_values)
for record in dns_records:
if args.noop:
print("Would have updated DNS record:\n{}".format(status_msg))
return
status_msg = """
record_name: {}
record_type: {}
record_ttl: {}
record_values: {}
""".format(record.record_name, record.record_type,
record.record_ttl, record.record_values)
zone_id = zone.Id.replace("/hostedzone/", "")
if args.noop:
print("Would have updated DNS record:\n{}".format(status_msg))
records = r53.get_all_rrsets(zone_id)
zone_id = record.zone.Id.replace("/hostedzone/", "")
old_records = {r.name[:-1]: r for r in records}
records = r53.get_all_rrsets(zone_id)
change_set = boto.route53.record.ResourceRecordSets()
old_records = {r.name[:-1]: r for r in records}
# If the record name already points to something.
# Delete the existing connection.
if record_name in old_records.keys():
print("Deleting record:\n{}".format(status_msg))
change = change_set.add_change(
'DELETE',
record_name,
record_type,
record_ttl)
# If the record name already points to something.
# Delete the existing connection.
if record.record_name in old_records.keys():
if args.force:
print("Deleting record:\n{}".format(status_msg))
change = change_set.add_change(
'DELETE',
record.record_name,
record.record_type,
record.record_ttl)
else:
raise RuntimeError(
"DNS record exists for {} and force was not specified.".
format(record.record_name))
for value in old_records[record_name].resource_records:
change.add_value(value)
for value in old_records[record.record_name].resource_records:
change.add_value(value)
change = change_set.add_change(
'CREATE',
record_name,
record_type,
record_ttl)
change = change_set.add_change(
'CREATE',
record.record_name,
record.record_type,
record.record_ttl)
for value in record_values:
change.add_value(value)
for value in record.record_values:
change.add_value(value)
r53.change_rrsets(zone_id, change_set.to_xml())
print("Updated DNS record:\n{}".format(status_msg))
if args.noop:
print("Would have submitted the following change set:\n")
xml_doc = xml.dom.minidom.parseString(change_set.to_xml())
print xml_doc.toprettyxml()
else:
r53.change_rrsets(zone_id, change_set.to_xml())
print("Updated DNS record:\n{}".format(status_msg))
def get_or_create_hosted_zone(zone_name):
......@@ -112,12 +135,42 @@ def get_or_create_hosted_zone(zone_name):
if parent_zone:
print("Updating parent zone {}".format(parent_zone_name))
add_or_update_record(parent_zone,
zone_name, 'NS', 900,
zone.NameServers)
dns_records = set()
dns_records.add(DNSRecord(parent_zone,zone_name,'NS',900,zone.NameServers))
add_or_update_record(dns_records)
return zone
def get_security_group_dns(group_name):
    """
    Derive (environment, deployment, play) from an ELB security group name.

    Group names look like: stage-edx-RabbitMQELBSecurityGroup-YB8ZKIZYN1EN
    The last component is a CloudFormation salt and is discarded; the play
    is the group segment with the 'ELBSecurityGroup' suffix stripped,
    lower-cased.
    """
    env, deployment, sec_group, _salt = group_name.split('-')
    play = sec_group.replace("ELBSecurityGroup", "").lower()
    return env, deployment, play
# Determine (environment, play) for an ELB by reading the EC2 tags of the
# first attached instance that has them.  Raises KeyError (after printing a
# diagnostic) when an instance is missing the 'environment' tag.
# NOTE(review): if the ELB has no instances at all, env_tag/play_tag are
# never bound and the return raises NameError -- confirm callers guarantee
# at least one instance.
def get_dns_from_instances(elb):
ec2_con = boto.connect_ec2()
for inst in elb.instances:
instance = ec2_con.get_all_instances(
instance_ids=[inst.id])[0].instances[0]
try:
env_tag = instance.tags['environment']
if 'play' in instance.tags:
play_tag = instance.tags['play']
else:
# deprecated, for backwards compatibility
play_tag = instance.tags['role']
break # only need the first instance for tag info
except KeyError:
print("Instance {}, attached to elb {} does not "
"have tags for environment and play".format(elb, inst))
raise
return env_tag, play_tag
def update_elb_rds_dns(zone):
"""
......@@ -127,9 +180,11 @@ def update_elb_rds_dns(zone):
to the ELBs to create the dns name
"""
dns_records = set()
elb_con = boto.connect_elb()
ec2_con = boto.connect_ec2()
rds_con = boto.connect_rds()
vpc_id = vpc_for_stack_name(args.stack_name)
if not zone and args.noop:
......@@ -139,41 +194,38 @@ def update_elb_rds_dns(zone):
else:
zone_name = zone.Name[:-1]
stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
if elb.vpc_id == vpc_id]
for elb in stack_elbs:
if "RabbitMQ" in elb.source_security_group.name or "ElasticSearch" in elb.source_security_group.name:
env_tag,deployment,play_tag = get_security_group_dns(elb.source_security_group.name)
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
else:
env_tag,play_tag = get_dns_from_instances(elb)
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
if extra_play_dns.has_key(play_tag):
for name in extra_play_dns.get(play_tag):
fqdn = "{}-{}.{}".format(env_tag, name, zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[elb.dns_name]))
stack_rdss = [rds for rds in rds_con.get_all_dbinstances()
if hasattr(rds.subnet_group, 'vpc_id') and
rds.subnet_group.vpc_id == vpc_id]
for rds in stack_rdss:
fqdn = "{}.{}".format('rds', zone_name)
add_or_update_record(zone, fqdn, 'CNAME', 600,
[stack_rdss[0].endpoint[0]])
stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
if elb.vpc_id == vpc_id]
# TODO the current version of the RDS API doesn't support
# looking up RDS instance tags. Hence, we are using the
# env_tag that was set via the loop over instances above.
for rds in stack_rdss:
fqdn = "{}-{}.{}".format(env_tag,'rds', zone_name)
dns_records.add(DNSRecord(zone,fqdn,'CNAME',600,[stack_rdss[0].endpoint[0]]))
for elb in stack_elbs:
for inst in elb.instances:
instance = ec2_con.get_all_instances(
instance_ids=[inst.id])[0].instances[0]
try:
env_tag = instance.tags['environment']
if 'play' in instance.tags:
play_tag = instance.tags['play']
else:
# deprecated, for backwards compatibility
play_tag = instance.tags['role']
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
add_or_update_record(zone, fqdn, 'CNAME', 600, [elb.dns_name])
if play_tag == 'edxapp':
# create courses and studio CNAME records for edxapp
for name in ['courses', 'studio']:
fqdn = "{}-{}.{}".format(env_tag, name, zone_name)
add_or_update_record(zone, fqdn, 'CNAME',
600, [elb.dns_name])
break # only need the first instance for tag info
except KeyError:
print("Instance {}, attached to elb {} does not "
"have tags for environment and play".format(elb, inst))
raise
add_or_update_record(dns_records)
if __name__ == "__main__":
description = "Give a cloudformation stack name, for an edx stack, setup \
......@@ -188,7 +240,11 @@ if __name__ == "__main__":
parser.add_argument('-z', '--zone-name', default="vpc.edx.org",
help="The name of the zone under which to "
"create the dns entries.")
parser.add_argument('-f', '--force',
help="Force reuse of an existing name in a zone",
action="store_true",default=False)
args = parser.parse_args()
zone = get_or_create_hosted_zone(args.zone_name)
update_elb_rds_dns(zone)
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
CPU_COUNT = 2
......@@ -14,7 +16,7 @@ if ENV['VAGRANT_MOUNT_BASE']
end
Vagrant.configure("2") do |config|
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates a devstack from a base Ubuntu 12.04 image
config.vm.box = "precise64"
......
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
CPU_COUNT = 2
Vagrant.configure("2") do |config|
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "precise64"
config.vm.box_url = "http://files.vagrantup.com/precise64.box"
......
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
CPU_COUNT = 2
......@@ -14,9 +16,9 @@ cd /edx/app/edx_ansible/edx_ansible/playbooks
# The vagrant-devstack.yml playbook will also do this, but only
# after loading the playbooks into memory. If these are out of date,
# this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration master
/edx/bin/update configuration release
ansible-playbook -i localhost, -c local vagrant-devstack.yml
ansible-playbook -i localhost, -c local vagrant-devstack.yml --tags=deploy -e configuration_version=release
SCRIPT
edx_platform_mount_dir = "edx-platform"
......@@ -31,17 +33,22 @@ if ENV['VAGRANT_MOUNT_BASE']
end
Vagrant.configure("2") do |config|
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "gugelhupf-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140210-gugelhupf-devstack.box"
config.vm.box = "himbasha-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140325-himbasha-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
# Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true
end
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
......
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
CPU_COUNT = 2
Vagrant.configure("2") do |config|
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "facaccia"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140210-gugelhupf-fullstack.box"
config.vm.box = "himbasha-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140325-himbasha-fullstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.hostsupdater.aliases = ["preview.localhost"]
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment