Commit b56ffe09 by John Jarvis

Merge pull request #729 from edx/jarv/focaccia

Jarv/focaccia
parents 50041fa8 38fc11c9
{
"AWSTemplateFormatVersion":"2010-09-09",
"Description":"Bring up a VPC for operations.",
"Parameters":{
"DeploymentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, <university>, <org>"
},
"KeyName":{
"Type":"String",
"Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server",
"Default":"deployment"
},
"AdminInstanceType":{
"Description":"WebServer EC2 instance type",
"Type":"String",
"Default":"m1.large",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"SSHLocation":{
"Description":"The IP address range that can be used to SSH to the EC2 instances",
"Type":"String",
"MinLength":"9",
"MaxLength":"18",
"Default":"0.0.0.0/0",
"AllowedPattern":"(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
"ConstraintDescription":"must be a valid IP CIDR range of the form x.x.x.x/x."
},
"BastionInstanceType":{
"Description":"Bastion Host EC2 instance type",
"Type":"String",
"Default":"t1.micro",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"NATInstanceType":{
"Description":"NAT Device EC2 instance type",
"Type":"String",
"Default":"t1.micro",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"JenkinsServerPort":{
"Description":"The TCP port for the Jenkins server",
"Type":"Number",
"Default":"8080"
},
"AsgardServerPort":{
"Description":"The TCP port for the Asgard server",
"Type":"Number",
"Default":"8090"
},
"MongoServicePort":{
"Description":"The TCP port for the deployment mongo server",
"Type":"Number",
"Default":"10001"
}
},
"Mappings":{
"AWSInstanceType2Arch":{
"t1.micro": { "Arch":"64" },
"m1.small": { "Arch":"64" },
"m1.medium": { "Arch":"64" },
"m1.large": { "Arch":"64" },
"m1.xlarge": { "Arch":"64" },
"m2.xlarge": { "Arch":"64" },
"m2.2xlarge": { "Arch":"64" },
"m2.4xlarge": { "Arch":"64" },
"m3.xlarge": { "Arch":"64" },
"m3.2xlarge": { "Arch":"64" },
"c1.medium": { "Arch":"64" },
"c1.xlarge": { "Arch":"64" },
"cg1.4xlarge": { "Arch":"64HVM" }
},
"AWSRegionArch2AMI":{
"us-east-1": { "32":"ami-def89fb7", "64":"ami-d0f89fb9", "64HVM":"ami-b93264d0" },
"us-west-1": { "32":"ami-fc002cb9", "64":"ami-fe002cbb" },
"us-west-2": { "32":"ami-0ef96e3e", "64":"ami-70f96e40", "64HVM":"ami-6cad335c" },
"eu-west-1": { "32":"ami-c27b6fb6", "64":"ami-ce7b6fba", "64HVM":"ami-8c987efb" },
"sa-east-1": { "32":"ami-a1da00bc", "64":"ami-a3da00be" },
"ap-southeast-1": { "32":"ami-66084734", "64":"ami-64084736" },
"ap-southeast-2": { "32":"ami-06ea7a3c", "64":"ami-04ea7a3e" },
"ap-northeast-1": { "32":"ami-fc6ceefd", "64":"ami-fe6ceeff" }
},
"AWSNATAMI":{
"us-east-1": { "AMI":"ami-c6699baf" },
"us-west-2": { "AMI":"ami-52ff7262" },
"us-west-1": { "AMI":"ami-3bcc9e7e" },
"eu-west-1": { "AMI":"ami-0b5b6c7f" },
"ap-southeast-1": { "AMI":"ami-02eb9350" },
"ap-southeast-2": { "AMI":"ami-ab990e91" },
"ap-northeast-1": { "AMI":"ami-14d86d15" },
"sa-east-1": { "AMI":"ami-0439e619" }
},
"SubnetConfig":{
"VPC": { "CIDR":"10.0.0.0/16" },
"Public01": { "CIDR":"10.0.0.0/24" },
"Admin": { "CIDR":"10.0.185.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
"us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" },
"us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" },
"eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" },
"sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" },
"ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" },
"ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" },
"ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" }
}
},
"Resources":{
"AdminVPC":{
"Type":"AWS::EC2::VPC",
"Properties":{
"EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true",
"CidrBlock":"10.0.0.0/16",
"InstanceTenancy":"default"
}
},
"PublicSubnet01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Public01",
"CIDR"
]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"immutable_metadata",
"Value":"{'purpose':'external','target':'ec2'}"
}
]
}
},
"AdminSubnet":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Admin",
"CIDR"
]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Application",
"Value":"admin"
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"InternetGateway":{
"Type":"AWS::EC2::InternetGateway",
"Properties":{
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Public"
}
]
}
},
"GatewayToInternet":{
"Type":"AWS::EC2::VPCGatewayAttachment",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"InternetGatewayId":{
"Ref":"InternetGateway"
}
}
},
"PublicRouteTable":{
"Type":"AWS::EC2::RouteTable",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Public"
}
]
}
},
"PublicRoute":{
"Type":"AWS::EC2::Route",
"Properties":{
"RouteTableId":{
"Ref":"PublicRouteTable"
},
"DestinationCidrBlock":"0.0.0.0/0",
"GatewayId":{
"Ref":"InternetGateway"
}
}
},
"PublicSubnetRouteTableAssociation01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"PublicSubnet01"
},
"RouteTableId":{
"Ref":"PublicRouteTable"
}
}
},
"PublicNetworkAcl":{
"Type":"AWS::EC2::NetworkAcl",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Public"
}
]
}
},
"InboundHTTPPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"80",
"To":"80"
}
}
},
"InboundHTTPSPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"101",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"443",
"To":"443"
}
}
},
"InboundSSHPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"102",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":{
"Ref":"SSHLocation"
},
"PortRange":{
"From":"22",
"To":"22"
}
}
},
"InboundEmphemeralPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"103",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"1024",
"To":"65535"
}
}
},
"OutboundPublicNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"PublicSubnetNetworkAclAssociation01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"PublicSubnet01"
},
"NetworkAclId":{
"Ref":"PublicNetworkAcl"
}
}
},
"PrivateRouteTable":{
"Type":"AWS::EC2::RouteTable",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"PrivateRoute":{
"Type":"AWS::EC2::Route",
"Properties":{
"RouteTableId":{
"Ref":"PrivateRouteTable"
},
"DestinationCidrBlock":"0.0.0.0/0",
"InstanceId":{
"Ref":"NATDevice"
}
}
},
"PrivateSubnetRouteTableAssociationAdmin":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"AdminSubnet"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateNetworkAcl":{
"Type":"AWS::EC2::NetworkAcl",
"Properties":{
"VpcId":{
"Ref":"AdminVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"InboundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"OutBoundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"PrivateSubnetNetworkAclAssociationAdmin":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"AdminSubnet"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"NATIPAddress":{
"Type":"AWS::EC2::EIP",
"Properties":{
"Domain":"vpc",
"InstanceId":{
"Ref":"NATDevice"
}
}
},
"NATDevice":{
"Type":"AWS::EC2::Instance",
"Properties":{
"InstanceType":{
"Ref":"NATInstanceType"
},
"KeyName":{
"Ref":"KeyName"
},
"SubnetId":{
"Ref":"PublicSubnet01"
},
"SourceDestCheck":"false",
"ImageId":{
"Fn::FindInMap":[
"AWSNATAMI",
{
"Ref":"AWS::Region"
},
"AMI"
]
},
"SecurityGroupIds":[
{
"Ref":"NATSecurityGroup"
}
]
}
},
"NATSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Enable internal access to the NAT device",
"VpcId":{
"Ref":"AdminVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":{
"Ref":"SSHLocation"
}
},
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"443",
"ToPort":"443",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "MongoServicePort" },
"ToPort":{ "Ref": "MongoServicePort" },
"CidrIp":"0.0.0.0/0"
}
],
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":{
"Ref":"SSHLocation"
}
},
{
"IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"443",
"ToPort":"443",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "MongoServicePort" },
"ToPort":{ "Ref": "MongoServicePort" },
"CidrIp":"0.0.0.0/0"
}
]
}
},
"BastionIPAddress":{
"Type":"AWS::EC2::EIP",
"Properties":{
"Domain":"vpc",
"InstanceId":{
"Ref":"BastionHost"
}
}
},
"BastionHost":{
"Type":"AWS::EC2::Instance",
"Properties":{
"InstanceType":{
"Ref":"BastionInstanceType"
},
"KeyName":{
"Ref":"KeyName"
},
"SubnetId":{
"Ref":"PublicSubnet01"
},
"ImageId":{
"Fn::FindInMap":[
"AWSRegionArch2AMI",
{
"Ref":"AWS::Region"
},
{
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"BastionInstanceType"
},
"Arch"
]
}
]
},
"SecurityGroupIds":[
{
"Ref":"BastionSecurityGroup"
}
],
"Tags":[
{
"Key":"play",
"Value":"bastion"
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
}
}
]
}
},
"BastionSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Enable access to the Bastion host",
"VpcId":{
"Ref":"AdminVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":{
"Ref":"SSHLocation"
}
}
],
"SecurityGroupEgress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":"10.0.0.0/16"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "JenkinsServerPort" },
"ToPort":{ "Ref": "JenkinsServerPort" },
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "AsgardServerPort" },
"ToPort":{ "Ref": "AsgardServerPort" },
"CidrIp":"0.0.0.0/0"
}
]
}
},
"AdminRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [ {
"Effect": "Allow",
"Principal": {
"Service": [ "ec2.amazonaws.com" ]
},
"Action": [ "sts:AssumeRole" ]
} ]
},
"Path": "/",
"Policies": [ {
"PolicyName": "AdminBasePolicy",
"PolicyDocument": {
"Statement":[
{
"Effect":"Allow",
"Action": "*",
"Resource":"*"
}
]
}
} ]
}
},
"AdminInstanceProfile": {
"Type": "AWS::IAM::InstanceProfile",
"Properties": {
"Path": "/",
"Roles": [ {
"Ref": "AdminRole"
} ]
}
},
"AdminHost":{
"Type":"AWS::EC2::Instance",
"Properties":{
"InstanceType":{
"Ref":"AdminInstanceType"
},
"KeyName":{
"Ref":"KeyName"
},
"IamInstanceProfile" : {
"Ref" : "AdminInstanceProfile"
},
"SubnetId":{
"Ref":"AdminSubnet"
},
"ImageId":{
"Fn::FindInMap":[
"AWSRegionArch2AMI",
{
"Ref":"AWS::Region"
},
{
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"AdminInstanceType"
},
"Arch"
]
}
]
},
"SecurityGroupIds":[
{
"Ref":"AdminSecurityGroup"
}
],
"Tags":[
{
"Key":"play",
"Value":"admin"
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
}
}
],
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"AdminServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date`\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date`\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"AdminServerWaitHandle"
},
"'\n"
]
]
}
}
}
},
"AdminSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Admin Security Group",
"VpcId":{
"Ref":"AdminVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":{
"Ref":"SSHLocation"
}
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "JenkinsServerPort" },
"ToPort":{ "Ref": "JenkinsServerPort" },
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "AsgardServerPort" },
"ToPort":{ "Ref": "AsgardServerPort" },
"CidrIp":"0.0.0.0/0"
}
]
}
},
"AdminServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"AdminServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"AdminHost",
"Properties":{
"Handle":{
"Ref":"AdminServerWaitHandle"
},
"Timeout":"1200"
}
}
}
}
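The admin template above is self-contained, so it can be launched on its own once its parameters are chosen. Below is a minimal sketch using boto, the same library the fabfile further down relies on; the region, stack name, template path, and parameter values are placeholders, and CAPABILITY_IAM is required because the template creates the AdminRole and AdminInstanceProfile IAM resources.

import boto.cloudformation

# Placeholder region, stack name, template path, and parameter values.
cfn = boto.cloudformation.connect_to_region('us-east-1')
with open('cloudformation_templates/vpc-admin.json') as template:
    template_body = template.read()

cfn.create_stack(
    'ops-vpc',
    template_body=template_body,
    parameters=[
        ('DeploymentTag', 'edx'),
        ('KeyName', 'deployment'),
        ('SSHLocation', '10.0.0.0/8'),
    ],
    capabilities=['CAPABILITY_IAM'],  # the template creates an IAM role and instance profile
)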
@@ -62,6 +62,29 @@
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"ElasticsearchInstanceType":{
"Description":"Worker EC2 instance type",
"Type":"String",
"Default":"m1.small",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"ForumInstanceType":{ "ForumInstanceType":{
"Description":"Worker EC2 instance type", "Description":"Worker EC2 instance type",
"Type":"String", "Type":"String",
...@@ -204,7 +227,7 @@ ...@@ -204,7 +227,7 @@
"ForumServerPort":{ "ForumServerPort":{
"Description":"The TCP port for the Forum Server", "Description":"The TCP port for the Forum Server",
"Type":"Number", "Type":"Number",
"Default":"4567" "Default":"18080"
}, },
"CacheNodePort":{ "CacheNodePort":{
"Description":"The TCP port for the nodes in the Elasticache cluster", "Description":"The TCP port for the nodes in the Elasticache cluster",
...@@ -287,6 +310,11 @@ ...@@ -287,6 +310,11 @@
"Type":"Number", "Type":"Number",
"Default":"2" "Default":"2"
}, },
"ElasticsearchDesiredCapacity":{
"Description":"The Auto-scaling group desired capacity for the forums hosts",
"Type":"Number",
"Default":"2"
},
"ForumDesiredCapacity":{ "ForumDesiredCapacity":{
"Description":"The Auto-scaling group desired capacity for the forums hosts", "Description":"The Auto-scaling group desired capacity for the forums hosts",
"Type":"Number", "Type":"Number",
...@@ -364,6 +392,16 @@ ...@@ -364,6 +392,16 @@
], ],
"ConstraintDescription":"must select a valid database instance type." "ConstraintDescription":"must select a valid database instance type."
}, },
"DBEngineVersion":{
"Default":"5.6",
"Description":"Version of MySQL for the RDS",
"Type":"String",
"AllowedValues":[
"5.5",
"5.6"
],
"ConstraintDescription":"must select a valid database version."
},
"DBAllocatedStorage":{ "DBAllocatedStorage":{
"Default":"5", "Default":"5",
"Description":"The size of the database (Gb)", "Description":"The size of the database (Gb)",
...@@ -440,6 +478,8 @@ ...@@ -440,6 +478,8 @@
"Mongo01": { "CIDR":"10.0.90.0/24" }, "Mongo01": { "CIDR":"10.0.90.0/24" },
"Mongo02": { "CIDR":"10.0.91.0/24" }, "Mongo02": { "CIDR":"10.0.91.0/24" },
"Mongo03": { "CIDR":"10.0.92.0/24" }, "Mongo03": { "CIDR":"10.0.92.0/24" },
"Elasticsearch01": { "CIDR":"10.0.100.0/24" },
"Elasticsearch02": { "CIDR":"10.0.101.0/24" },
"Admin": { "CIDR":"10.0.200.0/24" } "Admin": { "CIDR":"10.0.200.0/24" }
}, },
"MapRegionsToAvailZones":{ "MapRegionsToAvailZones":{
...@@ -482,7 +522,22 @@ ...@@ -482,7 +522,22 @@
{ "Ref":"AWS::Region" }, { "Ref":"AWS::Region" },
"AZone0" "AZone0"
] ]
} },
"Tags":[
{
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"external','target':'ec2'}"
]
]
}
}
]
}
},
"PublicSubnet02":{
@@ -504,7 +559,22 @@
{ "Ref":"AWS::Region" },
"AZone1"
]
-}
+},
"Tags":[
{
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"external','target':'ec2'}"
]
]
}
}
]
}
},
"AdminSubnet":{
@@ -567,6 +637,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-edxapp','target':'ec2'}"
]
]
}
}
]
}
@@ -599,6 +682,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key":"immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-edxapp','target':'ec2'}"
]
]
}
}
]
}
@@ -631,6 +727,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xqueue','target':'ec2'}"
]
]
}
}
]
}
@@ -663,6 +772,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xqueue','target':'ec2'}"
]
]
}
}
]
}
@@ -695,6 +817,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-rabbit','target':'ec2'}"
]
]
}
}
]
}
@@ -727,6 +862,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-rabbit','target':'ec2'}"
]
]
}
}
]
}
@@ -759,6 +907,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xserver','target':'ec2'}"
]
]
}
}
]
}
@@ -791,6 +952,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-xserver','target':'ec2'}"
]
]
}
}
]
}
@@ -951,6 +1125,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-worker','target':'ec2'}"
]
]
}
}
]
}
@@ -983,6 +1170,109 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-worker','target':'ec2'}"
]
]
}
}
]
}
},
"ElasticsearchSubnet01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Elasticsearch01",
"CIDR"
]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Application",
"Value":"forum"
},
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-elasticsearch','target':'ec2'}"
]
]
}
}
]
}
},
"ElasticsearchSubnet02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Elasticsearch02",
"CIDR"
]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"Application",
"Value":"forum"
},
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-elasticsearch','target':'ec2'}"
]
]
}
}
]
}
@@ -1015,6 +1305,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-forum','target':'ec2'}"
]
]
}
}
]
}
@@ -1047,6 +1350,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-forum','target':'ec2'}"
]
]
}
}
]
}
@@ -1079,6 +1395,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-mongo','target':'ec2'}"
]
]
}
}
]
}
@@ -1111,6 +1440,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-mongo','target':'ec2'}"
]
]
}
}
]
}
@@ -1143,6 +1485,19 @@
{
"Key":"Network",
"Value":"Private"
},
{
"Key" : "immutable_metadata",
"Value":{"Fn::Join":["",
["{'purpose':'",
{"Ref":"EnvironmentTag"},
"-",
{"Ref":"DeploymentTag"},
"-",
"internal-mongo','target':'ec2'}"
]
]
}
}
]
}
@@ -1458,6 +1813,17 @@
}
}
},
"PrivateSubnetRouteTableAssociationAdmin":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"AdminSubnet"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationEdxapp01":{ "PrivateSubnetRouteTableAssociationEdxapp01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation", "Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{ "Properties":{
...@@ -1612,6 +1978,28 @@ ...@@ -1612,6 +1978,28 @@
} }
} }
}, },
"PrivateSubnetRouteTableAssociationElasticsearch01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"ElasticsearchSubnet01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationElasticsearch02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"ElasticsearchSubnet02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationForum01":{ "PrivateSubnetRouteTableAssociationForum01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation", "Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{ "Properties":{
...@@ -1721,6 +2109,17 @@ ...@@ -1721,6 +2109,17 @@
} }
} }
}, },
"PrivateSubnetNetworkAclAssociationAdmin":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"AdminSubnet"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationEdxapp01":{ "PrivateSubnetNetworkAclAssociationEdxapp01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation", "Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{ "Properties":{
...@@ -1875,6 +2274,28 @@ ...@@ -1875,6 +2274,28 @@
} }
} }
}, },
"PrivateSubnetNetworkAclAssociationElasticsearch01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"ElasticsearchSubnet01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationElasticsearch02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"ElasticsearchSubnet02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationForum01":{ "PrivateSubnetNetworkAclAssociationForum01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation", "Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{ "Properties":{
...@@ -2202,7 +2623,7 @@ ...@@ -2202,7 +2623,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"bastion" "Value":"bastion"
}, },
{ {
...@@ -2613,7 +3034,7 @@ ...@@ -2613,7 +3034,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"edxapp", "Value":"edxapp",
"PropagateAtLaunch":true "PropagateAtLaunch":true
}, },
...@@ -2964,7 +3385,7 @@ ...@@ -2964,7 +3385,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"xqueue", "Value":"xqueue",
"PropagateAtLaunch":true "PropagateAtLaunch":true
}, },
...@@ -3298,7 +3719,7 @@ ...@@ -3298,7 +3719,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"rabbitmq", "Value":"rabbitmq",
"PropagateAtLaunch":true "PropagateAtLaunch":true
}, },
...@@ -3668,7 +4089,7 @@ ...@@ -3668,7 +4089,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"xserver", "Value":"xserver",
"PropagateAtLaunch":true "PropagateAtLaunch":true
}, },
...@@ -3958,7 +4379,7 @@ ...@@ -3958,7 +4379,7 @@
"Ref":"DBClass" "Ref":"DBClass"
}, },
"Engine":"MySQL", "Engine":"MySQL",
"EngineVersion":"5.5", "EngineVersion":{ "Ref": "DBEngineVersion" },
"MasterUsername":{ "MasterUsername":{
"Ref":"DBUsername" "Ref":"DBUsername"
}, },
...@@ -3975,7 +4396,7 @@ ...@@ -3975,7 +4396,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"rds" "Value":"rds"
}, },
{ {
...@@ -4131,7 +4552,7 @@ ...@@ -4131,7 +4552,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"worker", "Value":"worker",
"PropagateAtLaunch":true "PropagateAtLaunch":true
}, },
...@@ -4270,6 +4691,267 @@ ...@@ -4270,6 +4691,267 @@
"Timeout":"1200" "Timeout":"1200"
} }
}, },
"ElasticsearchServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{
"SecurityGroups":[
{
"Ref":"ElasticsearchServerSecurityGroup"
}
],
"ImageId":{
"Fn::FindInMap":[
"AWSRegionArch2AMI",
{
"Ref":"AWS::Region"
},
{
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"ElasticsearchInstanceType"
},
"Arch"
]
}
]
},
"UserData":{
"Fn::Base64":{
"Fn::Join":[
"",
[
"#!/bin/bash -x\n",
"exec >> /home/ubuntu/cflog.log\n",
"exec 2>> /home/ubuntu/cflog.log\n",
"function error_exit\n",
"{\n",
" cfn-signal -e 1 -r \"$1\" '",
{
"Ref":"ElasticsearchServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"for dev in /dev/xvdc /dev/xvdd; do sudo echo w | fdisk $dev; sudo mkfs -t ext4 $dev;done;\n",
"sudo mkdir /mnt/logs\n",
"sudo mount /dev/xvdc /mnt/logs\n",
"sudo mount /dev/xvdd /opt\n",
"apt-get -y update\n",
"apt-get -y install python-setuptools\n",
"echo \"Python Tools installed\" - `date` >> /home/ubuntu/cflog.txt\n",
"easy_install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
"echo \"Cloudformation Boostrap installed \" - `date` >> /home/ubuntu/cflog.txt\n",
"# If all went well, signal success\n",
"cfn-signal -e $? -r 'Edx Server configuration' '",
{
"Ref":"ElasticsearchServerWaitHandle"
},
"'\n"
]
]
}
},
"KeyName":{
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"ElasticsearchInstanceType"
},
"BlockDeviceMappings":[
{
"DeviceName":"/dev/xvdc",
"Ebs":{
"VolumeSize":"50"
}
},
{
"DeviceName":"/dev/xvdd",
"Ebs":{
"VolumeSize":"50"
}
}
]
}
},
"ElasticsearchServerASGroup":{
"Type":"AWS::AutoScaling::AutoScalingGroup",
"Properties":{
"AvailabilityZones":[
{
"Fn::GetAtt":[
"ElasticsearchSubnet01",
"AvailabilityZone"
]
},
{
"Fn::GetAtt":[
"ElasticsearchSubnet02",
"AvailabilityZone"
]
}
],
"VPCZoneIdentifier":[
{
"Ref":"ElasticsearchSubnet01"
},
{
"Ref":"ElasticsearchSubnet02"
}
],
"Tags":[
{
"Key":"play",
"Value":"elasticsearch",
"PropagateAtLaunch":true
},
{
"Key":"environment",
"Value":{
"Ref":"EnvironmentTag"
},
"PropagateAtLaunch":true
},
{
"Key":"deployment",
"Value":{
"Ref":"DeploymentTag"
},
"PropagateAtLaunch":true
}
],
"LaunchConfigurationName":{
"Ref":"ElasticsearchServer"
},
"MinSize":{
"Ref":"ElasticsearchDesiredCapacity"
},
"MaxSize":{
"Ref":"ElasticsearchDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"ElasticsearchDesiredCapacity"
}
}
},
"ElasticsearchServerScaleUpPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"ElasticsearchServerASGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"1"
}
},
"ElasticsearchServerScaleDownPolicy":{
"Type":"AWS::AutoScaling::ScalingPolicy",
"Properties":{
"AdjustmentType":"ChangeInCapacity",
"AutoScalingGroupName":{
"Ref":"ElasticsearchServerASGroup"
},
"Cooldown":"60",
"ScalingAdjustment":"-1"
}
},
"ElasticsearchCPUAlarmHigh":{
"Type":"AWS::CloudWatch::Alarm",
"Properties":{
"AlarmDescription":"Scale-up if CPU > 90% for 10 minutes",
"MetricName":"CPUUtilization",
"Namespace":"AWS/EC2",
"Statistic":"Average",
"Period":"300",
"EvaluationPeriods":"2",
"Threshold":"90",
"AlarmActions":[
{
"Ref":"ElasticsearchServerScaleUpPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"ElasticsearchServerASGroup"
}
}
],
"ComparisonOperator":"GreaterThanThreshold"
}
},
"ElasticsearchCPUAlarmLow":{
"Type":"AWS::CloudWatch::Alarm",
"Properties":{
"AlarmDescription":"Scale-down if CPU < 70% for 10 minutes",
"MetricName":"CPUUtilization",
"Namespace":"AWS/EC2",
"Statistic":"Average",
"Period":"300",
"EvaluationPeriods":"2",
"Threshold":"70",
"AlarmActions":[
{
"Ref":"ElasticsearchServerScaleDownPolicy"
}
],
"Dimensions":[
{
"Name":"AutoScalingGroupName",
"Value":{
"Ref":"ElasticsearchServerASGroup"
}
}
],
"ComparisonOperator":"LessThanThreshold"
}
},
"ElasticsearchServerSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up SSH access plus Edx Server required ports",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"22",
"ToPort":"22",
"CidrIp":{
"Ref":"SSHLocation"
}
},
{
"IpProtocol":"tcp",
"FromPort": 9200,
"ToPort": 9200,
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort": 9300,
"ToPort": 9300,
"CidrIp":"0.0.0.0/0"
}
]
}
},
"ElasticsearchServerWaitHandle":{
"Type":"AWS::CloudFormation::WaitConditionHandle"
},
"ElasticsearchServerWaitCondition":{
"Type":"AWS::CloudFormation::WaitCondition",
"DependsOn":"ElasticsearchServer",
"Properties":{
"Handle":{
"Ref":"ElasticsearchServerWaitHandle"
},
"Timeout":"1200"
}
},
"ForumServer":{ "ForumServer":{
"Type":"AWS::AutoScaling::LaunchConfiguration", "Type":"AWS::AutoScaling::LaunchConfiguration",
"Properties":{ "Properties":{
...@@ -4380,7 +5062,7 @@ ...@@ -4380,7 +5062,7 @@
], ],
"Tags":[ "Tags":[
{ {
"Key":"role", "Key":"play",
"Value":"forum", "Value":"forum",
"PropagateAtLaunch":true "PropagateAtLaunch":true
}, },
...@@ -4502,13 +5184,6 @@ ...@@ -4502,13 +5184,6 @@
], ],
"Listeners":[ "Listeners":[
{ {
"LoadBalancerPort":"80",
"InstancePort":{
"Ref":"ForumServerPort"
},
"Protocol":"HTTP"
},
{
"LoadBalancerPort":"443", "LoadBalancerPort":"443",
"InstancePort":{ "InstancePort":{
"Ref":"ForumServerPort" "Ref":"ForumServerPort"
...@@ -4521,7 +5196,12 @@ ...@@ -4521,7 +5196,12 @@
} }
], ],
"HealthCheck":{ "HealthCheck":{
"Target":"TCP:80", "Target":{"Fn::Join":["",
["TCP:",
{"Ref":"ForumServerPort"}
]
]
},
"HealthyThreshold":"3", "HealthyThreshold":"3",
"UnhealthyThreshold":"5", "UnhealthyThreshold":"5",
"Interval":"30", "Interval":"30",
...@@ -4540,19 +5220,13 @@ ...@@ -4540,19 +5220,13 @@
"ForumELBSecurityGroup":{ "ForumELBSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup", "Type":"AWS::EC2::SecurityGroup",
"Properties":{ "Properties":{
"GroupDescription":"Enable HTTP access on port 80", "GroupDescription":"Enable HTTPS access",
"VpcId":{ "VpcId":{
"Ref":"EdxVPC" "Ref":"EdxVPC"
}, },
"SecurityGroupIngress":[ "SecurityGroupIngress":[
{ {
"IpProtocol":"tcp", "IpProtocol":"tcp",
"FromPort":"80",
"ToPort":"80",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"443", "FromPort":"443",
"ToPort":"443", "ToPort":"443",
"CidrIp":"0.0.0.0/0" "CidrIp":"0.0.0.0/0"
...@@ -4588,7 +5262,9 @@ ...@@ -4588,7 +5262,9 @@
"IpProtocol":"tcp", "IpProtocol":"tcp",
"FromPort": { "Ref": "ForumServerPort" }, "FromPort": { "Ref": "ForumServerPort" },
"ToPort": { "Ref": "ForumServerPort" }, "ToPort": { "Ref": "ForumServerPort" },
"CidrIp":"0.0.0.0/0" "SourceSecurityGroupId" : {
"Ref" : "ForumELBSecurityGroup"
}
}
]
}
@@ -4776,7 +5452,7 @@
],
"Tags":[
{
-"Key":"role",
+"Key":"play",
"Value":"mongo",
"PropagateAtLaunch":true
},
...
# Additional Tasks
import cache
import clean
import ec2
import audit
import git
import hosts
import locks
import os
import ssh
import status
import migrate_check
import yaml
from dogapi import dog_stats_api, dog_http_api
from timestamps import TSWrapper
# Global tasks
import logging
from fabric.api import env, task, runs_once
from output import squelch
from datetime import datetime
import sys
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
env.linewise = True
env.noop = False
env.use_ssh_config = True
FORMAT = '[ %(asctime)s ] : %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
# add timestamps to output
sys.stdout = TSWrapper(sys.stdout)
sys.stderr = TSWrapper(sys.stderr)
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
dog_stats_api.start(api_key=package_data['datadog_api'], statsd=True)
dog_http_api.api_key = package_data['datadog_api']
@task
def noop():
"""
Disable modification of servers
"""
env.noop = True
dog_stats_api.stop()
@task
def quiet():
"""
Disables verbose output
"""
squelch()
@runs_once
@task()
def log(fname=None):
"""
Writes a logfile to disk of the run
"""
if not fname:
d = datetime.now()
fname = d.strftime('/var/tmp/fab-%Y%m%d-%H%M%S-{0}.log'.format(
os.getpid()))
env.logfile = fname
sys.stdout.log_to_file(fname)
sys.stderr.log_to_file(fname)
import time
from fabric.api import execute, local, task, runs_once
from fabric.utils import fastprint
from fabric.colors import blue
from ssh_tunnel import setup_tunnel
# These imports are to give aliases for these tasks
from hosts import by_name as name
from hosts import by_tags as tag
from hosts import by_tags as tags
from hosts import exemplar_from_tags as exemplar
from git import default_deploy as deploy
import logging
from fabric.api import serial, task, parallel, env, execute, runs_once, settings,sudo
from fabfile.safety import noopable
from multiprocessing import Manager
from timestamps import no_ts
from packages import PackageInfo
import tempfile
from output import notify
@task
@parallel
def collect_installed_packages(results):
"""
Collect all installed packages for the selected hosts and store them in env
"""
print env.host
pkg_info = PackageInfo()
results[env.host] = pkg_info.installed_packages()
@task
@serial
def display_installed_packages(installed_packages):
"""
Print all installed packages collected by collect_installed_packages
"""
# FIXME: env.hosts loses the port information here, not sure why
with no_ts():
for pkg in installed_packages['{0}:22'.format(env.host)]:
notify("{pkg.name} = {pkg.revision}".format(pkg=pkg))
@task(default=True)
@runs_once
def installed_packages(from_links=False):
"""
List all of the installed packages on the selected hosts
"""
installed_packages = Manager().dict()
execute(collect_installed_packages, installed_packages)
execute(display_installed_packages, installed_packages)
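# A hedged usage sketch (assuming this file is the 'audit' module imported by the
# fabfile above; the tag values are placeholders). Because installed_packages is the
# module's default task, selecting hosts by tag and then naming the module runs it:
#
#   fab tags:environment=prod,play=edxapp audit
#
# Each selected host contributes its package list, which is then printed host by host
# as "name = revision" lines.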
@task
def audit_user(user, audit_output=None):
"""
Logs on provided hosts and runs id for the supplied user with sudo. Output
is logged to the provided file argument or a default using the
python gettempdir() function and the following file name format:
/tmp/audit-user-{user}.csv
The contents of this file are
host,user,command output
Note that if the file already exists, output will be appended to the
existing file.
"""
logging.info("Auditing {host}.".format(host=env.host_string))
if not audit_output:
audit_output = tempfile.gettempdir() + "/audit-user-{user}.csv".format(
user=user)
with settings(warn_only=True):
with open(audit_output, 'a') as audit:
output = noopable(sudo)("id {user}".format(user=user))
audit.write("{host},{user},{output}\n".format(
host=env.host_string,
user=user,
output=output
)
)
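# Example invocation (hypothetical tag and user values; 'tags' and 'log' are the
# aliases defined in the top-level fabfile):
#
#   fab log tags:environment=prod,play=edxapp audit.audit_user:user=jenkins
#
# Each selected host appends one "host,user,output" row to the default report file,
# e.g. /tmp/audit-user-jenkins.csv.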
@task
def remove_user(user):
"""
Logs on to provided hosts and runs userdel for the supplied user with sudo.
The user's home directory is preserved.
"""
logging.info("Removing {user} user from {host}.".format(
user=user,host=env.host_string))
with settings(warn_only=True):
output = noopable(sudo)("userdel {user}".format(user=user))
logging.info("Output of userdel command on host {host} was {out}".format(
host=env.host_string,out=output
)
)
from fabric.api import task, runs_once, env, serial, puts, settings
from fabric.utils import fastprint
from fabric.colors import blue, red, white
from output import notify
from packages import PackageDescriptor
from output import unsquelched
from hosts import exemplar
from ssh_tunnel import setup_tunnel
from packages import PackageInfo
@task
@runs_once
def from_exemplar(**tags):
"""
Cache the set of packages installed on one host from the specified tags.
"""
host_string = setup_tunnel([exemplar(**tags)])[0]
with settings(host_string=host_string):
installed_packages()
@task
@runs_once
def limit_prefix(*prefix_list):
"""
Limits cached packages to those that
match one or more prefix strings
"""
env.package_descriptors = filter(
lambda pkg: any(pkg.name.startswith(prefix)
for prefix in prefix_list), env.package_descriptors)
@task(default=True)
@runs_once
def installed_packages(prefix=None):
"""
Cache the set of packages installed on the selected host.
"""
pkg_info = PackageInfo()
env.package_descriptors = [
package for package in pkg_info.installed_packages()
if prefix is None or package.name.startswith(prefix)
]
@task
@runs_once
def from_strings(**pkg_revs):
"""
Cache packages based on strings that can either be checked with confirm
or deployed with deploy.
Each named argument specifies a package by name, and the revision of
the package to deploy
"""
packages = []
for pkg_name, pkg_rev in pkg_revs.items():
packages.append(PackageDescriptor(pkg_name, pkg_rev))
env.package_descriptors = packages
notify(env.package_descriptors)
@task
@runs_once
def from_stdin(prefix=None):
"""
Cache a list of packages from stdin.
Package names must start with prefix, if specified (any that don't
will be skipped). Package names and revisions should be separated
by = signs, and should be one per line.
"""
if prefix:
prefix_msg = white('pkg_name', bold=True) + white(
' must start with ') + blue(prefix)
else:
prefix_msg = ''
fastprint('\n')
fastprint('\n'.join([
white('Please enter pkg_name=pkg_rev, one per line\n', bold=True),
white('pkg_rev', bold=True) + white(' is a git revision hash'),
prefix_msg,
white('Complete your selections by entering a blank line.'),
]))
fastprint('\n\n')
packages = {}
while True:
line = raw_input("> ")
if not line:
break
if '=' not in line:
fastprint(red("Expected = in '{line}'. Skipping...".format(
line=line)) + white('\n'))
continue
pkg_name, _, pkg_rev = line.partition('=')
pkg_name = pkg_name.strip()
pkg_rev = pkg_rev.strip()
if prefix and not pkg_name.startswith(prefix):
fastprint(red("'{0}' does not start with '{1}'".format(
pkg_name, prefix)) + white('\n'))
continue
packages[pkg_name] = pkg_rev
from_strings(**packages)
@task
@serial
@runs_once
def prompt(*pkg_names):
packages = {}
with unsquelched():
puts("Please supply git revisions to "
"deploy for the following packages:")
for pkg in pkg_names:
packages[pkg] = raw_input("{pkg} = ".format(pkg=pkg)).strip()
from_strings(**packages)
from output import notify
from fabric.api import abort
from fabric.colors import blue, cyan, green, red, white
from fabric.utils import fastprint
def choose(msg, options):
choices = range(len(options))
fastprint(white(msg, bold=True) + white("\n"))
for i, target in enumerate(options):
fastprint("{0}. {1}\n".format(i, target))
fastprint("x. Cancel\n")
user_input = raw_input("> ")
if user_input == 'x':
abort("Cancelled")
try:
choice = int(user_input)
except:
fastprint(red("Choice must be an integer"))
return None
if choice not in choices:
fastprint(red("Choice must be one of {0}".format(choices)))
return None
return options[choice]
def multi_choose_with_input(msg, options):
"""
Options:
msg - header message for the chooser
options - dictionary of options to select
User selects one of the keys in the dictionary,
a new value is read from stdin
"""
selections = options.keys()
user_input = None
while True:
fastprint('\n{0}{1}'.format(white(msg, bold=True), white("\n")))
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, item in enumerate(selections):
fastprint(" {0}. {1} : {2}".format(white(i, bold=True),
cyan(item), cyan(options[item], bold=True)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Continue") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'x':
return None
elif int(user_input) in range(len(selections)):
name = selections[int(user_input)]
fastprint(green('Enter new msg for ') +
cyan(name))
options[name] = raw_input(white(": "))
except:
notify("Invalid selection ->" + user_input + "<-")
return options
def multi_choose(msg, options):
fastprint(white(msg, bold=True) + white("\n"))
selected = [" " for option in options]
user_input = None
while True:
# The extra white("\n") prints are to reset
# the color for the timestamp line prefix
fastprint(white("\n"))
for i, target in enumerate(options):
fastprint(green(selected[i]))
fastprint(cyan(" {0}. {1}".format(i, target)) + white("\n"))
fastprint(blue(" a. Select all") + white("\n"))
fastprint(blue(" c. Deploy selections") + white("\n"))
fastprint(blue(" x. Cancel") + white("\n"))
fastprint(white("\n"))
user_input = raw_input("> ")
try:
if user_input == 'c':
break
elif user_input == 'a':
selected = ['*' for i in range(len(selected))]
elif user_input == 'x':
return None
elif int(user_input) in range(len(options)):
if selected[int(user_input)] == " ":
selected[int(user_input)] = "*"
else:
selected[int(user_input)] = " "
except:
notify("Invalid selection ->" + user_input + "<-")
pkgs = [options[s] for s in range(len(selected)) if selected[s] == '*']
return pkgs
from fabric.api import sudo, task, parallel
from safety import noopable
from modifiers import rolling
@task
@parallel
def apt_get_clean():
""" Runs apt-get clean on a remote server """
noopable(sudo)('apt-get clean')
@task
@rolling
def mako_template_cache():
noopable(sudo)('service gunicorn stop')
noopable(sudo)('rm -rf /tmp/tmp*mako')
noopable(sudo)('service gunicorn start')
import boto
from fabric.api import run, task, parallel, env
env.instance_ids = {}
def instance_id():
if env.host_string not in env.instance_ids:
env.instance_ids[env.host_string] = run('wget -q -O - http://169.254.169.254/latest/meta-data/instance-id')
return env.instance_ids[env.host_string]
import itertools
import os
import re
import socket
from functools import partial
from fabric.api import task, sudo, runs_once, execute
from fabric.api import cd, env, abort, parallel, prefix
from fabric.colors import white, green, red
from fabric.contrib import console, files
from fabric.operations import put
from fabric.utils import fastprint
from multiprocessing import Manager
from .choose import multi_choose
from .metrics import instance_tags_for_current_host
from .modifiers import rolling
from .output import notify
from .packages import PackageInfo
from .safety import noopable
from .timestamps import no_ts
REPO_URL = 'git@github.com:{}/{}'
REPO_DIRNAME = '/opt/wwc'
GIT_USER = "www-data"
AA_COMPLAIN = '/usr/sbin/aa-complain'
AA_ENFORCE = '/usr/sbin/aa-enforce'
AA_SANDBOX_POLICY = '/etc/apparmor.d/code.sandbox'
VIRTUAL_ENVS = ['/opt/edx', '/opt/edx-sandbox']
@task(default=True, aliases=['deploy'])
@runs_once
def default_deploy(**pkg_revs):
"""
Deploys the cached or specified packages to the
specified hosts
Packages are installed while the server is out of the
loadbalancer pool
"""
if pkg_revs:
execute('cache.from_strings', **pkg_revs)
if socket.gethostname() != 'buildmaster-001':
execute('git.confirm')
execute('git.deploy')
execute('locks.remove_all_locks')
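# Sketch of a typical run (the package name and revision are placeholders; 'tag',
# 'deploy', 'noop' and 'log' are defined or aliased in the top-level fabfile):
#
#   fab noop log tag:environment=stage,play=edxapp deploy:mitx=0123abc
#
# 'noop' previews the run without modifying servers; without it, the confirm step
# runs and each host is updated while it is out of the load balancer pool.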
def diff_link(pkg_org, pkg_name, old_revision, new_revision):
if '~' in pkg_name:
pkg_name = re.sub('~.*', '', pkg_name)
return 'Show on github: https://github.com/{org}/{pkg}/compare/{old}' \
'...{new}'.format(org=pkg_org,
pkg=pkg_name,
old=old_revision,
new=new_revision)
@task
@parallel
def diff_installed_packages(results):
pkg_info = PackageInfo()
# { basename(repo_dir): PackageDescriptor()) ... }
old_pkgs = {pkg.name: pkg
for pkg in pkg_info.installed_packages()}
change_list = []
for new_pkg in env.package_descriptors:
if new_pkg.name in old_pkgs:
old = old_pkgs[new_pkg.name].revision
new = new_pkg.revision
change_list.append((new_pkg.name, old, new))
else:
change_list.append((new_pkg.name, None, new_pkg.revision))
results.append((tuple(change_list), env.host))
@task
@runs_once
def confirm():
"""
Generate diffs comparing what's installed to what's about to be installed,
and ask the user to confirm to continue.
Answering no aborts the entire operation
"""
execute('locks.wait_for_all_locks')
# turn off timestamps for the confirm prompt
with no_ts():
manager = Manager()
diffs = manager.list()
execute(diff_installed_packages, diffs)
local_diffs = list(diffs)
def sort_key(diff):
return diff[0]
local_diffs.sort(key=sort_key)
if not local_diffs:
execute('locks.remove_all_locks')
abort("Nothing to deploy")
choices = set()
pkg_info = PackageInfo()
servers_to_update = []
for key, group in itertools.groupby(local_diffs, key=sort_key):
servers = [d[1] for d in group]
servers_to_update.extend(servers)
for pkg, old, new in key:
choices.add(pkg)
notify(
" {name}: {diff}".format(
name=pkg,
old=old,
new=new,
diff=diff_link(pkg_info.org_from_name(pkg),
pkg_info.repo_from_name(pkg), old, new),
),
show_prefix=False
)
choices = list(choices)
if len(choices) > 1:
selection = multi_choose("Select one or more item numbers to mark"
"them with a '*' for deployment.\n"
"Note: none are selected by default.\n"
"Select 'c' to deploy "
"the items that are marked with a '*'.",
choices)
else:
selection = choices
if not selection:
notify('Removing all locks and aborting')
execute('locks.remove_all_locks')
abort('Operation cancelled by user')
pre_post = display_pre_post(selection)
env.pre_post = pre_post
notify("{noop}Updating servers [{servers}]:".format(
servers=", ".join(servers_to_update),
noop="[noop] " if env.noop else ''
), show_prefix=False)
if not console.confirm(
white('Please confirm the pre and post actions above',
bold=True),
default=True):
execute('locks.remove_all_locks')
abort('Operation cancelled by user')
env.package_descriptors = [
pkg for pkg in env.package_descriptors if pkg.name in selection]
def display_pre_post(choices):
"""
Displays list of pre and post checkout commands,
returns the list of commands in a dictionary
return({
'pre': [ 'cmd1', 'cmd2', ... ],
'post': [ 'cmd1', 'cmd2', ... ]
})
"""
pkg_info = PackageInfo()
pre_post = pkg_info.pre_post_actions(choices)
for stage in ['pre', 'post']:
if pre_post[stage]:
fastprint(green('{0}-checkout commands:\n'.format(stage),
bold=True) + green(' -> ') + green('\n -> '.join(
pre_post[stage])) + white('\n\n'))
else:
fastprint(green('WARNING', bold=True) +
green(' - no {0}-checkout commands for this set '
'of packages : '.format(stage, choices)) +
white('\n\n'))
return pre_post
@task
@rolling
def deploy(auto_migrate=False):
"""
Deploys the cached packages to the specified hosts.
Packages are installed while the server is out of the loadbalancer pool
"""
packages = env.package_descriptors
# If these are not defined it means that the confirm
# step was skipped, in this case we figure out pre and
# post steps here
if not hasattr(env, 'pre_post'):
pkg_config = PackageInfo()
env.pre_post = pkg_config.pre_post_actions([pkg.name
for pkg in packages])
contains_content = any(pkg.name.startswith('content') for pkg in packages)
contains_code = not all(pkg.name.startswith('content') for pkg in packages)
base_tags = [
'deploying_to_host:' + env.host,
] + instance_tags_for_current_host()
if contains_content:
base_tags.append('type:content')
if contains_code:
base_tags.append('type:code')
package_tags = base_tags + ['package:' + pkg.name for pkg in packages]
metric_name = 'fabric.deployment'
# pre checkout commands
with prefix("export GIT_SSH=/tmp/git.sh"):
for cmd in env.pre_post['pre']:
noopable(sudo)(cmd)
put(os.path.join(os.path.dirname(__file__), 'git.sh'),
'/tmp/git.sh', mode=0755, use_sudo=True)
for pkg in packages:
existing_repo = files.exists(pkg.repo_root, use_sudo=True)
repo_tags = base_tags + [
'package:' + pkg.name,
'existance:' + ('existing' if existing_repo else 'absent'),
]
if existing_repo:
if not files.exists(os.path.join(pkg.repo_root, '.git'),
use_sudo=True):
raise Exception("Repo root not a git repo - {0}".format(
os.path.join(pkg.repo_root, '.git')))
with cd(pkg.repo_root):
if pkg.revision == 'absent':
noopable(sudo)('rm -rf {0}'.format(pkg.repo_root))
else:
checkout(pkg.revision)
else:
with cd(os.path.dirname(pkg.repo_root)):
if pkg.revision != 'absent':
clone(pkg.repo_org, pkg.repo_name, pkg.name, pkg.revision)
if '~' in pkg.name:
_update_course_xml(pkg, pkg.name.split('~')[1])
_install_requirements(pkg)
_install_gemfile(pkg)
_install_npm_package(pkg)
# drop a file for puppet so it knows that
# code is installed for the service
# with cd('/etc/facter/facts.d'):
# pkg_config = PackageInfo()
# if pkg.repo_name in pkg_config.service_repos:
# # facts can't have dashes so they are converted
# # to underscores
# noopable(sudo)(
# 'echo "{0}_installed=true" > {0}_installed.txt'.format(
# pkg.repo_name.replace("-", "_")))
pkg_version()
# post checkout commands
with prefix("export GIT_SSH=/tmp/git.sh"):
for cmd in env.pre_post['post']:
noopable(sudo)(cmd)
if 'mitx' in [pkg.name for pkg in packages]:
# do not slow down content deploys by checking
# for migrations
execute('migrate_check.migrate_check', auto_migrate)
@task
def pkg_version():
"""
Drops an html/json version file on the remote system
"""
path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(path, '../version-script/version.py')) as f:
put(f, '/tmp/version.py', use_sudo=True)
noopable(sudo)('/usr/bin/python /tmp/version.py')
@task
def clone(repo_org, repo_name, name, revision):
"""
Parameters:
repo_name: github organization
repo_name: github repo name
name: basename(repo_dir) ex: content-mit-600x~Fall_2012
revision: commit hash
"""
with no_ts():
with prefix("export GIT_SSH=/tmp/git.sh"):
noopable(sudo)("git clone {} {}".format(
REPO_URL.format(repo_org, repo_name), name))
with cd(name):
noopable(sudo)("git reset --hard {0}".format(revision))
if files.exists('.gitmodules', use_sudo=True):
noopable(sudo)("git submodule update --init")
noopable(sudo)("chown -R {0}:{0} .".format(GIT_USER))
@task
def checkout(revision):
"""
Parameters:
revision: commit hash
"""
with no_ts():
with prefix("export GIT_SSH=/tmp/git.sh"):
noopable(sudo)("git remote prune origin")
noopable(sudo)("git fetch origin")
noopable(sudo)("git reset --hard {0}".format(revision))
if files.exists('.gitmodules', use_sudo=True):
noopable(sudo)("git submodule update --init")
noopable(sudo)("chown -R {0}:{0} .".format(GIT_USER))
def _update_course_xml(pkg, root):
"""
Parameters:
pkg: a single package descriptor
root: a root that must exist in the roots/ folder
"""
with cd(pkg.repo_root):
if files.exists(
os.path.join(pkg.repo_root, 'roots/{0}.xml'.format(root)),
use_sudo=True):
noopable(sudo)('rm -f course.xml && '
'ln -s roots/{0}.xml course.xml'.format(root))
else:
abort(red("There is a '~' in {0} but there is no roots/{1}.xml "
"file in the repo!".format(pkg.name, root)))
def _install_requirements(pkg):
"""
Parameters:
pkg: single package descriptor
install pre-requirements and requirements
if they exist for the repo.
will not run pip install if the requirements file
has not changed since the last run
Turns off apparmor enforcement during pip install
for the sandbox virtualenv
"""
def pip_install(file, venv='/opt/edx'):
with prefix("export GIT_SSH=/tmp/git.sh"):
with prefix('source {}'.format(os.path.join(venv, 'bin/activate'))):
with prefix('export PIP_DOWNLOAD_CACHE=/tmp/pip_download_cache'):
noopable(sudo)('pip install --exists-action w -r {0}'.format(file))
if files.exists(AA_COMPLAIN) and files.exists(AA_SANDBOX_POLICY):
# in order to install code-sandbox requirements the
# code sandbox apparmor policy must be temporarily
# suspended
sudo('{0} {1}'.format(AA_COMPLAIN, AA_SANDBOX_POLICY))
# Run new-style requirements
for venv in VIRTUAL_ENVS:
if not files.exists(venv):
# skip if the virtualenv doesn't exist
continue
venv_name = os.path.basename(venv)
pip_cmd_base = partial(pip_install, file='requirements/{}/base.txt'.format(venv_name), venv=venv)
pip_cmd_post = partial(pip_install, file='requirements/{}/post.txt'.format(venv_name), venv=venv)
_run_if_changed(pkg, 'requirements/{}/base.txt'.format(venv_name), pip_cmd_base,
'cat requirements/{}/*.txt'.format(venv_name))
_run_if_changed(pkg, 'requirements/{}/post.txt'.format(venv_name), pip_cmd_post,
'cat requirements/{}/*.txt'.format(venv_name))
if files.exists(AA_ENFORCE) and files.exists(AA_SANDBOX_POLICY):
sudo('{0} {1}'.format(AA_ENFORCE, AA_SANDBOX_POLICY))
@task
@runs_once
def deploy_with_puppet():
execute('git.confirm')
execute(_deploy_with_puppet)
execute('locks.remove_all_locks')
@task
@rolling
def _deploy_with_puppet():
execute('puppet')
execute('git.deploy')
def _install_gemfile(pkg):
"""
Parameters:
pkg: single package descriptor
Installs the Gemfile from the repo, if it exists.
Will not run if the Gemfile
has not changed since the last run
"""
def bundle_install():
with prefix('export PATH=/opt/www/.rbenv/bin:$PATH'):
with prefix('RBENV_ROOT=/opt/www/.rbenv'):
with prefix('which rbenv'):
with prefix('eval "$(rbenv init -)"'):
noopable(sudo)('gem install bundler')
noopable(sudo)('bundle install --binstubs')
_run_if_changed(pkg, 'Gemfile', bundle_install)
def _install_npm_package(pkg):
"""
Parameters:
pkg: single package descriptor
Installs the package.json from the repo, if it exists.
Will not run if the package.json has not changed since
the last run
"""
def package_install():
noopable(sudo)('npm install')
_run_if_changed(pkg, 'package.json', package_install)
def _run_if_changed(pkg, file, command, contents_command=None):
"""
Runs command if the contents of file
inside pkg have changed since the last time the command was run.
Commands are executed inside pkg.repo_root
"""
if contents_command is None:
contents_command = 'cat ' + file
with cd(pkg.repo_root):
path = os.path.join(pkg.repo_root, file)
if files.exists(path, use_sudo=True):
prev_md5_file = '/var/tmp/{0}-{1}.md5'.format(
pkg.repo_name.replace('/', '-'), file.replace('/', '-'))
md5_command = '{} | /usr/bin/md5sum'.format(contents_command)
if files.exists(prev_md5_file, use_sudo=True):
cur_md5 = sudo(md5_command)
prev_md5 = sudo('cat {0}'.format(prev_md5_file))
if cur_md5 == prev_md5:
return
command()
noopable(sudo)('{} > {}'.format(md5_command, prev_md5_file))
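# --- usage sketch (not part of the original file) ---
# Minimal illustration of how _run_if_changed is driven: it caches an md5 of
# the watched file under /var/tmp and only re-runs the callback when that
# checksum changes. The package name/revision below are hypothetical, and the
# PackageDescriptor import path is an assumption about this fabfile's layout.
def _example_npm_install_if_changed():
    from packages import PackageDescriptor  # assumed import path
    pkg = PackageDescriptor('edx-platform', '0123456789abcdef')

    def npm_install():
        noopable(sudo)('npm install')

    # First run: no cached md5, so npm_install executes and the checksum is
    # written; subsequent runs are skipped until package.json changes.
    _run_if_changed(pkg, 'package.json', npm_install)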
#!/bin/sh
exec ssh -i "/etc/git-identity" -o "StrictHostKeyChecking no" "$@"
import boto
from fabric.decorators import serial
from ssh_tunnel import setup_tunnel
import socket
from fabric.api import env, task, abort
from fabric.colors import red
import logging
def hosts_by_tag(tag, value):
"""
Return a list of all hosts that have the specified value for the specified
tag
"""
return hosts_by_tags(**{tag: value})
def hosts_by_tags(**tags):
"""
Return a list of all hosts that have the specified value for the specified
tags.
Tag values are allowed to include wildcards
If no variant tag is specified, this command will ignore all hosts
that have a variant specified.
"""
if 'env' in tags:
tags['environment'] = tags['env']
del(tags['env'])
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
for tag, value in tags.iteritems()
if value != '*'}):
for inst in res.instances:
if inst.state == "running":
if (inst.public_dns_name):
hosts.append(inst.public_dns_name)
else:
hosts.append(inst.private_dns_name)
print hosts
return hosts
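# --- usage sketch (not part of the original file) ---
# hosts_by_tags turns its keyword arguments into EC2 filters of the form
# {'tag:<name>': <value>}, dropping any argument whose value is '*' so a
# wildcard simply widens the match; 'env' is accepted as an alias for
# 'environment'. The tag values below are hypothetical.
def _example_prod_edxapp_hosts():
    # Roughly what `fab tag:environment=prod,group=edxapp <task>` resolves to.
    return hosts_by_tags(environment='prod', group='edxapp')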
def _fleet():
ec2 = boto.connect_ec2()
hosts = []
for res in ec2.get_all_instances():
for inst in res.instances:
if inst.state == "running":
try:
instance_name = inst.tags['Name']
except KeyError:
logging.warning("Instance with id {id} and {dns} has no assigned Name.".format(id=inst.id, dns=inst.public_dns_name))
# skip instances that have no Name tag to build an m.edx.org hostname from
continue
host_to_add = instance_name + "." + DOMAIN
# fallback to the public hostname if the m.edx.org
# name doesn't exist
try:
socket.gethostbyname(host_to_add.replace(':22',''))
except socket.error:
if inst.public_dns_name:
host_to_add = inst.public_dns_name
if host_to_add:
hosts.append(host_to_add)
return hosts
def exemplar(**tags):
"""
Return the hostname of one host from the specified set
of tags, or None if there is no such host
"""
hosts = hosts_by_tags(**tags)
if hosts:
return hosts[0]
else:
return None
@task(alias='exemplar')
def exemplar_from_tags(**tags):
env.hosts.append(exemplar(**tags))
@task(aliases=['tag', 'tags'])
def by_tags(**tags):
"""
Add all running hosts that match the tag names provided
as keyword arguments.
"""
env.hosts.extend(hosts_by_tags(**tags))
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
@task(aliases=['fleet'])
def fleet():
"""
Return a list of all hosts available and running via the default AWS
credentials.
Your ability to operate on these hosts will depend upon the ssh credentials
that you are using to drive fab. There is likely to be a mismatch between
what hosts you can see via IAM managed AWS credentials and which hosts
you can actually connect to even if you are using highly privileged
AWS pems.
"""
hosts = _fleet()
env.hosts.extend(hosts)
env.hosts.sort()
env.hosts = setup_tunnel(env.hosts)
import os
import socket
import time
from output import notify
from safety import noopable
from fabric.api import task, run, env, settings, sudo, abort
from fabric.api import runs_once, execute, serial, hide
MAX_SLEEP_TIME = 10
LOCK_FILE = '/opt/deploy/.lock'
@task
@runs_once
def wait_for_all_locks():
execute('locks.wait_for_lock', hosts=sorted(env.hosts))
@task
@runs_once
def remove_all_locks():
execute('locks.remove_lock', hosts=sorted(env.hosts, reverse=True))
@task
@serial
def remove_lock():
noopable(sudo)("test ! -f {0} || rm {0}".format(LOCK_FILE))
@task
@serial
def wait_for_lock():
if hasattr(env, 'deploy_user'):
lock_user = env.deploy_user
else:
lock_user = env.user
LOCK_ID = 'u:{user} h:{host} pid:{pid}'.format(user=lock_user,
host=socket.gethostname(),
pid=str(os.getpid()))
sleep_time = 0.1
timeout = 120
start_time = time.time()
with settings(warn_only=True):
while True:
wait_time = time.time() - start_time
# break if the lockfile is removed or if it belongs to this pid
# if it exists lock_status will have the file's contents
with hide('running', 'stdout', 'stderr', 'warnings'):
lock_status = run("test ! -f {lfile} || "
"(cat {lfile} && "
'grep -q "{lid}" {lfile})'.format(
lfile=LOCK_FILE,
lid=LOCK_ID))
if lock_status.succeeded:
noopable(sudo)('echo "{0}" > {1}'.format(
LOCK_ID, LOCK_FILE))
notify("Took lock")
break
elif wait_time >= timeout:
abort("Timeout expired, giving up")
lock_create_time = run("stat -c %Y {0}".format(LOCK_FILE))
delta = time.time() - float(lock_create_time)
(dhour, dsec) = divmod(delta, 3600)
notify("""
!! Deploy lockfile already exists ({lockfile}) !!
Waiting: {wait}s
Lockfile info: [ {owner} ]
Lock created: {dhour}h{dmin}m ago
""".format(
lockfile=LOCK_FILE,
wait=int(timeout - wait_time),
owner=lock_status,
dhour=int(dhour),
dmin=int(dsec / 60),
))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
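# --- usage sketch (not part of the original file) ---
# Typical way a deploy brackets its work with the lock tasks above; the
# namespaced task names match the execute() calls used elsewhere in this
# fabfile ('locks.*', 'git.deploy').
def _example_locked_deploy():
    execute('locks.wait_for_all_locks')
    try:
        execute('git.deploy')
    finally:
        execute('locks.remove_all_locks')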
from fabric.api import task, parallel, put, sudo
from safety import noopable
from .modifiers import rolling
from . import puppet  # assumed sibling module providing the checkin() task used below
from StringIO import StringIO
import json
__all__ = ['on', 'off','maintain_service','unmaintain_service']
services = ['lms','cms','lms-xml','lms-preview']
def set_maintenance(value):
noopable(put)(StringIO(json.dumps({'maintenance': value})), '/etc/facter/facts.d/mitx_maintenance.json', use_sudo=True)
@task
@parallel
def on():
"""
Enable maintenance mode
"""
set_maintenance(True)
puppet.checkin('maintenance')
@task
@parallel
def off():
"""
Disable maintenance mode
"""
set_maintenance(False)
puppet.checkin('maintenance')
@task
@rolling
def maintain_service(service):
"""
Puts a specified edxapp service into maintenance mode by replacing
its nginx sites-enabled link with a link to the maintenance vhost.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}-maintenance"
" /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("service nginx reload")
@task
@rolling
def unmaintain_service(service):
"""
Removes a specified edxapp service from maintenance mode by replacing
the appropriate link in /etc/nginx/sites-enabled.
"""
if service not in services:
raise Exception("Provided service not in the service inventory. "
"Acceptable values are {services}".format(
services=services
))
noopable(sudo)("rm -f /etc/nginx/sites-enabled/{service}-maintenance".format(
service=service))
noopable(sudo)("ln -s /etc/nginx/sites-available/{service}"
" /etc/nginx/sites-enabled/{service}".format(
service=service))
noopable(sudo)("service nginx reload")
import boto
from .ec2 import instance_id
def instance_tags_for_current_host():
"""
Returns the datadog style tags for the active host
"""
return instance_tags([instance_id()])
def instance_tags(instance_ids):
"""
Returns datadog style tags for the specified instances
"""
ec2 = boto.connect_ec2()
tags = set()
for res in ec2.get_all_instances(instance_ids):
for instance in res.instances:
ec2_tags = instance.tags
tags.add('instance_id:' + instance.id)
if 'group' in ec2_tags:
tags.add('fab-group:' + ec2_tags['group'])
if 'environment' in ec2_tags:
tags.add('fab-environment:' + ec2_tags['environment'])
if 'variant' in ec2_tags:
tags.add('fab-variant:' + ec2_tags['variant'])
return list(tags)
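# --- usage sketch (not part of the original file) ---
# The tag lists produced here are meant to be handed to dogapi; a hypothetical
# counter increment tagged with the current instance's metadata (assumes
# dog_stats_api has already been started with an API key):
def _example_report_deploy_event():
    from dogapi import dog_stats_api
    dog_stats_api.increment('fab.deploy.count',
                            tags=instance_tags_for_current_host())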
from fabric.api import task, sudo, runs_once, prefix, hide, abort
from fabric.contrib import console
from fabric.colors import white, green
from .safety import noopable
@task()
@runs_once
def migrate_check(auto_migrate=False):
"""
Checks whether migrations need to be run;
if they do, it will prompt to run them before
continuing.
Looks for " - Migrating" in the output of
the dry run.
"""
migration_cmd = "/opt/edx/bin/django-admin.py migrate --noinput " \
"--settings=lms.envs.aws --pythonpath=/opt/wwc/edx-platform"
with prefix("export SERVICE_VARIANT=lms"):
with hide('running', 'stdout', 'stderr', 'warnings'):
dryrun_out = sudo(migration_cmd + " --db-dry-run", user="www-data")
migrate = False
for chunk in dryrun_out.split('Running migrations for '):
if 'Migrating' in chunk:
print "!!! Found Migration !!!\n" + chunk
migrate = True
if migrate:
if auto_migrate or console.confirm(
green(migration_cmd) + white('\n') +
white('Run migrations? ', bold=True), default=True):
noopable(sudo)(migration_cmd, user='www-data')
import boto
import time
from collections import namedtuple
from fabric.api import task, execute, serial
from functools import wraps, partial
from safety import noopable
from output import notify
from dogapi import dog_stats_api
from .metrics import instance_tags
from .ec2 import instance_id
MAX_SLEEP_TIME = 1
LockedElb = namedtuple('LockedElb', 'name elb lock')
def await_elb_instance_state(lb, instance_id, awaited_state):
sleep_time = 0.1
start_time = time.time()
while True:
state = lb.get_instance_health([instance_id])[0].state
if state == awaited_state:
notify("Load Balancer {lb} is in awaited state {awaited_state}, proceeding.".format(
lb=lb.dns_name,
awaited_state=awaited_state
))
break
else:
notify("Checking again in {0} seconds. Elapsed time: {1}".format(sleep_time, time.time() - start_time))
time.sleep(sleep_time)
sleep_time *= 2
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
def rolling(func):
@task
@serial
@wraps(func)
def wrapper(*args, **kwargs):
elb = boto.connect_elb()
elbs = elb.get_all_load_balancers()
execute('locks.wait_for_all_locks')
inst_id = instance_id()
tags = ['task:' + func.__name__] + instance_tags([inst_id])  # instance_tags expects a list of instance ids
active_lbs = sorted(
lb
for lb in elbs
if inst_id in [info.id for info in lb.instances]
)
timer = partial(dog_stats_api.timer, tags=tags)
# Remove this node from the LB
for lb in active_lbs:
notify("Removing {id} from {lb}".format(id=inst_id, lb=lb))
with timer('rolling.deregister_instance'):
noopable(lb.deregister_instances)([inst_id])
noopable(await_elb_instance_state)(lb, inst_id, "OutOfService")
# Execute the operation
func(*args, **kwargs)
# Add this node back to the LBs
for lb in active_lbs:
notify("Adding {id} to {lb}".format(id=inst_id, lb=lb))
with timer('rolling.register_instance'):
noopable(lb.register_instances)([inst_id])
with timer('rolling.wait_for_start'):
# Wait for the node to come online in the LBs
for lb in active_lbs:
noopable(await_elb_instance_state)(lb, inst_id, "InService")
return wrapper
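# --- usage sketch (not part of the original file) ---
# A hypothetical task wrapped with @rolling: the wrapper waits for the deploy
# locks, pulls this instance out of every ELB it is registered with, runs the
# task body, then re-registers it and waits for InService, timing each phase
# in datadog.
@rolling
def example_restart_edxapp():
    from fabric.api import sudo  # sudo is not imported at module level here
    noopable(sudo)('service edxapp restart')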
import sys
from contextlib import contextmanager
from fabric.api import puts
class SquelchingStream(object):
def __init__(self, stream):
self.__dict__['stream'] = stream
self.__dict__['squelched'] = False
self.__dict__['needs_line_ending'] = False
def write(self, string):
if self.squelched:
self.stream.write('.')
self.stream.flush()
self.needs_line_ending = True
else:
if self.needs_line_ending:
self.needs_line_ending = False
self.stream.write('\n')
self.stream.write(string)
def __getattr__(self, attr):
return getattr(self.stream, attr)
def __setattr__(self, attr, val):
if attr in self.__dict__:
return object.__setattr__(self, attr, val)
return setattr(self.stream, attr, val)
sys.stdout = SquelchingStream(sys.stdout)
sys.stderr = SquelchingStream(sys.stderr)
def squelch():
sys.stdout.squelched = sys.stderr.squelched = True
def unsquelch():
sys.stdout.squelched = sys.stderr.squelched = False
@contextmanager
def unsquelched(stream=sys.stdout):
old_state = stream.squelched
stream.squelched = False
yield
stream.squelched = old_state
def notify(msg, show_prefix=None, end='\n', flush=False):
with unsquelched():
puts(msg, show_prefix, end, flush)
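# --- usage sketch (not part of the original file) ---
# After this module is imported, stdout/stderr are wrapped: squelch() folds
# subsequent writes into dots, while notify() (and the unsquelched() context
# manager) always print in full. Purely illustrative:
def _example_squelched_run():
    squelch()
    sys.stdout.write('chatty output becomes a dot')
    notify('deploy step finished')  # still printed normally
    unsquelch()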
import os
from fabric.api import run, settings, hide, sudo
from collections import defaultdict
import yaml
import re
MIN_REVISION_LENGTH = 7
class PackageInfo:
def __init__(self):
path = os.path.abspath(__file__)
with open(os.path.join(
os.path.dirname(path), '../package_data.yaml')) as f:
package_data = yaml.load(f)
# exhaustive list of MITx repos
self.repo_dirs = package_data['repo_dirs']
self.cmd_list = {
'pre': package_data['pre_checkout_regex'],
'post': package_data['post_checkout_regex']}
self.service_repos = package_data['service_repos']
def repo_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[1]
def org_from_name(self, name):
repos = []
for repo_root in self.repo_dirs:
if os.path.basename(repo_root) == name:
repos.append(self.repo_dirs[repo_root])
if len(repos) > 1:
raise Exception('Multiple repos found for name')
elif len(repos) == 0:
raise Exception('Repo not found for name')
else:
return repos[0].split('/')[0]
def pre_post_actions(self, pkgs):
"""
Returns a dictionary containing a list of
commands that need to be executed
pre and post checkout for one or more package names.
return({
'pre': [ 'cmd1', 'cmd2', ... ],
'post': [ 'cmd1', 'cmd2', ... ]
})
"""
cmds = defaultdict(list)
for stage in ['pre', 'post']:
for regex, cmd_templates in self.cmd_list[stage]:
for pkg in pkgs:
match = re.match(regex, pkg)
if match is None:
continue
cmds[stage].extend(
cmd.format(*match.groups(), **match.groupdict())
for cmd in cmd_templates
if cmd not in cmds[stage]
)
return(cmds)
def installed_packages(self):
"""
Returns the list of PackageDescriptors for the packages
installed on the system.
This is determined by looking at every package directory
we know about and checking its revision.
"""
with settings(hide('running'), warn_only=True):
revisions = sudo(
"""
for path in {0}; do
if [[ -d "$path/.git" ]]; then
echo $path $(cd $path && git rev-parse HEAD 2>/dev/null)
fi
done
""".format(' '.join(self.repo_dirs))).split('\n')
packages = [revline.strip().split(' ') for revline in revisions
if ' ' in revline.strip()]
return [PackageDescriptor(os.path.basename(path), revision)
for path, revision in packages]
class PackageDescriptor(object):
def __init__(self, name, revision):
if revision != 'absent' and len(revision) < MIN_REVISION_LENGTH:
raise Exception("Must use at least {0} characters "
"in revision to pseudo-guarantee uniqueness".format(
MIN_REVISION_LENGTH))
self.name = name
# Find the repo_root by name
# This assumes that basename(repo_root) is unique
# for all repo_roots. If this is not true an exception
# will be raised
pkg_info = PackageInfo()
repo_roots = []
for repo_dir in pkg_info.repo_dirs.keys():
if os.path.basename(repo_dir) == name:
repo_roots.append(repo_dir)
if len(repo_roots) != 1:
raise Exception("Unable to look up directory for repo")
self.repo_root = repo_roots[0]
self.repo_name = pkg_info.repo_dirs[self.repo_root].split('/')[1]
self.repo_org = pkg_info.repo_dirs[self.repo_root].split('/')[0]
self.revision = revision
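# --- usage sketch (not part of the original file) ---
# pre_post_actions matches each package name against the regexes loaded from
# package_data.yaml and returns de-duplicated 'pre' and 'post' command lists;
# the package names below are hypothetical but correspond to entries in that
# file.
def _example_pre_post_actions():
    pkg_info = PackageInfo()
    cmds = pkg_info.pre_post_actions(['edx-platform', 'xqueue'])
    # cmds['pre'] holds the service-stop snippets for both repos,
    # cmds['post'] the matching collectstatic / service-start snippets.
    return cmds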
from fabric.api import env
from output import notify
def noopable(fun):
if env.noop:
def noop(*args, **kwargs):
notify("Would have called: {fun}({args}, {kwargs})".format(
fun=fun.__name__,
args=", ".join(repr(a) for a in args),
kwargs=", ".join("=".join([key, repr(val)]) for key, val in kwargs.items()),
))
return noop
else:
return fun
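# --- usage sketch (not part of the original file) ---
# noopable consults env.noop (assumed to be set by a top-level "noop"-style
# task in this fabfile): when it is True the wrapped callable is replaced by
# a stub that only reports what would have run.
def _example_noop_aware_reload():
    from fabric.api import sudo
    noopable(sudo)('service nginx reload')  # only printed when env.noop is True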
from fabric.api import task, env, abort
from fabric.colors import red
import os
import re
@task(default=True)
def ssh(user=None):
if user is None:
user = env.user
if len(env.hosts) != 1:
abort(red('Please specify one host for ssh'))
for host in env.hosts:
host = re.sub(':(\d+)', r' -p\1 ', host)
os.system('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -l {0} {1}'.format(user, host))
from fabric.api import abort, env, fastprint
from fabric.colors import green, red, white
import subprocess
import shlex
import atexit
import time
import boto
import re
import socket
DOMAIN = 'm.edx.org:22'
class SSHTunnel:
port = 9000 # default starting port
tunnels = {}
def __init__(self, host, phost, user, lport=None):
if lport is not None:
SSHTunnel.port = lport
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect(('localhost', SSHTunnel.port))
s.shutdown(2)
# connection was successful so try a new port
SSHTunnel.port += 1
except socket.error:
self.lport = SSHTunnel.port
break
phost = re.sub(':(\d+)', r' -p\1 ', phost)
identities = ''
if env.key_filename:
# could be a list or a string
if isinstance(env.key_filename, basestring):
lst = [env.key_filename]
else:
lst = env.key_filename
identities = ' '.join('-i {f} '.format(f=f) for f in lst)
cmd = 'ssh -o UserKnownHostsFile=/dev/null ' \
'{ids}' \
'-o StrictHostKeyChecking=no -vAN -L {lport}:{host} ' \
'{user}@{phost}'.format(ids=identities, lport=self.lport,
host=host, user=user, phost=phost)
self.p = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
atexit.register(self.p.kill)
while not 'Entering interactive session' in self.p.stderr.readline():
if time.time() > start_time + 10:
abort(red("Unable to create ssh tunnel - `{0}`".format(cmd)))
def local(self):
return 'localhost:{lport}'.format(lport=self.lport)
def setup_tunnel(all_hosts, check_tag=True,
proxy_name=None, user=None, lport=None):
"""
Given all_hosts, checks whether any are proxy hosts
(when check_tag is True) and returns a modified list
of hosts, with localhost:port entries substituted for
tunneled hosts.
"""
if user is None:
user = env.user
ec2 = boto.connect_ec2()
# the proxy hosts
proxies = {}
if check_tag:
for res in ec2.get_all_instances(filters={'tag-key': 'proxy'}):
for inst in res.instances:
host = ".".join([inst.tags['Name'], DOMAIN])
proxy = ".".join([inst.tags['proxy'], DOMAIN])
proxies.update({host: proxy})
else:
if not proxy_name:
raise Exception("Must specify a proxy_host")
proxies = {host: proxy_name for host in all_hosts}
# local tunneling ip:port
tunnels = {}
for host in all_hosts:
if host in proxies and host not in SSHTunnel.tunnels:
t = SSHTunnel(host=host, phost=proxies[host],
user=user, lport=lport)
tunnels[host] = t.local()
fastprint(green('created {0} for {1} via {2}'.format(tunnels[host],
host, proxies[host])) + white('\n'))
SSHTunnel.tunnels.update(tunnels)
return([SSHTunnel.tunnels[host] if host in SSHTunnel.tunnels else host
for host in all_hosts])
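# --- usage sketch (not part of the original file) ---
# setup_tunnel replaces any host that carries a 'proxy' EC2 tag with a
# localhost:<port> entry backed by a background ssh tunnel through its proxy
# host; untagged hosts pass through unchanged. The hostnames below are
# hypothetical.
def _example_tunnelled_hosts():
    return setup_tunnel(['app-1.m.edx.org:22', 'app-2.m.edx.org:22'])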
from fabric.api import task, sudo, abort, parallel, runs_once, execute
from fabric.api import settings, hide
from fabric.operations import put
from fabric.utils import fastprint
from safety import noopable
from fabric.colors import blue, red
from fabric.contrib import console
from output import unsquelched
from timestamps import no_ts
from choose import multi_choose_with_input
import json
import tempfile
status_file = '/opt/wwc/status_message.json'
@task(default=True)
@runs_once
def status():
"""
Drops {0} which is a json formatted file that contains a
status message that will be displayed to all users on
the courseware for a single course, or for all courses
if 'global' is set.
Message(s) are entered or removed interactively on the console.
Example usage:
$ fab groups:prod_edx status
""".format(status_file)
with hide('running', 'stdout', 'stderr', 'warnings'):
env_json = sudo("cat /opt/wwc/lms-xml.env.json")
course_listings = json.loads(env_json)['COURSE_LISTINGS']
course_ids = [course_id for course_list in course_listings.itervalues()
for course_id in course_list]
course_ids = ['global'] + course_ids
with no_ts():
course_status = None
with settings(warn_only=True):
cur_status = noopable(sudo)('cat {0}'.format(status_file))
try:
course_status = json.loads(cur_status)
# add empty entries for courses not in the list
empty_entries = set(course_ids) - set(course_status.keys())
course_status.update({entry: '' for entry in list(empty_entries)})
except ValueError:
fastprint(red("Not a valid json file, overwriting\n"))
if course_status is None:
course_status = {course: '' for course in course_ids}
new_status = multi_choose_with_input(
'Set the status message, blank to disable:',
course_status)
if new_status is not None:
# remove empty entries
new_status = {entry: new_status[entry]
for entry in new_status if len(new_status[entry]) > 1}
with unsquelched():
if not console.confirm(
'Setting new status message:\n{0}'.format(
blue(str(new_status), bold=True)),
default=False):
abort('Operation cancelled by user')
with tempfile.NamedTemporaryFile(delete=True) as f:
f.write(json.dumps(new_status))
f.flush()
execute(update_status, f.name)
else:
abort('Operation cancelled by user')
@task
@runs_once
def remove():
"""
Removes {0}, a status banner that is displayed to all
users on the front page.
""".format(status_file)
with unsquelched():
if not console.confirm(
blue('Remove {0}?'.format(status_file), bold=True)):
abort('Operation cancelled by user')
execute(remove_status)
@task
@parallel
def remove_status():
noopable(sudo)('rm -f {0}'.format(status_file))
@task
@parallel
def update_status(fjson):
print status_file
noopable(put)(fjson, status_file, use_sudo=True)
from datetime import datetime
from contextlib import contextmanager
import sys
@contextmanager
def no_ts():
sys.stdout.ts = False
yield
sys.stdout.ts = True
class TSWrapper(object):
def __init__(self, stream):
self.o = stream
self.files = []
self.files.append(self.o)
self.newline = True
self.ts = True
def write(self, s):
d = datetime.now()
if self.ts:
buf = ""
lines = s.splitlines(True)
for line in lines:
if self.newline:
buf += d.strftime('[ %Y%m%d %H:%M:%S ] : {0}'.format(line))
else:
buf += str(line)
if line[-1] == '\n':
self.newline = True
else:
self.newline = False
else:
buf = s
for fh in self.files:
fh.write(buf)
fh.flush()
def log_to_file(self, fn):
fp = open(fn, 'a')
self.files.append(fp)
def __getattr__(self, attr):
return getattr(self.o, attr)
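# --- usage sketch (not part of the original file) ---
# TSWrapper is intended to wrap sys.stdout so every line gets a timestamp
# prefix, with no_ts() suspending the prefix (as the deploy tasks above do);
# the wiring below is an assumption about how the fabfile installs it.
def _example_install_ts_wrapper():
    sys.stdout = TSWrapper(sys.stdout)
    sys.stdout.log_to_file('/tmp/fab-deploy.log')  # also mirror output to a file
    with no_ts():
        print 'raw line without a timestamp prefix'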
# datadog integration configuration
datadog_api: ''
# repo_dirs : Exhaustive list of repo-dirs and their corresponding repo-names
# the repos are specified by basename(repo_dir), therefore basename(repo_dir)
# under repo_dirs must be unique.
# The following repos have upstart services with the same name
service_repos:
- edx-platform
- grading-controller
- xqueue
- xserver
- certificates
- latex2edx
- analytics-experiments
repo_dirs:
/opt/wwc/analytics-experiments: MITx/analytics-experiments
/opt/wwc/grading-controller: edx/grading-controller
/opt/wwc/ease: edx/ease
/opt/wwc/edx-platform: edx/edx-platform
/opt/wwc/data/cs50: MITx/cs50
/opt/wwc/data/edx4edx: MITx/edx4edx
/opt/wwc/data/700x: MITx/700x
/opt/wwc/xqueue: edx/xqueue
/opt/wwc/xserver: edx/xserver
/opt/wwc/certificates: edx/certificates
/opt/wwc/drupal: MITx/drupal
/opt/wwc/latex2edx: edx/latex2edx_xserver
/opt/wwc/worker: edx/edx-platform
/opt/sysadmin: MITx/sysadmin
# {pre,post}_checkout_regex:
# What actions to take before and after a repo checkout.
# Commands will be grouped together if multiple matches are made
# with the duplicates removed.
#
# For example a deploy of 'edx-platform' and 'grading-controller' will execute
# all pre_checkout_regex commands before the checkouts and then
# execute post_checkout_regex commands after.
#
# Commands will be displayed to the user during the confirm step.
#
#
pre_checkout_regex: !!omap
- ^edx-platform$|^content-.*$:
- |
edxapp_status=$(service edxapp status 2>/dev/null || true)
edx_workers_status=$(service edx-workers status 2>/dev/null || true)
if [[ -n $edxapp_status ]]; then
if [[ $edxapp_status == *running* ]]; then
service edxapp stop;
fi
fi
if [[ -n $edx_workers_status ]]; then
if [[ $edx_workers_status == *running* ]]; then
service edx-workers stop;
fi
fi
- ^grading-controller$|^ease$:
- if [[ $(service grader status) == *running* ]]; then
service grader stop;
fi
- if [[ $(service pull_from_xqueue status) == *running* ]]; then
service pull_from_xqueue stop;
fi
- if [[ $(service ml_grader status) == *running* ]]; then
service ml_grader stop;
fi
- if [[ $(service expire_old status) == *running* ]]; then
service expire_old stop;
fi
- if [[ $(service ml_creator status) == *running* ]]; then
service ml_creator stop;
fi
- ^xqueue$:
- if [[ $(service xqueue status) == *running* ]]; then
service xqueue stop;
fi
- if [[ $(service xqueue_consumer status) == *running* ]]; then
service xqueue_consumer stop;
fi
- ^certificates$:
- if [[ $(service certificates status) == *running* ]]; then
service certificates stop;
fi
- ^xserver|^content-mit-600x.*$:
- if [[ $(service xserver status) == *running* ]]; then
service xserver stop;
fi
- ^latex2edx$:
- if [[ $(service latex2edx status) == *running* ]]; then
service latex2edx stop;
fi
- ^analytics-experiments$:
- if [[ $(service analytics status) == *running* ]]; then
service analytics stop;
fi
post_checkout_regex: !!omap
- ^sandbox$|^staging$|^production$|^hiera$:
- chown -R puppet:puppet /etc/puppet
- ^(?P<repo>content-.*)$:
- |
COLLECT_STATIC_PKG_DIR=/opt/wwc/staticfiles/{repo}
COURSE_STATIC_PKG_DIR=/opt/wwc/course_static/{repo}
SOURCE_PKG_DIR=/opt/wwc/data/{repo}
# Move from course static data in the collectstatic root
# to being symlinked in a separate directory
if [[ -e $COLLECT_STATIC_PKG_DIR ]]; then
rm -rf "$COLLECT_STATIC_PKG_DIR"
fi
# If a course adds or removes a static subdirectory,
# we want to adjust the symlink
if [[ -e $COURSE_STATIC_PKG_DIR ]]; then
rm -rf "$COURSE_STATIC_PKG_DIR"
fi
if [[ -h $COURSE_STATIC_PKG_DIR ]]; then
rm "$COURSE_STATIC_PKG_DIR"
fi
if [[ -d $SOURCE_PKG_DIR/static ]]; then
ln -s $SOURCE_PKG_DIR/static $COURSE_STATIC_PKG_DIR
else
ln -s $SOURCE_PKG_DIR $COURSE_STATIC_PKG_DIR
fi
- ^edx-platform$|^content-.*$:
- |
#install local requirements for the platform
if [[ -e /opt/wwc/edx-platform/requirements/edx/local.txt ]]; then
cd /opt/wwc/edx-platform
sudo -E /opt/edx/bin/pip install -q --upgrade --no-deps --ignore-installed \
--exists-action w -r /opt/wwc/edx-platform/requirements/edx/local.txt
fi
#install local requirements for the sandbox
if [[ -e /etc/apparmor.d/code.sandbox ]]; then
sudo /usr/sbin/aa-complain /etc/apparmor.d/code.sandbox
if [[ -e /opt/wwc/edx-platform/requirements/edx-sandbox/local.txt ]]; then
cd /opt/wwc/edx-platform
sudo -E /opt/edx-sandbox/bin/pip install -q --upgrade --no-deps --ignore-installed \
--exists-action w -r /opt/wwc/edx-platform/requirements/edx-sandbox/local.txt
fi
sudo /usr/sbin/aa-enforce /etc/apparmor.d/code.sandbox
fi
- ^edx-platform$:
- |
# On servers running the workers, which run from the same cloned
# repo, we don't want to run collectstatic. This could cause
# problems because the caches are shared with the edx-platform instances.
# It's also simply not necessary for the workers, as they have no
# view component.
edx_workers_status=$(service edx-workers status 2>/dev/null || true)
# git.py already handles prerequisites, so don't let rake try to install them as well
if [[ -z $edx_workers_status ]]; then
export NO_PREREQ_INSTALL=1
# This is not a developer workspace, so we don't want to do the workspace migrations
# Instead, we rely on having a clean checkout every time
export SKIP_WS_MIGRATIONS=1
export RBENV_ROOT=/opt/www/.rbenv
# Use rbenv binaries
export PATH=$RBENV_ROOT/shims:$RBENV_ROOT/bin:$PATH
# Use binaries installed in rbenv
export PATH=/opt/wwc/edx-platform/bin:$PATH
# Use binaries installed in virtualenv
export PATH=/opt/edx/bin:$PATH
eval "$(rbenv init -)"
# {{}} is used here because of python .format upstream
LMS_VARIANT="$(ls -1 /opt/wwc/lms*.env.json | tail -1 | xargs -i basename {{}} .env.json)"
export RUN="sudo -E -u www-data env SERVICE_VARIANT=$LMS_VARIANT PATH=$PATH"
cd /opt/wwc/edx-platform
# If gather_assets is available, run it (to compile coffee and sass, and then run collectstatic)
if $(rake -T | grep --quiet gather_assets); then
$RUN rake lms:gather_assets:aws
# Otherwise, we're still using django pipeline, so just run collectstatic [TODO: Remove this clause when gather_assets gets to prod]
elif $($RUN django-admin.py help collectstatic --pythonpath=/opt/wwc/edx-platform --settings=lms.envs.aws &>/dev/null); then
$RUN django-admin.py collectstatic \
--pythonpath=/opt/wwc/edx-platform --settings=lms.envs.aws \
--noinput --verbosity=0
fi
fi
- |
# On servers running the workers, which run from the same cloned
# repo, we don't want to run collect static. This could cause
# problems because the cache's are shared with the edx-platform instances
# it's also simply not necessary for the workers as they have no
# view component.
edx_workers_status=$(service edx-workers status 2>/dev/null || true)
# git.py already handles prerequisites, so don't let rake try to install them as well
if [[ -z $edx_workers_status ]]; then
export NO_PREREQ_INSTALL=1
export SKIP_WS_MIGRATIONS=1
export RBENV_ROOT=/opt/www/.rbenv
# Use rbenv binaries
export PATH=$RBENV_ROOT/shims:$RBENV_ROOT/bin:$PATH
# Use binaries installed in rbenv
export PATH=/opt/wwc/edx-platform/bin:$PATH
# Use binaries installed in virtualenv
export PATH=/opt/edx/bin:$PATH
eval "$(rbenv init -)"
export RUN="sudo -E -u www-data env SERVICE_VARIANT=cms PATH=$PATH"
cd /opt/wwc/edx-platform
# If gather_assets is available, run it (to compile coffee and sass, and then run collectstatic)
if $(rake -T | grep --quiet gather_assets); then
$RUN rake cms:gather_assets:aws
# Otherwise, we're still using django pipeline, so just run collectstatic [TODO: Remove this clause when gather_assets gets to prod]
elif $($RUN django-admin.py help collectstatic --pythonpath=/opt/wwc/edx-platform --settings=cms.envs.aws &>/dev/null) && [[ -r /opt/wwc/cms.auth.json ]]; then
$RUN django-admin.py collectstatic \
--pythonpath=/opt/wwc/edx-platform --settings=cms.envs.aws \
--noinput --verbosity=0
fi
fi
- ^edx-platform$|^content-.*$:
- |
edxapp_status=$(service edxapp status 2>/dev/null || true)
edx_workers_status=$(service edx-workers status 2>/dev/null || true)
if [[ -n $edxapp_status ]]; then
if [[ $edxapp_status == *stop* ]]; then
service edxapp start;
fi
fi
if [[ -n $edx_workers_status ]]; then
if [[ $edx_workers_status == *stop* ]]; then
service edx-workers start;
fi
fi
- ^grading-controller$|^ease$:
- |
if [[ ! -d /usr/share/nltk_data ]];then
source /opt/edx/bin/activate &&
python -m nltk.downloader -d /usr/share/nltk_data all;
fi
- |
if [[ -d /opt/wwc/.rbenv ]]; then
export RBENV_ROOT=/opt/wwc/.rbenv
export PATH=/opt/wwc/edx-platform/bin:$RBENV_ROOT/shims:$RBENV_ROOT/bin:$PATH
eval "$(rbenv init -)"
fi
if $(sudo -E -u www-data /opt/edx/bin/django-admin.py help collectstatic --pythonpath=/opt/wwc/grading-controller --settings=grading_controller.aws &>/dev/null); then
cd /opt/wwc/staticfiles
sudo -E -u www-data /opt/edx/bin/django-admin.py collectstatic \
--pythonpath=/opt/wwc/grading-controller --settings=grading_controller.aws \
--noinput --verbosity=0
fi
- |
if [[ $(service grader status) == *stop* ]]; then
service grader start;
fi
- |
if [[ $(service expire_old status) == *stop* ]]; then
service expire_old start;
fi
- |
if [[ $(service ml_grader status) == *stop* ]]; then
service ml_grader start;
fi
- |
if [[ $(service ml_creator status) == *stop* ]]; then
service ml_creator start;
fi
- |
if [[ $(service pull_from_xqueue status) == *stop* ]]; then
service pull_from_xqueue start;
fi
- ^xqueue$:
- |
if [[ $(service xqueue status) == *stop* ]];then
service xqueue start;
fi
- |
if [[ $(service xqueue_consumer status) == *stop* ]];then
service xqueue_consumer start;
fi
- ^xserver|^content-mit-600x.*$:
- |
if [[ $(service xserver status) == *stop* ]]; then
service xserver start;
fi
- ^certificates$:
- |
if [[ $(service certificates status) == *stop* ]];then
service certificates start;
fi
- ^latex2edx$:
- |
if [[ $(service latex2edx status) == *stop* ]];then
service latex2edx start;
fi
- ^analytics-experiments$:
- |
if [[ $(service analytics status) == *stop* ]];then
service analytics start;
fi
- ^drupal$:
- |
if [[ -r /opt/wwc/settings.php ]]; then
cp /opt/wwc/settings.php /opt/wwc/drupal/sites/default/settings.php;
fi;
chown -R root:www-data /opt/wwc/drupal &&
find /opt/wwc/drupal -type d -exec chmod u=rwx,g=rx,o= '{}' \; &&
find /opt/wwc/drupal -type f -exec chmod u=rw,g=r,o= '{}' \; &&
find /opt/wwc/drupal/sites -type d -name files -exec chmod ug=rwx,o= '{}' \; &&
for x in /opt/wwc/drupal/sites/*/files; do
find ${x} -type d -exec chmod ug=rwx,o= '{}' \; &&
find ${x} -type f -exec chmod ug=rw,o= '{}' \;;
done;
chmod g+w /opt/wwc/drupal/sites/default/tmp &&
if [[ -r /opt/wwc/drupal/db/edx.sql ]]; then
cd /opt/wwc/drupal &&
`drush sql-connect` < db/edx.sql;
fi
@@ -5,5 +5,5 @@
 [defaults]
 jinja2_extensions=jinja2.ext.do
-hash_behaviour=merge
 host_key_checking = False
+roles_path=../../../ansible-roles
../ansible.cfg
\ No newline at end of file
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first
[defaults]
jinja2_extensions=jinja2.ext.do
host_key_checking=False
roles_path=../../../ansible-roles
@@ -2,7 +2,14 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
 - certs
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -2,7 +2,14 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
 - common
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -16,3 +16,5 @@
 - ora
 - xqueue
 - xserver
+nginx_default_sites:
+- lms
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@gh_users.yml" -e "@/path/to/secure/ansible/vars/hotg.yml" -e "@/path/to/configuration-secure/ansible/vars/common/common.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
hosts: tag_role_bastion
sudo: True
gather_facts: False
roles:
- gh_users
# Configure an admin instance with jenkins and asgard.
- name: Configure instance(s)
hosts: tag_role_admin
sudo: True
gather_facts: True
roles:
- common
- gh_users
- jenkins_master
- hotg
@@ -2,7 +2,17 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
+- role: nginx
+nginx_sites:
+- discern
 - discern
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
-- name: Deploy ansible
+- name: Deploy the edx_ansible role
 hosts: all
 sudo: True
-gather_facts: True
+gather_facts: False
 roles:
 - edx_ansible
@@ -14,13 +14,15 @@
 - ora
 - xqueue
 - xserver
+nginx_default_sites:
+- lms
 - edxlocal
 - mongo
+- { role: 'edxapp', celery_worker: True }
 - edxapp
 - role: demo
 tags: ['demo']
 - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
-- { role: 'edxapp', celery_worker: True }
 - oraclejdk
 - elasticsearch
 - forum
@@ -29,3 +31,4 @@
 - ora
 - discern
 - certs
+- edx_ansible
@@ -20,6 +20,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 EDXAPP_LMS_NGINX_PORT: 80
 EDXAPP_CMS_NGINX_PORT: 80
@@ -38,6 +40,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 celery_worker: True
......
@@ -12,6 +12,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 edx_platform_version: 'release'
@@ -29,6 +31,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 celery_worker: True
......
@@ -22,6 +22,8 @@
 - xqueue
 - xserver
 - ora
+nginx_default_sites:
+- lms
 - edxlocal
 - mongo
 - edxapp
......
@@ -2,18 +2,37 @@
 hosts: localhost
 connection: local
 gather_facts: False
-pre_tasks:
+vars:
+keypair: continuous-integration
+instance_type: m1.medium
+security_group: sandbox
+# ubuntu 12.04
+ami: ami-d0f89fb9
+region: us-east-1
+zone: us-east-1b
+instance_tags:
+environment: sandbox
+github_username: temp
+Name: sandbox-temp
+source: provisioning-script
+owner: temp
+root_ebs_size: 50
+dns_name: temp
+dns_zone: m.sandbox.edx.org
+name_tag: sandbox-temp
+elb: false
 roles:
 - role: launch_ec2
 keypair: "{{ keypair }}"
 instance_type: "{{ instance_type }}"
 security_group: "{{ security_group }}"
-ami_image: "{{ ami }}"
+ami: "{{ ami }}"
 region: "{{ region }}"
 instance_tags: "{{ instance_tags }}"
 root_ebs_size: "{{ root_ebs_size }}"
 dns_name: "{{ dns_name }}"
 dns_zone: "{{ dns_zone }}"
+zone: "{{ zone }}"
 terminate_instance: true
 instance_profile_name: sandbox
@@ -21,6 +40,8 @@
 hosts: launched
 sudo: True
 gather_facts: False
+vars:
+elb: false
 pre_tasks:
 - name: Wait for cloud-init to finish
 wait_for: >
@@ -32,6 +53,7 @@
 - roles/ora/defaults/main.yml
 - roles/xqueue/defaults/main.yml
 - roles/xserver/defaults/main.yml
+- roles/forum/defaults/main.yml
 roles:
 # rerun common to set the hostname, nginx to set basic auth
 - common
@@ -42,6 +64,9 @@
 - ora
 - xqueue
 - xserver
+- forum
+nginx_default_sites:
+- lms
 # gh_users hash must be passed
 # in as a -e variable
 - gh_users
@@ -57,7 +82,7 @@
 sudo: False
 - name: register instance into an elb if one was provided
 local_action:
-module: ec2_elb
+module: ec2_elb_local_1.3
 region: "{{ region }}"
 instance_id: "{{ ec2_info.instance_ids[0] }}"
 state: present
......
@@ -30,6 +30,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 migrate_db: '{{ RUN_EDXAPP_MIGRATION }}'
@@ -49,6 +51,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_site:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 - splunkforwarder
@@ -65,6 +69,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_site:
+- lms
 - role: 'edxapp'
 edxapp_lms_env: 'lms.envs.load_test'
 celery_worker: True
......
@@ -2,7 +2,20 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
+- role: nginx
+nginx_sites:
+- lms
+- cms
+nginx_default_sites:
+- lms
 - edxapp
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -2,7 +2,17 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
+- role: nginx
+nginx_sites:
+- forum
 - forum
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
---
# Deploys gerrit on to a server.
#
# Usage:
# ansible-playbook gerrit_deploy.yml -i gerrit_inventory.ini -e "secure_dir=/path/to/secure/dir"
- name: Install and configure gerrit
hosts: gerrit
sudo: True
gather_facts: True
vars_files:
- "{{ secure_dir }}/vars/gerrit.yml"
pre_tasks:
- name: update apt
apt: update_cache=yes
roles:
- gerrit
@@ -5,4 +5,7 @@
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
+- role: nginx
+nginx_sites:
+- ora
 - ora
- name: Stop all services
hosts: all
sudo: True
gather_facts: False
roles:
- stop_all_edx_services
@@ -2,8 +2,15 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
 - role: edxapp
 celery_worker: True
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -2,7 +2,17 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
+- role: nginx
+nginx_sites:
+- xqueue
 - role: xqueue
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -2,7 +2,17 @@
 hosts: all
 sudo: True
 gather_facts: True
+vars:
+enable_datadog: True
+enable_splunkforwarder: True
 vars_files:
 - ["{{ secure_vars }}", "dummy.yml"]
 roles:
+- role: nginx
+nginx_sites:
+- xserver
 - role: xserver
+- role: datadog
+when: enable_datadog
+- role: splunkforwarder
+when: enable_splunkforwarder
@@ -39,6 +39,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - {'role': 'edxapp', 'openid_workaround': true, 'template_subdir': 'cme'}
 # run this role last
 # - in_production
@@ -21,6 +21,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - edxapp
 - ruby
 post_tasks:
......
@@ -32,6 +32,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - edxapp
 - apache
 - shibboleth
......
@@ -24,6 +24,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - edxapp
 - apache
 - shibboleth
......
@@ -19,6 +19,8 @@
 - lms
 - cms
 - lms-preview
+nginx_default_sites:
+- lms
 - edxapp
 - ansible_debug
 #- apache
......
@@ -18,14 +18,17 @@
 nginx_sites:
 - cms
 - lms
+- forum
 - ora
 - xqueue
+nginx_default_sites:
+- lms
 - edxlocal
 - mongo
+- { role: 'edxapp', celery_worker: True }
 - edxapp
 - demo
 - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
-- { role: 'edxapp', celery_worker: True }
 - oraclejdk
 - elasticsearch
 - forum
......
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELB(s)
description:
- This module de-registers or registers an AWS EC2 instance from the ELB(s)
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
requirements: [ "boto" ]
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key' ]
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
import boto
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
aws_access_key=None, aws_secret_key=None, region=None):
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.module = module
self.instance_id = instance_id
self.region = region
self.lbs = self._get_instance_lbs(ec2_elbs)
# if there are no ELBs to operate on
# there will be no changes made
if len(self.lbs) > 0:
self.changed = True
else:
self.changed = False
def deregister(self, wait):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
lb.deregister_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'OutOfService')
def register(self, wait):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService')
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
def _await_elb_instance_state(self, lb, awaited_state):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
while True:
state = lb.get_instance_health([self.instance_id])[0].state
if state == awaited_state:
break
else:
time.sleep(1)
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def main():
module = AnsibleModule(
argument_spec=dict(
state={'required': True,
'choices': ['present', 'absent']},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
aws_secret_key={'default': None, 'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True},
aws_access_key={'default': None, 'aliases': ['ec2_access_key', 'access_key']},
region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS},
wait={'required': False, 'choices': BOOLEANS, 'default': True}
)
)
aws_secret_key = module.params['aws_secret_key']
aws_access_key = module.params['aws_access_key']
ec2_elbs = module.params['ec2_elbs']
region = module.params['region']
wait = module.params['wait']
if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
module.fail_json(msg="ELBs are required for registration")
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
if not region:
module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set."))
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key,
aws_secret_key, region=region)
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait)
elif module.params['state'] == 'absent':
elb_man.deregister(wait)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
requirements: [ "boto" ]
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key' ]
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
try:
import boto
import boto.ec2
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
aws_access_key=None, aws_secret_key=None, region=None):
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.module = module
self.instance_id = instance_id
self.region = region
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
def deregister(self, wait):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb) if wait else None
if initial_state and initial_state.state == 'InService':
lb.deregister_instances([self.instance_id])
else:
return
if wait:
self._await_elb_instance_state(lb, 'OutOfService', initial_state)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
def register(self, wait, enable_availability_zone):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
if wait:
initial_state = self._get_instance_health(lb)
if enable_availability_zone:
self._enable_availability_zone(lb)
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService', initial_state)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
def _enable_availability_zone(self, lb):
"""Enable the current instance's availability zone in the provided lb.
Returns True if the zone was enabled or False if no change was made.
lb: load balancer"""
instance = self._get_instance()
if instance.placement in lb.availability_zones:
return False
lb.enable_zones(zones=instance.placement)
# If successful, the new zone will have been added to
# lb.availability_zones
return instance.placement in lb.availability_zones
def _await_elb_instance_state(self, lb, awaited_state, initial_state):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
while True:
instance_state = self._get_instance_health(lb)
if not instance_state:
msg = ("The instance %s could not be put in service on %s."
" Reason: Invalid Instance")
self.module.fail_json(msg=msg % (self.instance_id, lb))
if instance_state.state == awaited_state:
# Check the current state against the initial state, and only set
# changed if they are different.
if (initial_state is None) or (instance_state.state != initial_state.state):
self.changed = True
break
elif self._is_instance_state_pending(instance_state):
# If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
and instance_state.reason_code == "Instance"):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
# instance's availability zone enabled. The exact reason why is
# described in InstanceState.description.
msg = ("The instance %s could not be put in service on %s."
" Reason: %s")
self.module.fail_json(msg=msg % (self.instance_id,
lb,
instance_state.description))
time.sleep(1)
def _is_instance_state_pending(self, instance_state):
"""
Determines whether the instance_state is "pending", meaning there is
an operation under way to bring it in service.
"""
# This is messy, because AWS provides no way to distinguish between
# an instance that is OutOfService because it's pending vs. OutOfService
# because it's failing health checks. So we're forced to analyze the
# description, which is likely to be brittle.
return (instance_state and 'pending' in instance_state.description)
def _get_instance_health(self, lb):
"""
Check instance health, should return status object or None under
certain error conditions.
"""
try:
status = lb.get_instance_health([self.instance_id])[0]
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidInstance':
return None
else:
raise
return status
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
endpoint = "ec2.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
ec2_conn = boto.ec2.EC2Connection(self.aws_access_key, self.aws_secret_key, region=connect_region)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
return ec2_conn.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
module = AnsibleModule(
argument_spec=dict(
state={'required': True,
'choices': ['present', 'absent']},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
ec2_secret_key={'default': None, 'aliases': ['aws_secret_key', 'secret_key'], 'no_log': True},
ec2_access_key={'default': None, 'aliases': ['aws_access_key', 'access_key']},
region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region']},
enable_availability_zone={'default': True, 'required': False, 'choices': BOOLEANS, 'type': 'bool'},
wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'}
)
)
# def get_ec2_creds(module):
# return ec2_url, ec2_access_key, ec2_secret_key, region
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
ec2_elbs = module.params['ec2_elbs']
region = module.params['region']
wait = module.params['wait']
enable_availability_zone = module.params['enable_availability_zone']
if module.params['state'] == 'present' and ec2_elbs is None:
module.fail_json(msg="ELBs are required for registration")
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key,
aws_secret_key, region=region)
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait, enable_availability_zone)
elif module.params['state'] == 'absent':
elb_man.deregister(wait)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
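A hypothetical rolling-deployment play, not part of this commit, showing how the ec2_elb module above is typically combined with ec2_facts, serial, and pre_tasks/post_tasks; the host group and role names are placeholders:
- hosts: webservers                # placeholder inventory group
  serial: 1                        # update one instance at a time
  pre_tasks:
    - name: Gather ec2 facts
      ec2_facts:
    - name: De-register the instance from its ELBs
      local_action: ec2_elb
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        state: 'absent'
  roles:
    - myrole                       # placeholder role
  post_tasks:
    - name: Re-register the instance using the ec2_elbs fact recorded above
      local_action: ec2_elb
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        ec2_elbs: "{{ item }}"
        state: 'present'
      with_items: ec2_elbs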
...@@ -121,7 +121,7 @@ options: ...@@ -121,7 +121,7 @@ options:
required: False required: False
default: 1 default: 1
aliases: [] aliases: []
monitor: monitoring:
version_added: "1.1" version_added: "1.1"
description: description:
- enable detailed monitoring (CloudWatch) for instance - enable detailed monitoring (CloudWatch) for instance
...@@ -185,7 +185,7 @@ options: ...@@ -185,7 +185,7 @@ options:
default: 'present' default: 'present'
aliases: [] aliases: []
root_ebs_size: root_ebs_size:
version_added: "1.4" version_added: "1.5"
description: description:
- size of the root volume in gigabytes - size of the root volume in gigabytes
required: false required: false
...@@ -193,7 +193,7 @@ options: ...@@ -193,7 +193,7 @@ options:
aliases: [] aliases: []
requirements: [ "boto" ] requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade, John Jarvis author: Seth Vidal, Tim Gerla, Lester Wade
''' '''
EXAMPLES = ''' EXAMPLES = '''
...@@ -210,17 +210,6 @@ EXAMPLES = ''' ...@@ -210,17 +210,6 @@ EXAMPLES = '''
group: webserver group: webserver
count: 3 count: 3
# Basic provisioning example with setting the root volume size to 50GB
- local_action:
module: ec2
keypair: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
root_ebs_size: 50
# Advanced example with tagging and CloudWatch # Advanced example with tagging and CloudWatch
- local_action: - local_action:
module: ec2 module: ec2
...@@ -231,7 +220,8 @@ EXAMPLES = ''' ...@@ -231,7 +220,8 @@ EXAMPLES = '''
wait: yes wait: yes
wait_timeout: 500 wait_timeout: 500
count: 5 count: 5
instance_tags: '{"db":"postgres"}' monitoring=yes' instance_tags: '{"db":"postgres"}'
monitoring=yes
# Multiple groups example # Multiple groups example
local_action: local_action:
...@@ -243,7 +233,8 @@ local_action: ...@@ -243,7 +233,8 @@ local_action:
wait: yes wait: yes
wait_timeout: 500 wait_timeout: 500
count: 5 count: 5
instance_tags: '{"db":"postgres"}' monitoring=yes' instance_tags: '{"db":"postgres"}'
monitoring=yes
# VPC example # VPC example
- local_action: - local_action:
...@@ -406,6 +397,7 @@ def create_instances(module, ec2): ...@@ -406,6 +397,7 @@ def create_instances(module, ec2):
else: else:
bdm = None bdm = None
# group_id and group_name are exclusive of each other # group_id and group_name are exclusive of each other
if group_id and group_name: if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
...@@ -416,9 +408,7 @@ def create_instances(module, ec2): ...@@ -416,9 +408,7 @@ def create_instances(module, ec2):
if group_name: if group_name:
grp_details = ec2.get_all_security_groups() grp_details = ec2.get_all_security_groups()
if type(group_name) == list: if type(group_name) == list:
# FIXME: this should be a nice list comprehension group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# also not py 2.4 compliant
group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name)
elif type(group_name) == str: elif type(group_name) == str:
for grp in grp_details: for grp in grp_details:
if str(group_name) in str(grp): if str(group_name) in str(grp):
...@@ -501,7 +491,7 @@ def create_instances(module, ec2): ...@@ -501,7 +491,7 @@ def create_instances(module, ec2):
if instance_tags: if instance_tags:
try: try:
ec2.create_tags(instids, module.from_json(instance_tags)) ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e: except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
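Since instance_tags is now declared with type 'dict' and handed straight to ec2.create_tags, tags can be written as a YAML mapping rather than a JSON string. A hedged sketch of the new form (keypair, AMI id, and tag values are illustrative):
- local_action:
    module: ec2
    keypair: mykey                 # illustrative key pair
    instance_type: m1.small
    image: ami-xxxxxxxx            # placeholder AMI id
    wait: yes
    monitoring: yes
    instance_tags:                 # plain YAML dict, no JSON string required
      db: postgres
      environment: stage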
...@@ -558,6 +548,10 @@ def terminate_instances(module, ec2, instance_ids): ...@@ -558,6 +548,10 @@ def terminate_instances(module, ec2, instance_ids):
""" """
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False changed = False
instance_dict_array = [] instance_dict_array = []
...@@ -576,8 +570,30 @@ def terminate_instances(module, ec2, instance_ids): ...@@ -576,8 +570,30 @@ def terminate_instances(module, ec2, instance_ids):
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True changed = True
return (changed, instance_dict_array, terminated_instance_ids) # wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances( \
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
try:
num_terminated = len(response.pop().instances)
except Exception, e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
return (changed, instance_dict_array, terminated_instance_ids)
def main(): def main():
...@@ -593,16 +609,16 @@ def main(): ...@@ -593,16 +609,16 @@ def main():
image = dict(), image = dict(),
kernel = dict(), kernel = dict(),
count = dict(default='1'), count = dict(default='1'),
monitoring = dict(choices=BOOLEANS, default=False), monitoring = dict(type='bool', default=False),
ramdisk = dict(), ramdisk = dict(),
wait = dict(choices=BOOLEANS, default=False), wait = dict(type='bool', default=False),
wait_timeout = dict(default=300), wait_timeout = dict(default=300),
ec2_url = dict(), ec2_url = dict(),
aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
aws_access_key = dict(aliases=['ec2_access_key', 'access_key']), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
placement_group = dict(), placement_group = dict(),
user_data = dict(), user_data = dict(),
instance_tags = dict(), instance_tags = dict(type='dict'),
vpc_subnet_id = dict(), vpc_subnet_id = dict(),
private_ip = dict(), private_ip = dict(),
instance_profile_name = dict(), instance_profile_name = dict(),
...@@ -612,33 +628,9 @@ def main(): ...@@ -612,33 +628,9 @@ def main():
) )
) )
ec2_url = module.params.get('ec2_url') # def get_ec2_creds(module):
aws_secret_key = module.params.get('aws_secret_key') # return ec2_url, ec2_access_key, ec2_secret_key, region
aws_access_key = module.params.get('aws_access_key') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
region = module.params.get('region')
# allow eucarc environment variables to be used if ansible vars aren't set
if not ec2_url and 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
# If we have a region specified, connect to its endpoint. # If we have a region specified, connect to its endpoint.
if region: if region:
...@@ -672,8 +664,8 @@ def main(): ...@@ -672,8 +664,8 @@ def main():
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array) module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)
# import module snippets
# this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import *
#<<INCLUDE_ANSIBLE_MODULE_COMMON>> from ansible.module_utils.ec2 import *
main() main()
...@@ -15,10 +15,8 @@ ...@@ -15,10 +15,8 @@
# #
# #
- name: analytics-server | stop the analytics service - name: stop the analytics service
service: name=analytics state=stopped service: name=analytics state=stopped
tags: deploy
- name: analytics-server | start the analytics service - name: start the analytics service
service: name=analytics state=started service: name=analytics state=started
tags: deploy
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | upload ssh script - name: upload ssh script
template: template:
src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }} src=tmp/{{ as_role_name }}.git_ssh.sh.j2 dest={{ as_git_ssh }}
force=yes owner=root group=adm mode=750 force=yes owner=root group=adm mode=750
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | install read-only ssh key required for checkout - name: install read-only ssh key required for checkout
copy: copy:
src={{ as_git_identity_path }} dest={{ as_git_identity_dest }} src={{ as_git_identity_path }} dest={{ as_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600 force=yes owner=ubuntu group=adm mode=0600
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
- name: analytics-server | checkout code - name: checkout code
git: git:
dest={{ as_code_dir }} repo={{ as_source_repo }} dest={{ as_code_dir }} repo={{ as_source_repo }}
version={{ as_version }} force=true version={{ as_version }} force=true
environment: environment:
GIT_SSH: $as_git_ssh GIT_SSH: $as_git_ssh
notify: analytics-server | restart the analytics service notify: restart the analytics service
notify: analytics-server | start the analytics service notify: start the analytics service
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | update src permissions - name: update src permissions
file: file:
path={{ as_code_dir }} state=directory owner={{ as_user }} path={{ as_code_dir }} state=directory owner={{ as_user }}
group={{ as_web_user }} mode=2750 recurse=yes group={{ as_web_user }} mode=2750 recurse=yes
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
file: path={{ as_git_identity_dest }} state=absent file: path={{ as_git_identity_dest }} state=absent
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics-server | remove ssh script - name: remove ssh script
file: path={{ as_git_ssh }} state=absent file: path={{ as_git_ssh }} state=absent
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
- name: analytics-server | install application requirements - name: install application requirements
pip: pip:
requirements={{ as_requirements_file }} requirements={{ as_requirements_file }}
virtualenv={{ as_venv_dir }} state=present virtualenv={{ as_venv_dir }} state=present
sudo: true sudo: true
sudo_user: "{{ as_user }}" sudo_user: "{{ as_user }}"
notify: analytics-server | start the analytics service notify: start the analytics service
tags: tags:
- analytics-server - analytics-server
- deploy
- install - install
- update - update
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
# #
# #
# Tasks for role analytics-server # Tasks for role analytics-server
# #
# Overview: # Overview:
# #
# Installs the edX analytics-server Django application which provides # Installs the edX analytics-server Django application which provides
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
# common role # common role
# #
# Depends upon the automated role # Depends upon the automated role
# #
# Example play: # Example play:
# #
# - name: Configure analytics instance(s) # - name: Configure analytics instance(s)
...@@ -37,79 +37,79 @@ ...@@ -37,79 +37,79 @@
# - common # - common
# - analytics-server # - analytics-server
# #
- name: analytics-server | install system packages - name: install system packages
apt: pkg={{','.join(as_debian_pkgs)}} state=present apt: pkg={{','.join(as_debian_pkgs)}} state=present
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | create analytics-server user {{ as_user }} - name: create analytics-server user {{ as_user }}
user: user:
name={{ as_user }} state=present shell=/bin/bash name={{ as_user }} state=present shell=/bin/bash
home={{ as_home }} createhome=yes home={{ as_home }} createhome=yes
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | setup the analytics-server env - name: setup the analytics-server env
template: template:
src=opt/wwc/analytics-server/{{ as_env }}.j2 src=opt/wwc/analytics-server/{{ as_env }}.j2
dest={{ as_home }}/{{ as_env }} dest={{ as_home }}/{{ as_env }}
owner="{{ as_user }}" group="{{ as_user }}" owner="{{ as_user }}" group="{{ as_user }}"
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | drop a bash_profile - name: drop a bash_profile
copy: > copy: >
src=../../common/files/bash_profile src=../../common/files/bash_profile
dest={{ as_home }}/.bash_profile dest={{ as_home }}/.bash_profile
owner={{ as_user }} owner={{ as_user }}
group={{ as_user }} group={{ as_user }}
# Awaiting next ansible release. # Awaiting next ansible release.
#- name: analytics-server | ensure .bashrc exists #- name: ensure .bashrc exists
# file: path={{ as_home }}/.bashrc state=touch # file: path={{ as_home }}/.bashrc state=touch
# sudo: true # sudo: true
# sudo_user: "{{ as_user }}" # sudo_user: "{{ as_user }}"
# tags: # tags:
# - analytics-server # - analytics-server
# - install # - install
# - update # - update
- name: analytics-server | ensure .bashrc exists - name: ensure .bashrc exists
shell: touch {{ as_home }}/.bashrc shell: touch {{ as_home }}/.bashrc
sudo: true sudo: true
sudo_user: "{{ as_user }}" sudo_user: "{{ as_user }}"
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | add source of analytics-server_env to .bashrc - name: add source of analytics-server_env to .bashrc
lineinfile: lineinfile:
dest={{ as_home }}/.bashrc dest={{ as_home }}/.bashrc
regexp='. {{ as_home }}/analytics-server_env' regexp='. {{ as_home }}/analytics-server_env'
line='. {{ as_home }}/analytics_server_env' line='. {{ as_home }}/analytics_server_env'
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | add source venv to .bashrc - name: add source venv to .bashrc
lineinfile: lineinfile:
dest={{ as_home }}/.bashrc dest={{ as_home }}/.bashrc
regexp='. {{ as_venv_dir }}/bin/activate' regexp='. {{ as_venv_dir }}/bin/activate'
line='. {{ as_venv_dir }}/bin/activate' line='. {{ as_venv_dir }}/bin/activate'
tags: tags:
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | install global python requirements - name: install global python requirements
pip: name={{ item }} pip: name={{ item }}
with_items: as_pip_pkgs with_items: as_pip_pkgs
tags: tags:
...@@ -117,8 +117,8 @@ ...@@ -117,8 +117,8 @@
- install - install
- update - update
- name: analytics-server | create config - name: create config
template: template:
src=opt/wwc/analytics.auth.json.j2 src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json dest=/opt/wwc/analytics.auth.json
mode=0600 mode=0600
...@@ -127,10 +127,10 @@ ...@@ -127,10 +127,10 @@
- analytics-server - analytics-server
- install - install
- update - update
- name: analytics-server | install service - name: install service
template: template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root owner=root group=root
- include: deploy.yml - include: deploy.yml tags=deploy
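Tagging the include applies the deploy tag to every task in deploy.yml, which is why the per-task deploy tags above could be dropped; a minimal sketch of how a deploy-only run is still possible (the playbook name is hypothetical):
# roles/analytics-server/tasks/main.yml (excerpt, as of this change)
- include: deploy.yml tags=deploy
# A deploy-only run can then be limited to these tasks with:
#   ansible-playbook analytics-server.yml --tags deploy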
...@@ -15,10 +15,8 @@ ...@@ -15,10 +15,8 @@
# #
# #
- name: analytics | stop the analytics service - name: stop the analytics service
service: name=analytics state=stopped service: name=analytics state=stopped
tags: deploy
- name: analytics | start the analytics service - name: start the analytics service
service: name=analytics state=started service: name=analytics state=started
tags: deploy
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | upload ssh script - name: upload ssh script
template: template:
src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }} src=tmp/{{ analytics_role_name }}.git_ssh.sh.j2 dest={{ analytics_git_ssh }}
force=yes owner=root group=adm mode=750 force=yes owner=root group=adm mode=750
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | install read-only ssh key required for checkout - name: install read-only ssh key required for checkout
copy: copy:
src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }} src={{ analytics_git_identity_path }} dest={{ analytics_git_identity_dest }}
force=yes owner=ubuntu group=adm mode=0600 force=yes owner=ubuntu group=adm mode=0600
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
- name: analytics | checkout code - name: checkout code
git: git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }} dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
version={{ analytics_version }} force=true version={{ analytics_version }} force=true
environment: environment:
GIT_SSH: $analytics_git_ssh GIT_SSH: $analytics_git_ssh
notify: analytics | restart the analytics service notify: restart the analytics service
notify: analytics | start the analytics service notify: start the analytics service
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | update src permissions - name: update src permissions
file: file:
path={{ analytics_code_dir }} state=directory owner={{ analytics_user }} path={{ analytics_code_dir }} state=directory owner={{ analytics_user }}
group={{ analytics_web_user }} mode=2750 recurse=yes group={{ analytics_web_user }} mode=2750 recurse=yes
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
file: path={{ analytics_git_identity_dest }} state=absent file: path={{ analytics_git_identity_dest }} state=absent
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
# #
# TODO: Needed while this repo is private # TODO: Needed while this repo is private
# #
- name: analytics | remove ssh script - name: remove ssh script
file: path={{ analytics_git_ssh }} state=absent file: path={{ analytics_git_ssh }} state=absent
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
- name: analytics | install application requirements - name: install application requirements
pip: pip:
requirements={{ analytics_requirements_file }} requirements={{ analytics_requirements_file }}
virtualenv={{ analytics_venv_dir }} state=present virtualenv={{ analytics_venv_dir }} state=present
sudo: true sudo: true
sudo_user: "{{ analytics_user }}" sudo_user: "{{ analytics_user }}"
notify: analytics | start the analytics service notify: start the analytics service
tags: tags:
- analytics - analytics
- deploy
- install - install
- update - update
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
# #
# #
# Tasks for role analytics # Tasks for role analytics
# #
# Overview: # Overview:
# #
# Installs the edX analytics Django application which provides # Installs the edX analytics Django application which provides
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
# common role # common role
# #
# Depends upon the automated role # Depends upon the automated role
# #
# Example play: # Example play:
# #
# - name: Configure analytics instance(s) # - name: Configure analytics instance(s)
...@@ -37,79 +37,79 @@ ...@@ -37,79 +37,79 @@
# - common # - common
# - analytics # - analytics
# #
- name: analytics | install system packages - name: install system packages
apt: pkg={{','.join(analytics_debian_pkgs)}} state=present apt: pkg={{','.join(analytics_debian_pkgs)}} state=present
tags: tags:
- analytics - analytics
- install - install
- update - update
- name: analytics | create analytics user {{ analytics_user }} - name: create analytics user {{ analytics_user }}
user: user:
name={{ analytics_user }} state=present shell=/bin/bash name={{ analytics_user }} state=present shell=/bin/bash
home={{ analytics_home }} createhome=yes home={{ analytics_home }} createhome=yes
tags: tags:
- analytics - analytics
- install - install
- update - update
- name: analytics | setup the analytics env - name: setup the analytics env
template: template:
src=opt/wwc/analytics/{{ analytics_env }}.j2 src=opt/wwc/analytics/{{ analytics_env }}.j2
dest={{ analytics_home }}/{{ analytics_env }} dest={{ analytics_home }}/{{ analytics_env }}
owner="{{ analytics_user }}" group="{{ analytics_user }}" owner="{{ analytics_user }}" group="{{ analytics_user }}"
tags: tags:
- analytics - analytics
- install - install
- update - update
- name: analytics | drop a bash_profile - name: drop a bash_profile
copy: > copy: >
src=../../common/files/bash_profile src=../../common/files/bash_profile
dest={{ analytics_home }}/.bash_profile dest={{ analytics_home }}/.bash_profile
owner={{ analytics_user }} owner={{ analytics_user }}
group={{ analytics_user }} group={{ analytics_user }}
# Awaiting next ansible release. # Awaiting next ansible release.
#- name: analytics | ensure .bashrc exists #- name: ensure .bashrc exists
# file: path={{ analytics_home }}/.bashrc state=touch # file: path={{ analytics_home }}/.bashrc state=touch
# sudo: true # sudo: true
# sudo_user: "{{ analytics_user }}" # sudo_user: "{{ analytics_user }}"
# tags: # tags:
# - analytics # - analytics
# - install # - install
# - update # - update
- name: analytics | ensure .bashrc exists - name: ensure .bashrc exists
shell: touch {{ analytics_home }}/.bashrc shell: touch {{ analytics_home }}/.bashrc
sudo: true sudo: true
sudo_user: "{{ analytics_user }}" sudo_user: "{{ analytics_user }}"
tags: tags:
- analytics - analytics
- install - install
- update - update
- name: analytics | add source of analytics_env to .bashrc - name: add source of analytics_env to .bashrc
lineinfile: lineinfile:
dest={{ analytics_home }}/.bashrc dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_home }}/analytics_env' regexp='. {{ analytics_home }}/analytics_env'
line='. {{ analytics_home }}/analytics_env' line='. {{ analytics_home }}/analytics_env'
tags: tags:
- analytics - analytics
- install - install
- update - update
- name: analytics | add source venv to .bashrc - name: add source venv to .bashrc
lineinfile: lineinfile:
dest={{ analytics_home }}/.bashrc dest={{ analytics_home }}/.bashrc
regexp='. {{ analytics_venv_dir }}/bin/activate' regexp='. {{ analytics_venv_dir }}/bin/activate'
line='. {{ analytics_venv_dir }}/bin/activate' line='. {{ analytics_venv_dir }}/bin/activate'
tags: tags:
- analytics - analytics
- install - install
- update - update
- name: analytics | install global python requirements - name: install global python requirements
pip: name={{ item }} pip: name={{ item }}
with_items: analytics_pip_pkgs with_items: analytics_pip_pkgs
tags: tags:
...@@ -117,8 +117,8 @@ ...@@ -117,8 +117,8 @@
- install - install
- update - update
- name: analytics | create config - name: create config
template: template:
src=opt/wwc/analytics.auth.json.j2 src=opt/wwc/analytics.auth.json.j2
dest=/opt/wwc/analytics.auth.json dest=/opt/wwc/analytics.auth.json
mode=0600 mode=0600
...@@ -127,10 +127,10 @@ ...@@ -127,10 +127,10 @@
- analytics - analytics
- install - install
- update - update
- name: analytics | install service - name: install service
template: template:
src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf src=etc/init/analytics.conf.j2 dest=/etc/init/analytics.conf
owner=root group=root owner=root group=root
- include: deploy.yml - include: deploy.yml tags=deploy
--- ---
- name: ansible-role | check if the role exists - name: check if the role exists
command: test -d roles/{{ role_name }} command: test -d roles/{{ role_name }}
register: role_exists register: role_exists
ignore_errors: yes ignore_errors: yes
- name: ansible-role | prompt for overwrite - name: prompt for overwrite
pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or <CTRL>-c, then a, to abort." pause: prompt="Role {{ role_name }} exists. Overwrite? Touch any key to continue or <CTRL>-c, then a, to abort."
when: role_exists | success when: role_exists | success
- name: ansible-role | create role directories - name: create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory file: path=roles/{{role_name}}/{{ item }} state=directory
with_items: with_items:
- tasks - tasks
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
- templates - templates
- files - files
- name: ansible-role | make an ansible role - name: make an ansible role
template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml template: src={{ item }}/main.yml.j2 dest=roles/{{ role_name }}/{{ item }}/main.yml
with_items: with_items:
- tasks - tasks
......
...@@ -7,5 +7,5 @@ ...@@ -7,5 +7,5 @@
# Overview: # Overview:
# #
# #
- name: {{ role_name }} | notify me - name: notify me
debug: msg="stub handler" debug: msg="stub handler"
...@@ -14,6 +14,6 @@ ...@@ -14,6 +14,6 @@
# #
# #
- name: {{ role_name }} | stub ansible task - name: stub ansible task
debug: msg="This is a stub task created by the ansible-role role" debug: msg="This is a stub task created by the ansible-role role"
notify: {{ role_name }} | notify me notify: notify me
\ No newline at end of file
--- ---
- name: apache | restart apache - name: restart apache
service: name=apache2 state=restarted service: name=apache2 state=restarted
tags: deploy
# Requires nginx package # Requires nginx package
--- ---
- name: apache | Copying apache config {{ site_name }} - name: Copying apache config {{ site_name }}
template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }} template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
first_available_file: first_available_file:
- "{{ local_dir }}/apache/templates/{{ site_name }}.j2" - "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
# seems like paths in first_available_file must be relative to the playbooks dir # seems like paths in first_available_file must be relative to the playbooks dir
- "roles/apache/templates/{{ site_name }}.j2" - "roles/apache/templates/{{ site_name }}.j2"
notify: apache | restart apache notify: restart apache
when: apache_role_run is defined when: apache_role_run is defined
tags: tags:
- apache - apache
- update - update
- name: apache | Creating apache2 config link {{ site_name }} - name: Creating apache2 config link {{ site_name }}
file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
notify: apache | restart apache notify: restart apache
when: apache_role_run is defined when: apache_role_run is defined
tags: tags:
- apache - apache
......
#Installs apache and runs the lms wsgi #Installs apache and runs the lms wsgi
--- ---
- name: apache | Installs apache and mod_wsgi from apt - name: Installs apache and mod_wsgi from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items: with_items:
- apache2 - apache2
- libapache2-mod-wsgi - libapache2-mod-wsgi
notify: apache | restart apache notify: restart apache
tags: tags:
- apache - apache
- install - install
- name: apache | disables default site - name: disables default site
command: a2dissite 000-default command: a2dissite 000-default
notify: apache | restart apache notify: restart apache
tags: tags:
- apache - apache
- install - install
- name: apache | rewrite apache ports conf - name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: apache | restart apache notify: restart apache
tags: tags:
- apache - apache
- install - install
- name: apache | Register the fact that apache role has run - name: Register the fact that apache role has run
command: echo True command: echo True
register: apache_role_run register: apache_role_run
tags: tags:
......
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6noLNy7YVFNK6OSOFgPbnGGovgZqLsvJxjhs82jT7tZIsYOjVVCAMk0kkSnBt0etDjGSJlJ664r1aBhubZrujzxns0oOzA7J+tWQ3CiaOBLtOSffeh8a3dTWWNPCAGg9KflPaufXdd31Bf96g9ACGZR7uLYgWUP/J0jOPMCPE1RBfRNFeZ7cHlh3t/pI+JzTcyZTka4AAEsCejBKHngYxVoOk+gfxe+Qo703st0MFuoxVAMymeBGi/1lCwKsV6r9BijzuvIFyQCl2vThjoF32yHmmP8by//hmgpo5UNqG7jbmSrCJhkdh+My3SgEebn5c2QLJepOrUfrZFwz1BQ1l task@edx.org
\ No newline at end of file
...@@ -57,135 +57,87 @@ ...@@ -57,135 +57,87 @@
- fail: automated_sudoers_dest required for role - fail: automated_sudoers_dest required for role
when: automated_sudoers_dest is not defined when: automated_sudoers_dest is not defined
- name: automated | create automated user - name: create automated user
user: user:
name={{ automated_user }} state=present shell=/bin/rbash name={{ automated_user }} state=present shell=/bin/rbash
home={{ automated_home }} createhome=yes home={{ automated_home }} createhome=yes
tags:
- automated
- install
- update
- name: automated | create sudoers file from file - name: create sudoers file from file
copy: copy:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }} dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_file }} owner="root" src={{ automated_sudoers_file }} owner="root"
group="root" mode=0440 validate='visudo -cf %s' group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_file when: automated_sudoers_file
tags:
- automated
- install
- update
- name: automated | create sudoers file from template - name: create sudoers file from template
template: template:
dest=/etc/sudoers.d/{{ automated_sudoers_dest }} dest=/etc/sudoers.d/{{ automated_sudoers_dest }}
src={{ automated_sudoers_template }} owner="root" src={{ automated_sudoers_template }} owner="root"
group="root" mode=0440 validate='visudo -cf %s' group="root" mode=0440 validate='visudo -cf %s'
when: automated_sudoers_template when: automated_sudoers_template
tags:
- automated
- install
- update
# #
# Prevent user from updating their PATH and # Prevent user from updating their PATH and
# environment. # environment.
# #
- name: automated | update shell file mode - name: update shell file mode
file: file:
path={{ automated_home }}/{{ item }} mode=0640 path={{ automated_home }}/{{ item }} mode=0640
state=file owner="root" group={{ automated_user }} state=file owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
with_items: with_items:
- .bashrc - .bashrc
- .profile - .profile
- .bash_logout - .bash_logout
- name: automated | change ~automated ownership - name: change ~automated ownership
file: file:
path={{ automated_home }} mode=0750 state=directory path={{ automated_home }} mode=0750 state=directory
owner="root" group={{ automated_user }} owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
# #
# This ensures that the links are updated with each run # This ensures that the links are updated with each run
# and that links that were removed from the role are # and that links that were removed from the role are
# removed. # removed.
# #
- name: automated | remove ~automated/bin directory - name: remove ~automated/bin directory
file: file:
path={{ automated_home }}/bin state=absent path={{ automated_home }}/bin state=absent
ignore_errors: yes ignore_errors: yes
tags:
- automated
- install
- update
- name: automated | create ~automated/bin directory - name: create ~automated/bin directory
file: file:
path={{ automated_home }}/bin state=directory mode=0750 path={{ automated_home }}/bin state=directory mode=0750
owner="root" group={{ automated_user }} owner="root" group={{ automated_user }}
tags:
- automated
- install
- update
- name: automated | re-write .profile - name: re-write .profile
copy: copy:
src=home/automator/.profile src=home/automator/.profile
dest={{ automated_home }}/.profile dest={{ automated_home }}/.profile
owner="root" owner="root"
group={{ automated_user }} group={{ automated_user }}
mode="0744" mode="0744"
tags:
- automated
- install
- update
- name: automated | re-write .bashrc - name: re-write .bashrc
copy: copy:
src=home/automator/.bashrc src=home/automator/.bashrc
dest={{ automated_home }}/.bashrc dest={{ automated_home }}/.bashrc
owner="root" owner="root"
group={{ automated_user }} group={{ automated_user }}
mode="0744" mode="0744"
tags:
- automated
- install
- update
- name: automated | create .ssh directory - name: create .ssh directory
file: file:
path={{ automated_home }}/.ssh state=directory mode=0700 path={{ automated_home }}/.ssh state=directory mode=0700
owner={{ automated_user }} group={{ automated_user }} owner={{ automated_user }} group={{ automated_user }}
tags:
- automated - name: build authorized_keys file
- install template:
- update src=home/automator/.ssh/authorized_keys.j2
- name: automated | copy key to .ssh/authorized_keys
copy:
src=home/automator/.ssh/authorized_keys
dest={{ automated_home }}/.ssh/authorized_keys mode=0600 dest={{ automated_home }}/.ssh/authorized_keys mode=0600
owner={{ automated_user }} group={{ automated_user }} owner={{ automated_user }} group={{ automated_user }}
tags:
- automated - name: create allowed command links
- install
- update
- name: automated | create allowed command links
file: file:
src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }} src={{ item }} dest={{ automated_home }}/bin/{{ item.split('/').pop() }}
state=link state=link
with_items: automated_rbash_links with_items: automated_rbash_links
tags: \ No newline at end of file
- automated
- install
- update
# Install browsers required to run the JavaScript # Install browsers required to run the JavaScript
# and acceptance test suite locally without a display # and acceptance test suite locally without a display
--- ---
- name: browsers | install system packages - name: install system packages
apt: pkg={{','.join(browser_deb_pkgs)}} apt: pkg={{','.join(browser_deb_pkgs)}}
state=present update_cache=yes state=present update_cache=yes
- name: browsers | download browser debian packages from S3 - name: download browser debian packages from S3
get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}" get_url: dest="/tmp/{{ item.name }}" url="{{ item.url }}"
register: download_deb register: download_deb
with_items: "{{ browser_s3_deb_pkgs }}" with_items: browser_s3_deb_pkgs
- name: browsers | install browser debian packages - name: install browser debian packages
shell: gdebi -nq /tmp/{{ item.name }} shell: gdebi -nq /tmp/{{ item.name }}
when: download_deb.changed when: download_deb.changed
with_items: "{{ browser_s3_deb_pkgs }}" with_items: browser_s3_deb_pkgs
- name: browsers | Install ChromeDriver - name: Install ChromeDriver
get_url: get_url:
url={{ chromedriver_url }} url={{ chromedriver_url }}
dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip dest=/var/tmp/chromedriver_{{ chromedriver_version }}.zip
- name: browsers | Install ChromeDriver 2 - name: Install ChromeDriver 2
shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip shell: unzip /var/tmp/chromedriver_{{ chromedriver_version }}.zip
chdir=/var/tmp chdir=/var/tmp
- name: browsers | Install ChromeDriver 3 - name: Install ChromeDriver 3
shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver shell: mv /var/tmp/chromedriver /usr/local/bin/chromedriver
- name: browsers | Install Chromedriver 4 - name: Install Chromedriver 4
file: path=/usr/local/bin/chromedriver mode=0755 file: path=/usr/local/bin/chromedriver mode=0755
- name: browsers | create xvfb upstart script - name: create xvfb upstart script
template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root template: src=xvfb.conf.j2 dest=/etc/init/xvfb.conf owner=root group=root
- name: browsers | start xvfb - name: start xvfb
shell: start xvfb shell: start xvfb
ignore_errors: yes ignore_errors: yes
...@@ -14,11 +14,10 @@ ...@@ -14,11 +14,10 @@
# Overview: # Overview:
# #
- name: certs | restart certs - name: restart certs
supervisorctl_local: > supervisorctl_local: >
name=certs name=certs
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=restarted state=restarted
tags: deploy when: certs_installed is defined
--- ---
- name: certs | create certificate application config - name: create certificate application config
template: > template: >
src=certs.env.json.j2 src=certs.env.json.j2
dest={{ certs_app_dir }}/env.json dest={{ certs_app_dir }}/env.json
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
notify: certs | restart certs notify: restart certs
tags: deploy
- name: certs | create certificate auth file - name: create certificate auth file
template: > template: >
src=certs.auth.json.j2 src=certs.auth.json.j2
dest={{ certs_app_dir }}/auth.json dest={{ certs_app_dir }}/auth.json
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
notify: certs | restart certs notify: restart certs
tags: deploy
- name: certs | writing supervisor script for certificates - name: writing supervisor script for certificates
template: > template: >
src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf src=certs.conf.j2 dest={{ supervisor_cfg_dir }}/certs.conf
owner={{ supervisor_user }} mode=0644 owner={{ supervisor_user }} mode=0644
notify: certs | restart certs notify: restart certs
tags: deploy
- name: certs | create ssh script for git - name: create ssh script for git
template: > template: >
src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }} src={{ certs_git_ssh|basename }}.j2 dest={{ certs_git_ssh }}
owner={{ certs_user }} mode=750 owner={{ certs_user }} mode=750
notify: certs | restart certs notify: restart certs
tags: deploy
- name: certs | install read-only ssh key for the certs repo - name: install read-only ssh key for the certs repo
copy: > copy: >
src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }} src={{ CERTS_LOCAL_GIT_IDENTITY }} dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600 force=yes owner={{ certs_user }} mode=0600
notify: certs | restart certs notify: restart certs
tags: deploy
- name: certs | checkout certificates repo into {{ certs_code_dir }} - name: checkout certificates repo into {{ certs_code_dir }}
git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }} git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }}
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
environment: environment:
GIT_SSH: "{{ certs_git_ssh }}" GIT_SSH: "{{ certs_git_ssh }}"
notify: certs | restart certs notify: restart certs
tags: deploy
- name: certs | remove read-only ssh key for the certs repo - name: remove read-only ssh key for the certs repo
file: path={{ certs_git_identity }} state=absent file: path={{ certs_git_identity }} state=absent
notify: certs | restart certs notify: restart certs
tags: deploy
- name : install python requirements - name : install python requirements
pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present pip: requirements="{{ certs_requirements_file }}" virtualenv="{{ certs_venv_dir }}" state=present
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
notify: certs | restart certs notify: restart certs
tags: deploy
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
# the services if any of the configurations # the services if any of the configurations
# have changed. # have changed.
# #
- name: certs | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
sudo_user: "{{ supervisor_service_user }}" sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
- name: certs | ensure certs has started - name: ensure certs has started
supervisorctl_local: > supervisorctl_local: >
name=certs name=certs
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=started state=started
sudo_user: "{{ supervisor_service_user }}" sudo_user: "{{ supervisor_service_user }}"
- name: create a symlink for venv python
file: >
src="{{ certs_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
state=link
with_items:
- python
- pip
- set_fact: certs_installed=true
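The certs restart handler is now guarded by "when: certs_installed is defined", and that fact is only set at the very end of deploy.yml, so a notification cannot restart the service before a deploy has actually completed. A minimal sketch of the same guard pattern with generic, hypothetical names:
# handlers/main.yml
- name: restart myapp
  service: name=myapp state=restarted
  when: myapp_installed is defined          # skipped until the fact exists

# tasks/deploy.yml
- name: push the application config
  template: src=myapp.conf.j2 dest=/etc/myapp.conf
  notify: restart myapp

- set_fact: myapp_installed=true            # defined only once deploy has run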
...@@ -35,56 +35,46 @@ ...@@ -35,56 +35,46 @@
fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!" fail: msg="You must set CERTS_LOCAL_GIT_IDENTITY var for this role!"
when: not CERTS_LOCAL_GIT_IDENTITY when: not CERTS_LOCAL_GIT_IDENTITY
- name: certs | create application user - name: create application user
user: > user: >
name="{{ certs_user }}" name="{{ certs_user }}"
home="{{ certs_app_dir }}" home="{{ certs_app_dir }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
notify: certs | restart certs notify: restart certs
- name: certs | create certs app and data dirs - name: create certs app and data dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
owner="{{ certs_user }}" owner="{{ certs_user }}"
group="{{ common_web_group }}" group="{{ common_web_group }}"
notify: certs | restart certs notify: restart certs
with_items: with_items:
- "{{ certs_app_dir }}" - "{{ certs_app_dir }}"
- "{{ certs_venvs_dir }}" - "{{ certs_venvs_dir }}"
- name: certs | create certs gpg dir - name: create certs gpg dir
file: > file: >
path="{{ certs_gpg_dir }}" state=directory path="{{ certs_gpg_dir }}" state=directory
owner="{{ common_web_user }}" owner="{{ common_web_user }}"
mode=0700 mode=0700
notify: certs | restart certs notify: restart certs
- name: certs | copy the private gpg signing key - name: copy the private gpg signing key
copy: > copy: >
src={{ CERTS_LOCAL_PRIVATE_KEY }} src={{ CERTS_LOCAL_PRIVATE_KEY }}
dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }} dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
owner={{ common_web_user }} mode=0600 owner={{ common_web_user }} mode=0600
notify: certs | restart certs notify: restart certs
register: certs_gpg_key register: certs_gpg_key
- name: certs | load the gpg key - name: load the gpg key
shell: > shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }} /usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
sudo_user: "{{ common_web_user }}" sudo_user: "{{ common_web_user }}"
when: certs_gpg_key.changed when: certs_gpg_key.changed
notify: certs | restart certs notify: restart certs
- include: deploy.yml - include: deploy.yml tags=deploy
- name: certs | create a symlink for venv python
file: >
src="{{ certs_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.certs
state=link
notify: certs | restart certs
with_items:
- python
- pip
...@@ -37,7 +37,7 @@ common_debian_pkgs: ...@@ -37,7 +37,7 @@ common_debian_pkgs:
- python2.7-dev - python2.7-dev
common_pip_pkgs: common_pip_pkgs:
- virtualenv - virtualenv==1.10.1
- virtualenvwrapper - virtualenvwrapper
common_web_user: www-data common_web_user: www-data
...@@ -49,3 +49,11 @@ common_git_ppa: "ppa:git-core/ppa" ...@@ -49,3 +49,11 @@ common_git_ppa: "ppa:git-core/ppa"
# Skip supervisor tasks # Skip supervisor tasks
# Useful when supervisor is not installed (local dev) # Useful when supervisor is not installed (local dev)
devstack: False devstack: False
common_debian_variants:
- Ubuntu
- Debian
common_redhat_variants:
- CentOS
- Red Hat Enterprise Linux
--- ---
- name: common | restart rsyslogd - name: restart rsyslogd
service: name=rsyslog state=restarted service: name=rsyslog state=restarted
sudo: True sudo: True
tags: deploy
--- ---
- name: common | Add user www-data - name: Add user www-data
# This is the default user for nginx # This is the default user for nginx
user: > user: >
name="{{ common_web_user }}" name="{{ common_web_user }}"
shell=/bin/false shell=/bin/false
- name: common | Create common directories - name: Create common directories
file: > file: >
path={{ item }} state=directory owner=root path={{ item }} state=directory owner=root
group=root mode=0755 group=root mode=0755
...@@ -16,57 +16,57 @@ ...@@ -16,57 +16,57 @@
- "{{ COMMON_CFG_DIR }}" - "{{ COMMON_CFG_DIR }}"
# Need to install python-pycurl to use Ansible's apt_repository module # Need to install python-pycurl to use Ansible's apt_repository module
- name: common | Install python-pycurl - name: Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes apt: pkg=python-pycurl state=present update_cache=yes
# Ensure that we get a current version of Git # Ensure that we get a current version of Git
# GitHub requires version 1.7.10 or later # GitHub requires version 1.7.10 or later
# https://help.github.com/articles/https-cloning-errors # https://help.github.com/articles/https-cloning-errors
- name: common | Add git apt repository - name: Add git apt repository
apt_repository: repo="{{ common_git_ppa }}" apt_repository: repo="{{ common_git_ppa }}"
- name: Install role-independent useful system packages
# do this before log dir setup; rsyslog package guarantees syslog user present
apt: >
pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
state=present update_cache=yes
- name: Create common log directory
file: >
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
- name: upload sudo config for key forwarding as root
copy: >
src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
validate='visudo -c -f %s' owner=root group=root mode=0440
- name: pip install virtualenv
pip: >
name="{{ item }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: common_pip_pkgs
- name: Install rsyslog configuration for edX
template: dest=/etc/rsyslog.d/99-edx.conf src=edx_rsyslog.j2 owner=root group=root mode=644
notify: restart rsyslogd
- name: Install logrotate configuration for edX
template: dest=/etc/logrotate.d/edx-services src=edx_logrotate.j2 owner=root group=root mode=644
- name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME
register: etc_hosts
- name: update /etc/hostname
template: src=hostname.j2 dest=/etc/hostname
when: COMMON_HOSTNAME
register: etc_hostname
- name: run hostname
shell: >
hostname -F /etc/hostname
when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
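# A hedged usage sketch for the hostname handling above (playbook and inventory
# names are assumptions):
#
#   ansible-playbook -i inventory.ini common.yml -e "COMMON_HOSTNAME=app01.example.com"
#
# The hostname command only runs when either template reported a change, so
# re-running the play against an already-configured host is a no-op.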
---
- name: restart the datadog service
service: name=datadog-agent state=restarted
tags: deploy
---
dependencies:
- common
@@ -2,10 +2,10 @@
#
# datadog
#
# Overview:
#
# Installs datadog
##
# Dependencies:
#
@@ -15,43 +15,43 @@
# - datadog
#
- name: install debian needed pkgs
apt: pkg={{ item }}
with_items: datadog_debian_pkgs
tags:
- datadog
- name: add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
tags:
- datadog
- name: install apt repository
apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
tags:
- datadog
- name: install datadog agent
apt: pkg="datadog-agent"
tags:
- datadog
- name: bootstrap config
shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
tags:
- datadog
- name: update api-key
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*"
line="api_key:{{ datadog_api_key }}"
notify:
- restart the datadog service
tags:
- datadog
- name: ensure started and enabled
service: name=datadog-agent state=started enabled=yes
tags:
- datadog
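# A minimal sketch of wiring this role into a play; the api key value is a
# placeholder and would normally come from a protected vars file or --extra-vars:
#
#   - hosts: monitored
#     sudo: yes
#     roles:
#       - { role: datadog, datadog_api_key: "REPLACE_WITH_REAL_KEY" }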
---
- name: check out the demo course
git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
sudo_user: "{{ edxapp_user }}"
register: demo_checkout
tags: deploy
- name: import demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
tags: deploy
- name: create some test users and enroll them in the course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
- name: create staff user
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
tags: deploy
- name: add test users to the certificate whitelist
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
- name: seed the forums for the demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
@@ -30,9 +30,9 @@
# - edxapp
# - demo
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
- include: deploy.yml tags=deploy
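# Tagging the include (rather than every task in deploy.yml) is what lets a
# deploy-only run pick these tasks up with a single flag. A hedged example,
# assuming a demo.yml playbook that applies this role:
#
#   ansible-playbook demo.yml -i localhost, -c local --tags deploy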
@@ -11,11 +11,10 @@
# Defaults for role devpi
#
---
- name: restart devpi
supervisorctl_local: >
state=restarted
supervisorctl_path={{ devpi_supervisor_ctl }}
config={{ devpi_supervisor_cfg }}
name=devpi-server
sudo_user: "{{ devpi_supervisor_user }}"
tags: deploy
@@ -30,13 +30,13 @@
# - devpi
---
- name: create devpi user
user: >
name={{ devpi_user }}
shell=/bin/false createhome=no
notify: restart devpi
- name: create devpi application directories
file: >
path={{ item }}
state=directory
@@ -45,9 +45,9 @@
with_items:
- "{{ devpi_app_dir }}"
- "{{ devpi_venv_dir }}"
notify: restart devpi
- name: create the devpi data directory, needs write access by the service user
file: >
path={{ item }}
state=directory
@@ -56,40 +56,40 @@
with_items:
- "{{ devpi_data_dir }}"
- "{{ devpi_mirror_dir }}"
notify: restart devpi
- name: install devpi pip pkgs
pip: >
name={{ item }}
state=present
virtualenv={{ devpi_venv_dir }}
sudo_user: "{{ devpi_user }}"
with_items: devpi_pip_pkgs
notify: restart devpi
- name: writing supervisor script
template: >
src=devpi.conf.j2 dest={{ devpi_supervisor_cfg_dir }}/devpi.conf
owner={{ devpi_user }} group={{ devpi_user }} mode=0644
notify: restart devpi
- name: create a symlink for venv python, pip
file: >
src="{{ devpi_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
state=link
notify: restart devpi
with_items:
- python
- pip
- name: create a symlink for venv supervisor
file: >
src="{{ devpi_supervisor_venv_bin }}/supervisorctl"
dest={{ COMMON_BIN_DIR }}/{{ item }}.devpi
state=link
- name: create a symlink for supervisor config
file: >
src="{{ devpi_supervisor_app_dir }}/supervisord.conf"
dest={{ COMMON_CFG_DIR }}/supervisord.conf.devpi
@@ -100,13 +100,12 @@
# the services if any of the configurations
# have changed.
#
- name: update devpi supervisor configuration
shell: "{{ devpi_supervisor_ctl }} -c {{ devpi_supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
tags: deploy
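# Note on changed_when above: "supervisorctl ... update" prints one line per
# program group it adds, removes or restarts and prints nothing when the config
# is unchanged, so an empty stdout is treated as "no change". Illustrative
# output when the devpi program group was modified (not captured from a run):
#
#   devpi-server: stopped
#   devpi-server: updated process group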
- name: ensure devpi is started
supervisorctl_local: >
state=started
supervisorctl_path={{ devpi_supervisor_ctl }}
...
---
- name: restart discern
supervisorctl_local: >
name=discern
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: discern_installed is defined
with_items:
- discern
- discern_celery
tags: deploy
---
- name: create supervisor scripts - discern, discern_celery
template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} mode=0644
@@ -8,70 +8,56 @@
with_items: ['discern', 'discern_celery']
#Upload config files for django (auth and env)
- name: create discern application config env.json file
template: src=env.json.j2 dest={{ discern_app_dir }}/env.json
sudo_user: "{{ discern_user }}"
notify:
- restart discern
tags:
- deploy
- name: create discern auth file auth.json
template: src=auth.json.j2 dest={{ discern_app_dir }}/auth.json
sudo_user: "{{ discern_user }}"
notify:
- restart discern
tags:
- deploy
- name: git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
tags:
- deploy
- name: git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
tags:
- deploy
#Numpy has to be a pre-requirement in order for scipy to build
- name: install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- restart discern
with_items:
- "{{ discern_pre_requirements_file }}"
- "{{ discern_ease_pre_requirements_file }}"
tags:
- deploy
- name: install python requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
- restart discern
with_items:
- "{{ discern_post_requirements_file }}"
- "{{ discern_ease_post_requirements_file }}"
tags:
- deploy
- name: install ease python package
shell: >
{{ discern_venv_dir }}/bin/activate; cd {{ discern_ease_code_dir }}; python setup.py install
notify:
- restart discern
tags:
- deploy
- name: download and install nltk
shell: |
set -e
curl -o {{ discern_nltk_tmp_file }} {{ discern_nltk_download_url }}
@@ -82,36 +68,30 @@
chdir={{ discern_data_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
tags:
- deploy
#Run this instead of using the ansible module because the ansible module only support syncdb of these three, and does not
#support virtualenvs as of this comment
- name: django syncdb migrate and collectstatic for discern
shell: >
{{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
with_items:
- syncdb
- migrate
- collectstatic
tags:
- deploy
#Have this separate from the other three because it doesn't take the noinput flag
- name: django update_index for discern
shell: >
{{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
tags:
- deploy
# call supervisorctl update. this reloads
@@ -119,14 +99,13 @@
# the services if any of the configurations
# have changed.
#
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
tags: deploy
- name: ensure discern, discern_celery has started
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
@@ -135,4 +114,14 @@
with_items:
- discern
- discern_celery
tags: deploy
- name: create a symlink for venv python
file: >
src="{{ discern_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.discern
state=link
with_items:
- python
- pip
- set_fact: discern_installed=true
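# The set_fact above pairs with the "when: discern_installed is defined" guard
# on the restart handler: notifications queued before the first successful
# deploy are skipped, so a plain configuration run cannot try to restart
# programs that supervisor does not know about yet.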
---
- name: create application user
user: >
name="{{ discern_user }}"
home="{{ discern_app_dir }}"
createhome=no
shell=/bin/false
notify:
- restart discern
- name: create discern app dirs owned by discern
file: >
path="{{ item }}"
state=directory
owner="{{ discern_user }}"
group="{{ common_web_group }}"
notify:
- restart discern
with_items:
- "{{ discern_app_dir }}"
- "{{ discern_venvs_dir }}"
- name: create discern data dir, owned by {{ common_web_user }}
file: >
path="{{ discern_data_dir }}" state=directory
owner="{{ common_web_user }}" group="{{ discern_user }}"
mode=0775
notify:
- restart discern
- name: install debian packages that discern needs
apt: pkg={{ item }} state=present
notify:
- restart discern
with_items: discern_debian_pkgs
- name: install debian packages for ease that discern needs
apt: pkg={{ item }} state=present
notify:
- restart discern
with_items: discern_ease_debian_pkgs
- name: copy sudoers file for discern
copy: >
src=sudoers-discern dest=/etc/sudoers.d/discern
mode=0440 validate='visudo -cf %s' owner=root group=root
notify:
- restart discern
#Needed if using redis to prevent memory issues
- name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1
notify:
- restart discern
- include: deploy.yml tags=deploy
- name: discern | create a symlink for venv python
file: >
src="{{ discern_venv_bin }}/python"
dest={{ COMMON_BIN_DIR }}/python.discern
state=link
@@ -32,5 +32,6 @@ edx_ansible_venv_bin: "{{ edx_ansible_venv_dir }}/bin"
edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
edx_ansible_var_file: "{{ edx_ansible_data_dir }}/server-vars.yml"
# edX configuration repo
configuration_version: master
@@ -10,4 +10,4 @@
##
# Role includes for role edx_ansible
dependencies:
- common
---
- name: git checkout edx_ansible repo into edx_ansible_code_dir
git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
sudo_user: "{{ edx_ansible_user }}"
tags: deploy
- name: install edx_ansible venv requirements
pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present
sudo_user: "{{ edx_ansible_user }}"
tags: deploy
- name: create update script
template: >
dest={{ edx_ansible_app_dir}}/update
src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755
- name: create a symlink for update.sh
file: >
src={{ edx_ansible_app_dir }}/update
dest={{ COMMON_BIN_DIR }}/update
state=link
- name: dump all vars to yaml
template: src=dumpall.yml.j2 dest={{ edx_ansible_var_file }} mode=0600
- name: clean up var file, removing all version vars
shell: sed -i -e "/{{item}}/d" {{ edx_ansible_var_file }}
with_items:
# deploy versions
- "^edx_platform_version:"
- "^edx_platform_commit:"
- "^xqueue_version:"
- "^forum_version:"
- "^xserver_version:"
- "^discern_ease_version:"
- "^ora_ease_version:"
- "^discern_version:"
- "^ora_version:"
- "^configuration_version:"
- "^ease_version:"
- "^certs_version:"
# other misc vars
- "^tags:"
- "^_original_file:"
- name: create a symlink for var file
file: >
src={{ edx_ansible_var_file }}
dest={{ COMMON_CFG_DIR }}/{{ edx_ansible_var_file|basename }}
state=link
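# The server-vars.yml file written above is consumed by the update wrapper
# script: when the file exists it is passed back to ansible-playbook as
# "-e@{{ edx_ansible_var_file }}", so values captured at build time (minus the
# version pins stripped out by the sed task) override role defaults on redeploys.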
@@ -23,14 +23,14 @@
#
#
#
- name: create application user
user: >
name="{{ edx_ansible_user }}"
home="{{ edx_ansible_app_dir }}"
createhome=no
shell=/bin/false
- name: create edx_ansible app and venv dir
file: >
path="{{ item }}"
state=directory
@@ -38,20 +38,10 @@
group="{{ common_web_group }}"
with_items:
- "{{ edx_ansible_app_dir }}"
- "{{ edx_ansible_data_dir }}"
- "{{ edx_ansible_venvs_dir }}"
- name: install a bunch of system packages on which edx_ansible relies
apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present
- include: deploy.yml tags=deploy
- name: edx_ansible | create update script
template: >
dest={{ edx_ansible_app_dir}}/update
src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=755
- name: edxapp | create a symlink for update.sh
file: >
src={{ edx_ansible_app_dir }}/update
dest={{ COMMON_BIN_DIR }}/update
state=link
@@ -12,24 +12,13 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, discern, edx-ora, configuration
<version> - can be a commit or tag
EO
IFS=$SAVE_IFS
}
declare -A repos_to_cmd
edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy"
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["forums"]="$edx_ansible_cmd forums.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
PROG=${0##*/}
while getopts "vh" opt; do
case $opt in
@@ -45,6 +34,23 @@ while getopts "vh" opt; do
done
if [[ -f {{ edx_ansible_var_file }} ]]; then
extra_args="-e@{{ edx_ansible_var_file }}"
fi
declare -A repos_to_cmd
edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy $extra_args "
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
if [[ -z $1 || -z $2 ]]; then
echo
echo "ERROR: You must specify a repo and commit"
...
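# A hedged usage sketch for the rendered script, which the role symlinks to
# {{ COMMON_BIN_DIR }}/update; repo names come from repos_to_cmd above and the
# refs are examples only:
#
#   sudo {{ COMMON_BIN_DIR }}/update edx-platform master
#   sudo {{ COMMON_BIN_DIR }}/update configuration release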
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role edx_service
#
#
# vars are namespaced with the module name.
#
edx_service_role_name: edx_service
#
# OS packages
#
edx_service_debian_pkgs: []
edx_service_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role edx_service
#
# Overview:
#
#
- name: edx_service | notify me
debug: msg="stub handler"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role edx_service
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role edx_service
#
# Overview:
#
# This role performs the repetitive tasks that most edX roles
# require in our default configuration.
#
# Example play:
#
# Rather than being included in the play, this role
# is included as a dependency by other roles in the meta/main.yml
# file. The including role should add the following
# dependency definition.
#
# dependencies:
# - { role: edx_service, edx_service_name: "hotg" }
#
- name: create application user
user: >
name="{{ edx_service_name }}"
home="{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
createhome=no
shell=/bin/false
- name: create edx_service app and venv dir
file: >
path="{{ item }}"
state=directory
owner="{{ edx_service_name }}"
group="{{ common_web_group }}"
with_items:
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_debian_pkgs"
when: ansible_distribution in common_debian_variants
- name: install a bunch of system packages on which edx_service relies
yum: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants
\ No newline at end of file
@@ -40,7 +40,7 @@ EDXAPP_EMAIL_BACKEND: 'django.core.mail.backends.smtp.EmailBackend'
EDXAPP_LOG_LEVEL: 'INFO'
EDXAPP_MEMCACHE: [ 'localhost:11211' ]
EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080'
EDXAPP_COMMENTS_SERVICE_KEY: 'password'
EDXAPP_EDXAPP_SECRET_KEY: ''
@@ -66,6 +66,7 @@ EDXAPP_FEATURES:
SUBDOMAIN_COURSE_LISTINGS: false
PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE
ENABLE_S3_GRADE_DOWNLOADS: true
USE_CUSTOM_THEME: $edxapp_use_custom_theme
EDXAPP_BOOK_URL: ''
# This needs to be set to localhost
@@ -83,8 +84,11 @@ EDXAPP_RABBIT_HOSTNAME: 'localhost'
EDXAPP_XML_MAPPINGS: {}
EDXAPP_LMS_NGINX_PORT: 18000
EDXAPP_LMS_SSL_NGINX_PORT: 48000
EDXAPP_LMS_PREVIEW_NGINX_PORT: 18020
EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_CMS_SSL_NGINX_PORT: 48010
EDXAPP_LANG: 'en_US.UTF-8'
EDXAPP_TIME_ZONE: 'America/New_York'
@@ -113,6 +117,20 @@ EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
# Configure rake tasks in edx-platform to skip Python/Ruby/Node installation
EDXAPP_NO_PREREQ_INSTALL: 1
# whether to setup the python codejail or not
EDXAPP_PYTHON_SANDBOX: false
# this next setting, if true, turns on actual sandbox enforcement. If not true,
# it puts the sandbox in 'complain' mode, for reporting but not enforcement
EDXAPP_SANDBOX_ENFORCE: true
# Supply authorized keys used for remote management via the automated
# role, see meta/main.yml. Ensure you know what this does before
# enabling. The boolean flag determines whether the role is included.
# This is done to make it possible to disable remote access easily by
# setting the flag to true and providing an empty array.
EDXAPP_INCLUDE_AUTOMATOR_ROLE: false
EDXAPP_AUTOMATOR_AUTHORIZED_KEYS: []
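# A hedged example of enabling the automator role (key material is a placeholder):
#
# EDXAPP_INCLUDE_AUTOMATOR_ROLE: true
# EDXAPP_AUTOMATOR_AUTHORIZED_KEYS:
#   - "ssh-rsa AAAAB3NzaC1yc2E... ops@example.com"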
#-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings
@@ -160,6 +178,13 @@ edxapp_workers:
service_variant: lms
concurrency: 2
# setup for python codejail
edxapp_sandbox_venv_dir: '{{ edxapp_venvs_dir }}/edxapp-sandbox'
edxapp_sandbox_user: 'sandbox' # I think something about the codejail requires hardcoding this to sandbox:sandbox
# apparmor command
edxapp_aa_command: "{% if EDXAPP_SANDBOX_ENFORCE %}aa-enforce{% else %}aa-complain{% endif %}"
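# With the default above (EDXAPP_SANDBOX_ENFORCE: true) edxapp_aa_command
# renders to "aa-enforce"; setting it to false renders "aa-complain", which
# logs apparmor violations instead of blocking them.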
# Requirement files we explicitly
# check for changes before attempting
# to update the venv
@@ -358,6 +383,14 @@ lms_auth_config:
lms_env_config:
<<: *edxapp_generic_env
'CODE_JAIL':
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
'limits':
'VMEM': 0
'REALTIME': 5
'user': '{{ edxapp_sandbox_user }}'
cms_auth_config:
<<: *edxapp_generic_auth
cms_env_config:
@@ -404,9 +437,12 @@ worker_core_mult:
cms: 2
# Theming
# Turn theming on and off with edxapp_use_custom_theme
# Set theme name with edxapp_theme_name
# Stanford, for example, uses edxapp_theme_name: 'stanford'
#
# TODO: change variables to ALL-CAPS, since they are meant to be externally overridden
edxapp_use_custom_theme: false
edxapp_theme_name: ""
edxapp_theme_source_repo: 'https://{{ COMMON_GIT_MIRROR }}/Stanford-Online/edx-theme.git'
edxapp_theme_version: 'HEAD'
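# A hedged example of switching on the Stanford theme mentioned above:
#
# edxapp_use_custom_theme: true
# edxapp_theme_name: 'stanford'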
@@ -427,9 +463,6 @@ sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base
sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt"
sandbox_post_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/post.txt"
#do we want to install the sandbox requirements into the regular virtual env
install_sandbox_reqs_into_regular_venv: true
edxapp_debian_pkgs:
- npm
# for compiling the virtualenv
@@ -471,3 +504,9 @@ edxapp_cms_variant: cms
# Worker Settings
worker_django_settings_module: 'aws'
# This array is used by the automator role to provide
# access to a limited set of commands via rbash. The
# commands listed here will be symlinked to ~/bin/ for
# the automator user.
edxapp_automated_rbash_links:
- /usr/bin/sudo
\ No newline at end of file
---
- name: restart edxapp
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
when: edxapp_installed is defined and celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
tags: deploy
- name: restart edxapp_workers
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: edxapp_installed is defined and celery_worker is defined and not devstack
with_items: edxapp_workers
sudo_user: "{{ common_web_user }}"
tags: deploy
@@ -6,3 +6,9 @@ dependencies:
rbenv_dir: "{{ edxapp_app_dir }}"
rbenv_ruby_version: "{{ edxapp_ruby_version }}"
- devpi
- role: automated
automated_rbash_links: "{{ edxapp_automated_rbash_links }}"
automated_sudoers_dest: '99-automator-edxapp-server'
automated_sudoers_template: 'roles/edxapp/templates/etc/sudoers.d/99-automator-edxapp-server.j2'
automated_authorized_keys: "{{ EDXAPP_AUTOMATOR_AUTHORIZED_KEYS }}"
when: EDXAPP_INCLUDE_AUTOMATOR_ROLE
- name: setup the edxapp env
notify:
- "restart edxapp"
- "restart edxapp_workers"
template: >
src=edxapp_env.j2 dest={{ edxapp_app_dir }}/edxapp_env
owner={{ edxapp_user }} group={{ common_web_user }}
mode=0644
tags: deploy
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
register: chkout
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
- name: git clean after checking out edx-platform
shell: cd {{edxapp_code_dir}} && git clean -xdf
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
- name: checkout theme
git: dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- deploy
- name: create checksum for requirements, package.json and Gemfile
shell: >
/usr/bin/md5sum {{ " ".join(edxapp_chksum_req_files) }} 2>/dev/null > /var/tmp/edxapp.req.new
sudo_user: "{{ edxapp_user }}"
ignore_errors: true
tags:
- deploy
- stat: path=/var/tmp/edxapp.req.new
register: new
sudo_user: "{{ edxapp_user }}"
tags: deploy
- stat: path=/var/tmp/edxapp.req.installed
register: inst
sudo_user: "{{ edxapp_user }}"
tags: deploy
# Substitute github mirror in all requirements files
# This is run on every single deploy
- name: Updating requirement files for git mirror
command: |
/bin/sed -i -e 's/github\.com/{{ COMMON_GIT_MIRROR }}/g' {{ " ".join(edxapp_all_req_files) }}
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
# Ruby plays that need to be run after platform updates.
- name: gem install bundler
shell: >
gem install bundle
chdir={{ edxapp_code_dir }}
@@ -74,11 +64,10 @@
environment: "{{ edxapp_environment }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
- name: bundle install
shell: >
bundle install --binstubs
chdir={{ edxapp_code_dir }}
@@ -86,23 +75,32 @@
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
# Set the npm registry
- name: Set the npm registry
shell:
npm config set registry 'http://registry.npmjs.org'
creates="{{ edxapp_app_dir }}/.npmrc"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Node play that need to be run after platform updates.
- name: Install edx-platform npm dependencies
shell: npm install chdir={{ edxapp_code_dir }}
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
# Install the python pre requirements into {{ edxapp_venv_dir }}
- name: install python pre-requirements
pip: >
requirements="{{pre_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
@@ -111,13 +109,12 @@
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
tags: deploy
# Install the python modules into {{ edxapp_venv_dir }}
- name: install python base-requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
@@ -127,13 +124,12 @@
environment: "{{ edxapp_environment }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
tags: deploy
# Install the python post requirements into {{ edxapp_venv_dir }}
- name: install python post-requirements
pip: >
requirements="{{post_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
@@ -142,84 +138,126 @@
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
tags: deploy
# Install the final python modules into {{ edxapp_venv_dir }}
- name: install python post-post requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }}
with_items:
- "{{ repo_requirements_file }}"
- "{{ github_requirements_file }}"
- "{{ local_requirements_file }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
# Install the sandbox python modules into {{ edxapp_venv_dir }}
- name: install sandbox requirements into regular venv
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some
# requirements are pathed relative to the edx-platform repo. Using the pip from inside the virtual environment implicitly
# installs everything into that virtual environment.
shell: >
{{ edxapp_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }}
with_items:
- "{{ sandbox_base_requirements }}"
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
when: install_sandbox_reqs_into_regular_venv
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5 when: "not EDXAPP_PYTHON_SANDBOX and (not inst.stat.exists or new.stat.md5 != inst.stat.md5)"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
# to install anything
- name: code sandbox | put sandbox apparmor profile in complain mode
command: /usr/sbin/aa-complain /etc/apparmor.d/code.sandbox
when: EDXAPP_PYTHON_SANDBOX
tags:
- edxapp-sandbox
- name: code sandbox | Install base sandbox requirements and create sandbox virtualenv
pip: >
requirements="{{sandbox_base_requirements}}"
virtualenv="{{edxapp_sandbox_venv_dir}}"
state=present
extra_args="-i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_sandbox_user }}"
when: EDXAPP_PYTHON_SANDBOX
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | Install sandbox requirements into sandbox venv
shell: >
{{ edxapp_sandbox_venv_dir }}/bin/pip install -i {{ edxapp_pypi_local_mirror }} --exists-action w --use-mirrors -r {{ item }}
chdir={{ edxapp_code_dir }}
with_items:
- "{{ sandbox_local_requirements }}"
- "{{ sandbox_post_requirements }}"
sudo_user: "{{ edxapp_sandbox_user }}"
when: EDXAPP_PYTHON_SANDBOX
register: sandbox_install_output
changed_when: "'installed' in sandbox_install_output"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | put code sandbox into aa-enforce or aa-complain mode, depending on EDXAPP_SANDBOX_ENFORCE
command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox
when: EDXAPP_PYTHON_SANDBOX
tags:
- edxapp-sandbox
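# The sandbox tasks above are tagged "edxapp-sandbox", so they can be re-run in
# isolation once the rest of the role has converged. A hedged invocation, using
# the edxapp.yml playbook referenced by the update script:
#
#   ansible-playbook edxapp.yml -i localhost, -c local --tags edxapp-sandbox -e "EDXAPP_PYTHON_SANDBOX=true"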
- name: compiling all py files in the edx-platform repo
shell: "{{ edxapp_venv_bin }}/python -m compileall {{ edxapp_code_dir }}" shell: "{{ edxapp_venv_bin }}/python -m compileall {{ edxapp_code_dir }}"
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
notify: notify:
- "edxapp | restart edxapp" - "restart edxapp"
- "edxapp | restart edxapp_workers" - "restart edxapp_workers"
tags: deploy
# alternative would be to give {{ common_web_user }} read access
# to the virtualenv but that permission change will require
# root access.
- name: give other read permissions to the virtualenv
command: chmod -R o+r "{{ edxapp_venv_dir }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags: deploy
- name: create checksum for installed requirements
shell: cp /var/tmp/edxapp.req.new /var/tmp/edxapp.req.installed
sudo_user: "{{ edxapp_user }}"
notify: "restart edxapp"
tags: deploy
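# How the requirements checksum dance fits together: /var/tmp/edxapp.req.new is
# regenerated from the current requirements/package.json/Gemfile on every run,
# the pip tasks above only fire when it differs from /var/tmp/edxapp.req.installed,
# and this task promotes "new" to "installed" afterwards, so unchanged
# requirement files are skipped on the next deploy.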
# https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726
# This is necessary for when syncdb is run and the django_openid_auth module is installed,
# not sure if this fix will ever get merged
- name: openid workaround
shell: sed -i -e 's/claimed_id = models.TextField(max_length=2047, unique=True/claimed_id = models.TextField(max_length=2047/' {{ edxapp_venv_dir }}/lib/python2.7/site-packages/django_openid_auth/models.py
when: openid_workaround is defined
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- deploy
# creates the supervisor jobs for the
# service variants configured, runs
@@ -231,15 +269,14 @@
# the services if any of the configurations
# have changed.
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
sudo_user: "{{ supervisor_service_user }}"
changed_when: supervisor_update.stdout != ""
when: not devstack
tags: deploy
- name: ensure edxapp has started
supervisorctl_local: >
state=started
supervisorctl_path={{ supervisor_ctl }}
@@ -248,9 +285,8 @@
sudo_user: "{{ supervisor_service_user }}"
when: celery_worker is not defined and not devstack
with_items: service_variants_enabled
tags: deploy
- name: ensure edxapp_workers has started
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
@@ -259,4 +295,15 @@
when: celery_worker is defined and not devstack
with_items: edxapp_workers
sudo_user: "{{ supervisor_service_user }}"
tags: deploy
- name: create a symlink for venv python
file: >
src="{{ edxapp_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.edxapp
state=link
with_items:
- python
- pip
- set_fact: edxapp_installed=true
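# Note (illustrative, not part of this commit): with a hypothetical COMMON_BIN_DIR of
# /edx/bin, the symlink task above exposes the virtualenv tools system-wide as
#   /edx/bin/python.edxapp and /edx/bin/pip.edxapp
# e.g. "/edx/bin/pip.edxapp freeze" would list the packages installed in the edxapp venv.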
...@@ -4,27 +4,27 @@
---
- name: Install logrotate configuration for tracking file
template: dest=/etc/logrotate.d/tracking.log src=edx_logrotate_tracking_log.j2 owner=root group=root mode=644
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: create application user
user: >
name="{{ edxapp_user }}" home="{{ edxapp_app_dir }}"
createhome=no shell=/bin/false
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: create edxapp user dirs
file: >
path="{{ item }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
with_items:
- "{{ edxapp_app_dir }}"
- "{{ edxapp_data_dir }}"
...@@ -32,50 +32,44 @@
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- name: create edxapp log dir
file: >
path="{{ edxapp_log_dir }}" state=directory
owner="{{ common_log_user }}" group="{{ common_log_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: create web-writable edxapp data dirs
file: >
path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ edxapp_user }}"
mode="0775"
notify:
- "restart edxapp"
- "restart edxapp_workers"
with_items:
- "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}"
- name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: create log directories for service variants
notify:
- "restart edxapp"
- "restart edxapp_workers"
file: >
path={{ edxapp_log_dir }}/{{ item }} state=directory
owner={{ common_log_user }} group={{ common_log_user }}
mode=0750
with_items: service_variants_enabled
# Set up the python sandbox execution environment
- include: python_sandbox_env.yml
when: EDXAPP_PYTHON_SANDBOX
- include: deploy.yml tags=deploy
- name: code sandbox | Create edxapp sandbox user
user: name={{ edxapp_sandbox_user }} shell=/bin/false home={{ edxapp_sandbox_venv_dir }}
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | Install apparmor utils system pkg
apt: pkg=apparmor-utils state=present
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | write out apparmor code sandbox config
template: src=code.sandbox.j2 dest=/etc/apparmor.d/code.sandbox mode=0644 owner=root group=root
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | write out sandbox user sudoers config
template: src=95-sandbox-sudoer.j2 dest=/etc/sudoers.d/95-{{ edxapp_sandbox_user }} mode=0440 owner=root group=root validate='visudo -c -f %s'
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
# we bootstrap and enable the apparmor service here. in deploy.yml we disable, deploy, then re-enable
# so we need to enable it in main.yml
- name: code sandbox | start apparmor service
service: name=apparmor state=started
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | (bootstrap) load code sandbox profile
command: apparmor_parser -r /etc/apparmor.d/code.sandbox
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
- name: code sandbox | (bootstrap) put code sandbox into aa-enforce or aa-complain mode depending on EDXAPP_SANDBOX_ENFORCE
command: /usr/sbin/{{ edxapp_aa_command }} /etc/apparmor.d/code.sandbox
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- edxapp-sandbox
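# A minimal sketch (assumption, not part of this commit) of how edxapp_aa_command could be
# derived from EDXAPP_SANDBOX_ENFORCE in the role defaults; only the variable names are
# taken from the tasks above, the expression itself is illustrative:
EDXAPP_SANDBOX_ENFORCE: true
edxapp_aa_command: "{{ 'aa-enforce' if EDXAPP_SANDBOX_ENFORCE else 'aa-complain' }}"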
...@@ -5,10 +5,8 @@
sudo_user: "{{ edxapp_user }}"
with_items: service_variants_enabled
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- deploy
- name: "create {{ item }} auth file"
template: >
...@@ -16,11 +14,9 @@
dest={{ edxapp_app_dir }}/{{ item }}.auth.json
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
with_items: service_variants_enabled
tags:
- deploy
# write the supervisor scripts for the service variants
...@@ -31,33 +27,27 @@
with_items: service_variants_enabled
when: celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_user }}"
tags:
- deploy
- name: writing edxapp supervisor script
template: >
src=edxapp.conf.j2 dest={{ supervisor_cfg_dir }}/edxapp.conf
owner={{ supervisor_user }}
when: celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_user }}"
tags:
- deploy
# write the supervisor script for celery workers
- name: writing celery worker supervisor script
template: >
src=workers.conf.j2 dest={{ supervisor_cfg_dir }}/workers.conf
owner={{ supervisor_user }}
when: celery_worker is defined and not devstack
sudo_user: "{{ supervisor_user }}"
tags:
- deploy
# Gather assets using rake if possible
- name: gather {{ item }} static assets with rake
shell: >
SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets:aws
executable=/bin/bash
...@@ -66,29 +56,23 @@
when: celery_worker is not defined and not devstack and item != "lms-preview"
with_items: service_variants_enabled
notify:
- "restart edxapp"
- "restart edxapp_workers"
environment: "{{ edxapp_environment }}"
tags:
- deploy
- name: syncdb and migrate
shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin}}/django-admin.py syncdb --migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
tags:
- deploy
- name: db migrate
shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/django-admin.py migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
when: migrate_only is defined and migrate_only|lower == "yes"
notify:
- "restart edxapp"
- "restart edxapp_workers"
sudo_user: "{{ edxapp_user }}"
tags:
- deploy
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
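# For illustration only: the sudoers entries above let {{ edxapp_user }} run untrusted
# submissions under the confined sandbox interpreter, and clean up stray sandbox
# processes, without a password. A hypothetical invocation would look like:
#   sudo -u {{ edxapp_sandbox_user }} {{ edxapp_sandbox_venv_dir }}/bin/python jailed_code.py
# where jailed_code.py stands in for a file that codejail writes under /tmp/codejail-*.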
#include <tunables/global>
{{ edxapp_sandbox_venv_dir }}/bin/python flags=(complain) {
#include <abstractions/base>
{{ edxapp_sandbox_venv_dir }}/** mr,
{{ edxapp_code_dir }}/common/lib/sandbox-packages/** r,
/tmp/codejail-*/ rix,
/tmp/codejail-*/** rix,
#
# Whitelist particular shared objects from the system
# python installation
#
/usr/lib/python2.7/lib-dynload/_json.so mr,
/usr/lib/python2.7/lib-dynload/_ctypes.so mr,
/usr/lib/python2.7/lib-dynload/_heapq.so mr,
/usr/lib/python2.7/lib-dynload/_io.so mr,
/usr/lib/python2.7/lib-dynload/_csv.so mr,
/usr/lib/python2.7/lib-dynload/datetime.so mr,
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
#
# Allow access to selections from /proc
#
/proc/*/mounts r,
}
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py migrate *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py seed_permissions_roles *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py set_staff *
automator ALL=({{ common_web_user }}) NOPASSWD:SETENV:{{ edxapp_venv_dir }}/bin/django-admin.py transfer_students *
...@@ -7,7 +7,7 @@ directory={{ edxapp_code_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
command={{ edxapp_venv_bin}}/python {{ edxapp_code_dir }}/manage.py {{ w.service_variant }} --settings=aws celery worker --loglevel=info --queues=edx.{{ w.service_variant }}.core.{{ w.queue }} --hostname=edx.{{ w.service_variant }}.core.{{ w.queue }}.{{ ansible_hostname }} --concurrency={{ w.concurrency }}
killasgroup=true
stopasgroup=true
...
...@@ -10,33 +10,33 @@
# http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz
#
---
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: create a database for edxapp
mysql_db: >
db=edxapp
state=present
encoding=utf8
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
- name: create a database for ora
mysql_db: >
db=ora
state=present
encoding=utf8
- name: create a database for discern
mysql_db: >
db=discern
state=present
encoding=utf8
- name: install memcached
apt: pkg=memcached state=present
...@@ -14,13 +14,13 @@
# - oraclejdk
# - elasticsearch
- name: download elasticsearch
get_url: >
url={{ elasticsearch_url }}
dest=/var/tmp/{{ elasticsearch_file }}
force=no
- name: install elasticsearch from local package
shell: >
dpkg -i /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb
executable=/bin/bash
...@@ -29,7 +29,7 @@
- elasticsearch
- install
- name: Ensure elasticsearch is enabled and started
service: name=elasticsearch state=started enabled=yes
tags:
- elasticsearch
...
...@@ -13,35 +13,41 @@ forum_path: "{{ forum_code_dir }}/bin:{{ forum_rbenv_bin }}:{{ forum_rbenv_shims
FORUM_MONGO_USER: "cs_comments_service"
FORUM_MONGO_PASSWORD: "password"
FORUM_MONGO_HOSTS:
- "localhost"
FORUM_MONGO_TAGS: !!null
FORUM_MONGO_PORT: "27017"
FORUM_MONGO_DATABASE: "cs_comments_service"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?{{ FORUM_MONGO_TAGS }}{%- endif -%}"
FORUM_SINATRA_ENV: "development" FORUM_SINATRA_ENV: "development"
FORUM_RACK_ENV: "development"
FORUM_NGINX_PORT: "18080"
FORUM_API_KEY: "password"
FORUM_ELASTICSEARCH_HOST: "localhost"
FORUM_ELASTICSEARCH_PORT: "9200"
FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}"
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
forum_environment: forum_environment:
RBENV_ROOT: "{{ forum_rbenv_root }}" RBENV_ROOT: "{{ forum_rbenv_root }}"
GEM_HOME: "{{ forum_gem_root }}" GEM_HOME: "{{ forum_gem_root }}"
GEM_PATH: "{{ forum_gem_root }}" GEM_PATH: "{{ forum_gem_root }}"
PATH: "{{ forum_path }}" PATH: "{{ forum_path }}"
MONGOHQ_USER: "{{ FORUM_MONGO_USER }}" RACK_ENV: "{{ FORUM_RACK_ENV }}"
MONGOHQ_PASS: "{{ FORUM_MONGO_PASSWORD }}"
RACK_ENV: "{{ forum_rack_env }}"
SINATRA_ENV: "{{ FORUM_SINATRA_ENV }}" SINATRA_ENV: "{{ FORUM_SINATRA_ENV }}"
API_KEY: "{{ forum_api_key }}" API_KEY: "{{ FORUM_API_KEY }}"
SEARCH_SERVER: "{{ forum_elasticsearch_url }}" SEARCH_SERVER: "{{ FORUM_ELASTICSEARCH_URL }}"
MONGOHQ_URL: "{{ forum_mongo_url }}" MONGOHQ_URL: "{{ FORUM_MONGO_URL }}"
HOME: "{{ forum_app_dir }}" HOME: "{{ forum_app_dir }}"
NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}"
NEW_RELIC_LICENSE_KEY: " {{ FORUM_NEW_RELIC_LICENSE_KEY }}"
forum_user: "forum" forum_user: "forum"
forum_ruby_version: "1.9.3-p448" forum_ruby_version: "1.9.3-p448"
forum_source_repo: "https://github.com/edx/cs_comments_service.git" forum_source_repo: "https://github.com/edx/cs_comments_service.git"
forum_version: "HEAD" forum_version: "HEAD"
forum_mongo_database: "cs_comments_service" forum_unicorn_port: "4567"
forum_mongo_url: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{{ FORUM_MONGO_HOST }}:{{ FORUM_MONGO_PORT }}/{{ forum_mongo_database }}"
forum_rack_env: "development"
forum_api_key: "password"
forum_elasticsearch_host: "localhost"
forum_elasticsearch_port: "9200"
forum_elasticsearch_url: "http://{{ forum_elasticsearch_host }}:{{ forum_elasticsearch_port }}"
# #
# test config # test config
...@@ -49,11 +55,9 @@ forum_elasticsearch_url: "http://{{ forum_elasticsearch_host }}:{{ forum_elastic ...@@ -49,11 +55,9 @@ forum_elasticsearch_url: "http://{{ forum_elasticsearch_host }}:{{ forum_elastic
# #
# The following services should be listening on the associated # The following services should be listening on the associated
# ports when the role has been successfully created. # ports when the role has been successfully created. Note that
# connectivity to Mongo is also tested, but separately.
# #
forum_services: forum_services:
- {service: "sinatra", host: "localhost", port: "4567"} - {service: "sinatra", host: "localhost", port: "{{ forum_unicorn_port }}"}
- {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "27017"} - {service: "elasticsearch", host: "{{ FORUM_ELASTICSEARCH_HOST }}", port: "{{ FORUM_ELASTICSEARCH_PORT }}"}
- {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "28017"} \ No newline at end of file
- {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9200"}
- {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9300"}
---
- name: restart the forum service
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: forum_installed is defined and not devstack
tags: deploy
---
- name: create the supervisor config
template: >
src=forum.conf.j2 dest={{ supervisor_cfg_dir }}/forum.conf
owner={{ supervisor_user }}
...@@ -8,51 +8,49 @@
sudo_user: "{{ supervisor_user }}"
when: not devstack
register: forum_supervisor
tags: deploy
- name: create the supervisor wrapper
template: >
src={{ forum_supervisor_wrapper|basename }}.j2
dest={{ forum_supervisor_wrapper }}
mode=0755
sudo_user: "{{ forum_user }}"
when: not devstack
notify: restart the forum service
tags: deploy
- name: git checkout forum repo into {{ forum_code_dir }}
git: dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
sudo_user: "{{ forum_user }}"
notify: restart the forum service
tags: deploy
# TODO: This is done as the common_web_user
# since the process owner needs write access
# to the rbenv
- name: install comments service bundle
shell: bundle install chdir={{ forum_code_dir }}
sudo_user: "{{ common_web_user }}"
environment: "{{ forum_environment }}"
notify: restart the forum service
tags: deploy
# call supervisorctl update. this reloads
# the supervisorctl config and restarts
# the services if any of the configurations
# have changed.
#
- name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
changed_when: supervisor_update.stdout != ""
when: not devstack
tags: deploy
- name: ensure forum is started
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
tags: deploy
- include: test.yml tags=deploy
- set_fact: forum_installed=true
...@@ -21,27 +21,26 @@
# rbenv_ruby_version: "{{ forum_ruby_version }}"
# - forum
- name: create application user
user: >
name="{{ forum_user }}" home="{{ forum_app_dir }}"
createhome=no
shell=/bin/false
notify: restart the forum service
- name: create forum app dir
file: >
path="{{ forum_app_dir }}" state=directory
owner="{{ forum_user }}" group="{{ common_web_group }}"
notify: restart the forum service
- name: setup the forum env
template: >
src=forum_env.j2 dest={{ forum_app_dir }}/forum_env
owner={{ forum_user }} group={{ common_web_user }}
mode=0644
notify:
- restart the forum service
- include: deploy.yml tags=deploy
- include: test.yml
---
- name: test that the required service are listening
wait_for: port={{ item.port }} host={{ item.host }} timeout=30
with_items: forum_services
when: not devstack
- name: test that mongo replica set members are listening
wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30
with_items: FORUM_MONGO_HOSTS
when: not devstack
tags:
- forum
- test
# {{ ansible_managed }}
{% for name,value in forum_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
eval "$(rbenv init -)"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role gerrit
#
gerrit_app_dir: "{{ COMMON_APP_DIR }}/gerrit"
gerrit_data_dir: "{{ COMMON_DATA_DIR }}/gerrit"
gerrit_debian_pkgs:
- python-mysqldb
- python-boto
gerrit_release: 2.8.1
gerrit_user: gerrit2
gerrit_db_name: reviewdb
gerrit_http_port: 8080
gerrit_sshd_port: 29418
gerrit_jre_path: /usr/lib/jvm/java-7-oracle/jre
gerrit_java_exe_path: "{{ gerrit_jre_path }}/bin/java"
gerrit_repo_volume_os_device: /dev/xvdf
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role gerrit
- name: gerrit restarted
service: name=gerrit state=restarted
- name: nginx restarted
service: name=nginx state=restarted
---
dependencies:
- role: oraclejdk
- role: nginx
nginx_sites: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role gerrit
#
# Overview: Installs and configures Gerrit on the server. Requires
# several secure variables to be defined that are not defined in this
# role.
#
#
# Dependencies:
# - An existing running database.
# - An S3 bucket containing all of the necessary plugin jars.
# - In addition to the variables defined in defaults/main.yml, the following variables must be defined:
#
# gerrit_github_client_id: alskdjdfkjasdjfsdlfkj
# gerrit_github_client_secret: 0938908450deffaaa87665a555a6fc6de5777f77f
# gerrit_db_hostname: somedb.88374jhyehf.us-east-1.rds.amazonaws.com
# gerrit_db_admin_username: adminuser
# gerrit_db_admin_password: adminpassword
# gerrit_db_password: gerrituserpassword
# gerrit_artifact_s3_bucket:
# name: some-s3-bucket
# aws_access_key_id: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
# aws_secret_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
# gerrit_hostname: "gerrit.example.com"
# gerrit_smtp_enabled: false
# gerrit_email: gerrit@example.com
# gerrit_smtp_server: smtp.example.com
# gerrit_smtp_encryption: none
# gerrit_smtp_user: someuser
# gerrit_smtp_pass: somepassword
#
#
# Example play:
#
# - name: Deploy gerrit
# hosts: gerrit
# gather_facts: True
# sudo: True
# roles:
# - gerrit
- name: system package pre-requisites installed
apt: pkg={{ item }}
with_items: gerrit_debian_pkgs
- name: user
user: name={{ gerrit_user }} system=yes home={{ gerrit_app_dir }} createhome=no
- name: directories created
file: path={{ item }} mode=700 owner={{ gerrit_user }} state=directory
with_items:
- "{{ gerrit_app_dir }}"
- "{{ gerrit_app_dir }}/etc"
- "{{ gerrit_data_dir }}"
- name: repository volume fs exists
shell: file -s {{ gerrit_repo_volume_os_device }} | grep ext4
ignore_errors: yes
register: is_formatted
- name: repository volume formatted
command: mkfs -t ext4 {{ gerrit_repo_volume_os_device }}
when: is_formatted | failed
- name: fstab includes repository volume
lineinfile: >
dest=/etc/fstab
regexp="^{{ gerrit_repo_volume_os_device }}\s"
line="{{ gerrit_repo_volume_os_device }} {{ gerrit_data_dir }} ext4 defaults 0 2"
# output will look roughly like:
# /dev/foo on /some/mount/point type ext4 (options)
- name: repository volume is mounted
shell: >
mount -l | grep '^{{ gerrit_repo_volume_os_device }} '
ignore_errors: yes
register: is_mounted
- name: repository volume mounted
command: mount {{ gerrit_repo_volume_os_device }}
when: is_mounted | failed
- name: war file downloaded
s3: >
bucket={{ gerrit_artifact_s3_bucket.name }}
object=gerrit-{{ gerrit_release }}.war
dest=/tmp/gerrit.war
mode=get
aws_access_key="{{ gerrit_artifact_s3_bucket.aws_access_key_id }}"
aws_secret_key="{{ gerrit_artifact_s3_bucket.aws_secret_access_key }}"
sudo_user: "{{ gerrit_user }}"
- name: database created
mysql_db: >
name={{ gerrit_db_name }}
encoding=utf8
login_host={{ gerrit_db_hostname }} login_user={{ gerrit_db_admin_username }} login_password={{ gerrit_db_admin_password }}
register: db_created
- name: database user created
mysql_user: >
name={{ gerrit_user }}
password={{ gerrit_db_password }}
host='%'
priv="{{ gerrit_db_name }}.*:ALL"
login_host={{ gerrit_db_hostname }} login_user={{ gerrit_db_admin_username }} login_password={{ gerrit_db_admin_password }}
- name: configuration uploaded
template: src=gerrit.config.j2 dest={{ gerrit_app_dir }}/etc/gerrit.config mode=600
sudo_user: "{{ gerrit_user }}"
notify: gerrit restarted
- name: initialized
command: >
{{ gerrit_java_exe_path }} -jar /tmp/gerrit.war init -d {{ gerrit_app_dir }} --batch --no-auto-start
creates={{ gerrit_app_dir }}/bin
sudo_user: "{{ gerrit_user }}"
notify: gerrit restarted
- name: artifacts installed from s3
s3: >
bucket={{ gerrit_artifact_s3_bucket.name }}
object={{ item.jar }}
dest={{ item.dest }}/{{ item.jar }}
mode=get
aws_access_key="{{ gerrit_artifact_s3_bucket.aws_access_key_id }}"
aws_secret_key="{{ gerrit_artifact_s3_bucket.aws_secret_access_key }}"
sudo_user: "{{ gerrit_user }}"
notify: gerrit restarted
with_items:
- { jar: "github-oauth-{{ gerrit_release }}.jar", dest: "{{ gerrit_app_dir }}/lib" }
- { jar: "github-plugin-{{ gerrit_release }}.jar", dest: "{{ gerrit_app_dir }}/plugins" }
- { jar: "singleusergroup-{{ gerrit_release }}.jar", dest: "{{ gerrit_app_dir }}/plugins" }
- name: plugins installed from war
shell: unzip -p /tmp/gerrit.war WEB-INF/plugins/replication.jar > {{ gerrit_app_dir }}/plugins/replication.jar creates={{ gerrit_app_dir }}/plugins/replication.jar
sudo_user: "{{ gerrit_user }}"
notify: gerrit restarted
- name: setup nginx vhost
template: >
src=nginx-gerrit.j2
dest={{ nginx_sites_available_dir }}/gerrit
- name: enable gerrit vhost
file: >
src={{ nginx_sites_available_dir }}/gerrit
dest={{ nginx_sites_enabled_dir }}/gerrit
state=link
notify: nginx restarted
- name: init script configured
template: src=gerritcodereview.j2 dest=/etc/default/gerritcodereview mode=644
- name: init script installed
file: src={{ gerrit_app_dir }}/bin/gerrit.sh dest=/etc/init.d/gerrit state=link
- name: starts on boot
service: name=gerrit enabled=yes
# {{ ansible_managed }}
[gerrit]
basePath = {{ gerrit_data_dir }}
canonicalWebUrl = http://{{ gerrit_hostname }}/
changeScreen = CHANGE_SCREEN2
[database]
type = MYSQL
hostname = {{ gerrit_db_hostname }}
database = {{ gerrit_db_name }}
username = {{ gerrit_user }}
password = {{ gerrit_db_password }}
[auth]
type = HTTP
httpHeader = GITHUB_USER
loginUrl = /login
logoutUrl = /oauth/reset
[sendemail]
enable = {{ gerrit_smtp_enabled }}
smtpServer = {{ gerrit_smtp_server }}
smtpEncryption = {{ gerrit_smtp_encryption }}
smtpUser = {{ gerrit_smtp_user }}
smtpPass = {{ gerrit_smtp_pass }}
[container]
user = {{ gerrit_user }}
javaHome = {{ gerrit_jre_path }}
[sshd]
listenAddress = *:{{ gerrit_sshd_port }}
[httpd]
listenUrl = http://*:{{ gerrit_http_port }}/
filterClass = com.googlesource.gerrit.plugins.github.oauth.OAuthFilter
[cache]
directory = cache
[github]
url = https://github.com
clientId = {{ gerrit_github_client_id }}
clientSecret = {{ gerrit_github_client_secret }}
[user]
email = {{ gerrit_email }}
anonymousCoward = Anonymous User
[suggest]
accounts = true
export GERRIT_SITE={{ gerrit_app_dir }}
server {
listen 80;
server_name {{ gerrit_hostname }};
location / {
proxy_pass http://localhost:{{ gerrit_http_port }};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
\ No newline at end of file
...@@ -17,7 +17,7 @@
# Dependencies:
# - common
# - nginx
#
# Example play:
# roles:
# - common
...@@ -28,48 +28,48 @@
---
- name: install pip packages
pip: name={{ item }} state=present
with_items: gh_mirror_pip_pkgs
- name: install debian packages
apt: >
pkg={{ ",".join(gh_mirror_debian_pkgs) }}
state=present
update_cache=yes
- name: create gh_mirror user
user: >
name={{ gh_mirror_user }}
state=present
- name: create the gh_mirror data directory
file: >
path={{ gh_mirror_data_dir }}
state=directory
owner={{ gh_mirror_user }}
group={{ gh_mirror_group }}
- name: create the gh_mirror app directory
file: >
path={{ gh_mirror_app_dir }}
state=directory
- name: create org config
template: src=orgs.yml.j2 dest={{ gh_mirror_app_dir }}/orgs.yml
- name: copying sync scripts
copy: src={{ item }} dest={{ gh_mirror_app_dir }}/{{ item }}
with_items: gh_mirror_app_files
- name: creating cron job to update repos
cron:
name: "update repos from github"
job: "/usr/bin/python {{ gh_mirror_app_dir }}/repos_from_orgs.py -d {{ gh_mirror_data_dir }}"
- name: creating cron to update github repo list
cron:
name: "refresh repo list from github"
job: "/usr/bin/python {{ gh_mirror_app_dir}}/repos_from_orgs.py -r"
minute: 0
...@@ -12,34 +12,34 @@
# - mark
- name: creating default .bashrc
template: >
src=default.bashrc.j2 dest=/etc/skel/.bashrc
mode=0644 owner=root group=root
- name: create gh group
group: name=gh state=present
# TODO: give limited sudo access to this group
- name: grant full sudo access to gh group
copy: >
content="%gh ALL=(ALL) NOPASSWD:ALL"
dest=/etc/sudoers.d/gh owner=root group=root
mode=0440 validate='visudo -cf %s'
- name: create github users
user:
name={{ item }} groups=gh
shell=/bin/bash
with_items: gh_users
- name: create .ssh directory
file:
path=/home/{{ item }}/.ssh state=directory mode=0700
owner={{ item }}
with_items: gh_users
- name: copy github key[s] to .ssh/authorized_keys
get_url:
url=https://github.com/{{ item }}.keys
dest=/home/{{ item }}/.ssh/authorized_keys mode=0600
...
---
# Install and configure simple glusterFS shared storage
- name: all | Install common packages
apt: name={{ item }} state=present
with_items:
- glusterfs-client
...@@ -9,60 +9,60 @@
- nfs-common
tags: gluster
- name: all | Install server packages
apt: name=glusterfs-server state=present
when: >
"{{ ansible_default_ipv4.address }}" "{{ gluster_peers|join(' ') }}" "{{ ansible_default_ipv4.address }}" "{{ gluster_peers|join(' ') }}"
tags: gluster
- name: all | enable server
service: name=glusterfs-server state=started enabled=yes
when: >
"{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
tags: gluster
# Ignoring error below so that we can move the data folder and have it be a link
- name: all | create folders
file: path={{ item.path }} state=directory
with_items: gluster_volumes
when: >
"{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
ignore_errors: yes
tags: gluster
- name: primary | create peers
command: gluster peer probe {{ item }}
with_items: gluster_peers
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: primary | create volumes
command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
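# Illustration with hypothetical values: for gluster_peers of [10.0.0.1, 10.0.0.2] and a
# volume {name: edxdata, replicas: 2, path: /mnt/glusterfs}, the create command above
# expands to:
#   gluster volume create edxdata replica 2 transport tcp 10.0.0.1:/mnt/glusterfs 10.0.0.2:/mnt/glusterfs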
- name: primary | start volumes
command: gluster volume start {{ item.name }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
ignore_errors: yes # There should be better error checking here
tags: gluster
- name: primary | set security
command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: primary | set performance cache
command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
with_items: gluster_volumes
when: ansible_default_ipv4.address == gluster_primary_ip
tags: gluster
- name: all | mount volume
mount: >
name={{ item.mount_location }}
src={{ gluster_primary_ip }}:{{ item.name }}
...@@ -71,10 +71,10 @@
opts=defaults,_netdev
with_items: gluster_volumes
tags: gluster
# This is required due to an annoying bug in Ubuntu and gluster where it tries to mount the system
# before the network stack is up and can't look up 127.0.0.1
- name: all | sleep mount
lineinfile: >
dest=/etc/rc.local
line='sleep 5; /bin/mount -a'
...
...@@ -14,11 +14,11 @@
# Overview:
#
#
- name: restart haproxy
service: name=haproxy state=restarted
- name: reload haproxy
service: name=haproxy state=reloaded
- name: restart rsyslog
service: name=rsyslog state=restarted
...@@ -10,33 +10,33 @@
#
#
# Tasks for role haproxy
#
# Overview:
# Installs and configures haproxy for load balancing.
# HAProxy doesn't currently support included configuration
# so it allows for a configuration template to be overridden
# with a variable
- name: Install haproxy
apt: pkg=haproxy state={{ pkgs.haproxy.state }}
notify: restart haproxy
- name: Server configuration file
template: >
src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
owner=root group=root mode=0644
notify: reload haproxy
- name: Enabled in default
lineinfile: dest=/etc/default/haproxy regexp=^ENABLED=.$ line=ENABLED=1
notify: restart haproxy
- name: install logrotate
template: src=haproxy.logrotate.j2 dest=/etc/logrotate.d/haproxy mode=0644
- name: install rsyslog conf
template: src=haproxy.rsyslog.j2 dest=/etc/rsyslog.d/haproxy.conf mode=0644
notify: restart rsyslog
- name: make sure haproxy has started
service: name=haproxy state=started
---
- name: restart Jenkins
service: name=jenkins state=restarted
- name: start nginx
service: name=nginx state=started
- name: reload nginx
service: name=nginx state=reloaded
---
- name: install jenkins specific system packages
apt:
pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes
tags:
- jenkins
- name: install jenkins extra system packages
apt:
pkg={{','.join(JENKINS_EXTRA_PKGS)}}
state=present update_cache=yes
tags:
- jenkins
- name: create jenkins group
group: name={{ jenkins_group }} state=present
- name: add the jenkins user to the group
user: name={{ jenkins_user }} append=yes groups={{ jenkins_group }}
# Should be resolved in the next release, but until then we need to do this
# https://issues.jenkins-ci.org/browse/JENKINS-20407
- name: workaround for JENKINS-20407
command: "mkdir -p /var/run/jenkins"
- name: download Jenkins package
get_url: url="{{ jenkins_deb_url }}" dest="/tmp/{{ jenkins_deb }}"
- name: install Jenkins package
command: dpkg -i --force-depends "/tmp/{{ jenkins_deb }}"
- name: stop Jenkins
service: name=jenkins state=stopped
# Move /var/lib/jenkins to Jenkins home (on the EBS)
- name: move /var/lib/jenkins
command: mv /var/lib/jenkins {{ jenkins_home }}
creates={{ jenkins_home }}
- name: set owner for Jenkins home
file: path={{ jenkins_home }} recurse=yes state=directory
owner={{ jenkins_user }} group={{ jenkins_group }}
# Symlink /var/lib/jenkins to {{ COMMON_DATA_DIR }}/jenkins
# since Jenkins will expect its files to be in /var/lib/jenkins
- name: symlink /var/lib/jenkins
file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
owner={{ jenkins_user }} group={{ jenkins_group }}
notify:
- restart Jenkins
- name: make plugins directory
sudo_user: jenkins
shell: mkdir -p {{ jenkins_home }}/plugins
# We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment
# the version, the plugin will be updated in Jenkins
- name: download Jenkins plugins
get_url: url=http://updates.jenkins-ci.org/download/plugins/{{ item.name }}/{{ item.version }}/{{ item.name }}.hpi
dest=/tmp/{{ item.name }}_{{ item.version }}
with_items: jenkins_plugins
- name: install Jenkins plugins
command: cp /tmp/{{ item.name }}_{{ item.version }} {{ jenkins_home }}/plugins/{{ item.name }}.hpi
with_items: jenkins_plugins
- name: set Jenkins plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.name }}.hpi
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_plugins
notify:
- restart Jenkins
# We had to fork some plugins to workaround
# certain issues. If these changes get merged
# upstream, we may be able to use the regular plugin install process.
# Until then, we compile and install the forks ourselves.
- name: checkout custom plugin repo
git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
with_items: jenkins_custom_plugins
- name: compile custom plugins
command: mvn -Dmaven.test.skip=true install chdir=/tmp/{{ item.repo_name }}
with_items: jenkins_custom_plugins
- name: install custom plugins
command: mv /tmp/{{ item.repo_name }}/target/{{ item.package }}
{{ jenkins_home }}/plugins/{{ item.package }}
with_items: jenkins_custom_plugins
notify:
- restart Jenkins
- name: set custom plugin permissions
file: path={{ jenkins_home }}/plugins/{{ item.package }}
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
with_items: jenkins_custom_plugins
# Plugins that are bundled with Jenkins are "pinned".
# Jenkins will overwrite updated plugins with its built-in version
# unless we create a ".pinned" file for the plugin.
# See https://issues.jenkins-ci.org/browse/JENKINS-13129
- name: create plugin pin files
command: touch {{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
creates={{ jenkins_home }}/plugins/{{ item }}.jpi.pinned
with_items: jenkins_bundled_plugins
- name: setup nginx vhost
template:
src=etc/nginx/sites-available/jenkins.j2
dest=/etc/nginx/sites-available/jenkins
- name: enable jenkins vhost
file:
src=/etc/nginx/sites-available/jenkins
dest=/etc/nginx/sites-enabled/jenkins
state=link
notify: start nginx
...@@ -17,6 +17,7 @@ jenkins_debian_pkgs:
- libxslt1-dev
- npm
- pkg-config
- gettext
# Ruby Specific Vars
jenkins_rbenv_root: "{{ jenkins_home }}/.rbenv"
...
--- ---
- name: jenkins_worker | Install Java - name: Install Java
apt: pkg=openjdk-7-jre-headless state=present apt: pkg=openjdk-7-jre-headless state=present
- name: jenkins_worker | Download JSCover - name: Download JSCover
get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip get_url: url={{ jscover_url }} dest=/var/tmp/jscover.zip
- name: jenkins_worker | Unzip JSCover - name: Unzip JSCover
shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover shell: unzip /var/tmp/jscover.zip -d /var/tmp/jscover
creates=/var/tmp/jscover creates=/var/tmp/jscover
- name: jenkins_worker | Install JSCover JAR - name: Install JSCover JAR
command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ jscover_version }}.jar command: cp /var/tmp/jscover/target/dist/JSCover-all.jar /usr/local/bin/JSCover-all-{{ jscover_version }}.jar
creates=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar creates=/usr/local/bin/JSCover-all-{{ jscover_version }}.jar
- name: jenkins_worker | Set JSCover permissions - name: Set JSCover permissions
file: path="/usr/local/bin/JSCover-all-{{ jscover_version }}.jar" state=file file: path="/usr/local/bin/JSCover-all-{{ jscover_version }}.jar" state=file
owner=root group=root mode=0755 owner=root group=root mode=0755
--- ---
# Install scripts requiring a GitHub OAuth token # Install scripts requiring a GitHub OAuth token
- name: jenkins_worker | Install requests Python library - name: Install requests Python library
pip: name=requests state=present pip: name=requests state=present
- fail: jenkins_worker | OAuth token not defined - fail: OAuth token not defined
when: github_oauth_token is not defined when: github_oauth_token is not defined
- name: jenkins_worker | Install Python GitHub PR auth script - name: Install Python GitHub PR auth script
template: src="github_pr_auth.py.j2" dest="/usr/local/bin/github_pr_auth.py" template: src="github_pr_auth.py.j2" dest="/usr/local/bin/github_pr_auth.py"
owner=root group=root owner=root group=root
mode=755 mode=755
- name: jenkins_worker | Install Python GitHub post status script - name: Install Python GitHub post status script
template: src="github_post_status.py.j2" dest="/usr/local/bin/github_post_status.py" template: src="github_post_status.py.j2" dest="/usr/local/bin/github_post_status.py"
owner=root group=root owner=root group=root
mode=755 mode=755
# Create wheelhouse to enable fast virtualenv creation # Create wheelhouse to enable fast virtualenv creation
- name: jenkins_worker | Create wheel virtualenv - name: Create wheel virtualenv
command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }} command: /usr/local/bin/virtualenv {{ jenkins_venv }} creates={{ jenkins_venv }}
sudo_user: "{{ jenkins_user }}" sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Install wheel - name: Install wheel
pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv pip: name=wheel virtualenv={{ jenkins_venv }} virtualenv_command=/usr/local/bin/virtualenv
sudo_user: "{{ jenkins_user }}" sudo_user: "{{ jenkins_user }}"
- name: jenkins_worker | Create wheelhouse dir - name: Create wheelhouse dir
file: file:
path={{ jenkins_wheel_dir }} state=directory path={{ jenkins_wheel_dir }} state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
# (need to install each one in the venv to satisfy dependencies) # (need to install each one in the venv to satisfy dependencies)
- name: jenkins_worker | Create wheel archives - name: Create wheel archives
shell: shell:
"{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" && "{{ jenkins_pip }} wheel --wheel-dir={{ jenkins_wheel_dir }} \"${item.pkg}\" &&
{{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\" {{ jenkins_pip }} install --use-wheel --no-index --find-links={{ jenkins_wheel_dir }} \"${item.pkg}\"
creates={{ jenkins_wheel_dir }}/${item.wheel}" creates={{ jenkins_wheel_dir }}/${item.wheel}"
sudo_user: "{{ jenkins_user }}" sudo_user: "{{ jenkins_user }}"
with_items: "{{ jenkins_wheels }}" with_items: jenkins_wheels
- name: jenkins_worker | Add wheel_venv.sh script - name: Add wheel_venv.sh script
template: template:
src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh src=wheel_venv.sh.j2 dest={{ jenkins_home }}/wheel_venv.sh
owner={{ jenkins_user }} group={{ jenkins_group }} mode=700 owner={{ jenkins_user }} group={{ jenkins_group }} mode=700
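The wheelhouse built above lets downstream Jenkins jobs create a fresh virtualenv and install requirements from pre-built wheels instead of compiling each package on every run. A minimal sketch of how a job might consume it, assuming a hypothetical requirements file path (the wheel_venv.sh template itself is not shown in this diff):

# Illustrative only: a downstream job installing from the wheelhouse.
# The requirements path below is hypothetical.
- name: example | install requirements from the local wheelhouse
  shell: >
    {{ jenkins_pip }} install --use-wheel --no-index
    --find-links={{ jenkins_wheel_dir }} -r /var/tmp/example-requirements.txt
  sudo_user: "{{ jenkins_user }}"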
--- ---
- name: jenkins_worker | Create jenkins group - name: Create jenkins group
group: name={{ jenkins_group }} state=present group: name={{ jenkins_group }} state=present
# The Jenkins account needs a login shell because Jenkins uses scp # The Jenkins account needs a login shell because Jenkins uses scp
- name: jenkins_worker | Add the jenkins user to the group and configure shell - name: Add the jenkins user to the group and configure shell
user: name={{ jenkins_user }} append=yes group={{ jenkins_group }} shell=/bin/bash user: name={{ jenkins_user }} append=yes group={{ jenkins_group }} shell=/bin/bash
# Because of a bug in the latest release of the EC2 plugin # Because of a bug in the latest release of the EC2 plugin
# we need to use a key generated by Amazon (not imported) # we need to use a key generated by Amazon (not imported)
# To satisfy this, we allow users to log in as Jenkins # To satisfy this, we allow users to log in as Jenkins
# using the same keypair the instance was started with. # using the same keypair the instance was started with.
- name: jenkins_worker | Create .ssh directory - name: Create .ssh directory
file: file:
path={{ jenkins_home }}/.ssh state=directory path={{ jenkins_home }}/.ssh state=directory
owner={{ jenkins_user }} group={{ jenkins_group }} owner={{ jenkins_user }} group={{ jenkins_group }}
ignore_errors: yes ignore_errors: yes
- name: jenkins_worker | Copy ssh keys for jenkins - name: Copy ssh keys for jenkins
command: cp /home/ubuntu/.ssh/authorized_keys /home/{{ jenkins_user }}/.ssh/authorized_keys command: cp /home/ubuntu/.ssh/authorized_keys /home/{{ jenkins_user }}/.ssh/authorized_keys
ignore_errors: yes ignore_errors: yes
- name: jenkins_worker | Set key permissions - name: Set key permissions
file: file:
path={{ jenkins_home }}/.ssh/authorized_keys path={{ jenkins_home }}/.ssh/authorized_keys
owner={{ jenkins_user }} group={{ jenkins_group }} mode=400 owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
ignore_errors: yes ignore_errors: yes
- name: jenkins_worker | Install system packages - name: Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}} apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes state=present update_cache=yes
- name: jenkins_worker | Add script to set up environment variables - name: Add script to set up environment variables
template: template:
src=jenkins_env.j2 dest={{ jenkins_home }}/jenkins_env src=jenkins_env.j2 dest={{ jenkins_home }}/jenkins_env
owner={{ jenkins_user }} group={{ jenkins_group }} mode=0500 owner={{ jenkins_user }} group={{ jenkins_group }} mode=0500
# Need to add Github to known_hosts to avoid # Need to add Github to known_hosts to avoid
# being prompted when using git through ssh # being prompted when using git through ssh
- name: jenkins_worker | Add github.com to known_hosts if it does not exist - name: Add github.com to known_hosts if it does not exist
shell: > shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# Will terminate an instance if one and only one already exists # Will terminate an instance if one and only one already exists
# with the same name # with the same name
- name: launch_ec2 | lookup tags for terminating existing instance - name: lookup tags for terminating existing instance
local_action: local_action:
module: ec2_lookup module: ec2_lookup
region: "{{ region }}" region: "{{ region }}"
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
register: tag_lookup register: tag_lookup
when: terminate_instance == true when: terminate_instance == true
- name: launch_ec2 | checking for other instances - name: checking for other instances
debug: msg="Too many results returned, not terminating!" debug: msg="Too many results returned, not terminating!"
when: terminate_instance == true and tag_lookup.instance_ids|length > 1 when: terminate_instance == true and tag_lookup.instance_ids|length > 1
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
state: absent state: absent
when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1 when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1
- name: launch_ec2 | Launch ec2 instance - name: Launch ec2 instance
local_action: local_action:
module: ec2_local module: ec2_local
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
instance_profile_name: "{{ instance_profile_name }}" instance_profile_name: "{{ instance_profile_name }}"
register: ec2 register: ec2
- name: launch_ec2 | Add DNS name - name: Add DNS name
local_action: local_action:
module: route53 module: route53
overwrite: yes overwrite: yes
...@@ -59,9 +59,9 @@ ...@@ -59,9 +59,9 @@
ttl: 300 ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}" record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}" value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}" with_items: ec2.instances
- name: launch_ec2 | Add DNS name studio - name: Add DNS name studio
local_action: local_action:
module: route53 module: route53
overwrite: yes overwrite: yes
...@@ -71,9 +71,9 @@ ...@@ -71,9 +71,9 @@
ttl: 300 ttl: 300
record: "studio.{{ dns_name }}.{{ dns_zone }}" record: "studio.{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}" value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}" with_items: ec2.instances
- name: launch_ec2 | Add DNS name preview - name: Add DNS name preview
local_action: local_action:
module: route53 module: route53
overwrite: yes overwrite: yes
...@@ -83,17 +83,17 @@ ...@@ -83,17 +83,17 @@
ttl: 300 ttl: 300
record: "preview.{{ dns_name }}.{{ dns_zone }}" record: "preview.{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}" value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}" with_items: ec2.instances
- name: launch_ec2 | Add new instance to host group - name: Add new instance to host group
local_action: > local_action: >
add_host add_host
hostname={{ item.public_ip }} hostname={{ item.public_ip }}
groupname=launched groupname=launched
with_items: "{{ ec2.instances }}" with_items: ec2.instances
- name: launch_ec2 | Wait for SSH to come up - name: Wait for SSH to come up
local_action: > local_action: >
wait_for wait_for
host={{ item.public_dns_name }} host={{ item.public_dns_name }}
...@@ -101,4 +101,4 @@ ...@@ -101,4 +101,4 @@
port=22 port=22
delay=60 delay=60
timeout=320 timeout=320
with_items: "{{ ec2.instances }}" with_items: ec2.instances
...@@ -16,32 +16,28 @@ ...@@ -16,32 +16,28 @@
- fail: msg="secure_dir not defined. This is a path to the secure ora config file." - fail: msg="secure_dir not defined. This is a path to the secure ora config file."
when: secure_dir is not defined when: secure_dir is not defined
- name: legacy_ora | create ora application config - name: create ora application config
copy: copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json
dest={{ora_app_dir}}/env.json dest={{ora_app_dir}}/env.json
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
register: env_state register: env_state
tags:
- deploy
- name: legacy_ora | create ora auth file - name: create ora auth file
copy: copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json
dest={{ora_app_dir}}/auth.json dest={{ora_app_dir}}/auth.json
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
register: auth_state register: auth_state
tags:
- deploy
# Restart ORA Services # Restart ORA Services
- name: legacy_ora | restart edx-ora - name: restart edx-ora
service: service:
name=edx-ora name=edx-ora
state=restarted state=restarted
when: env_state.changed or auth_state.changed when: env_state.changed or auth_state.changed
- name: legacy_ora | restart edx-ora-celery - name: restart edx-ora-celery
service: service:
name=edx-ora-celery name=edx-ora-celery
state=restarted state=restarted
......
--- ---
- name: local_dev | install useful system packages - name: install useful system packages
apt: apt:
pkg={{','.join(local_dev_pkgs)}} install_recommends=yes pkg={{','.join(local_dev_pkgs)}} install_recommends=yes
state=present update_cache=yes state=present update_cache=yes
- name: local_dev | set login shell for app accounts - name: set login shell for app accounts
user: name={{ item.user }} shell="/bin/bash" user: name={{ item.user }} shell="/bin/bash"
with_items: "{{ localdev_accounts }}" with_items: localdev_accounts
tags: deploy
# Ensure forum user has permissions to access .gem and .rbenv # Ensure forum user has permissions to access .gem and .rbenv
# This is a little twisty: the forum role sets the owner and group to www-data # This is a little twisty: the forum role sets the owner and group to www-data
# So we add the forum user to the www-data group and give group write permissions # So we add the forum user to the www-data group and give group write permissions
- name: local_dev | add forum user to www-data group - name: add forum user to www-data group
user: name={{ forum_user }} groups={{ common_web_group }} append=yes user: name={{ forum_user }} groups={{ common_web_group }} append=yes
tags: deploy
- name: local_dev | set forum rbenv and gem permissions - name: set forum rbenv and gem permissions
file: file:
path={{ item }} state=directory mode=770 path={{ item }} state=directory mode=770
with_items: with_items:
- "{{ forum_app_dir }}/.gem" - "{{ forum_app_dir }}/.gem"
- "{{ forum_app_dir }}/.rbenv" - "{{ forum_app_dir }}/.rbenv"
tags: deploy
# Create scripts to configure environment # Create scripts to configure environment
- name: local_dev | create login scripts - name: create login scripts
template: template:
src=app_bashrc.j2 dest={{ item.home }}/.bashrc src=app_bashrc.j2 dest={{ item.home }}/.bashrc
owner={{ item.user }} mode=755 owner={{ item.user }} mode=755
with_items: "{{ localdev_accounts }}" with_items: localdev_accounts
# Default to the correct git config # Default to the correct git config
# No more accidentally force pushing to master! :) # No more accidentally force pushing to master! :)
- name: local_dev | configure git - name: configure git
copy: copy:
src=gitconfig dest={{ item.home }}/.gitconfig src=gitconfig dest={{ item.home }}/.gitconfig
owner={{ item.user }} mode=700 owner={{ item.user }} mode=700
with_items: "{{ localdev_accounts }}" with_items: localdev_accounts
# Configure X11 for application users # Configure X11 for application users
- name: local_dev | preserve DISPLAY for sudo - name: preserve DISPLAY for sudo
copy: copy:
src=x11_display dest=/etc/sudoers.d/x11_display src=x11_display dest=/etc/sudoers.d/x11_display
owner=root group=root mode=0440 owner=root group=root mode=0440
- name: local_dev | login share X11 auth to app users - name: login share X11 auth to app users
template: template:
src=share_x11.j2 dest={{ localdev_home }}/share_x11 src=share_x11.j2 dest={{ localdev_home }}/share_x11
owner={{ localdev_user }} mode=0700 owner={{ localdev_user }} mode=0700
- name: local_dev | update bashrc with X11 share script - name: update bashrc with X11 share script
lineinfile: lineinfile:
dest={{ localdev_home }}/.bashrc dest={{ localdev_home }}/.bashrc
regexp=". {{ localdev_home }}/share_x11" regexp=". {{ localdev_home }}/share_x11"
......
--- ---
- name: restart mongo - name: restart mongo
service: name=mongodb state=restarted service: name=mongodb state=restarted
tags: deploy
--- ---
- name: mongo | install python pymongo for mongo_user ansible module - name: install python pymongo for mongo_user ansible module
pip: > pip: >
name=pymongo state=present name=pymongo state=present
version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" version=2.6.3 extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
- name: mongo | add the mongodb signing key - name: add the mongodb signing key
apt_key: > apt_key: >
id=7F0CEB10 id=7F0CEB10
url=http://docs.mongodb.org/10gen-gpg-key.asc url=http://docs.mongodb.org/10gen-gpg-key.asc
state=present state=present
- name: mongo | add the mongodb repo to the sources list - name: add the mongodb repo to the sources list
apt_repository: > apt_repository: >
repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
state=present state=present
- name: mongo | install mongo server and recommends - name: install mongo server and recommends
apt: > apt: >
pkg=mongodb-10gen={{ mongo_version }} pkg=mongodb-10gen={{ mongo_version }}
state=present install_recommends=yes state=present install_recommends=yes
update_cache=yes update_cache=yes
- name: mongo | create mongo dirs - name: create mongo dirs
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ mongo_user }}" owner="{{ mongo_user }}"
...@@ -32,14 +32,14 @@ ...@@ -32,14 +32,14 @@
- "{{ mongo_dbpath }}" - "{{ mongo_dbpath }}"
- "{{ mongo_log_dir }}" - "{{ mongo_log_dir }}"
- name: mongo | stop mongo service - name: stop mongo service
service: name=mongodb state=stopped service: name=mongodb state=stopped
- name: mongo | move mongodb to {{ mongo_data_dir }} - name: move mongodb to {{ mongo_data_dir }}
command: mv /var/lib/mongodb {{ mongo_data_dir}}/. creates={{ mongo_data_dir }}/mongodb command: mv /var/lib/mongodb {{ mongo_data_dir}}/. creates={{ mongo_data_dir }}/mongodb
- name: mongo | copy mongodb key file - name: copy mongodb key file
copy: > copy: >
src={{ secure_dir }}/files/mongo_key src={{ secure_dir }}/files/mongo_key
dest={{ mongo_key_file }} dest={{ mongo_key_file }}
...@@ -48,27 +48,27 @@ ...@@ -48,27 +48,27 @@
group=mongodb group=mongodb
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
- name: mongo | copy configuration template - name: copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes
notify: restart mongo notify: restart mongo
- name: mongo | start mongo service - name: start mongo service
service: name=mongodb state=started service: name=mongodb state=started
- name: mongo | wait for mongo server to start - name: wait for mongo server to start
wait_for: port=27017 delay=2 wait_for: port=27017 delay=2
- name: mongo | Create the file to initialize the mongod replica set - name: Create the file to initialize the mongod replica set
template: src=repset_init.j2 dest=/tmp/repset_init.js template: src=repset_init.j2 dest=/tmp/repset_init.js
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
- name: mongo | Initialize the replication set - name: Initialize the replication set
shell: /usr/bin/mongo /tmp/repset_init.js shell: /usr/bin/mongo /tmp/repset_init.js
when: MONGO_CLUSTERED when: MONGO_CLUSTERED
# Ignore errors doesn't work because the module throws an exception # Ignore errors doesn't work because the module throws an exception
# it doesn't catch. # it doesn't catch.
- name: mongo | create a mongodb user - name: create a mongodb user
mongodb_user: > mongodb_user: >
database={{ item.database }} database={{ item.database }}
name={{ item.user }} name={{ item.user }}
......
...@@ -3,16 +3,28 @@ ...@@ -3,16 +3,28 @@
# Set global htaccess for nginx # Set global htaccess for nginx
NGINX_HTPASSWD_USER: !!null NGINX_HTPASSWD_USER: !!null
NGINX_HTPASSWD_PASS: !!null NGINX_HTPASSWD_PASS: !!null
NGINX_ENABLE_SSL: False
# Set these to real paths on your
# filesystem, otherwise nginx will
# use a self-signed snake-oil cert
#
# To use a certificate chain, append the intermediate
# (bundle) certificates to your server certificate:
#
# cat www.example.com.crt bundle.crt > www.example.com.chained.crt
NGINX_SSL_CERTIFICATE: 'ssl-cert-snakeoil.pem'
NGINX_SSL_KEY: 'ssl-cert-snakeoil.key'
nginx_app_dir: "{{ COMMON_APP_DIR }}/nginx" nginx_app_dir: "{{ COMMON_APP_DIR }}/nginx"
nginx_data_dir: "{{ COMMON_DATA_DIR }}/nginx" nginx_data_dir: "{{ COMMON_DATA_DIR }}/nginx"
nginx_conf_dir: "{{ COMMON_APP_DIR }}/conf.d" nginx_conf_dir: "{{ nginx_app_dir }}/conf.d"
nginx_log_dir: "{{ COMMON_LOG_DIR }}/nginx" nginx_log_dir: "{{ COMMON_LOG_DIR }}/nginx"
nginx_sites_available_dir: "{{ nginx_app_dir }}/sites-available" nginx_sites_available_dir: "{{ nginx_app_dir }}/sites-available"
nginx_sites_enabled_dir: "{{ nginx_app_dir }}/sites-enabled" nginx_sites_enabled_dir: "{{ nginx_app_dir }}/sites-enabled"
nginx_user: root nginx_user: root
nginx_htpasswd_file: "{{ nginx_app_dir }}/nginx.htpasswd" nginx_htpasswd_file: "{{ nginx_app_dir }}/nginx.htpasswd"
nginx_default_sites: []
nginx_debian_pkgs: nginx_debian_pkgs:
- nginx - nginx
- python-passlib - python-passlib
......
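For reference, a minimal sketch of how a deployment could override the new SSL and default-site variables in its own vars file; the file paths and the 'lms' entry are illustrative values, not part of this commit:

# Illustrative extra-vars overriding the defaults above
NGINX_ENABLE_SSL: True
NGINX_SSL_CERTIFICATE: '/var/tmp/www.example.com.chained.crt'   # hypothetical path
NGINX_SSL_KEY: '/var/tmp/www.example.com.key'                   # hypothetical path
nginx_default_sites:
  - lms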
--- ---
- name: nginx | restart nginx - name: restart nginx
service: name=nginx state=restarted service: name=nginx state=restarted
tags: deploy
- name: nginx | reload nginx - name: reload nginx
service: name=nginx state=reloaded service: name=nginx state=reloaded
tags: deploy
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# - common/tasks/main.yml # - common/tasks/main.yml
--- ---
- name: nginx | create nginx app dirs - name: create nginx app dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -12,9 +12,9 @@ ...@@ -12,9 +12,9 @@
- "{{ nginx_app_dir }}" - "{{ nginx_app_dir }}"
- "{{ nginx_sites_available_dir }}" - "{{ nginx_sites_available_dir }}"
- "{{ nginx_sites_enabled_dir }}" - "{{ nginx_sites_enabled_dir }}"
notify: nginx | restart nginx notify: restart nginx
- name: nginx | create nginx data dirs - name: create nginx data dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -23,71 +23,86 @@ ...@@ -23,71 +23,86 @@
with_items: with_items:
- "{{ nginx_data_dir }}" - "{{ nginx_data_dir }}"
- "{{ nginx_log_dir }}" - "{{ nginx_log_dir }}"
notify: nginx | restart nginx notify: restart nginx
- name: nginx | Install nginx packages - name: Install nginx packages
apt: pkg={{','.join(nginx_debian_pkgs)}} state=present apt: pkg={{','.join(nginx_debian_pkgs)}} state=present
notify: nginx | restart nginx notify: restart nginx
- name: nginx | Server configuration file - name: Server configuration file
template: > template: >
src=nginx.conf.j2 dest=/etc/nginx/nginx.conf src=nginx.conf.j2 dest=/etc/nginx/nginx.conf
owner=root group={{ common_web_user }} mode=0644 owner=root group={{ common_web_user }} mode=0644
notify: nginx | reload nginx notify: reload nginx
- name: nginx | Creating common nginx configuration - name: Creating common nginx configuration
template: > template: >
src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release
owner=root group=root mode=0600 owner=root group=root mode=0600
notify: nginx | reload nginx notify: reload nginx
- name: nginx | Creating link for common nginx configuration - name: Creating link for common nginx configuration
file: > file: >
src={{ nginx_sites_available_dir }}/edx-release src={{ nginx_sites_available_dir }}/edx-release
dest={{ nginx_sites_enabled_dir }}/edx-release dest={{ nginx_sites_enabled_dir }}/edx-release
state=link owner=root group=root state=link owner=root group=root
notify: nginx | reload nginx notify: reload nginx
- name: nginx | Copying nginx configs for {{ nginx_sites }} - name: Copying nginx configs for {{ nginx_sites }}
template: > template: >
src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }} src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640 owner=root group={{ common_web_user }} mode=0640
notify: nginx | reload nginx notify: reload nginx
with_items: nginx_sites with_items: nginx_sites
- name: nginx | Creating nginx config links for {{ nginx_sites }} - name: Creating nginx config links for {{ nginx_sites }}
file: > file: >
src={{ nginx_sites_available_dir }}/{{ item }} src={{ nginx_sites_available_dir }}/{{ item }}
dest={{ nginx_sites_enabled_dir }}/{{ item }} dest={{ nginx_sites_enabled_dir }}/{{ item }}
state=link owner=root group=root state=link owner=root group=root
notify: nginx | reload nginx notify: reload nginx
with_items: nginx_sites with_items: nginx_sites
- name: nginx | Write out htpasswd file - name: Write out htpasswd file
htpasswd: > htpasswd: >
name={{ NGINX_HTPASSWD_USER }} name={{ NGINX_HTPASSWD_USER }}
password={{ NGINX_HTPASSWD_PASS }} password={{ NGINX_HTPASSWD_PASS }}
path={{ nginx_htpasswd_file }} path={{ nginx_htpasswd_file }}
when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS
- name: nginx | Create nginx log file location (just in case) - name: Create nginx log file location (just in case)
file: > file: >
path={{ nginx_log_dir}} state=directory path={{ nginx_log_dir}} state=directory
owner={{ common_web_user }} group={{ common_web_user }} owner={{ common_web_user }} group={{ common_web_user }}
- name: copy ssl cert
copy: >
src={{ NGINX_SSL_CERTIFICATE }}
dest=/etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }}
owner=root group=root mode=0644
when: NGINX_ENABLE_SSL and NGINX_SSL_CERTIFICATE != 'ssl-cert-snakeoil.pem'
- name: copy ssl key
copy: >
src={{ NGINX_SSL_KEY }}
dest=/etc/ssl/private/{{ NGINX_SSL_KEY|basename }}
owner=root group=root mode=0640
when: NGINX_ENABLE_SSL and NGINX_SSL_KEY != 'ssl-cert-snakeoil.key'
# removing default link # removing default link
- name: nginx | Removing default nginx config and restart (enabled) - name: Removing default nginx config and restart (enabled)
file: path={{ nginx_sites_enabled_dir }}/default state=absent file: path={{ nginx_sites_enabled_dir }}/default state=absent
notify: nginx | reload nginx notify: reload nginx
# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good # Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
- name: nginx | Set up nginx access log rotation - name: Set up nginx access log rotation
template: > template: >
dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2 dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2
owner=root group=root mode=644 owner=root group=root mode=644
- name: nginx | Set up nginx error log rotation - name: Set up nginx error log rotation
template: > template: >
dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2 dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2
owner=root group=root mode=644 owner=root group=root mode=644
...@@ -95,5 +110,5 @@ ...@@ -95,5 +110,5 @@
# If tasks that notify restart nginx don't change the state of the remote system # If tasks that notify restart nginx don't change the state of the remote system
# their corresponding notifications don't get run. If nginx has been stopped for # their corresponding notifications don't get run. If nginx has been stopped for
# any reason, this will ensure that it is started up again. # any reason, this will ensure that it is started up again.
- name: nginx | make sure nginx has started - name: make sure nginx has started
service: name=nginx state=started service: name=nginx state=started
{%- if "cms" in nginx_default_sites -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
upstream cms-backend { upstream cms-backend {
{% for host in nginx_cms_gunicorn_hosts %} {% for host in nginx_cms_gunicorn_hosts %}
server {{ host }}:{{ edxapp_cms_gunicorn_port }} fail_timeout=0; server {{ host }}:{{ edxapp_cms_gunicorn_port }} fail_timeout=0;
...@@ -7,7 +13,18 @@ upstream cms-backend { ...@@ -7,7 +13,18 @@ upstream cms-backend {
server { server {
# CMS configuration file for nginx, templated by ansible # CMS configuration file for nginx, templated by ansible
listen {{EDXAPP_CMS_NGINX_PORT}}; {% if NGINX_ENABLE_SSL %}
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
listen {{EDXAPP_CMS_SSL_NGINX_PORT}} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% else %}
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
{% endif %}
server_name studio.*; server_name studio.*;
...@@ -21,6 +38,7 @@ server { ...@@ -21,6 +38,7 @@ server {
rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
location @proxy_to_cms_app { location @proxy_to_cms_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port; proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
...@@ -56,6 +74,14 @@ server { ...@@ -56,6 +74,14 @@ server {
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) { location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403; return 403;
} }
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time # Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" { location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max; expires max;
......
{%- if "forum" in nginx_default_sites -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
upstream forum_app_server {
server localhost:{{ forum_unicorn_port }} fail_timeout=0;
}
server {
server_name forum.*;
listen {{ FORUM_NGINX_PORT }} {{default_site}};
client_max_body_size 1M;
keepalive_timeout 5;
location / {
try_files $uri @proxy_to_app;
}
location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://forum_app_server;
}
}
\ No newline at end of file
{%- if "lms" in nginx_default_sites -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
upstream lms-backend { upstream lms-backend {
{% for host in nginx_lms_gunicorn_hosts %} {% for host in nginx_lms_gunicorn_hosts %}
server {{ host }}:{{ edxapp_lms_gunicorn_port }} fail_timeout=0; server {{ host }}:{{ edxapp_lms_gunicorn_port }} fail_timeout=0;
...@@ -7,7 +13,17 @@ upstream lms-backend { ...@@ -7,7 +13,17 @@ upstream lms-backend {
server { server {
# LMS configuration file for nginx, templated by ansible # LMS configuration file for nginx, templated by ansible
listen {{EDXAPP_LMS_NGINX_PORT}} default; {% if NGINX_ENABLE_SSL %}
listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}};
listen {{EDXAPP_LMS_SSL_NGINX_PORT}} {{default_site}} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% else %}
listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}};
{% endif %}
access_log {{ nginx_log_dir }}/access.log; access_log {{ nginx_log_dir }}/access.log;
error_log {{ nginx_log_dir }}/error.log error; error_log {{ nginx_log_dir }}/error.log error;
...@@ -19,7 +35,6 @@ server { ...@@ -19,7 +35,6 @@ server {
rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last;
location @proxy_to_lms_app { location @proxy_to_lms_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port; proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
...@@ -55,6 +70,14 @@ server { ...@@ -55,6 +70,14 @@ server {
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) { location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403; return 403;
} }
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time # Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" { location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max; expires max;
......
...@@ -37,8 +37,8 @@ http { ...@@ -37,8 +37,8 @@ http {
'"$request" $status $body_bytes_sent $request_time ' '"$request" $status $body_bytes_sent $request_time '
'"$http_referer" "$http_user_agent"'; '"$http_referer" "$http_user_agent"';
log_format ssl_combined '$remote_addr - $ssl_client_s_dn [$time_local] ' log_format ssl_combined '$remote_addr - $ssl_client_s_dn - "$upstream_addr" [$time_local] '
'"$request" $status $body_bytes_sent ' '"$request" $status $body_bytes_sent $request_time '
'"$http_referer" "$http_user_agent"'; '"$http_referer" "$http_user_agent"';
access_log {{ nginx_log_dir }}/access.log p_combined; access_log {{ nginx_log_dir }}/access.log p_combined;
......
--- ---
NOTIFIER_USER: "notifier" NOTIFIER_USER: "notifier"
NOTIFIER_WEB_USER: "www-user" NOTIFIER_WEB_USER: "www-data"
NOTIFIER_HOME: "{{ COMMON_APP_DIR }}/notifier" NOTIFIER_HOME: "{{ COMMON_APP_DIR }}/notifier"
NOTIFIER_VENV_DIR: "{{ NOTIFIER_HOME }}/virtualenvs/notifier" NOTIFIER_VENV_DIR: "{{ NOTIFIER_HOME }}/virtualenvs/notifier"
NOTIFIER_DB_DIR: "{{ NOTIFIER_HOME }}/db" NOTIFIER_DB_DIR: "{{ NOTIFIER_HOME }}/db"
...@@ -44,7 +44,7 @@ NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE" ...@@ -44,7 +44,7 @@ NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: !!null NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: !!null
NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: !!null NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: !!null
NOTIFIER_CELERY_BROKER_URL: "django://" NOTIFIER_CELERY_BROKER_URL: "django://"
NOTIFIER_LOGO_IMAGE_URL: "{{ NOTIFIER_LMS_URL_BASE }}/static/images/header-logo.png"
NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor" NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor"
NOTIFER_REQUESTS_CA_BUNDLE: "/etc/ssl/certs/ca-certificates.crt" NOTIFER_REQUESTS_CA_BUNDLE: "/etc/ssl/certs/ca-certificates.crt"
...@@ -97,3 +97,4 @@ notifier_env_vars: ...@@ -97,3 +97,4 @@ notifier_env_vars:
US_HTTP_AUTH_USER: $NOTIFIER_USER_SERVICE_HTTP_AUTH_USER US_HTTP_AUTH_USER: $NOTIFIER_USER_SERVICE_HTTP_AUTH_USER
US_HTTP_AUTH_PASS: $NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS US_HTTP_AUTH_PASS: $NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS
FORUM_DIGEST_TASK_INTERVAL: $NOTIFIER_DIGEST_TASK_INTERVAL FORUM_DIGEST_TASK_INTERVAL: $NOTIFIER_DIGEST_TASK_INTERVAL
LOGO_IMAGE_URL: $NOTIFIER_LOGO_IMAGE_URL
--- ---
- name: notifier | restart notifier-scheduler - name: restart notifier-scheduler
supervisorctl_local: > supervisorctl_local: >
name=notifier-scheduler name=notifier-scheduler
state=restarted state=restarted
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
tags: deploy
- name: notifier | restart notifier-celery-workers - name: restart notifier-celery-workers
supervisorctl_local: > supervisorctl_local: >
name=notifier-celery-workers name=notifier-celery-workers
state=restarted state=restarted
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
tags: deploy
--- ---
- name: notifier | checkout code - name: checkout code
git: git:
dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }} dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }}
version={{ NOTIFIER_VERSION }} version={{ NOTIFIER_VERSION }}
sudo: true sudo: true
sudo_user: "{{ NOTIFIER_USER }}" sudo_user: "{{ NOTIFIER_USER }}"
notify: notify:
- notifier | restart notifier-scheduler - restart notifier-scheduler
- notifier | restart notifier-celery-workers - restart notifier-celery-workers
tags:
- deploy
- name: notifier | source repo group perms - name: source repo group perms
file: file:
path={{ NOTIFIER_SOURCE_REPO }} mode=2775 state=directory path={{ NOTIFIER_SOURCE_REPO }} mode=2775 state=directory
tags:
- deploy
- name: notifier | install application requirements - name: install application requirements
pip: pip:
requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}" requirements="{{ NOTIFIER_REQUIREMENTS_FILE }}"
virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present virtualenv="{{ NOTIFIER_VENV_DIR }}" state=present
sudo: true sudo: true
sudo_user: "{{ NOTIFIER_USER }}" sudo_user: "{{ NOTIFIER_USER }}"
notify: notify:
- notifier | restart notifier-scheduler - restart notifier-scheduler
- notifier | restart notifier-celery-workers - restart notifier-celery-workers
# Syncdb for whatever reason always creates the file owned by www-data:www-data, and then
# complains it can't write because it's running as notifier. So this is to touch the file into
# place with proper perms first.
- name: fix permissions on notifier db file
file: >
path={{ NOTIFIER_DB_DIR }}/notifier.db state=touch owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
mode=0664
sudo: true
notify:
- restart notifier-scheduler
- restart notifier-celery-workers
tags: tags:
- deploy - deploy
- name: notifier | syncdb - name: syncdb
shell: > shell: >
cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb cd {{ NOTIFIER_CODE_DIR }} && {{ NOTIFIER_VENV_DIR }}/bin/python manage.py syncdb
sudo: true sudo: true
sudo_user: "{{ NOTIFIER_USER }}" sudo_user: "{{ NOTIFIER_USER }}"
environment: notifier_env_vars
notify: notify:
- notifier | restart notifier-scheduler - restart notifier-scheduler
- notifier | restart notifier-celery-workers - restart notifier-celery-workers
tags:
- deploy
...@@ -17,86 +17,86 @@ ...@@ -17,86 +17,86 @@
# - common # - common
# - notifier # - notifier
# #
- name: notifier | install notifier specific system packages - name: install notifier specific system packages
apt: pkg={{','.join(notifier_debian_pkgs)}} state=present apt: pkg={{','.join(notifier_debian_pkgs)}} state=present
- name: notifier | check if incommon ca is installed - name: check if incommon ca is installed
command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt command: test -e /usr/share/ca-certificates/incommon/InCommonServerCA.crt
register: incommon_present register: incommon_present
ignore_errors: yes ignore_errors: yes
- name: common | create incommon ca directory - name: create incommon ca directory
file: file:
path="/usr/share/ca-certificates/incommon" mode=2775 state=directory path="/usr/share/ca-certificates/incommon" mode=2775 state=directory
when: incommon_present|failed when: incommon_present|failed
- name: common | retrieve incommon server CA - name: retrieve incommon server CA
shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt shell: curl https://www.incommon.org/cert/repository/InCommonServerCA.txt -o /usr/share/ca-certificates/incommon/InCommonServerCA.crt
when: incommon_present|failed when: incommon_present|failed
- name: common | add InCommon ca cert - name: add InCommon ca cert
lineinfile: lineinfile:
dest=/etc/ca-certificates.conf dest=/etc/ca-certificates.conf
regexp='incommon/InCommonServerCA.crt' regexp='incommon/InCommonServerCA.crt'
line='incommon/InCommonServerCA.crt' line='incommon/InCommonServerCA.crt'
- name: common | update ca certs globally - name: update ca certs globally
shell: update-ca-certificates shell: update-ca-certificates
- name: notifier | create notifier user {{ NOTIFIER_USER }} - name: create notifier user {{ NOTIFIER_USER }}
user: user:
name={{ NOTIFIER_USER }} state=present shell=/bin/bash name={{ NOTIFIER_USER }} state=present shell=/bin/bash
home={{ NOTIFIER_HOME }} createhome=yes home={{ NOTIFIER_HOME }} createhome=yes
- name: notifier | setup the notifier env - name: setup the notifier env
template: template:
src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env
owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}" owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}"
- name: notifier | drop a bash_profile - name: drop a bash_profile
copy: > copy: >
src=../../common/files/bash_profile src=../../common/files/bash_profile
dest={{ NOTIFIER_HOME }}/.bash_profile dest={{ NOTIFIER_HOME }}/.bash_profile
owner={{ NOTIFIER_USER }} owner={{ NOTIFIER_USER }}
group={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: notifier | ensure .bashrc exists - name: ensure .bashrc exists
shell: touch {{ NOTIFIER_HOME }}/.bashrc shell: touch {{ NOTIFIER_HOME }}/.bashrc
sudo: true sudo: true
sudo_user: "{{ NOTIFIER_USER }}" sudo_user: "{{ NOTIFIER_USER }}"
- name: notifier | add source of notifier_env to .bashrc - name: add source of notifier_env to .bashrc
lineinfile: lineinfile:
dest={{ NOTIFIER_HOME }}/.bashrc dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ NOTIFIER_HOME }}/notifier_env' regexp='. {{ NOTIFIER_HOME }}/notifier_env'
line='. {{ NOTIFIER_HOME }}/notifier_env' line='. {{ NOTIFIER_HOME }}/notifier_env'
- name: notifier | add source venv to .bashrc - name: add source venv to .bashrc
lineinfile: lineinfile:
dest={{ NOTIFIER_HOME }}/.bashrc dest={{ NOTIFIER_HOME }}/.bashrc
regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate' regexp='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
line='. {{ NOTIFIER_VENV_DIR }}/bin/activate' line='. {{ NOTIFIER_VENV_DIR }}/bin/activate'
- name: notifier | create notifier DB directory - name: create notifier DB directory
file: file:
path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory path="{{ NOTIFIER_DB_DIR }}" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_WEB_USER }}
- name: notifier | create notifier/bin directory - name: create notifier/bin directory
file: file:
path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: notifier | supervisord config for celery workers - name: supervisord config for celery workers
template: > template: >
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2 src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf" dest="{{ supervisor_cfg_dir }}/notifier-celery-workers.conf"
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
notify: notifier | restart notifier-celery-workers notify: restart notifier-celery-workers
- name: notifier | supervisord config for scheduler - name: supervisord config for scheduler
template: > template: >
src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2 src=edx/app/supervisor/conf.d/notifier-scheduler.conf.j2
dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf" dest="{{ supervisor_cfg_dir }}/notifier-scheduler.conf"
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
notify: notifier | restart notifier-scheduler notify: restart notifier-scheduler
- include: deploy.yml - include: deploy.yml tags=deploy
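The tags: deploy lines removed from individual tasks above are replaced by tagging the include itself: in Ansible, a tag on an include propagates to every task in the included file, so the whole deploy path stays runnable with --tags deploy without repeating the tag on each task. A minimal sketch of the pattern, with a hypothetical role name:

# roles/example/tasks/main.yml (hypothetical role, pattern only)
- name: install system packages
  apt: pkg=example-pkg state=present
# every task inside deploy.yml inherits the deploy tag from the include
- include: deploy.yml tags=deploy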
...@@ -5,6 +5,7 @@ ORA_NGINX_PORT: 18060 ...@@ -5,6 +5,7 @@ ORA_NGINX_PORT: 18060
ora_app_dir: "{{ COMMON_APP_DIR }}/ora" ora_app_dir: "{{ COMMON_APP_DIR }}/ora"
ora_code_dir: "{{ ora_app_dir }}/ora" ora_code_dir: "{{ ora_app_dir }}/ora"
ora_data_dir: "{{ COMMON_DATA_DIR }}/ora" ora_data_dir: "{{ COMMON_DATA_DIR }}/ora"
ora_data_course_dir: "{{ ora_data_dir }}/course"
ora_venvs_dir: "{{ ora_app_dir }}/venvs" ora_venvs_dir: "{{ ora_app_dir }}/venvs"
ora_venv_dir: "{{ ora_venvs_dir }}/ora" ora_venv_dir: "{{ ora_venvs_dir }}/ora"
ora_venv_bin: "{{ ora_venv_dir }}/bin" ora_venv_bin: "{{ ora_venv_dir }}/bin"
...@@ -91,6 +92,7 @@ ora_gunicorn_host: 127.0.0.1 ...@@ -91,6 +92,7 @@ ora_gunicorn_host: 127.0.0.1
ora_env_config: ora_env_config:
LOGGING_ENV: $ORA_LOGGING_ENV LOGGING_ENV: $ORA_LOGGING_ENV
LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/xqueue" LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/xqueue"
COURSE_DATA_PATH: "{{ ora_data_course_dir }}"
REQUESTS_TIMEOUT: $ORA_REQUESTS_TIMEOUT REQUESTS_TIMEOUT: $ORA_REQUESTS_TIMEOUT
QUEUES_TO_PULL_FROM: $ORA_QUEUES_TO_PULL_FROM QUEUES_TO_PULL_FROM: $ORA_QUEUES_TO_PULL_FROM
TIME_BETWEEN_XQUEUE_PULLS: $ORA_TIME_BETWEEN_XQUEUE_PULLS TIME_BETWEEN_XQUEUE_PULLS: $ORA_TIME_BETWEEN_XQUEUE_PULLS
......
--- ---
- name: ora | restart ora - name: restart ora
supervisorctl_local: > supervisorctl_local: >
name=ora name=ora
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=restarted state=restarted
when: not devstack when: ora_installed is defined and not devstack
tags: deploy
- name: ora | restart ora_celery - name: restart ora_celery
supervisorctl_local: > supervisorctl_local: >
name=ora_celery name=ora_celery
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=restarted state=restarted
when: not devstack when: ora_installed is defined and not devstack
tags: deploy
- name: ora | create supervisor scripts - ora, ora_celery - name: create supervisor scripts - ora, ora_celery
template: > template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: ['ora', 'ora_celery'] with_items: ['ora', 'ora_celery']
when: not devstack when: not devstack
tags:
- deploy
- include: ease.yml - include: ease.yml
- name: ora | create ora application config - name: create ora application config
template: src=ora.env.json.j2 dest={{ora_app_dir}}/ora.env.json template: src=ora.env.json.j2 dest={{ora_app_dir}}/ora.env.json
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
tags:
- deploy
- name: ora | create ora auth file - name: create ora auth file
template: src=ora.auth.json.j2 dest={{ora_app_dir}}/ora.auth.json template: src=ora.auth.json.j2 dest={{ora_app_dir}}/ora.auth.json
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
tags:
- deploy
- name: ora | setup the ora env - name: setup the ora env
notify: notify:
- "ora | restart ora" - "restart ora"
- "ora | restart ora_celery" - "restart ora_celery"
template: > template: >
src=ora_env.j2 dest={{ ora_app_dir }}/ora_env src=ora_env.j2 dest={{ ora_app_dir }}/ora_env
owner={{ ora_user }} group={{ common_web_user }} owner={{ ora_user }} group={{ common_web_user }}
mode=0644 mode=0644
tags:
- deploy
# Do A Checkout # Do A Checkout
- name: ora | git checkout ora repo into {{ ora_app_dir }} - name: git checkout ora repo into {{ ora_app_dir }}
git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }} git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }}
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
# TODO: Check git.py _run_if_changed() to see if the logic there to skip running certain # TODO: Check git.py _run_if_changed() to see if the logic there to skip running certain
# portions of the deploy needs to be incorporated here. # portions of the deploy needs to be incorporated here.
# Install the python pre requirements into {{ ora_venv_dir }} # Install the python pre requirements into {{ ora_venv_dir }}
- name: ora | install python pre-requirements - name: install python pre-requirements
pip: requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present pip: requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
# Install the python post requirements into {{ ora_venv_dir }} # Install the python post requirements into {{ ora_venv_dir }}
- name: ora | install python post-requirements - name: install python post-requirements
pip: requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present pip: requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
#Needed if using redis to prevent memory issues #Needed if using redis to prevent memory issues
- name: ora | change memory commit settings -- needed for redis - name: change memory commit settings -- needed for redis
command: sysctl vm.overcommit_memory=1 command: sysctl vm.overcommit_memory=1
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
- name: ora | syncdb and migrate - name: syncdb and migrate
shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ora_code_dir}} shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
when: migrate_db is defined and migrate_db|lower == "yes" when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
- name: ora | create users - name: create users
shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ora_code_dir}} shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
...@@ -103,27 +83,35 @@ ...@@ -103,27 +83,35 @@
# the services if any of the configurations # the services if any of the configurations
# have changed. # have changed.
# #
- name: ora | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
when: not devstack when: not devstack
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
tags: deploy
- name: ora | ensure ora is started - name: ensure ora is started
supervisorctl_local: > supervisorctl_local: >
name=ora name=ora
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=started state=started
when: not devstack when: not devstack
tags: deploy
- name: ora | ensure ora_celery is started - name: ensure ora_celery is started
supervisorctl_local: > supervisorctl_local: >
name=ora_celery name=ora_celery
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=started state=started
when: not devstack when: not devstack
tags: deploy
- name: create a symlink for venv python
file: >
src="{{ ora_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.ora
state=link
with_items:
- python
- pip
- set_fact: ora_installed=true
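Setting ora_installed as the final step of deploy.yml gates the restart handlers above: on the very first run, before ora has ever been deployed, the handlers are skipped because ora_installed is not yet defined, while on later runs they fire normally. A condensed sketch of the handler/guard pairing, restating the tasks above:

# handlers (condensed from above)
- name: restart ora
  supervisorctl_local: >
    name=ora state=restarted
    supervisorctl_path={{ supervisor_ctl }} config={{ supervisor_cfg }}
  when: ora_installed is defined and not devstack
# last task of deploy.yml marks the role as fully deployed
- set_fact: ora_installed=true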
# Do A Checkout # Do A Checkout
- name: ora | git checkout ease repo into its base dir - name: git checkout ease repo into its base dir
git: dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}} git: dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}}
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
- name: ora | install ease system packages - name: install ease system packages
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
with_items: ora_ease_debian_pkgs with_items: ora_ease_debian_pkgs
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
# Install the python pre requirements into {{ ora_ease_venv_dir }} # Install the python pre requirements into {{ ora_ease_venv_dir }}
- name: ora | install ease python pre-requirements - name: install ease python pre-requirements
pip: requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present pip: requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
# Install the python post requirements into {{ ora_ease_venv_dir }} # Install the python post requirements into {{ ora_ease_venv_dir }}
- name: ora | install ease python post-requirements - name: install ease python post-requirements
pip: requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present pip: requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
- name: ora | install ease python package - name: install ease python package
shell: > shell: >
. {{ ora_ease_venv_dir }}/bin/activate; cd {{ ora_ease_code_dir }}; python setup.py install . {{ ora_ease_venv_dir }}/bin/activate; cd {{ ora_ease_code_dir }}; python setup.py install
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
- name: ora | download and install nltk - name: download and install nltk
shell: | shell: |
set -e set -e
curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }} curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }}
...@@ -59,7 +49,5 @@ ...@@ -59,7 +49,5 @@
chdir={{ ora_data_dir }} chdir={{ ora_data_dir }}
sudo_user: "{{ common_web_user }}" sudo_user: "{{ common_web_user }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
tags:
- deploy
...@@ -3,56 +3,51 @@ ...@@ -3,56 +3,51 @@
# - common/tasks/main.yml # - common/tasks/main.yml
--- ---
- name: ora | create application user - name: create application user
user: > user: >
name="{{ ora_user }}" home="{{ ora_app_dir }}" name="{{ ora_user }}" home="{{ ora_app_dir }}"
createhome=no shell=/bin/false createhome=no shell=/bin/false
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
- name: ora | create ora app dir - name: create ora app dir
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ ora_user }}" group="{{ common_web_group }}" owner="{{ ora_user }}" group="{{ common_web_group }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: with_items:
- "{{ ora_venvs_dir }}" - "{{ ora_venvs_dir }}"
- "{{ ora_app_dir }}" - "{{ ora_app_dir }}"
- name: ora | create ora data dir, owned by {{ common_web_user }} - name: create ora data dir, owned by {{ common_web_user }}
file: > file: >
path="{{ item }}" state=directory path="{{ item }}" state=directory
owner="{{ common_web_user }}" group="{{ common_web_group }}" owner="{{ common_web_user }}" group="{{ common_web_group }}"
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: with_items:
- "{{ ora_data_dir }}" - "{{ ora_data_dir }}"
- "{{ ora_data_course_dir }}"
- "{{ ora_app_dir }}/ml_models" - "{{ ora_app_dir }}/ml_models"
- name: ora | install debian packages that ora needs - name: install debian packages that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: ora_debian_pkgs with_items: ora_debian_pkgs
- name: ora | install debian packages for ease that ora needs - name: install debian packages for ease that ora needs
apt: pkg={{item}} state=present apt: pkg={{item}} state=present
notify: notify:
- ora | restart ora - restart ora
- ora | restart ora_celery - restart ora_celery
with_items: ora_ease_debian_pkgs with_items: ora_ease_debian_pkgs
- include: deploy.yml - include: deploy.yml tags=deploy
- name: ora | create a symlink for venv python
file: >
src="{{ ora_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.ora state=link
with_items:
- python
- pip
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
# - common # - common
# - oraclejdk # - oraclejdk
- name: oraclejdk | check for Oracle Java version {{ oraclejdk_base }} - name: check for Oracle Java version {{ oraclejdk_base }}
command: test -d /usr/lib/jvm/{{ oraclejdk_base }} command: test -d /usr/lib/jvm/{{ oraclejdk_base }}
ignore_errors: true ignore_errors: true
register: oraclejdk_present register: oraclejdk_present
- name: oraclejdk | download Oracle Java - name: download Oracle Java
shell: > shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }} curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -O -L {{ oraclejdk_url }}
executable=/bin/bash executable=/bin/bash
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
creates=/var/tmp/{{ oraclejdk_file }} creates=/var/tmp/{{ oraclejdk_file }}
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | install Oracle Java - name: install Oracle Java
shell: > shell: >
mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }} mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
creates=/usr/lib/jvm/{{ oraclejdk_base }} creates=/usr/lib/jvm/{{ oraclejdk_base }}
...@@ -34,10 +34,10 @@ ...@@ -34,10 +34,10 @@
sudo: true sudo: true
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | create symlink expected by elasticsearch - name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed when: oraclejdk_present|failed
- name: oraclejdk | add JAVA_HOME for Oracle Java - name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755 template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed when: oraclejdk_present|failed
@@ -13,6 +13,9 @@ RABBIT_USERS:
RABBITMQ_CLUSTERED: !!null RABBITMQ_CLUSTERED: !!null
RABBITMQ_VHOSTS:
- '/'
# Internal role variables below this line # Internal role variables below this line
# option to force deletion of the mnesia dir # option to force deletion of the mnesia dir
...
@@ -3,92 +3,103 @@
# There is a bug with initializing multiple nodes in the HA cluster at once # There is a bug with initializing multiple nodes in the HA cluster at once
# http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html # http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html
- name: rabbitmq | trust rabbit repository - name: trust rabbit repository
apt_key: url={{rabbitmq_apt_key}} state=present apt_key: url={{rabbitmq_apt_key}} state=present
- name: rabbitmq | install python-software-properties if debian - name: install python-software-properties if debian
apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present
- name: rabbitmq | add rabbit repository - name: add rabbit repository
apt_repository: repo="{{rabbitmq_repository}}" state=present apt_repository: repo="{{rabbitmq_repository}}" state=present
- name: rabbitmq | install rabbitmq - name: install rabbitmq
apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes
- name: rabbitmq | stop rabbit cluster - name: stop rabbit cluster
service: name=rabbitmq-server state=stopped service: name=rabbitmq-server state=stopped
# in case there are lingering processes, ignore errors # in case there are lingering processes, ignore errors
# silently # silently
- name: rabbitmq | send sigterm to any running rabbitmq processes - name: send sigterm to any running rabbitmq processes
shell: pkill -u rabbitmq || true shell: pkill -u rabbitmq || true
# Defaulting to /var/lib/rabbitmq # Defaulting to /var/lib/rabbitmq
- name: rabbitmq | create cookie directory - name: create cookie directory
file: > file: >
path={{rabbitmq_cookie_dir}} path={{rabbitmq_cookie_dir}}
owner=rabbitmq group=rabbitmq mode=0755 state=directory owner=rabbitmq group=rabbitmq mode=0755 state=directory
- name: rabbitmq | add rabbitmq erlang cookie - name: add rabbitmq erlang cookie
template: > template: >
src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}} src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}}
owner=rabbitmq group=rabbitmq mode=0400 owner=rabbitmq group=rabbitmq mode=0400
register: erlang_cookie register: erlang_cookie
# Defaulting to /etc/rabbitmq # Defaulting to /etc/rabbitmq
- name: rabbitmq | create rabbitmq config directory - name: create rabbitmq config directory
file: > file: >
path={{rabbitmq_config_dir}} path={{rabbitmq_config_dir}}
owner=root group=root mode=0755 state=directory owner=root group=root mode=0755 state=directory
- name: rabbitmq | add rabbitmq environment configuration - name: add rabbitmq environment configuration
template: > template: >
src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf
owner=root group=root mode=0644 owner=root group=root mode=0644
- name: rabbitmq | add rabbitmq cluster configuration - name: add rabbitmq cluster configuration
template: > template: >
src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config
owner=root group=root mode=0644 owner=root group=root mode=0644
register: cluster_configuration register: cluster_configuration
- name: rabbitmq | install plugins - name: install plugins
rabbitmq_plugin: rabbitmq_plugin:
names={{",".join(rabbitmq_plugins)}} state=enabled names={{",".join(rabbitmq_plugins)}} state=enabled
# When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'. # When rabbitmq starts up it creates a folder of metadata at '/var/lib/rabbitmq/mnesia'.
# This folder should be deleted before clustering is setup because it retains data # This folder should be deleted before clustering is setup because it retains data
# that can conflict with the clustering information. # that can conflict with the clustering information.
- name: rabbitmq | remove mnesia configuration - name: remove mnesia configuration
file: path={{rabbitmq_mnesia_folder}} state=absent file: path={{rabbitmq_mnesia_folder}} state=absent
when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh
- name: rabbitmq | start rabbit nodes - name: start rabbit nodes
service: name=rabbitmq-server state=restarted service: name=rabbitmq-server state=restarted
- name: rabbitmq | wait for rabbit to start - name: wait for rabbit to start
wait_for: port={{ rabbitmq_management_port }} delay=2 wait_for: port={{ rabbitmq_management_port }} delay=2
- name: rabbitmq | remove guest user - name: remove guest user
rabbitmq_user: user="guest" state=absent rabbitmq_user: user="guest" state=absent
-- name: rabbitmq | add admin users
+- name: add vhosts
+  rabbitmq_vhost: name={{ item }} state=present
+  with_items: RABBITMQ_VHOSTS
+- name: add admin users
   rabbitmq_user: >
-    user='{{item.name}}' password='{{item.password}}'
+    user='{{item[0].name}}' password='{{item[0].password}}'
     read_priv='.*' write_priv='.*'
     configure_priv='.*' tags="administrator" state=present
-  with_items: rabbitmq_auth_config.admins
+    vhost={{ item[1] }}
+  with_nested:
+    - ${rabbitmq_auth_config.admins}
+    - RABBITMQ_VHOSTS
   when: "'admins' in rabbitmq_auth_config"
- name: make queues mirrored
shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'"
when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1
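(In the policy above, the '^(?!amq\.).*' pattern matches every queue name except RabbitMQ's own amq.* queues, so only application queues are mirrored.)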
# #
# Depends upon the management plugin # Depends upon the management plugin
# #
- name: rabbitmq | install admin tools - name: install admin tools
get_url: > get_url: >
url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin url=http://localhost:{{ rabbitmq_management_port }}/cli/rabbitmqadmin
dest=/usr/local/bin/rabbitmqadmin dest=/usr/local/bin/rabbitmqadmin
- name: rabbitmq | ensure rabbitmqadmin attributes - name: ensure rabbitmqadmin attributes
file: > file: >
path=/usr/local/bin/rabbitmqadmin owner=root path=/usr/local/bin/rabbitmqadmin owner=root
group=root mode=0655 group=root mode=0655
@@ -34,95 +34,95 @@
- fail: rbenv_ruby_version required for role - fail: rbenv_ruby_version required for role
when: rbenv_ruby_version is not defined when: rbenv_ruby_version is not defined
- name: rbenv | create rbenv user {{ rbenv_user }} - name: create rbenv user {{ rbenv_user }}
user: > user: >
name={{ rbenv_user }} home={{ rbenv_dir }} name={{ rbenv_user }} home={{ rbenv_dir }}
shell=/bin/false createhome=no shell=/bin/false createhome=no
when: rbenv_user != common_web_user when: rbenv_user != common_web_user
- name: rbenv | create rbenv dir if it does not exist - name: create rbenv dir if it does not exist
file: > file: >
path="{{ rbenv_dir }}" owner="{{ rbenv_user }}" path="{{ rbenv_dir }}" owner="{{ rbenv_user }}"
state=directory state=directory
- name: rbenv | install build depends - name: install build depends
apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no apt: pkg={{ ",".join(rbenv_debian_pkgs) }} state=present install_recommends=no
with_items: rbenv_debian_pkgs with_items: rbenv_debian_pkgs
- name: rbenv | update rbenv repo - name: update rbenv repo
git: > git: >
repo=https://github.com/sstephenson/rbenv.git repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }} dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | ensure ruby_env exists - name: ensure ruby_env exists
template: > template: >
src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env src=ruby_env.j2 dest={{ rbenv_dir }}/ruby_env
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | check ruby-build installed - name: check ruby-build installed
command: test -x /usr/local/bin/ruby-build command: test -x /usr/local/bin/ruby-build
register: rbuild_present register: rbuild_present
ignore_errors: yes ignore_errors: yes
- name: rbenv | if ruby-build exists, check which versions we can install - name: if ruby-build exists, check which versions we can install
command: /usr/local/bin/ruby-build --definitions command: /usr/local/bin/ruby-build --definitions
when: rbuild_present|success when: rbuild_present|success
register: installable_ruby_vers register: installable_ruby_vers
ignore_errors: yes ignore_errors: yes
### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version ### in this block, we (re)install ruby-build if it doesn't exist or if it can't install the requested version
- name: rbenv | create temporary directory - name: create temporary directory
command: mktemp -d command: mktemp -d
register: tempdir register: tempdir
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | clone ruby-build repo - name: clone ruby-build repo
git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: rbenv | install ruby-build - name: install ruby-build
command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build command: ./install.sh chdir={{ tempdir.stdout }}/ruby-build
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | remove temporary directory - name: remove temporary directory
file: path={{ tempdir.stdout }} state=absent file: path={{ tempdir.stdout }} state=absent
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: rbenv | check ruby {{ rbenv_ruby_version }} installed - name: check ruby {{ rbenv_ruby_version }} installed
shell: "rbenv versions | grep {{ rbenv_ruby_version }}" shell: "rbenv versions | grep {{ rbenv_ruby_version }}"
register: ruby_installed register: ruby_installed
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
ignore_errors: yes ignore_errors: yes
- name: rbenv | install ruby {{ rbenv_ruby_version }} - name: install ruby {{ rbenv_ruby_version }}
shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}" shell: "rbenv install {{ rbenv_ruby_version }} creates={{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}"
when: ruby_installed|failed when: ruby_installed|failed
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | set global ruby {{ rbenv_ruby_version }} - name: set global ruby {{ rbenv_ruby_version }}
shell: "rbenv global {{ rbenv_ruby_version }}" shell: "rbenv global {{ rbenv_ruby_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | install bundler - name: install bundler
shell: "gem install bundler -v {{ rbenv_bundler_version }}" shell: "gem install bundler -v {{ rbenv_bundler_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | remove rbenv version of rake - name: remove rbenv version of rake
file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent file: path="{{ rbenv_dir }}/.rbenv/versions/{{ rbenv_ruby_version }}/bin/rake" state=absent
- name: rbenv | install rake gem - name: install rake gem
shell: "gem install rake -v {{ rbenv_rake_version }}" shell: "gem install rake -v {{ rbenv_rake_version }}"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
- name: rbenv | rehash - name: rehash
shell: "rbenv rehash" shell: "rbenv rehash"
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
environment: "{{ rbenv_environment }}" environment: "{{ rbenv_environment }}"
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
# #
# Tasks for role s3fs # Tasks for role s3fs
# #
# Overview: # Overview:
# #
# Installs s3fs, but doesn't mount any buckets. # Installs s3fs, but doesn't mount any buckets.
@@ -25,18 +25,18 @@
# #
# The role would need to include tasks like the following # The role would need to include tasks like the following
# #
# - name: my_role | create s3fs mount points # - name: create s3fs mount points
# file: # file:
# path={{ item.mount_point }} owner={{ item.owner }} # path={{ item.mount_point }} owner={{ item.owner }}
# group={{ item.group }} mode={{ item.mode }} state="directory" # group={{ item.group }} mode={{ item.mode }} state="directory"
# with_items: "{{ my_role_s3fs_mounts }}" # with_items: my_role_s3fs_mounts
# #
# - name: my_role | mount s3 buckets # - name: mount s3 buckets
# mount: # mount:
# name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs # name={{ item.mount_point }} src={{ item.bucket }} fstype=fuse.s3fs
# opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted # opts=use_cache=/tmp,iam_role={{ task_iam_role }},allow_other state=mounted
# with_items: "{{ myrole_s3fs_mounts }}" # with_items: myrole_s3fs_mounts
# #
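# For illustration only -- a hypothetical my_role_s3fs_mounts definition that
# would satisfy both example tasks above (bucket and path names are made up):
#
# my_role_s3fs_mounts:
#   - mount_point: /mnt/my-role-data
#     bucket: my-role-data-bucket
#     owner: www-data
#     group: www-data
#     mode: "0755"
#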
# Example play: # Example play:
# #
# Required sudo for the installation phase. # Required sudo for the installation phase.
@@ -53,38 +53,38 @@
# - s3fs # - s3fs
# #
- name: s3fs | install system packages - name: install system packages
apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present apt: pkg={{','.join(s3fs_debian_pkgs)}} state=present
tags: tags:
- s3fs - s3fs
- install - install
- update - update
- name: s3fs | fetch package - name: fetch package
get_url: get_url:
url={{ s3fs_download_url }} url={{ s3fs_download_url }}
dest={{ s3fs_temp_dir }} dest={{ s3fs_temp_dir }}
- name: s3fs | extract package - name: extract package
shell: shell:
/bin/tar -xzf {{ s3fs_archive }} /bin/tar -xzf {{ s3fs_archive }}
chdir={{ s3fs_temp_dir }} chdir={{ s3fs_temp_dir }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/configure
- name: s3fs | configure - name: configure
shell: shell:
./configure ./configure
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/config.status
- name: s3fs | make - name: make
shell: shell:
/usr/bin/make /usr/bin/make
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3cmd creates={{ s3fs_temp_dir }}/{{ s3fs_version }}/src/s3cmd
- name: s3fs | make install - name: make install
shell: shell:
/usr/bin/make install /usr/bin/make install
chdir={{ s3fs_temp_dir }}/{{ s3fs_version }} chdir={{ s3fs_temp_dir }}/{{ s3fs_version }}
--- ---
- name: shibboleth | restart shibd - name: restart shibd
service: name=shibd state=restarted service: name=shibd state=restarted
#Install shibboleth #Install shibboleth
--- ---
- name: shibboleth | Installs shib and dependencies from apt - name: Installs shib and dependencies from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes apt: pkg={{item}} install_recommends=no state=present update_cache=yes
with_items: with_items:
- shibboleth-sp2-schemas - shibboleth-sp2-schemas
@@ -9,47 +9,47 @@
- libshibsp-doc - libshibsp-doc
- libapache2-mod-shib2 - libapache2-mod-shib2
- opensaml2-tools - opensaml2-tools
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | Creates /etc/shibboleth/metadata directory - name: Creates /etc/shibboleth/metadata directory
file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd file: path=/etc/shibboleth/metadata state=directory mode=2774 group=_shibd owner=_shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | Downloads metadata into metadata directory as backup - name: Downloads metadata into metadata directory as backup
get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd get_url: url=https://idp.stanford.edu/Stanford-metadata.xml dest=/etc/shibboleth/metadata/idp-metadata.xml mode=0640 group=_shibd owner=_shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | writes out key and pem file - name: writes out key and pem file
template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600 template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600
with_items: with_items:
- key - key
- pem - pem
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | writes out configuration files - name: writes out configuration files
template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644 template: src={{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
with_items: with_items:
- attribute-map.xml - attribute-map.xml
- shibboleth2.xml - shibboleth2.xml
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
- name: shibboleth | enables shib - name: enables shib
command: a2enmod shib2 command: a2enmod shib2
notify: shibboleth | restart shibd notify: restart shibd
tags: tags:
- shib - shib
- install - install
@@ -16,5 +16,5 @@
# #
# Restart Splunk # Restart Splunk
- name: splunkforwarder | restart splunkforwarder - name: restart splunkforwarder
service: name=splunk state=restarted service: name=splunk state=restarted
@@ -10,95 +10,95 @@
# #
# #
# Tasks for role splunk # Tasks for role splunk
# #
# Overview: # Overview:
# #
# #
# Dependencies: # Dependencies:
# #
# #
# Example play: # Example play:
# #
# #
# Install Splunk Forwarder # Install Splunk Forwarder
- name: splunkforwarder| install splunkforwarder specific system packages - name: install splunkforwarder specific system packages
apt: pkg={{','.join(splunk_debian_pkgs)}} state=present apt: pkg={{','.join(splunk_debian_pkgs)}} state=present
tags: tags:
- splunk - splunk
- install - install
- update - update
- name: splunkforwarder | download the splunk deb - name: download the splunk deb
get_url: > get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}" dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}" url="{{SPLUNKFORWARDER_PACKAGE_LOCATION}}{{SPLUNKFORWARDER_DEB}}"
register: download_deb register: download_deb
- name: splunkforwarder | install splunk forwarder - name: install splunk forwarder
shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}} shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}}
when: download_deb.changed when: download_deb.changed
# Create splunk user # Create splunk user
- name: splunkforwarder | create splunk user - name: create splunk user
user: name=splunk group=splunk createhome=no state=present append=yes groups=syslog user: name=splunk createhome=no state=present append=yes groups=syslog
when: download_deb.changed when: download_deb.changed
# Need to start splunk manually so that it can create various files # Need to start splunk manually so that it can create various files
# and directories that aren't created till the first run and are needed # and directories that aren't created till the first run and are needed
# to run some of the below commands. # to run some of the below commands.
- name: splunkforwarder | start splunk manually - name: start splunk manually
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{splunkforwarder_output_dir}}/var/lib/splunk creates={{splunkforwarder_output_dir}}/var/lib/splunk
when: download_deb.changed when: download_deb.changed
register: started_manually register: started_manually
- name: splunkforwarder | stop splunk manually - name: stop splunk manually
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt
when: download_deb.changed and started_manually.changed when: download_deb.changed and started_manually.changed
- name: splunkforwarder | create boot script - name: create boot script
shell: > shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt {{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk creates=/etc/init.d/splunk
register: create_boot_script register: create_boot_script
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
# Update credentials # Update credentials
- name: splunkforwarder | update admin password - name: update admin password
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt" shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
- name: splunkforwarder | add chkconfig to init script - name: add chkconfig to init script
shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk' shell: 'sed -i -e "s/\/bin\/sh/\/bin\/sh\n# chkconfig: 235 98 55/" /etc/init.d/splunk'
when: download_deb.changed and create_boot_script.changed when: download_deb.changed and create_boot_script.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
# Ensure permissions on splunk content # Ensure permissions on splunk content
- name: splunkforwarder | ensure splunk folder permissions - name: ensure splunk folder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed when: download_deb.changed
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
# Drop template files. # Drop template files.
- name: splunkforwarder | drop input configuration - name: drop input configuration
template: template:
src=opt/splunkforwarder/etc/system/local/inputs.conf.j2 src=opt/splunkforwarder/etc/system/local/inputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/inputs.conf dest=/opt/splunkforwarder/etc/system/local/inputs.conf
owner=splunk owner=splunk
group=splunk group=splunk
mode=644 mode=644
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
- name: splunkforwarder | create outputs config file - name: create outputs config file
template: template:
src=opt/splunkforwarder/etc/system/local/outputs.conf.j2 src=opt/splunkforwarder/etc/system/local/outputs.conf.j2
dest=/opt/splunkforwarder/etc/system/local/outputs.conf dest=/opt/splunkforwarder/etc/system/local/outputs.conf
owner=splunk owner=splunk
group=splunk group=splunk
mode=644 mode=644
notify: splunkforwarder | restart splunkforwarder notify: restart splunkforwarder
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role stop_all_edx_services
#
#
# vars are namespaced with the module name.
#
stop_all_edx_services_role_name: stop_all_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role stop_all_edx_services
#
# Overview:
#
# This stops all services on an edX server
# so that everything is shutdown prior to creating
# an AMI.
#
#
- name: stop supervisor
service: name=supervisor state=stopped
- name: stop supervisor.devpi
service: name=supervisor.devpi state=stopped
- name: stop nginx
service: name=nginx state=stopped
- name: stop rabbitmq-server
service: name=rabbitmq-server state=stopped
- name: stop mysql
service: name=mysql state=stopped
- name: stop memcached
service: name=memcached state=stopped
- name: stop supervisor.devpi
service: name=supervisor.devpi state=stopped
- name: stop nginx
service: name=nginx state=stopped
- name: stop rabbitmq-server
service: name=rabbitmq-server state=stopped
- name: stop mongodb
service: name=mongodb state=stopped
- name: kill processes by user
shell: pkill -u {{ item }} || true
with_items:
- www-data
- devpi.supervisor
- rabbitmq
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role stop_all_edx_services
#
# Overview:
#
# This stops all services on an edX server
# so that everything is shutdown prior to creating
# an AMI.
#
# Example play:
# roles:
# - stop_all_edx_services
#
#
- name: stop supervisor
stat: path=/etc/init/supervisor.conf
register: stat_out
changed_when: stat_out.stat.exists
notify: stop supervisor
- name: stop supervisor.devpi
stat: path=/etc/init/supervisor.devpi.conf
register: stat_out
changed_when: stat_out.stat.exists
notify: stop supervisor.devpi
- name: stop nginx
stat: path=/etc/init.d/nginx
register: stat_out
changed_when: stat_out.stat.exists
notify: stop nginx
- name: stop rabbitmq-server
stat: path=/etc/init.d/rabbitmq-server
register: stat_out
changed_when: stat_out.stat.exists
notify: stop rabbitmq-server
- name: stop memcached
stat: path=/etc/init.d/memcached
register: stat_out
changed_when: stat_out.stat.exists
notify: stop memcached
- name: stop mongodb
stat: path=/etc/init.d/mongodb
register: stat_out
changed_when: stat_out.stat.exists
notify: stop mongodb
- shell: "true"
notify: kill processes by user
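A note on the pattern used in the tasks above, since it is easy to misread: the stat tasks do not stop anything themselves. A minimal sketch with a made-up service name:

- name: stop myservice
  stat: path=/etc/init.d/myservice
  register: stat_out
  changed_when: stat_out.stat.exists
  notify: stop myservice

Because handlers only fire for tasks that report a change, marking the stat as changed only when the init script exists means the matching handler (which performs the actual service stop) runs only on hosts where that service is installed.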
@@ -50,19 +50,19 @@
# supervisor_service: upstart-service-name # supervisor_service: upstart-service-name
# #
--- ---
- name: supervisor | create application user - name: create application user
user: > user: >
name="{{ supervisor_user }}" name="{{ supervisor_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: supervisor | create supervisor service user - name: create supervisor service user
user: > user: >
name="{{ supervisor_service_user }}" name="{{ supervisor_service_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: supervisor | create supervisor directories - name: create supervisor directories
file: > file: >
name={{ item }} name={{ item }}
state=directory state=directory
@@ -73,7 +73,7 @@
- "{{ supervisor_venv_dir }}" - "{{ supervisor_venv_dir }}"
- "{{ supervisor_cfg_dir }}" - "{{ supervisor_cfg_dir }}"
- name: supervisor | create supervisor directories - name: create supervisor directories
file: > file: >
name={{ item }} name={{ item }}
state=directory state=directory
@@ -84,29 +84,29 @@
- "{{ supervisor_log_dir }}" - "{{ supervisor_log_dir }}"
- name: supervisor | install supervisor in its venv - name: install supervisor in its venv
pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
- name: supervisor | create supervisor upstart job - name: create supervisor upstart job
template: > template: >
src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf src=supervisor-upstart.conf.j2 dest=/etc/init/{{ supervisor_service }}.conf
owner=root group=root owner=root group=root
- name: supervisor | create supervisor master config - name: create supervisor master config
template: > template: >
src=supervisord.conf.j2 dest={{ supervisor_cfg }} src=supervisord.conf.j2 dest={{ supervisor_cfg }}
owner={{ supervisor_user }} group={{ supervisor_service_user }} owner={{ supervisor_user }} group={{ supervisor_service_user }}
mode=0644 mode=0644
- name: supervisor | create a symlink for supervisorctl - name: create a symlink for supervisorctl
file: > file: >
src={{ supervisor_ctl }} src={{ supervisor_ctl }}
dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }} dest={{ COMMON_BIN_DIR }}/{{ supervisor_ctl|basename }}
state=link state=link
when: supervisor_service == "supervisor" when: supervisor_service == "supervisor"
- name: supervisor | create a symlink for supervisor cfg - name: create a symlink for supervisor cfg
file: > file: >
src={{ item }} src={{ item }}
dest={{ COMMON_CFG_DIR }}/{{ item|basename }} dest={{ COMMON_CFG_DIR }}/{{ item|basename }}
@@ -116,7 +116,7 @@
- "{{ supervisor_cfg }}" - "{{ supervisor_cfg }}"
- "{{ supervisor_cfg_dir }}" - "{{ supervisor_cfg_dir }}"
- name: supervisor | start supervisor - name: start supervisor
service: > service: >
name={{supervisor_service}} name={{supervisor_service}}
state=started state=started
@@ -124,7 +124,7 @@
# calling update on supervisor too soon after it # calling update on supervisor too soon after it
# starts will result in an error. # starts will result in an error.
- name: supervisor | wait for web port to be available - name: wait for web port to be available
wait_for: port={{ supervisor_http_bind_port }} timeout=5 wait_for: port={{ supervisor_http_bind_port }} timeout=5
when: start_supervisor.changed when: start_supervisor.changed
@@ -134,7 +134,7 @@
# we don't use notifications for supervisor because # we don't use notifications for supervisor because
# they don't work well with parameterized roles. # they don't work well with parameterized roles.
# See https://github.com/ansible/ansible/issues/4853 # See https://github.com/ansible/ansible/issues/4853
- name: supervisor | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
@@ -3,6 +3,5 @@ description "supervisord"
start on runlevel [2345] start on runlevel [2345]
stop on runlevel [!2345] stop on runlevel [!2345]
respawn
setuid {{ supervisor_service_user }} setuid {{ supervisor_service_user }}
exec {{ supervisor_venv_dir }}/bin/supervisord --nodaemon --configuration {{ supervisor_cfg }} exec {{ supervisor_venv_dir }}/bin/supervisord --nodaemon --configuration {{ supervisor_cfg }}
@@ -33,8 +33,8 @@ XQUEUE_AWS_ACCESS_KEY_ID : ''
XQUEUE_AWS_SECRET_ACCESS_KEY : '' XQUEUE_AWS_SECRET_ACCESS_KEY : ''
XQUEUE_BASIC_AUTH_USER: 'edx' XQUEUE_BASIC_AUTH_USER: 'edx'
XQUEUE_BASIC_AUTH_PASSWORD: 'edx' XQUEUE_BASIC_AUTH_PASSWORD: 'edx'
XQUEUE_DJANGO_USER: 'lms' XQUEUE_DJANGO_USERS:
XQUEUE_DJANGO_PASSWORD: 'password' lms: 'password'
XQUEUE_RABBITMQ_USER: 'edx' XQUEUE_RABBITMQ_USER: 'edx'
XQUEUE_RABBITMQ_PASS: 'edx' XQUEUE_RABBITMQ_PASS: 'edx'
XQUEUE_RABBITMQ_HOSTNAME: 'localhost' XQUEUE_RABBITMQ_HOSTNAME: 'localhost'
@@ -61,7 +61,7 @@ xqueue_auth_config:
AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY
REQUESTS_BASIC_AUTH: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD] REQUESTS_BASIC_AUTH: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD]
USERS: { '{{XQUEUE_DJANGO_USER}}' : $XQUEUE_DJANGO_PASSWORD } USERS: $XQUEUE_DJANGO_USERS
DATABASES: DATABASES:
default: default:
ENGINE: "django.db.backends.mysql" ENGINE: "django.db.backends.mysql"
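Since XQUEUE_DJANGO_USERS replaces the old single XQUEUE_DJANGO_USER/XQUEUE_DJANGO_PASSWORD pair, an override now supplies a user-to-password mapping. A hypothetical example with a second submitter (both names and passwords are placeholders):

XQUEUE_DJANGO_USERS:
  lms: 'password'
  another_client: 'also-a-placeholder'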
...
- name: xqueue | restart xqueue - name: restart xqueue
supervisorctl_local: > supervisorctl_local: >
name={{ item }} name={{ item }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=restarted state=restarted
when: xqueue_installed is defined
with_items: with_items:
- xqueue - xqueue
- xqueue_consumer - xqueue_consumer
tags: deploy
- name: "xqueue | writing supervisor scripts - xqueue, xqueue consumer" - name: "writing supervisor scripts - xqueue, xqueue consumer"
template: > template: >
src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf src={{ item }}.conf.j2 dest={{ supervisor_cfg_dir }}/{{ item }}.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
with_items: ['xqueue', 'xqueue_consumer'] with_items: ['xqueue', 'xqueue_consumer']
tags:
- deploy
- name: xqueue | create xqueue application config - name: create xqueue application config
template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json mode=0644 template: src=xqueue.env.json.j2 dest={{ xqueue_app_dir }}/xqueue.env.json mode=0644
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
- name: xqueue | create xqueue auth file - name: create xqueue auth file
template: src=xqueue.auth.json.j2 dest={{ xqueue_app_dir }}/xqueue.auth.json mode=0644 template: src=xqueue.auth.json.j2 dest={{ xqueue_app_dir }}/xqueue.auth.json mode=0644
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
# Do A Checkout # Do A Checkout
- name: xqueue | git checkout xqueue repo into xqueue_code_dir - name: git checkout xqueue repo into xqueue_code_dir
git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }} git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }}
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
# Install the python pre requirements into {{ xqueue_venv_dir }} # Install the python pre requirements into {{ xqueue_venv_dir }}
- name : xqueue | install python pre-requirements - name : install python pre-requirements
pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
# Install the python post requirements into {{ xqueue_venv_dir }} # Install the python post requirements into {{ xqueue_venv_dir }}
- name : xqueue | install python post-requirements - name : install python post-requirements
pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
- name: xqueue | syncdb and migrate - name: syncdb and migrate
shell: > shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes" when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
- name: xqueue | create users - name: create users
shell: > shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
tags:
- deploy
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
# the services if any of the configurations # the services if any of the configurations
# have changed. # have changed.
# #
- name: xqueue | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
tags: deploy
- name: xqueue | ensure xqueue, consumer is running - name: ensure xqueue, consumer is running
supervisorctl_local: > supervisorctl_local: >
name={{ item }} name={{ item }}
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
@@ -91,4 +73,14 @@
with_items: with_items:
- xqueue - xqueue
- xqueue_consumer - xqueue_consumer
tags: deploy
- name: create a symlink for venv python
file: >
src="{{ xqueue_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.xqueue
state=link
with_items:
- python
- pip
- set_fact: xqueue_installed=true
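Worth noting: the set_fact above pairs with the `when: xqueue_installed is defined` guard added to the restart xqueue handler earlier in this diff, so on a fresh box the restart is skipped until the deploy tasks have run at least once. In sketch form:

# - name: restart xqueue
#   ...
#   when: xqueue_installed is defined   # skipped until the fact below exists
# - set_fact: xqueue_installed=true     # last step of deploy.yml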
@@ -6,33 +6,33 @@
# #
# #
- name: xqueue | create application user - name: create application user
user: > user: >
name="{{ xqueue_user }}" name="{{ xqueue_user }}"
home="{{ xqueue_app_dir }}" home="{{ xqueue_app_dir }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | create xqueue app and venv dir - name: create xqueue app and venv dir
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
owner="{{ xqueue_user }}" owner="{{ xqueue_user }}"
group="{{ common_web_group }}" group="{{ common_web_group }}"
notify: notify:
- xqueue | restart xqueue - restart xqueue
with_items: with_items:
- "{{ xqueue_app_dir }}" - "{{ xqueue_app_dir }}"
- "{{ xqueue_venvs_dir }}" - "{{ xqueue_venvs_dir }}"
- name: xqueue | install a bunch of system packages on which xqueue relies - name: install a bunch of system packages on which xqueue relies
apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present apt: pkg={{','.join(xqueue_debian_pkgs)}} state=present
notify: notify:
- xqueue | restart xqueue - restart xqueue
- name: xqueue | create xqueue db - name: create xqueue db
mysql_db: > mysql_db: >
name={{xqueue_auth_config.DATABASES.default.NAME}} name={{xqueue_auth_config.DATABASES.default.NAME}}
login_host={{xqueue_auth_config.DATABASES.default.HOST}} login_host={{xqueue_auth_config.DATABASES.default.HOST}}
@@ -41,19 +41,11 @@
state=present state=present
encoding=utf8 encoding=utf8
notify: notify:
- xqueue | restart xqueue - restart xqueue
when: xqueue_create_db is defined and xqueue_create_db|lower == "yes" when: xqueue_create_db is defined and xqueue_create_db|lower == "yes"
- include: deploy.yml - include: deploy.yml tags=deploy
- name: xqueue | create a symlink for venv python
file: >
src="{{ xqueue_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.xqueue
state=link
with_items:
- python
- pip
@@ -11,3 +11,5 @@ stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true killasgroup=true
stopasgroup=true stopasgroup=true
startsecs=0
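For context (supervisord semantics, not specific to this template): startsecs=0 means the program does not have to stay up for any minimum time before supervisord considers it successfully started.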
@@ -26,7 +26,7 @@ xserver_venv_dir: "{{ xserver_venvs_dir }}/xserver"
xserver_venv_sandbox_dir: "{{ xserver_venv_dir }}-sandbox" xserver_venv_sandbox_dir: "{{ xserver_venv_dir }}-sandbox"
xserver_venv_bin: "{{ xserver_venv_dir }}/bin" xserver_venv_bin: "{{ xserver_venv_dir }}/bin"
xserver_user: "xserver" xserver_user: "xserver"
xserver_sandbox_user: "xserver-sandbox" xserver_sandbox_user: "sandbox"
xserver_log_dir: "{{ COMMON_LOG_DIR }}/xserver" xserver_log_dir: "{{ COMMON_LOG_DIR }}/xserver"
xserver_grader_root: "{{ XSERVER_GRADER_DIR }}/graders" xserver_grader_root: "{{ XSERVER_GRADER_DIR }}/graders"
xserver_git_identity: "{{ xserver_app_dir }}/{{ XSERVER_LOCAL_GIT_IDENTITY|basename }}" xserver_git_identity: "{{ xserver_app_dir }}/{{ XSERVER_LOCAL_GIT_IDENTITY|basename }}"
...
@@ -14,11 +14,10 @@
# Overview: # Overview:
# #
- name: xserver | restart xserver - name: restart xserver
supervisorctl_local: > supervisorctl_local: >
name=xserver name=xserver
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=restarted state=restarted
tags: deploy
- name: "xserver | writing supervisor script" - name: "writing supervisor script"
template: > template: >
src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf src=xserver.conf.j2 dest={{ supervisor_cfg_dir }}/xserver.conf
owner={{ supervisor_user }} group={{ common_web_user }} mode=0644 owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
tags:
- deploy
- name: xserver | checkout code - name: checkout code
git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}} git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
- name: xserver | install requirements - name: install requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
- name: xserver | install sandbox requirements - name: install sandbox requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
- name: xserver | create xserver application config - name: create xserver application config
template: src=xserver.env.json.j2 dest={{ xserver_app_dir }}/env.json template: src=xserver.env.json.j2 dest={{ xserver_app_dir }}/env.json
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
- name: xserver | install read-only ssh key for the content repo that is required for grading - name: install read-only ssh key for the content repo that is required for grading
copy: > copy: >
src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }} src={{ XSERVER_LOCAL_GIT_IDENTITY }} dest={{ xserver_git_identity }}
owner={{ xserver_user }} group={{ xserver_user }} mode=0600 owner={{ xserver_user }} group={{ xserver_user }} mode=0600
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
- name: xserver | upload ssh script - name: upload ssh script
template: > template: >
src=git_ssh.sh.j2 dest=/tmp/git_ssh.sh src=git_ssh.sh.j2 dest=/tmp/git_ssh.sh
owner={{ xserver_user }} mode=750 owner={{ xserver_user }} mode=750
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
- name: xserver | checkout grader code - name: checkout grader code
git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }} git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }}
environment: environment:
GIT_SSH: /tmp/git_ssh.sh GIT_SSH: /tmp/git_ssh.sh
notify: xserver | restart xserver notify: restart xserver
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
tags:
- deploy
- name: xserver | remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
file: path={{ xserver_git_identity }} state=absent file: path={{ xserver_git_identity }} state=absent
notify: xserver | restart xserver notify: restart xserver
tags:
- deploy
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
# the services if any of the configurations # the services if any of the configurations
# have changed. # have changed.
# #
- name: xserver | update supervisor configuration - name: update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update" shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update register: supervisor_update
changed_when: supervisor_update.stdout != "" changed_when: supervisor_update.stdout != ""
tags: deploy
- name: xserver | ensure xserver is started - name: ensure xserver is started
supervisorctl_local: > supervisorctl_local: >
name=xserver name=xserver
supervisorctl_path={{ supervisor_ctl }} supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }} config={{ supervisor_cfg }}
state=started state=started
tags: deploy
- name: create a symlink for venv python
file: >
src="{{ xserver_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.xserver
state=link
with_items:
- python
- pip
- name: enforce app-armor rules
command: aa-enforce {{ xserver_venv_sandbox_dir }}
@@ -3,28 +3,28 @@
# access to the edX 6.00x repo which is not public # access to the edX 6.00x repo which is not public
--- ---
- name: xserver | checking for grader info - name: checking for grader info
fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!" fail: msg="You must define XSERVER_GRADER_DIR and XSERVER_GRADER_SOURCE to use this role!"
when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE when: not XSERVER_GRADER_DIR or not XSERVER_GRADER_SOURCE
- name: xserver | checking for git identity - name: checking for git identity
fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role" fail: msg="You must define XSERVER_LOCAL_GIT_IDENTITY to use this role"
when: not XSERVER_LOCAL_GIT_IDENTITY when: not XSERVER_LOCAL_GIT_IDENTITY
- name: xserver | create application user - name: create application user
user: > user: >
name="{{ xserver_user }}" name="{{ xserver_user }}"
home="{{ xserver_app_dir }}" home="{{ xserver_app_dir }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: xserver | create application sandbox user - name: create application sandbox user
user: > user: >
name="{{ xserver_sandbox_user }}" name="{{ xserver_sandbox_user }}"
createhome=no createhome=no
shell=/bin/false shell=/bin/false
- name: xserver | create xserver app and data dirs - name: create xserver app and data dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
@@ -36,43 +36,30 @@
- "{{ xserver_data_dir }}" - "{{ xserver_data_dir }}"
- "{{ xserver_data_dir }}/data" - "{{ xserver_data_dir }}/data"
- name: xserver | create sandbox sudoers file - name: create sandbox sudoers file
template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440 template: src=99-sandbox.j2 dest=/etc/sudoers.d/99-sandbox owner=root group=root mode=0440
# Make sure this line is in the common-session file. # Make sure this line is in the common-session file.
- name: xserver | ensure pam-limits module is loaded - name: ensure pam-limits module is loaded
lineinfile: lineinfile:
dest=/etc/pam.d/common-session dest=/etc/pam.d/common-session
regexp="session required pam_limits.so" regexp="session required pam_limits.so"
line="session required pam_limits.so" line="session required pam_limits.so"
- name: xserver | set sandbox limits - name: set sandbox limits
template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf template: src={{ item }} dest=/etc/security/limits.d/sandbox.conf
first_available_file: first_available_file:
- "{{ secure_dir }}/sandbox.conf.j2" - "{{ secure_dir }}/sandbox.conf.j2"
- "sandbox.conf.j2" - "sandbox.conf.j2"
- name: xserver | install system dependencies of xserver - name: install system dependencies of xserver
apt: pkg={{ item }} state=present apt: pkg={{ item }} state=present
with_items: xserver_debian_pkgs with_items: xserver_debian_pkgs
- name: xserver | load python-sandbox apparmor profile - name: load python-sandbox apparmor profile
template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox template: src={{ item }} dest=/etc/apparmor.d/edx_apparmor_sandbox
first_available_file: first_available_file:
- "{{ secure_dir }}/files/edx_apparmor_sandbox.j2" - "{{ secure_dir }}/files/edx_apparmor_sandbox.j2"
- "usr.bin.python-sandbox.j2" - "usr.bin.python-sandbox.j2"
- include: deploy.yml - include: deploy.yml tags=deploy
- name: xserver | enforce app-armor rules
command: aa-enforce {{ xserver_venv_sandbox_dir }}
- name: xserver | create a symlink for venv python
file: >
src="{{ xserver_venv_bin }}/{{ item }}"
dest={{ COMMON_BIN_DIR }}/{{ item }}.xserver
state=link
with_items:
- python
- pip
---
gerrit_github_client_id: alskdjdfkjasdjfsdlfkj
gerrit_github_client_secret: 0938908450deffaaa87665a555a6fc6de5777f77f
gerrit_db_hostname: somedb.88374jhyehf.us-east-1.rds.amazonaws.com
gerrit_db_admin_username: adminuser
gerrit_db_admin_password: adminpassword
gerrit_db_password: gerrituserpassword
gerrit_artifact_s3_bucket:
name: some-s3-bucket
aws_access_key_id: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
aws_secret_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
gerrit_hostname: "gerrit.example.com"
gerrit_smtp_enabled: false
gerrit_email: gerrit@example.com
gerrit_smtp_server: smtp.example.com
gerrit_smtp_encryption: none
gerrit_smtp_user: someuser
gerrit_smtp_pass: somepassword
@@ -22,3 +22,4 @@
- ora - ora
- browsers - browsers
- local_dev - local_dev
- demo
@@ -17,13 +17,17 @@
- cms - cms
- lms - lms
- ora - ora
- forum
- xqueue - xqueue
nginx_default_sites:
- lms
- cms
- edxlocal - edxlocal
- mongo - mongo
- { role: 'edxapp', celery_worker: True }
- edxapp - edxapp
- demo - demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
- forum - forum
...
-Jinja2==2.7.1
-MarkupSafe==0.18
+ansible==1.4.4
 PyYAML==3.10
-ansible==1.3.2
+Jinja2==2.7.2
+MarkupSafe==0.18
 argparse==1.2.1
-boto==2.10.0
+boto==2.20.1
 ecdsa==0.10
 paramiko==1.12.0
 pycrypto==2.6.1
...
##
## Installs the pre-requisites for running edX on a single Ubuntu 12.04
## instance. This script is provided as a convenience and any of these
## steps could be executed manually.
##
## Note that this script requires that you have the ability to run
## commands as root via sudo. Caveat Emptor!
##
##
## Sanity check
##
if [[ ! "$(lsb_release -d | cut -f2)" =~ $'Ubuntu 12.04' ]]; then
echo "This script is only known to work on Ubuntu 12.04, exiting...";
exit;
fi
##
## Install system pre-requisites
##
sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev
wget https://bitbucket.org/pypa/setuptools/raw/0.8/ez_setup.py -O - | sudo python
sudo pip install --upgrade pip
sudo pip install --upgrade virtualenv
##
## Clone the configuration repository and run Ansible
##
cd /var/tmp
git clone https://github.com/edx/configuration
##
## Install the ansible requirements
##
cd /var/tmp/configuration
sudo pip install -r requirements.txt
##
## Run the edx_sandbox.yml playbook in the configuration/playbooks directory
##
cd /var/tmp/configuration/playbooks
sudo ansible-playbook -c local ./edx_sandbox.yml -i "localhost,"
@@ -21,6 +21,16 @@
export PYTHONUNBUFFERED=1 export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -n $WORKSPACE ]]; then
# setup a virtualenv in jenkins
if [[ ! -d ".venv" ]]; then
virtualenv .venv
fi
source .venv/bin/activate
pip install -r requirements.txt
fi
if [[ -z $WORKSPACE ]]; then if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0) dir=$(dirname $0)
source "$dir/ascii-convert.sh" source "$dir/ascii-convert.sh"
@@ -44,7 +54,7 @@ fi
extra_vars="/var/tmp/extra-vars-$$.yml" extra_vars="/var/tmp/extra-vars-$$.yml"
if [[ -z $region ]]; then if [[ -z $region ]]; then
region="us-east1" region="us-east-1"
fi fi
if [[ -z $zone ]]; then if [[ -z $zone ]]; then
@@ -65,33 +75,69 @@ fi
if [[ -z $ami ]]; then if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-81e0c5e8" ami="ami-0dd1ef64"
elif [[ $server_type == "ubuntu_12.04" ]]; then elif [[ $server_type == "ubuntu_12.04" ]]; then
ami="ami-d0f89fb9" ami="ami-d0f89fb9"
fi fi
fi fi
 if [[ -z $instance_type ]]; then
-  if [[ $server_type == "full_edx_installation" ]]; then
-    instance_type="m1.medium"
-  elif [[ $server_type == "ubuntu_12.04" ]]; then
-    instance_type="m1.small"
-  fi
+  instance_type="m1.medium"
 fi
deploy_host="${dns_name}.${dns_zone}" deploy_host="${dns_name}.${dns_zone}"
ssh-keygen -f "/var/lib/jenkins/.ssh/known_hosts" -R "$deploy_host" ssh-keygen -f "/var/lib/jenkins/.ssh/known_hosts" -R "$deploy_host"
if [[ -z $WORKSPACE ]]; then
dir=$(dirname $0)
source "$dir/ascii-convert.sh"
else
source "$WORKSPACE/util/jenkins/create-var-file.sh"
fi
cd playbooks/edx-east cd playbooks/edx-east
cat << EOF > $extra_vars
---
enable_datadog: False
enable_splunkforwarder: False
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
NGINX_ENABLE_SSL: True
NGINX_SSL_CERTIFICATE: '/var/lib/jenkins/star.sandbox.edx.org.crt'
NGINX_SSL_KEY: '/var/lib/jenkins/star.sandbox.edx.org.key'
EDXAPP_LMS_SSL_NGINX_PORT: 443
EDXAPP_CMS_SSL_NGINX_PORT: 443
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_CMS_BASE: studio.${deploy_host}
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
EDXAPP_SITE_NAME: ${deploy_host}
COMMON_PYPI_MIRROR_URL: 'https://pypi.edx.org/root/pypi/+simple/'
XSERVER_GRADER_DIR: "/edx/var/xserver/data/content-mit-600x~2012_Fall"
XSERVER_GRADER_SOURCE: "git@github.com:/MITx/6.00x.git"
XSERVER_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
CERTS_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
CERTS_AWS_KEY: $(cat /var/lib/jenkins/certs-aws-key)
CERTS_AWS_ID: $(cat /var/lib/jenkins/certs-aws-id)
CERTS_BUCKET: "verify-test.edx.org"
migrate_db: "yes"
openid_workaround: True
edx_platform_version: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
xserver_version: $xserver_version
ora_version: $ora_version
ease_version: $ease_version
certs_version: $certs_version
discern_version: $discern_version
rabbitmq_ip: "127.0.0.1"
rabbitmq_refresh: True
COMMON_HOSTNAME: edx-server
EDXAPP_STATIC_URL_BASE: $static_url_base
# Settings for Grade downloads
EDXAPP_GRADE_STORAGE_TYPE: 's3'
EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: 'sandbox'
EOF
if [[ $basic_auth == "true" ]]; then
# vars specific to provisioning added to $extra-vars
cat << EOF_AUTH >> $extra_vars
...@@ -111,7 +157,12 @@ security_group: $security_group
ami: $ami
region: $region
zone: $zone
instance_tags:
environment: $environment
github_username: $github_username
Name: $name_tag
source: jenkins
owner: $BUILD_USER
root_ebs_size: $root_ebs_size
name_tag: $name_tag
gh_users:
...@@ -122,8 +173,8 @@ GH_USERS_PROMPT: '[$name_tag] '
elb: $elb
EOF
# run the tasks to launch an ec2 instance from AMI
cat $extra_vars
ansible-playbook edx_provision.yml -i inventory.ini -e "@${extra_vars}" --user ubuntu
if [[ $server_type == "full_edx_installation" ]]; then
...@@ -135,27 +186,27 @@ EOF
fi
declare -A deploy
roles="edxapp forum xqueue xserver ora discern certs"
for role in $roles; do
deploy[$role]=${!role}
done
# If reconfigure was selected or if starting from an ubuntu 12.04 AMI
# run non-deploy tasks for all roles
if [[ $reconfigure == "true" || $server_type == "ubuntu_12.04" ]]; then
cat $extra_vars
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --skip-tags deploy
fi
# Run deploy tasks for the roles selected
for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars
ansible-playbook ${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy
fi
done
# deploy the edx_ansible role
ansible-playbook edx_ansible.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
rm -f "$extra_vars"
# creates a var file with common values for
# both deployment and provisioning
cat << EOF > $extra_vars
---
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
EDXAPP_SITE_NAME: ${deploy_host}
COMMON_PYPI_MIRROR_URL: 'https://pypi.edx.org/root/pypi/+simple/'
XSERVER_GRADER_DIR: "{{ xserver_data_dir }}/data/content-mit-600x~2012_Fall"
XSERVER_GRADER_SOURCE: "git@github.com:/MITx/6.00x.git"
XSERVER_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
CERTS_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
CERTS_AWS_KEY: $(cat /var/lib/jenkins/certs-aws-key)
CERTS_AWS_ID: $(cat /var/lib/jenkins/certs-aws-id)
CERTS_BUCKET: "verify-test.edx.org"
migrate_db: "yes"
openid_workaround: True
edx_platform_version: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
xserver_version: $xserver_version
ora_version: $ora_version
ease_version: $ease_version
certs_version: $certs_version
discern_version: $discern_version
rabbitmq_ip: "127.0.0.1"
rabbitmq_refresh: True
COMMON_HOSTNAME: edx-server
EDXAPP_STATIC_URL_BASE: $static_url_base
# Settings for Grade downloads
EDXAPP_GRADE_STORAGE_TYPE: 's3'
EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: 'sandbox'
EOF
...@@ -83,7 +83,8 @@ def uri_from(doc_store_config):
def prepare_release(args):
config = yaml.safe_load(open(args.config))
mongo_uri = uri_from(config['DOC_STORE_CONFIG'])
client = MongoClient(mongo_uri)
db = client[config['DOC_STORE_CONFIG']['db']]
# Get configuration repo versions
...@@ -150,9 +151,12 @@ def prepare_release(args):
all_plays[play]['amis'][env] = None
release['plays'] = all_plays
if not args.noop:
release_coll.insert(release)
# All plays that need new AMIs have been updated.
notify_abbey(config['abbey_url'], config['abbey_token'], args.deployment,
all_plays, args.release_id, mongo_uri, config_repo_ver,
config_secure_ver, args.noop)
def ami_for(db, env, deployment, play, configuration,
configuration_secure, ansible_vars):
...@@ -169,7 +173,8 @@ def ami_for(db, env, deployment, play, configuration,
return db.amis.find_one(ami_signature)
import requests
def notify_abbey(abbey_url, abbey_token, deployment, all_plays, release_id,
mongo_uri, configuration_ref, configuration_secure_ref, noop=False):
for play_name, play in all_plays.items():
for env, ami in play['amis'].items():
if ami is None:
...@@ -177,20 +182,24 @@ def notify_abbey(abbey_url, abbey_token, deployment, all_plays, release_id):
params.append({ 'name': 'play', 'value': play_name})
params.append({ 'name': 'deployment', 'value': deployment})
params.append({ 'name': 'environment', 'value': env})
params.append({ 'name': 'vars', 'value': yaml.safe_dump(play['vars'], default_flow_style=False)})
params.append({ 'name': 'release_id', 'value': release_id})
params.append({ 'name': 'mongo_uri', 'value': mongo_uri})
params.append({ 'name': 'configuration', 'value': configuration_ref})
params.append({ 'name': 'configuration_secure', 'value': configuration_secure_ref})
build_params = {'parameter': params}
log.info("Need ami for {}".format(pformat(build_params)))
if not noop:
r = requests.post(abbey_url,
data={"token": abbey_token},
params={"json": json.dumps(build_params)})
log.info("Sent request got {}".format(r))
if r.status_code != 201:
# Something went wrong.
msg = "Failed to submit request with params: {}"
raise Exception(msg.format(pformat(build_params)))
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepare a new release.") parser = argparse.ArgumentParser(description="Prepare a new release.")
...@@ -199,6 +208,8 @@ if __name__ == "__main__": ...@@ -199,6 +208,8 @@ if __name__ == "__main__":
msg = "The deployment to build for eg. edx, edge, loadtest" msg = "The deployment to build for eg. edx, edge, loadtest"
parser.add_argument('-d', '--deployment', required=True, help=msg) parser.add_argument('-d', '--deployment', required=True, help=msg)
parser.add_argument('-r', '--release-id', required=True, help="Id of Release.") parser.add_argument('-r', '--release-id', required=True, help="Id of Release.")
parser.add_argument('-n', '--noop', action='store_true',
help="Run without sending requests to abbey.")
parser.add_argument('REPOS', nargs='+',
help="Any number of var=value(no spcae around '='" + \
" e.g. 'edxapp=3233bac xqueue=92832ab'")
......
#!/usr/bin/env python -u
import sys
from argparse import ArgumentParser
import time
import json
import yaml
import os
try:
import boto.ec2
import boto.sqs
from boto.vpc import VPCConnection
from boto.exception import NoAuthHandlerFound, EC2ResponseError
from boto.sqs.message import RawMessage
except ImportError:
print "boto required for script"
sys.exit(1)
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, DuplicateKeyError
from pprint import pprint
AMI_TIMEOUT = 600 # time to wait for AMIs to complete
EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition
EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks
NUM_TASKS = 5 # number of tasks for time summary report
NUM_PLAYBOOKS = 2
class MongoConnection:
def __init__(self):
try:
mongo = MongoClient(host=args.mongo_uri)
except ConnectionFailure:
print "Unable to connect to the mongo database specified"
sys.exit(1)
mongo_db = getattr(mongo, args.mongo_db)
if args.mongo_ami_collection not in mongo_db.collection_names():
mongo_db.create_collection(args.mongo_ami_collection)
if args.mongo_deployment_collection not in mongo_db.collection_names():
mongo_db.create_collection(args.mongo_deployment_collection)
self.mongo_ami = getattr(mongo_db, args.mongo_ami_collection)
self.mongo_deployment = getattr(
mongo_db, args.mongo_deployment_collection)
def update_ami(self, ami):
"""
Creates a new document in the AMI
collection with the ami id as the
id
"""
query = {
'_id': ami,
'play': args.play,
'env': args.environment,
'deployment': args.deployment,
'configuration_ref': args.configuration_version,
'configuration_secure_ref': args.configuration_secure_version,
'vars': extra_vars,
}
try:
self.mongo_ami.insert(query)
except DuplicateKeyError:
if not args.noop:
print "Entry already exists for {}".format(ami)
raise
def update_deployment(self, ami):
"""
Adds the built AMI to the deployment
collection
"""
query = {'_id': args.jenkins_build}
deployment = self.mongo_deployment.find_one(query)
try:
deployment['plays'][args.play]['amis'][args.environment] = ami
except KeyError:
msg = "Unexpected document structure, couldn't write " +\
"to path deployment['plays']['{}']['amis']['{}']"
print msg.format(args.play, args.environment)
pprint(deployment)
if args.noop:
deployment = {
'plays': {
args.play: {
'amis': {
args.environment: ami,
},
},
},
}
else:
raise
self.mongo_deployment.save(deployment)
class Unbuffered:
...@@ -45,11 +122,11 @@ def parse_args():
default=False)
parser.add_argument('--secure-vars', required=False,
metavar="SECURE_VAR_FILE",
help="path to secure-vars from the root of "
"the secure repo (defaults to ansible/"
"vars/DEPLOYMENT/ENVIRONMENT-DEPLOYMENT.yml)")
parser.add_argument('--stack-name',
help="defaults to ENVIRONMENT-DEPLOYMENT",
metavar="STACK_NAME",
required=False)
parser.add_argument('-p', '--play',
...@@ -69,11 +146,14 @@ def parse_args():
help="Application for subnet, defaults to admin",
default="admin")
parser.add_argument('--configuration-version', required=False,
help="configuration repo branch(no hashes)",
default="master")
parser.add_argument('--configuration-secure-version', required=False,
help="configuration-secure repo branch(no hashes)",
default="master")
parser.add_argument('--configuration-secure-repo', required=False,
default="git@github.com:edx-ops/prod-secure",
help="repo to use for the secure files")
parser.add_argument('-j', '--jenkins-build', required=False,
help="jenkins build number to update")
parser.add_argument('-b', '--base-ami', required=False,
...@@ -101,30 +181,54 @@ def parse_args():
default=5,
help="How long to delay message display from sqs "
"to ensure ordering")
parser.add_argument("--mongo-uri", required=False,
default=None,
help="Mongo uri for the host that contains"
"the AMI collection")
parser.add_argument("--mongo-db", required=False,
default="test",
help="Mongo database")
parser.add_argument("--mongo-ami-collection", required=False,
default="amis",
help="Mongo ami collection")
parser.add_argument("--mongo-deployment-collection", required=False,
default="deployment",
help="Mongo deployment collection")
return parser.parse_args()
def get_instance_sec_group(vpc_id, security_group):
security_group_id = None
grp_details = ec2.get_all_security_groups(
filters={
'vpc_id':vpc_id
}
)
for grp in grp_details:
if grp.name == security_group:
security_group_id = grp.id
break
if not security_group_id:
print "Unable to lookup id for security group {}".format(
security_group)
sys.exit(1)
return security_group_id
def create_instance_args():
"""
Looks up security group, subnet
and returns arguments to pass into
ec2.run_instances() including
user data
"""
vpc = VPCConnection()
subnet = vpc.get_all_subnets(
filters={
...@@ -136,6 +240,9 @@ def create_instance_args():
len(subnet)))
sys.exit(1)
subnet_id = subnet[0].id
vpc_id = subnet[0].vpc_id
security_group_id = get_instance_sec_group(vpc_id, args.security_group)
if args.identity:
config_secure = 'true'
...@@ -159,8 +266,12 @@ environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name="{configuration_secure_repo_basename}"
secure_vars_file="$base_dir/$git_repo_secure_name/{secure_vars}"
common_vars_file="$base_dir/$git_repo_secure_name/ansible/vars/common/common.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
...@@ -168,8 +279,6 @@ $(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/configuration/playbooks/edx-east"
if $config_secure; then
git_cmd="env GIT_SSH=$git_ssh git"
...@@ -219,32 +328,42 @@ EOF
chmod 400 $secure_identity
$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir
if $config_secure; then
$git_cmd clone $git_repo_secure $git_repo_secure_name
cd $git_repo_secure_name
$git_cmd checkout $configuration_secure_version
cd $base_dir
fi
cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt
cd $playbook_dir
ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$extra_vars -e@$common_vars_file
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$extra_vars -e@$common_vars_file
rm -rf $base_dir
""".format(
configuration_version=args.configuration_version,
configuration_secure_version=args.configuration_secure_version,
configuration_secure_repo=args.configuration_secure_repo,
configuration_secure_repo_basename=os.path.basename(
args.configuration_secure_repo),
environment=args.environment,
deployment=args.deployment,
play=args.play,
config_secure=config_secure,
identity_file=identity_file,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
secure_vars=secure_vars)
ec2_args = {
'security_group_ids': [security_group_id],
...@@ -254,6 +373,7 @@ rm -rf $base_dir
'instance_type': args.instance_type,
'instance_profile_name': args.role_name,
'user_data': user_data,
}
return ec2_args
...@@ -276,6 +396,7 @@ def poll_sqs_ansible():
buf = []
task_report = [] # list of tasks for reporting
last_task = None
completed = 0
while True:
messages = []
while True:
...@@ -306,53 +427,68 @@ def poll_sqs_ansible():
now = int(time.time())
if buf:
try:
if (now - max([msg['recv_ts'] for msg in buf])) > args.msg_delay:
# sort by TS instead of recv_ts
# because the sqs timestamp is not as
# accurate
buf.sort(key=lambda k: k['msg']['TS'])
to_disp = buf.pop(0)
if 'START' in to_disp['msg']:
print '\n{:0>2.0f}:{:0>5.2f} {} : Starting "{}"'.format(
to_disp['msg']['TS'] / 60,
to_disp['msg']['TS'] % 60,
to_disp['msg']['PREFIX'],
to_disp['msg']['START']),
elif 'TASK' in to_disp['msg']:
print "\n{:0>2.0f}:{:0>5.2f} {} : {}".format(
to_disp['msg']['TS'] / 60,
to_disp['msg']['TS'] % 60,
to_disp['msg']['PREFIX'],
to_disp['msg']['TASK']),
last_task = to_disp['msg']['TASK']
elif 'OK' in to_disp['msg']:
if args.verbose:
print "\n"
for key, value in to_disp['msg']['OK'].iteritems():
print " {:<15}{}".format(key, value)
else:
invocation = to_disp['msg']['OK']['invocation']
module = invocation['module_name']
# 'set_fact' does not provide a changed value.
if module == 'set_fact':
changed = "OK"
elif to_disp['msg']['OK']['changed']:
changed = "*OK*"
else:
changed = "OK"
print " {}".format(changed),
task_report.append({
'TASK': last_task,
'INVOCATION': to_disp['msg']['OK']['invocation'],
'DELTA': to_disp['msg']['delta'],
})
elif 'FAILURE' in to_disp['msg']:
print " !!!! FAILURE !!!!",
for key, value in to_disp['msg']['FAILURE'].iteritems():
print " {:<15}{}".format(key, value)
raise Exception("Failed Ansible run")
elif 'STATS' in to_disp['msg']:
print "\n{:0>2.0f}:{:0>5.2f} {} : COMPLETE".format(
to_disp['msg']['TS'] / 60,
to_disp['msg']['TS'] % 60,
to_disp['msg']['PREFIX'])
# Since 3 ansible plays get run.
# We see the COMPLETE message 3 times
# wait till the last one to end listening
# for new messages.
completed += 1
if completed >= NUM_PLAYBOOKS:
return (to_disp['msg']['TS'], task_report)
except KeyError:
print "Failed to print status from message: {}".format(to_disp)
if not messages:
# wait 1 second between sqs polls
...@@ -375,7 +511,7 @@ def create_ami(instance_id, name, description):
break
else:
time.sleep(1)
except EC2ResponseError as e:
if e.error_code == 'InvalidAMIID.NotFound':
time.sleep(1)
else:
...@@ -388,6 +524,95 @@ def create_ami(instance_id, name, description):
return image_id
def launch_and_configure(ec2_args):
"""
Creates an sqs queue, launches an ec2 instance,
configures it and creates an AMI. Polls
SQS for updates
"""
print "{:<40}".format(
"Creating SQS queue and launching instance for {}:".format(run_id))
print
for k, v in ec2_args.iteritems():
if k != 'user_data':
print " {:<25}{}".format(k, v)
print
global sqs_queue
global instance_id
sqs_queue = sqs.create_queue(run_id)
sqs_queue.set_message_class(RawMessage)
res = ec2.run_instances(**ec2_args)
inst = res.instances[0]
instance_id = inst.id
print "{:<40}".format(
"Waiting for instance {} to reach running status:".format(instance_id)),
status_start = time.time()
for _ in xrange(EC2_RUN_TIMEOUT):
res = ec2.get_all_instances(instance_ids=[instance_id])
if res[0].instances[0].state == 'running':
status_delta = time.time() - status_start
run_summary.append(('EC2 Launch', status_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
status_delta / 60,
status_delta % 60)
break
else:
time.sleep(1)
else:
raise Exception("Timeout waiting for running status: {} ".format(
instance_id))
print "{:<40}".format("Waiting for system status:"),
system_start = time.time()
for _ in xrange(EC2_STATUS_TIMEOUT):
status = ec2.get_all_instance_status(inst.id)
if status[0].system_status.status == u'ok':
system_delta = time.time() - system_start
run_summary.append(('EC2 Status Checks', system_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
system_delta / 60,
system_delta % 60)
break
else:
time.sleep(1)
else:
raise Exception("Timeout waiting for status checks: {} ".format(
instance_id))
print
print "{:<40}".format(
"Waiting for user-data, polling sqs for Ansible events:")
(ansible_delta, task_report) = poll_sqs_ansible()
run_summary.append(('Ansible run', ansible_delta))
print
print "{} longest Ansible tasks (seconds):".format(NUM_TASKS)
for task in sorted(
task_report, reverse=True,
key=lambda k: k['DELTA'])[:NUM_TASKS]:
print "{:0>3.0f} {}".format(task['DELTA'], task['TASK'])
print " - {}".format(task['INVOCATION'])
print
print "{:<40}".format("Creating AMI:"),
ami_start = time.time()
ami = create_ami(instance_id, run_id, run_id)
ami_delta = time.time() - ami_start
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
ami_delta / 60,
ami_delta % 60)
run_summary.append(('AMI Build', ami_delta))
total_time = time.time() - start_time
all_stages = sum(run[1] for run in run_summary)
if total_time - all_stages > 0:
run_summary.append(('Other', total_time - all_stages))
run_summary.append(('Total', total_time))
return run_summary, ami
if __name__ == '__main__':
args = parse_args()
...@@ -399,15 +624,16 @@ if __name__ == '__main__':
if args.vars:
with open(args.vars) as f:
extra_vars_yml = f.read()
extra_vars = yaml.load(extra_vars_yml)
else:
extra_vars_yml = "---\n"
extra_vars = {}
if args.secure_vars:
secure_vars = args.secure_vars
else:
secure_vars = "ansible/vars/{}/{}-{}.yml".format(
args.environment, args.environment, args.deployment)
if args.stack_name:
stack_name = args.stack_name
else:
...@@ -420,6 +646,9 @@ if __name__ == '__main__':
print 'You must be able to connect to sqs and ec2 to use this script'
sys.exit(1)
if args.mongo_uri:
mongo_con = MongoConnection()
try:
sqs_queue = None
instance_id = None
...@@ -429,100 +658,32 @@
ec2_args = create_instance_args()
print "{:<40}".format( if args.noop:
"Creating SQS queue and launching instance for {}:".format(run_id)) print "Would have created sqs_queue with id: {}\nec2_args:".format(
print run_id)
for k, v in ec2_args.iteritems(): pprint(ec2_args)
if k != 'user_data': ami = "ami-00000"
print " {:<25}{}".format(k, v)
print
sqs_queue = sqs.create_queue(run_id)
sqs_queue.set_message_class(RawMessage)
res = ec2.run_instances(**ec2_args)
inst = res.instances[0]
instance_id = inst.id
print "{:<40}".format("Waiting for running status:"),
status_start = time.time()
for _ in xrange(EC2_RUN_TIMEOUT):
res = ec2.get_all_instances(instance_ids=[instance_id])
if res[0].instances[0].state == 'running':
status_delta = time.time() - status_start
run_summary.append(('EC2 Launch', status_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
status_delta / 60,
status_delta % 60)
break
else:
time.sleep(1)
else: else:
raise Exception("Timeout waiting for running status: {} ".format( run_summary, ami = launch_and_configure(ec2_args)
instance_id)) print
print "Summary:\n"
print "{:<40}".format("Waiting for system status:"),
system_start = time.time() for run in run_summary:
for _ in xrange(EC2_STATUS_TIMEOUT): print "{:<30} {:0>2.0f}:{:0>5.2f}".format(
status = ec2.get_all_instance_status(inst.id) run[0], run[1] / 60, run[1] % 60)
if status[0].system_status.status == u'ok': print "AMI: {}".format(ami)
system_delta = time.time() - system_start if args.mongo_uri:
run_summary.append(('EC2 Status Checks', system_delta)) mongo_con.update_ami(ami)
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format( mongo_con.update_deployment(ami)
system_delta / 60,
system_delta % 60)
break
else:
time.sleep(1)
else:
raise Exception("Timeout waiting for status checks: {} ".format(
instance_id))
user_start = time.time()
print
print "{:<40}".format(
"Waiting for user-data, polling sqs for Ansible events:")
(ansible_delta, task_report) = poll_sqs_ansible()
user_pre_ansible = time.time() - user_start - ansible_delta
run_summary.append(('Ansible run', ansible_delta))
print
print "{} longest Ansible tasks (seconds):".format(NUM_TASKS)
for task in sorted(
task_report, reverse=True,
key=lambda k: k['DELTA'])[:NUM_TASKS]:
print "{:0>3.0f} {}".format(task['DELTA'], task['TASK'])
print " - {}".format(task['INVOCATION'])
print
print "{:<40}".format("Creating AMI:"),
ami_start = time.time()
ami = create_ami(instance_id, run_id, run_id)
ami_delta = time.time() - ami_start
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
ami_delta / 60,
ami_delta % 60)
run_summary.append(('AMI Build', ami_delta))
total_time = time.time() - start_time
all_stages = sum(run[1] for run in run_summary)
if total_time - all_stages > 0:
run_summary.append(('Other', total_time - all_stages))
run_summary.append(('Total', total_time))
print
print "Summary:\n"
for run in run_summary:
print "{:<30} {:0>2.0f}:{:0>5.2f}".format(
run[0], run[1] / 60, run[1] % 60)
print "AMI: {}".format(ami)
finally:
print
if not args.no_cleanup and not args.noop:
if sqs_queue:
print "Cleaning up - Removing SQS queue - {}".format(run_id)
sqs.delete_queue(sqs_queue)
if instance_id:
print "Cleaning up - Terminating instance ID - {}".format(
instance_id)
# Check to make sure we have an instance id.
if instance_id:
ec2.terminate_instances(instance_ids=[instance_id])
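A hedged sketch of how the new options added in this file might be exercised together; the script path and every concrete value below are assumptions rather than something this diff states:
# Hypothetical invocation: -p/--play and the four --mongo-* flags are defined
# in parse_args() above; --noop corresponds to the args.noop checks and makes
# the script print ec2_args instead of launching an instance.
# The path util/vpc-tools/abbey.py and the URI/collection names are assumed.
python util/vpc-tools/abbey.py -p edxapp \
    --noop \
    --mongo-uri "mongodb://user:pass@mongo.example.com:10001" \
    --mongo-db deployments \
    --mongo-ami-collection amis \
    --mongo-deployment-collection deployment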
...@@ -32,7 +32,8 @@ def upload_file(file_path, bucket_name, key_name):
key.set_contents_from_filename(file_path)
key.set_acl('public-read')
url = "https://s3.amazonaws.com/{}/{}".format(bucket.name, key.name)
print( "URL: {}".format(url))
return url
def create_stack(stack_name, template, region='us-east-1', blocking=True,
......
...@@ -84,12 +84,17 @@ def _ssh_config(args):
for reservation in reservations:
for instance in reservation.instances:
if 'play' in instance.tags:
logical_id = instance.tags['play']
elif 'role' in instance.tags:
# deprecated, use "play" instead
logical_id = instance.tags['role']
elif 'group' in instance.tags:
logical_id = instance.tags['group']
elif 'aws:cloudformation:logical-id' in instance.tags:
logical_id = instance.tags['aws:cloudformation:logical-id']
else:
continue
instance_number = id_type_counter[logical_id]
id_type_counter[logical_id] += 1
......
#!/usr/bin/env python -u
#
# Updates DNS records for a stack
#
# Example usage:
#
# # update route53 entries for ec2 and rds instances
# # in the vpc with stack-name "stage-stack" and
# # create DNS entries in the example.com hosted
# # zone
#
# python vpc_dns.py -s stage-stack -z example.com
#
# # same thing but just print what will be done without
# # making any changes
#
# python vpc_dns.py -n -s stage-stack -z example.com
#
# # Create a new zone "vpc.example.com", update the parent
# # zone "example.com"
#
# python vpc_dns.py -s stage-stack -z vpc.example.com
#
import argparse
import boto
import datetime
from vpcutil import vpc_for_stack_name
from pprint import pprint
r53 = boto.connect_route53()
# Utility Functions def add_or_update_record(zone, record_name, record_type,
def add_or_update_record(zone, record_name, record_type, record_ttl, record_values): record_ttl, record_values):
zone_id = zone.Id.replace("/hostedzone/","") """
Creates or updates a DNS record in a hosted route53
zone
"""
status_msg = """
record_name: {}
record_type: {}
record_ttl: {}
record_values: {}
""".format(record_name, record_type,
record_ttl, record_values)
if args.noop:
print("Would have updated DNS record:\n{}".format(status_msg))
return
zone_id = zone.Id.replace("/hostedzone/", "")
records = r53.get_all_rrsets(zone_id)
old_records = {r.name[:-1]: r for r in records}
change_set = boto.route53.record.ResourceRecordSets()
# If the record name already points to something.
# Delete the existing connection.
if record_name in old_records.keys():
print("Deleting record:\n{}".format(status_msg))
change = change_set.add_change(
'DELETE',
record_name,
...@@ -38,110 +78,117 @@ def add_or_update_record(zone, record_name, record_type, record_ttl, record_valu
for value in record_values:
change.add_value(value)
print(change_set.to_xml())
r53.change_rrsets(zone_id, change_set.to_xml())
print("Updated DNS record:\n{}".format(status_msg))
def add_zone_to_parent(zone, parent):
#Add a reference for the new zone to its parent zone.
parent_name = parent.Name[:-1]
zone_name = zone.Name[:-1]
add_or_update_record(parent, zone_name, 'NS', 900, zone.NameServers) def get_or_create_hosted_zone(zone_name):
"""
Creates the zone and updates the parent
with the NS information in the zone
returns: created zone
"""
def get_or_create_hosted_zone(zone_name): zone = r53.get_hosted_zone_by_name(zone_name)
# Get the parent zone.
parent_zone_name = ".".join(zone_name.split('.')[1:]) parent_zone_name = ".".join(zone_name.split('.')[1:])
parent_zone = r53.get_hosted_zone_by_name(parent_zone_name) parent_zone = r53.get_hosted_zone_by_name(parent_zone_name)
if not parent_zone:
msg = "Parent zone({}) does not exist."
raise Exception(msg.format(parent_zone_name))
hosted_zone = r53.get_hosted_zone_by_name(zone_name)
if not hosted_zone:
r53.create_hosted_zone(zone_name,
comment="Created by automation.")
hosted_zone = r53.get_hosted_zone_by_name(zone_name)
add_zone_to_parent(hosted_zone, parent_zone)
return hosted_zone
def elbs_for_stack_name(stack_name):
vpc_id = vpc_for_stack_name(stack_name)
elbs = boto.connect_elb()
for elb in elbs.get_all_load_balancers():
if elb.vpc_id == vpc_id:
yield elb
def rdss_for_stack_name(stack_name):
vpc_id = vpc_for_stack_name(stack_name)
rds = boto.connect_rds()
for instance in rds.get_all_dbinstances():
if hasattr(instance, 'VpcId') and instance.VpcId == vpc_id:
yield instance
def ensure_service_dns(generated_dns_name, prefix, zone):
dns_template = "{prefix}.{zone_name}"
# Have to remove the trailing period that is on zone names.
zone_name = zone.Name[:-1]
dns_name = dns_template.format(prefix=prefix,
zone_name=zone_name)
add_or_update_record(zone, dns_name, 'CNAME', 600, [generated_dns_name])
if args.noop:
if parent_zone:
print("Would have created/updated zone: {} parent: {}".format(
zone_name, parent_zone_name))
else:
print("Would have created/updated zone: {}".format(
zone_name, parent_zone_name))
return zone
if not zone:
print("zone {} does not exist, creating".format(zone_name))
ts = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%SUTC')
zone = r53.create_hosted_zone(
zone_name, comment="Created by vpc_dns script - {}".format(ts))
if parent_zone:
print("Updating parent zone {}".format(parent_zone_name))
add_or_update_record(parent_zone,
zone_name, 'NS', 900,
zone.NameServers)
return zone
def update_elb_rds_dns(zone):
"""
Creates elb and rds CNAME records
in a zone for args.stack_name.
Uses the tags of the instances attached
to the ELBs to create the dns name
"""
elb_con = boto.connect_elb()
ec2_con = boto.connect_ec2()
rds_con = boto.connect_rds()
vpc_id = vpc_for_stack_name(args.stack_name)
if not zone and args.noop:
# use a placeholder for zone name
# if it doesn't exist
zone_name = "<zone name>"
else:
zone_name = zone.Name[:-1]
stack_rdss = [rds for rds in rds_con.get_all_dbinstances()
if hasattr(rds.subnet_group, 'vpc_id') and
rds.subnet_group.vpc_id == vpc_id]
for rds in stack_rdss:
fqdn = "{}.{}".format('rds', zone_name)
add_or_update_record(zone, fqdn, 'CNAME', 600,
[stack_rdss[0].endpoint[0]])
stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
if elb.vpc_id == vpc_id]
for elb in stack_elbs:
for inst in elb.instances:
instance = ec2_con.get_all_instances(
instance_ids=[inst.id])[0].instances[0]
try:
env_tag = instance.tags['environment']
if 'play' in instance.tags:
play_tag = instance.tags['play']
else:
# deprecated, for backwards compatibility
play_tag = instance.tags['role']
play_tag = instance.tags['role']
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
add_or_update_record(zone, fqdn, 'CNAME', 600, [elb.dns_name])
if play_tag == 'edxapp':
# create courses and studio CNAME records for edxapp
for name in ['courses', 'studio']:
fqdn = "{}.{}".format(name, zone_name)
add_or_update_record(zone, fqdn, 'CNAME',
600, [elb.dns_name])
break # only need the first instance for tag info
except KeyError:
print("Instance {}, attached to elb {} does not "
"have tags for environment and play".format(elb, inst))
raise
if __name__ == "__main__": if __name__ == "__main__":
description = "Give a cloudformation stack name, for an edx stack, setup \ description = "Give a cloudformation stack name, for an edx stack, setup \
DNS names for the ELBs in the stack." DNS names for the ELBs in the stack."
parser = argparse.ArgumentParser(description=description) parser = argparse.ArgumentParser(description=description)
parser.add_argument('-s', '--stack-name', required=True,
help="The name of the cloudformation stack.")
parser.add_argument('-n', '--noop',
help="Don't make any changes.", action="store_true",
default=False)
parser.add_argument('-z', '--zone-name', default="vpc.edx.org",
help="The name of the zone under which to "
"create the dns entries.")
args = parser.parse_args()
zone = get_or_create_hosted_zone(args.zone_name)
update_elb_rds_dns(zone)
# Create DNS for edxapp and xqueue.
elb_dns_settings = {
'edxapp': ['courses', 'studio'],
'xqueue': ['xqueue'],
'rabbit': ['rabbit'],
'xserver': ['xserver'],
'worker': ['worker'],
'forum': ['forum'],
}
# Create a zone for the stack.
parent_zone = 'vpc.edx.org'
if args.parent_zone:
parent_zone = args.parent_zone
zone_name = "{}.{}".format(stack_name, parent_zone)
zone = get_or_create_hosted_zone(zone_name)
stack_elbs = elbs_for_stack_name(stack_name)
for elb in stack_elbs:
for role, dns_prefixes in elb_dns_settings.items():
#FIXME this breaks when the service name is in the stack name ie. testforumstack.
# Get the tags for the instances in this elb and compare the service against the role tag.
if role in elb.dns_name.lower():
for prefix in dns_prefixes:
ensure_service_dns(elb.dns_name, prefix, zone)
# Add a DNS name for the RDS
stack_rdss = list(rdss_for_stack_name(stack_name))
if len(stack_rdss) != 1:
msg = "Didn't find exactly one RDS in this VPC(Found {})"
raise Exception(msg.format(len(stack_rdss)))
else:
ensure_service_dns(stack_rdss[0].endpoint[0], 'rds', zone)
...@@ -35,7 +35,7 @@ Vagrant.configure("2") do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "empanada-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140130-focaccia-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
......
...@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "empanada"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140130-focaccia-fullstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.hostsupdater.aliases = ["preview.localhost"]
......
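For anyone picking up the new boxes, a minimal sketch of booting the fullstack image defined above; the plugin install step is an assumption based on the config.hostsupdater.aliases setting in the Vagrantfile:
# Minimal sketch: boot the focaccia fullstack box.
# vagrant-hostsupdater is assumed to be the plugin behind
# config.hostsupdater.aliases; install it before the first "vagrant up".
vagrant plugin install vagrant-hostsupdater
vagrant up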
#!/usr/bin/env python
import os
import subprocess
import re
import json
import glob
ROOT_DIR = '/opt/wwc'
VERSION_FILE = '/opt/wwc/versions.html'
VERSION_JSON = '/opt/wwc/versions.json'
GLOB_DIRS = [
os.path.join(ROOT_DIR, '*/.git'),
os.path.join(ROOT_DIR, '*/*/.git'),
os.path.join(ROOT_DIR, 'data/*/.git'),
]
TEMPLATE = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title></title>
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.5.2/jquery.min.js"></script>
<style>
body {{
font-size: 2em;
color: #000;
font-family: monospace;
}}
</style>
</head>
<body>
<ul>
{BODY}
</ul>
</body>
<script type="text/javascript">
$(".collapse").click(function () {{
$(this).parent().children().toggle();
$(this).toggle();
}});
$(document).ready(function() {{
$('.collapse').parent().children().toggle();
$('.collapse').toggle();
}});
</script>
</html>
"""
def main():
assert os.path.isdir(ROOT_DIR)
# using glob with fixed depths is much
# faster than os.walk for finding all .git repos
git_dirs = [git_dir for glob_dir in GLOB_DIRS
for git_dir in glob.glob(glob_dir)]
git_dirs = filter(lambda f: os.path.isdir(f), git_dirs)
version_info = ""
versions = {}
for git_dir in git_dirs:
repo_dir = git_dir.replace('/.git', '')
repo_dir_basename = os.path.basename(repo_dir)
# get the revision of the repo
p = subprocess.Popen(['/usr/bin/git', 'rev-parse', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=git_dir)
rev_output, rev_err = p.communicate()
revision = rev_output.splitlines()[0][:8]
# dictionary that will be written out as JSON
versions[repo_dir_basename] = revision
# use reflogs for the repo history
p = subprocess.Popen(
['/usr/bin/git', 'log', '-g', '--abbrev-commit', '--pretty=oneline'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=git_dir)
reflog_output, reflog_err = p.communicate()
# ignore lines that do not have 'HEAD'
reflog_lines = filter(lambda x: x.find('HEAD') >= 0,
reflog_output.splitlines())
# get the repo name, `git remote -v` seems like the fastest option
p = subprocess.Popen(['/usr/bin/git', 'remote', '-v'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=git_dir)
remote_output, remote_err = p.communicate()
repo_url = None
for line in remote_output.splitlines():
if ' (fetch)' in line:
repo_url = re.search(
'(git@|git://)(.*) \(fetch\)', line).group(2).replace(
':', '/')
break
if not repo_url:
raise Exception("Unable to parse repo name")
version_info += """
<li> <span class="collapse"> <a href="http://{0}">{1}</a> - {2}
[ click for history (most recent last) ]</span>
<ul>""".format(repo_url, repo_dir_basename, revision)
ref_prev = None
for line in reflog_lines[:0:-1]:
ref = line.split()[0]
version_info += """
<li><span class="collapse">{ref} -
<a href="http://{repo}/compare/{ref}...{revision}">[diff current]</a>
""".format(ref=ref, repo=repo_url, revision=revision)
if ref_prev:
version_info += """
<a href="http://{repo}/compare/{ref_prev}...{ref}">[diff previous]</a>
""".format(repo=repo_url, ref=ref, ref_prev=ref_prev)
version_info += "</span></li>"
ref_prev = ref
version_info += """
</ul></li>"""
with open(VERSION_FILE, 'w') as f:
f.write(TEMPLATE.format(BODY=version_info))
with open(VERSION_JSON, 'w') as f:
f.write(json.dumps(versions, sort_keys=True, indent=4,
separators=(',', ': ')))
if __name__ == '__main__':
main()
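For orientation, the versions.json written above maps repository directory basenames to 8-character revisions; the repo names and hashes in this sketch are illustrative only:
# Illustrative shape of /opt/wwc/versions.json (names and hashes made up):
# {
#     "edx-platform": "3233bac1",
#     "xqueue": "92832ab0"
# }
cat /opt/wwc/versions.json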