Commit 29f6483e by Jason Zhu

Merge pull request #2 from edx/master

up to date
parents 1e9b3ebe 5a9076ee
...@@ -3,7 +3,7 @@ language: python ...@@ -3,7 +3,7 @@ language: python
python: python:
- "2.7" - "2.7"
install: install:
- "sudo apt-get install -y npm python-demjson" - "sudo apt-get install -y nodejs python-demjson"
- "pip install --allow-all-external -r requirements.txt" - "pip install --allow-all-external -r requirements.txt"
- "pip install --allow-all-external demjson" - "pip install --allow-all-external demjson"
script: script:
......
...@@ -32,3 +32,4 @@ Ray Hooker <ray.hooker@gmail.com> ...@@ -32,3 +32,4 @@ Ray Hooker <ray.hooker@gmail.com>
David Pollack <david@sologourmand.com> David Pollack <david@sologourmand.com>
Rodolphe Quiedeville <rodolphe@quiedeville.org> Rodolphe Quiedeville <rodolphe@quiedeville.org>
Matjaz Gregoric <mtyaka@gmail.com> Matjaz Gregoric <mtyaka@gmail.com>
Ben Patterson <bpatterson@edx.org>
- Role: analytics-api
- Added a new role for the analytics-api Django app. Currently a private repo
- Logrotation now happens hourly by default for all logs.
- Role: xqwatcher, xqueue, nginx, edxapp, common - Role: xqwatcher, xqueue, nginx, edxapp, common
- Moving nginx basic authorization flag and credentials to the common role - Moving nginx basic authorization flag and credentials to the common role
- Basic auth will be turned on by default
- Role: Edxapp - Role: Edxapp
- Turn on code sandboxing by default and allow the jailed code to be able to write - Turn on code sandboxing by default and allow the jailed code to be able to write
...@@ -9,6 +15,6 @@ ...@@ -9,6 +15,6 @@
- The repo.txt requirements file is no longer being processed in anyway. This file was removed from edxplatform - The repo.txt requirements file is no longer being processed in anyway. This file was removed from edxplatform
via pull #3487(https://github.com/edx/edx-platform/pull/3487) via pull #3487(https://github.com/edx/edx-platform/pull/3487)
- Update CMS_HOSTNAME default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`. - Update `CMS_HOSTNAME` default to allow any hostname that starts with `studio` along with `prod-studio` or `stage-studio`.
- Start a change log to keep track of backwards incompatible changes and deprecations. - Start a change log to keep track of backwards incompatible changes and deprecations.
#
# Overview:
# This play needs to be run per environment-deployment and you will need to
# provide the boto environment and vpc_id as arguments
#
# ansible-playbook -i 'localhost,' ./vpc-migrate-analytics_api-edge-stage.yml \
# -e 'profile=edge vpc_id=vpc-416f9b24'
#
# Caveats
#
# - This requires ansible 1.6
# - Requires the following branch of Ansible, /e0d/add-instance-profile, from
#   https://github.com/e0d/ansible.git
# - This play isn't fully idempotent because of an ec2 module update issue
#   with ASGs. This can be worked around by deleting the ASG and re-running
#   the play.
# - The instance_profile_name will need to be created in advance as there
#   isn't a way to do so from ansible.
#
# Prerequisites:
# Create an IAM EC2 role
#
- name: Add resources for the Analytics API
  hosts: localhost
  connection: local
  gather_facts: False
  tasks:
    # Fails intermittently with the following error:
    # The specified rule does not exist in this security group
    - name: Create instance security group
      ec2_group:
        profile: "{{ profile }}"
        description: "Open up SSH access"
        name: "{{ security_group }}"
        vpc_id: "{{ vpc_id }}"
        region: "{{ ec2_region }}"
        rules:
          - proto: tcp
            from_port: "{{ sec_group_ingress_from_port }}"
            to_port: "{{ sec_group_ingress_to_port }}"
            cidr_ip: "{{ item }}"
      with_items: sec_group_ingress_cidrs
      register: created_sec_group
      ignore_errors: True

    - name: debug
      debug:
        msg: "Registered created_sec_group: {{ created_sec_group }}"

    # Needs ansible 1.7 for vpc support of elbs
    # - name: Create elb security group
    #   ec2_group:
    #     profile: "{{ profile }}"
    #     description: "ELB security group"
    #     name: "ELB-{{ security_group }}"
    #     vpc_id: "{{ vpc_id }}"
    #     region: "{{ ec2_region }}"
    #     rules:
    #       - proto: tcp
    #         from_port: "443"
    #         to_port: "443"
    #         cidr_ip: "0.0.0.0/0"
    #   register: created_elb_sec_group
    #   ignore_errors: True

    # Needs 1.7 for VPC support
    # - name: "Create ELB"
    #   ec2_elb_lb:
    #     profile: "{{ profile }}"
    #     region: "{{ ec2_region }}"
    #     zones:
    #       - us-east-1b
    #       - us-east-1c
    #     name: "{{ edp }}"
    #     state: present
    #     security_group_ids: "{{ created_elb_sec_group.group_id }}"
    #     listeners:
    #       - protocol: https
    #         load_balancer_port: 443
    #         instance_protocol: http # optional, defaults to value of protocol setting
    #         instance_port: 80
    #         # ssl certificate required for https or ssl
    #         ssl_certificate_id: "{{ ssl_cert }}"

    # instance_profile_name was added by me in my fork
    - name: Create the launch configuration
      ec2_lc:
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ lc_name }}"
        image_id: "{{ lc_ami }}"
        key_name: "{{ key_name }}"
        # First (and only) group created by the security-group task above.
        security_groups: "{{ created_sec_group.results[0].group_id }}"
        instance_type: "{{ instance_type }}"
        instance_profile_name: "{{ instance_profile_name }}"
        volumes:
          - device_name: "/dev/sda1"
            volume_size: "{{ instance_volume_size }}"

    - name: Create ASG
      ec2_asg:
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ asg_name }}"
        launch_config_name: "{{ lc_name }}"
        load_balancers: "{{ elb_name }}"
        availability_zones:
          - us-east-1b
          - us-east-1c
        min_size: 0
        max_size: 2
        desired_capacity: 1
        vpc_zone_identifier: "{{ subnets|join(',') }}"
        instance_tags:
          Name: "{{ env }}-{{ deployment }}-{{ play }}"
          autostack: "true"
          environment: "{{ env }}"
          deployment: "{{ deployment }}"
          play: "{{ play }}"
          services: "{{ play }}"
      register: asg

    - name: debug
      debug:
        msg: "DEBUG: {{ asg }}"

    - name: Create scale up policy
      ec2_scaling_policy:
        state: present
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ edp }}-ScaleUpPolicy"
        adjustment_type: "ChangeInCapacity"
        asg_name: "{{ asg_name }}"
        scaling_adjustment: 1
        min_adjustment_step: 1
        cooldown: 60
      register: scale_up_policy

    - name: debug
      debug:
        msg: "Registered scale_up_policy: {{ scale_up_policy }}"

    - name: Create scale down policy
      ec2_scaling_policy:
        state: present
        profile: "{{ profile }}"
        region: "{{ ec2_region }}"
        name: "{{ edp }}-ScaleDownPolicy"
        adjustment_type: "ChangeInCapacity"
        asg_name: "{{ asg_name }}"
        scaling_adjustment: -1
        min_adjustment_step: 1
        cooldown: 60
      register: scale_down_policy

    - name: debug
      debug:
        msg: "Registered scale_down_policy: {{ scale_down_policy }}"

    #
    # Sometimes the scaling policy reports itself changed, but
    # does not return data about the policy. It's bad enough
    # that consistent data isn't returned when things
    # have and have not changed; this makes writing idempotent
    # tasks difficult.
    - name: create high-cpu alarm
      ec2_metric_alarm:
        state: present
        region: "{{ ec2_region }}"
        name: "cpu-high"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: ">="
        threshold: 90.0
        period: 300
        evaluation_periods: 2
        unit: "Percent"
        description: "Scale-up if CPU > 90% for 10 minutes"
        dimensions: {"AutoScalingGroupName": "{{ asg_name }}"}
        alarm_actions: ["{{ scale_up_policy.arn }}"]
      when: scale_up_policy.arn is defined

    - name: create low-cpu alarm
      ec2_metric_alarm:
        state: present
        region: "{{ ec2_region }}"
        name: "cpu-low"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: "<="
        threshold: 50.0
        period: 300
        evaluation_periods: 2
        unit: "Percent"
        description: "Scale-down if CPU < 50% for 10 minutes"
        dimensions: {"AutoScalingGroupName": "{{ asg_name }}"}
        alarm_actions: ["{{ scale_down_policy.arn }}"]
      when: scale_down_policy.arn is defined
\ No newline at end of file
...@@ -2865,15 +2865,13 @@ ...@@ -2865,15 +2865,13 @@
"Key":"environment", "Key":"environment",
"Value":{ "Value":{
"Ref":"EnvironmentTag" "Ref":"EnvironmentTag"
}, }
"PropagateAtLaunch":true
}, },
{ {
"Key":"deployment", "Key":"deployment",
"Value":{ "Value":{
"Ref":"DeploymentTag" "Ref":"DeploymentTag"
}, }
"PropagateAtLaunch":true
} }
], ],
"UserData": { "Fn::Base64" : { "Fn::Join" : ["", [ "UserData": { "Fn::Base64" : { "Fn::Join" : ["", [
......
{
"AWSTemplateFormatVersion":"2010-09-09",
"Description":"Separate VPC for database clones and replicas.",
"Parameters":{
"EnvironmentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which environment to use during the configuration phase, e.g., stage, prod, sandbox",
"Default":"prod"
},
"DeploymentTag":{
"Type":"String",
"Description":"A tag value applied to the hosts in the VPC indicating which deployment this is, e.g., edx, edge, <university>, <org>",
"Default":"edx"
},
"KeyName":{
"Type":"String",
"Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server",
"Default":"deployment-201407"
},
"ClassB":{
"Default":"1",
"Description":"The second octet of the Class B to be allocated for this VPC. 10.?.xxx.xxx",
"Type":"Number",
"MinValue":"0",
"MaxValue":"255",
"ConstraintDescription":"ClassB value must be between 0 and 255."
}
},
"Mappings":{
"SubnetConfig":{
"VPC": { "CIDR":".0.0/16" },
"Data01": { "CIDR":".50.0/24" },
"Data02": { "CIDR":".51.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
"us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" },
"us-west-2": { "AZone0":"us-west-2a", "AZone1":"us-west-2b", "AZone2":"us-west-2c" },
"eu-west-1": { "AZone0":"eu-west-1a", "AZone1":"eu-west-1b", "AZone2":"eu-west-1c" },
"sa-east-1": { "AZone0":"sa-east-1a", "AZone1":"sa-east-1b", "AZone2":"sa-east-1c" },
"ap-southeast-1": { "AZone0":"ap-southeast-1a", "AZone1":"ap-southeast-1b", "AZone2":"ap-southeast-1c" },
"ap-southeast-2": { "AZone0":"ap-southeast-2a", "AZone1":"ap-southeast-2b", "AZone2":"ap-southeast-2c" },
"ap-northeast-1": { "AZone0":"ap-northeast-1a", "AZone1":"ap-northeast-1b", "AZone2":"ap-northeast-1c" }
}
},
"Resources":{
"EdxVPC":{
"Type":"AWS::EC2::VPC",
"Properties":{
"EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true",
"CidrBlock": { "Fn::Join": ["", ["10.", { "Ref": "ClassB" }, { "Fn::FindInMap": [ "SubnetConfig", "VPC", "CIDR"]}]]},
"InstanceTenancy":"default"
}
},
"Data01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data01",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-sanitized-dbs"
}
]
}
},
"Data02":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::Join": ["", [
"10.", { "Ref": "ClassB"},
{"Fn::FindInMap":[
"SubnetConfig",
"Data02",
"CIDR"
]}
]]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone1"
]
},
"Tags":[
{
"Key":"Name",
"Value":"Subnet-for-non-sanitized-clones"
}
]
}
},
"PrivateRouteTable":{
"Type":"AWS::EC2::RouteTable",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"PrivateSubnetRouteTableAssociationData01":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateSubnetRouteTableAssociationData02":{
"Type":"AWS::EC2::SubnetRouteTableAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"RouteTableId":{
"Ref":"PrivateRouteTable"
}
}
},
"PrivateNetworkAcl":{
"Type":"AWS::EC2::NetworkAcl",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"Tags":[
{
"Key":"Application",
"Value":{
"Ref":"AWS::StackId"
}
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"InboundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"false",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"OutBoundPrivateNetworkAclEntry":{
"Type":"AWS::EC2::NetworkAclEntry",
"Properties":{
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
},
"RuleNumber":"100",
"Protocol":"6",
"RuleAction":"allow",
"Egress":"true",
"CidrBlock":"0.0.0.0/0",
"PortRange":{
"From":"0",
"To":"65535"
}
}
},
"PrivateSubnetNetworkAclAssociationData01":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data01"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"PrivateSubnetNetworkAclAssociationData02":{
"Type":"AWS::EC2::SubnetNetworkAclAssociation",
"Properties":{
"SubnetId":{
"Ref":"Data02"
},
"NetworkAclId":{
"Ref":"PrivateNetworkAcl"
}
}
},
"EdxDataSecurityGroup":{
"Type":"AWS::EC2::SecurityGroup",
"Properties":{
"GroupDescription":"Open up access to the data subnet",
"VpcId":{
"Ref":"EdxVPC"
},
"SecurityGroupIngress":[
{
"IpProtocol":"tcp",
"FromPort":"3306",
"ToPort":"3306",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"27017",
"ToPort":"27017",
"CidrIp":"0.0.0.0/0"
}
]
}
},
"EdxDBSubnetGroup":{
"Type":"AWS::RDS::DBSubnetGroup",
"Properties":{
"DBSubnetGroupDescription":"Subnets available for the RDS DB Instance",
"SubnetIds":[
{
"Ref":"Data01"
},
{
"Ref":"Data02"
}
]
}
},
"DBSecurityGroup":{
"Type":"AWS::RDS::DBSecurityGroup",
"Properties":{
"EC2VpcId":{
"Ref":"EdxVPC"
},
"GroupDescription":"Data access"
}
}
}
}
...@@ -225,12 +225,16 @@ class Ec2Inventory(object): ...@@ -225,12 +225,16 @@ class Ec2Inventory(object):
cache_path = config.get('ec2', 'cache_path') cache_path = config.get('ec2', 'cache_path')
if not os.path.exists(cache_path): if not os.path.exists(cache_path):
os.makedirs(cache_path) os.makedirs(cache_path)
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_tags = cache_path + "/ansible-ec2.tags.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if 'AWS_PROFILE' in os.environ:
aws_profile = "{}-".format(os.environ.get('AWS_PROFILE'))
else:
aws_profile = ""
self.cache_path_cache = cache_path + "/{}ansible-ec2.cache".format(aws_profile)
self.cache_path_tags = cache_path + "/{}ansible-ec2.tags.cache".format(aws_profile)
self.cache_path_index = cache_path + "/{}ansible-ec2.index".format(aws_profile)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
def parse_cli_args(self): def parse_cli_args(self):
''' Command line argument processing ''' ''' Command line argument processing '''
......
# A simple utility play to add a public key to the authorized key
# file for the ubuntu user.
# You must pass in the entire line that you are adding.
# Example: ansible-playbook add-ubuntu-key.yml -c local -i 127.0.0.1, \
#   -e "public_key=deployment-201407" \
#   -e owner=jarv -e keyfile=/home/jarv/.ssh/authorized_keys
- hosts: all
  vars:
    # Number of instances to operate on at a time
    serial_count: 1
    owner: ubuntu
    keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
  serial: "{{ serial_count }}"
  tasks:
    - fail: msg="You must pass in a public_key"
      when: public_key is not defined
    - fail: msg="public_key does not exist in secrets"
      when: ubuntu_public_keys[public_key] is not defined
    # Scratch file used to validate the key before touching authorized_keys.
    - command: mktemp
      register: mktemp
    - name: Validate the public key before we add it to authorized_keys
      copy: >
        content="{{ ubuntu_public_keys[public_key] }}"
        dest={{ mktemp.stdout }}
    # This tests the public key and will not continue if it does not look valid
    - command: ssh-keygen -l -f {{ mktemp.stdout }}
    - file: >
        path={{ mktemp.stdout }}
        state=absent
    - lineinfile: >
        dest={{ keyfile }}
        line="{{ ubuntu_public_keys[public_key] }}"
    - file: >
        path={{ keyfile }}
        owner={{ owner }}
        mode=0600
# Install the aide intrusion-detection role plus the standard
# monitoring/logging roles, each gated by its COMMON_ENABLE_* flag.
- name: Deploy aide IDS
  hosts: all
  sudo: True
  gather_facts: True
  roles:
    - aide
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
- name: Deploy Analytics API
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    # Role-level monitoring is disabled here; the COMMON_ENABLE_* flags
    # below still control the dedicated monitoring roles.
    ENABLE_DATADOG: False
    ENABLE_SPLUNKFORWARDER: False
    ENABLE_NEWRELIC: False
  roles:
    - role: nginx
      nginx_sites:
        - analytics-api
    - aws
    - analytics-api
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: all
  sudo: True
  vars:
    # By default take instances in and out of the elb(s) they
    # are attached to
    # To skip elb operations use "-e elb_pre_post=false"
    elb_pre_post: true
    # Number of instances to operate on at a time
    serial_count: 1
  # NOTE: the original had a duplicate `serial` key (1 and the templated
  # value); only the templated value is kept since last-wins applied anyway.
  serial: "{{ serial_count }}"
  pre_tasks:
    - action: ec2_facts
    - debug: var="{{ ansible_ec2_instance_id }}"
      when: elb_pre_post
    - name: Instance De-register
      local_action: ec2_elb_local_1.6.2
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        region: us-east-1
        state: absent
        wait_timeout: 60
      sudo: False
      when: elb_pre_post
  tasks:
    # NOTE(review): this debug iterates list.results before `list` is
    # registered below — presumably relies on a prior run; confirm.
    - debug: msg="{{ ansible_ec2_local_ipv4 }}"
      with_items: list.results
    # Build the rabbit node name for every other host in the play.
    - shell: echo "rabbit@ip-{{ item|replace('.', '-') }}"
      when: item != ansible_ec2_local_ipv4
      with_items: hostvars.keys()
      register: list
    - command: rabbitmqctl stop_app
    - command: rabbitmqctl join_cluster {{ item.stdout }}
      when: item.stdout is defined
      with_items: list.results
    - command: rabbitmqctl start_app
  post_tasks:
    - debug: var="{{ ansible_ec2_instance_id }}"
      when: elb_pre_post
    - name: Register instance in the elb
      local_action: ec2_elb_local_1.6.2
      args:
        instance_id: "{{ ansible_ec2_instance_id }}"
        ec2_elbs: "{{ item }}"
        region: us-east-1
        state: present
        wait_timeout: 60
      with_items: ec2_elbs
      sudo: False
      when: elb_pre_post
...@@ -22,25 +22,24 @@ ...@@ -22,25 +22,24 @@
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1 region: us-east-1
state: absent state: absent
wait_timeout: 60
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
roles: roles:
- aws - aws
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog - role: datadog
when: COMMON_ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic - role: newrelic
when: COMMON_ENABLE_NEWRELIC when: COMMON_ENABLE_NEWRELIC
- role: nginx
nginx_sites:
- xqueue
- xqueue
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
- rabbitmq - rabbitmq
- datadog
- splunkforwarder
post_tasks: post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}" - debug: var="{{ ansible_ec2_instance_id }}"
when: elb_pre_post when: elb_pre_post
...@@ -51,6 +50,7 @@ ...@@ -51,6 +50,7 @@
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60
with_items: ec2_elbs with_items: ec2_elbs
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
......
...@@ -4,91 +4,6 @@ ...@@ -4,91 +4,6 @@
sudo: True sudo: True
tasks: tasks:
- name: Switch the mongo db to use ephemeral
file: >
name=/mnt/mongodb
state=directory
owner=mongodb
group=mongodb
tags: update_mongo_data
- name: update the mongo config to use the new mongo dir
shell: >
sed -i 's#^dbpath=.*#dbpath=/mnt/mongodb#' /etc/mongodb.conf
tags: update_mongo_data
- name: restart mongodb
service: >
name=mongodb
state=restarted
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell : >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep comment | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_forum
tags: update_mongo_data
- name: grab the most recent backup from s3 for forums
shell : >
/edx/bin/s3cmd get {{ s3cmd_out_forum.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_forum.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_forum.stdout|basename }}
chdir=/mnt
when: s3cmd_out_forum.stdout is defined
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell : >
/edx/bin/s3cmd ls s3://edx-mongohq/mongohq_backups/ | grep prod-edx | sort | tail -1 | awk '{ print $4 }'
register: s3cmd_out_modulestore
tags: update_mongo_data
- name: grab the most recent backup from s3 for prod-edx
shell : >
/edx/bin/s3cmd get {{ s3cmd_out_modulestore.stdout }} --skip-existing
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: untar the s3 backup
shell: >
tar zxf {{ s3cmd_out_modulestore.stdout|basename }}
chdir=/mnt
tags: update_mongo_data
when: s3cmd_out_modulestore.stdout is defined
- name: Restore the mongo data for the forums
shell: >
mongorestore --drop -d cs_comments_service /mnt/comments-prod
tags: update_mongo_data
- name: Restore the mongo data for the modulestore
shell: >
mongorestore --drop -d edxapp /mnt/prod-edx
tags: update_mongo_data
# recreate users after the restore
- name: create a mongodb users
mongodb_user: >
database={{ item.database }}
name={{ item.user }}
password={{ item.password }}
state=present
with_items:
- user: cs_comments_service
password: password
database: cs_comments_service
- user: exdapp
password: password
database: edxapp
# WARNING - calling lineinfile on a symlink # WARNING - calling lineinfile on a symlink
# will convert the symlink to a file! # will convert the symlink to a file!
# don't use /edx/etc/server-vars.yml here # don't use /edx/etc/server-vars.yml here
...@@ -108,6 +23,17 @@ ...@@ -108,6 +23,17 @@
- "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}" - "EDXAPP_MYSQL_PASSWORD: {{ EDXAPP_MYSQL_PASSWORD }}"
tags: update_edxapp_mysql_host tags: update_edxapp_mysql_host
- name: Update mongo to point to the sandbox mongo clone
lineinfile: >
dest=/edx/app/edx_ansible/server-vars.yml
line="{{ item }}"
with_items:
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
- "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}"
- "EDXAPP_MONGO_PASS: {{ EDXAPP_MONGO_PASS }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform - name: call update on edx-platform
shell: > shell: >
/edx/bin/update edx-platform master /edx/bin/update edx-platform master
......
#
# Requires MySQL-python be installed for system python
# This play will create databases and user for an application.
# It can be run like so:
#
# ansible-playbook -i 'localhost,' create_analytics_reports_dbs.yml -e@./db.yml
#
# where the content of db.yml contains the following dictionaries
#
# database_connection: &default_connection
# login_host: "mysql.example.org"
# login_user: "root"
# login_password: "super-secure-password"
# DEFAULT_ENCODING: "utf8"
# databases:
# reports:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# application:
# state: "present"
# encoding: "{{ DEFAULT_ENCODING }}"
# <<: *default_connection
# database_users:
# migrate:
# state: "present"
# password: "user-with-ddl-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
# <<: *default_connection
# runtime:
# state: "present"
# password: "user-with-dml-privs"
# host: "%"
# privileges:
# - "reports.*:SELECT"
# - "wwc.*:SELECT,INSERT,UPDATE,DELETE"
# <<: *default_connection
- name: Create databases and users
  hosts: all
  connection: local
  gather_facts: False
  tasks:
    # Install required library, currently this needs to be available
    # to system python.
    - name: install python mysqldb module
      pip: name={{ item }} state=present
      sudo: yes
      with_items:
        - MySQL-python
    # One database per entry in the `databases` dict (see header example).
    - name: create mysql databases
      mysql_db: >
        db={{ item.key }}
        state={{ item.value.state }}
        encoding={{ item.value.encoding }}
        login_host={{ item.value.login_host }}
        login_user={{ item.value.login_user }}
        login_password={{ item.value.login_password }}
      with_dict: databases
    # One user per entry in `database_users`; privileges are joined with '/'
    # as the mysql_user module expects.
    - name: create mysql users and assign privileges
      mysql_user: >
        name="{{ item.key }}"
        priv="{{ '/'.join(item.value.privileges) }}"
        password="{{ item.value.password }}"
        host={{ item.value.host }}
        login_host={{ item.value.login_host }}
        login_user={{ item.value.login_user }}
        login_password={{ item.value.login_password }}
        append_privs=yes
      with_dict: database_users
...@@ -16,14 +16,16 @@ ...@@ -16,14 +16,16 @@
- xqueue - xqueue
- xserver - xserver
- certs - certs
- analytics-api
nginx_default_sites: nginx_default_sites:
- lms - lms
- edxlocal - role: edxlocal
tags: edxlocal
- mongo - mongo
- { role: 'edxapp', celery_worker: True } - { role: 'edxapp', celery_worker: True }
- edxapp - edxapp
- role: demo - role: demo
tags: ['demo'] tags: demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' } - { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
...@@ -33,6 +35,7 @@ ...@@ -33,6 +35,7 @@
- ora - ora
- certs - certs
- edx_ansible - edx_ansible
- analytics-api
- role: datadog - role: datadog
when: COMMON_ENABLE_DATADOG when: COMMON_ENABLE_DATADOG
- role: splunkforwarder - role: splunkforwarder
......
...@@ -4,12 +4,12 @@ ...@@ -4,12 +4,12 @@
gather_facts: False gather_facts: False
vars: vars:
keypair: continuous-integration keypair: continuous-integration
instance_type: m1.medium instance_type: t2.medium
security_group: sandbox security_group: sandbox-vpc
# ubuntu 12.04 # ubuntu 12.04
ami: ami-d0f89fb9 ami: ami-f478849c
region: us-east-1 region: us-east-1
zone: us-east-1b zone: us-east-1c
instance_tags: instance_tags:
environment: sandbox environment: sandbox
github_username: temp github_username: temp
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
dns_zone: m.sandbox.edx.org dns_zone: m.sandbox.edx.org
name_tag: sandbox-temp name_tag: sandbox-temp
elb: false elb: false
vpc_subnet_id: subnet-cd867aba
roles: roles:
- role: launch_ec2 - role: launch_ec2
keypair: "{{ keypair }}" keypair: "{{ keypair }}"
...@@ -33,6 +34,8 @@ ...@@ -33,6 +34,8 @@
dns_name: "{{ dns_name }}" dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}" dns_zone: "{{ dns_zone }}"
zone: "{{ zone }}" zone: "{{ zone }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
terminate_instance: true terminate_instance: true
instance_profile_name: sandbox instance_profile_name: sandbox
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
- name: syncdb and migrate - name: syncdb and migrate
shell: > shell: >
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput --settings=aws_migrate {{ db_dry_run }} python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate {{ db_dry_run }}
environment: environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}" DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}" DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1 region: us-east-1
state: absent state: absent
wait_timeout: 60
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
roles: roles:
...@@ -38,6 +39,7 @@ ...@@ -38,6 +39,7 @@
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60
with_items: ec2_elbs with_items: ec2_elbs
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
# Configure an admin instance with jenkins and asgard. # Configure an instance with the admin jenkins.
- name: Configure instance(s) - name: Configure instance(s)
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
roles: roles:
- aws
- edx_ansible
- user
- jenkins_admin - jenkins_admin
- hotg
- alton
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
# Configure a Jenkins master instance # Configure a Jenkins master instance for testeng
# This has the Jenkins Java app, but none of the requirements # This has the Jenkins Java app, but none of the requirements
# to run the tests. # to run the tests.
...@@ -8,6 +8,9 @@ ...@@ -8,6 +8,9 @@
gather_facts: True gather_facts: True
vars: vars:
COMMON_DATA_DIR: "/mnt" COMMON_DATA_DIR: "/mnt"
COMMON_ENABLE_DATADOG: True
roles: roles:
- common - common
- role: datadog
when: COMMON_ENABLE_DATADOG
- jenkins_master - jenkins_master
# Install mongo and the MMS monitoring agent, plus the standard
# monitoring/logging roles, each gated by its COMMON_ENABLE_* flag.
- name: Deploy MongoDB
  hosts: all
  sudo: True
  gather_facts: True
  roles:
    - mongo
    - mongo_mms
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
notify: notify:
- "restart edxapp" - "restart edxapp"
- "restart workers" - "restart workers"
tags:
- deploy
- name: syncdb and migrate - name: syncdb and migrate
shell: > shell: >
...@@ -40,6 +42,8 @@ ...@@ -40,6 +42,8 @@
notify: notify:
- "restart edxapp" - "restart edxapp"
- "restart workers" - "restart workers"
tags:
- deploy
handlers: handlers:
- name: restart edxapp - name: restart edxapp
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1 region: us-east-1
state: absent state: absent
wait_timeout: 60
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
roles: roles:
...@@ -39,6 +40,7 @@ ...@@ -39,6 +40,7 @@
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60
with_items: ec2_elbs with_items: ec2_elbs
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
# A simple utility play to remove a public key from the authorized key
# file for the ubuntu user
# You must pass in the entire line that you are removing
- hosts: all
  vars:
    # Number of instances to operate on at a time
    serial_count: 1
    owner: ubuntu
    keyfile: "/home/{{ owner }}/.ssh/authorized_keys"
  serial: "{{ serial_count }}"
  tasks:
    - fail: msg="You must pass in a public_key"
      when: public_key is not defined
    - fail: msg="public_key does not exist in secrets"
      when: ubuntu_public_keys[public_key] is not defined
    - command: mktemp
      register: mktemp
    # This command will fail if this returns zero lines which will prevent
    # the last key from being removed
    - shell: >
        grep -Fv '{{ ubuntu_public_keys[public_key] }}' {{ keyfile }} > {{ mktemp.stdout }}
    # Validate every remaining key line before replacing the file.
    - shell: >
        while read line; do ssh-keygen -lf /dev/stdin <<<$line; done <{{ mktemp.stdout }}
        executable=/bin/bash
      register: keycheck
    - fail: msg="public key check failed!"
      when: keycheck.stderr != ""
    - command: cp {{ mktemp.stdout }} {{ keyfile }}
    - file: >
        path={{ keyfile }}
        owner={{ owner }}
        mode=0600
    - file: >
        path={{ mktemp.stdout }}
        state=absent
    - shell: wc -l < {{ keyfile }}
      register: line_count
    - fail: msg="There should only be one line in ubuntu's authorized_keys"
      when: line_count.stdout|int != 1
# Install the snort intrusion-detection role plus the standard
# monitoring/logging roles, each gated by its COMMON_ENABLE_* flag.
- name: Deploy snort IDS
  hosts: all
  sudo: True
  gather_facts: True
  roles:
    - snort
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1 region: us-east-1
state: absent state: absent
wait_timeout: 60
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
tasks: tasks:
...@@ -33,6 +34,7 @@ ...@@ -33,6 +34,7 @@
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60
with_items: ec2_elbs with_items: ec2_elbs
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
# ansible-playbook -c ssh -vvvv --user=ubuntu -i ec2.py deployer.yml -e "@/path/to/secure/ansible/vars/edx_admin.yml" --limit="tag_aws_cloudformation_stack-name_<admin_stack_name>" # ansible-playbook -vvv -c ssh -i admin_url, vpc_admin.yml -e "@path_to_common_overrides" -e "@path_to_deployment_specific_overrides"
# You will need to create a gh_users.yml that contains the github names of users that should have login access to the machines.
# Setup user login on the bastion
- name: Configure Bastion
  hosts: tag_play_bastion
  sudo: True
  gather_facts: False
  roles:
    - aws
# Configure an admin instance with jenkins and asgard. # Configure an admin instance with jenkins and asgard.
- name: Configure instance(s) - name: Configure instance(s)
hosts: tag_play_admin hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
roles: roles:
- aws - aws
- edx_ansible - edx_ansible
- jenkins_admin - user
- hotg - jenkins_admin
- newrelic - hotg
- alton
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
region: us-east-1 region: us-east-1
state: absent state: absent
wait_timeout: 60
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
roles: roles:
...@@ -45,6 +46,7 @@ ...@@ -45,6 +46,7 @@
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
region: us-east-1 region: us-east-1
state: present state: present
wait_timeout: 60
with_items: ec2_elbs with_items: ec2_elbs
sudo: False sudo: False
when: elb_pre_post when: elb_pre_post
...@@ -25,6 +25,7 @@ ALTON_HANDLE: 'alton' ...@@ -25,6 +25,7 @@ ALTON_HANDLE: 'alton'
ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379' ALTON_REDIS_URL: 'redis://fakeuser:redispassword@localhost:6379'
ALTON_HTTPSERVER_PORT: '8081' ALTON_HTTPSERVER_PORT: '8081'
ALTON_WORLD_WEATHER_KEY: !!null ALTON_WORLD_WEATHER_KEY: !!null
ALTON_AWS_CREDENTIALS: !!null
# Needed if you want to build AMIs from alton. # Needed if you want to build AMIs from alton.
ALTON_JENKINS_URL: !!null ALTON_JENKINS_URL: !!null
...@@ -55,6 +56,7 @@ alton_environment: ...@@ -55,6 +56,7 @@ alton_environment:
WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}" WILL_HTTPSERVER_PORT: "{{ ALTON_HTTPSERVER_PORT }}"
WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}" WORLD_WEATHER_ONLINE_KEY: "{{ ALTON_WORLD_WEATHER_KEY }}"
JENKINS_URL: "{{ ALTON_JENKINS_URL }}" JENKINS_URL: "{{ ALTON_JENKINS_URL }}"
BOTO_CONFIG: "{{ alton_app_dir }}/.boto"
# #
# OS packages # OS packages
......
# Render ~alton/.boto from boto.j2; readable by the web group only
# (mode 0640) since it contains AWS credentials.
- name: configure the boto profiles for alton
  template: >
    src="boto.j2"
    dest="{{ alton_app_dir }}/.boto"
    owner="{{ alton_user }}"
    group="{{ common_web_user }}"
    mode="0640"
  notify: restart alton
- name: checkout the code - name: checkout the code
git: > git: >
dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}" dest="{{ alton_code_dir }}" repo="{{ alton_source_repo }}"
version="{{ alton_version }}" accept_hostkey=yes version="{{ alton_version }}" accept_hostkey=yes
sudo_user: "{{ alton_user }}" sudo_user: "{{ alton_user }}"
register: alton_checkout
notify: restart alton notify: restart alton
- name: install the requirements - name: install the requirements
...@@ -55,3 +65,5 @@ ...@@ -55,3 +65,5 @@
state=started state=started
when: not disable_edx_services when: not disable_edx_services
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
# Gather EC2 metadata (instance id, placement region) for the tagging task.
- name: get instance information
  action: ec2_facts
  tags:
    - deploy
# Record the deployed repo and short sha as an EC2 resource tag on this
# instance. Note: the nested `tags` under `args` are the EC2 tags written to
# the instance; the task-level `tags` are ansible task tags.
# alton_checkout is registered by the git checkout task in deploy.yml.
- name: tag instance
  ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
  args:
    tags:
      "version:alton" : "{{ alton_source_repo }} {{ alton_checkout.after |truncate(7,True,'')}}"
  when: alton_checkout.after is defined
  tags:
    - deploy
{# Renders a boto config file with one [profile <deployment>] section per
   entry in ALTON_AWS_CREDENTIALS (mapping: deployment name -> dict with
   access_id / secret_key).
   NOTE(review): iteritems() only exists on Python 2 dicts -- confirm the
   Jinja runtime is Python 2 before reusing this template elsewhere. #}
{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role analytics-api
#
# SSH private key (the key contents, not a path) used to clone the private
# analytics API repo; tasks/main.yml fails fast when this is unset.
ANALYTICS_API_GIT_IDENTITY: !!null
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY
ANALYTICS_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-analytics-api"
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
# Git ref (branch, tag or sha) of the API repo to deploy.
ANALYTICS_API_VERSION: "master"
# Default dummy user, override this!!
# Mapping of API username -> token; deploy.yml runs `manage.py set_api_key`
# once per entry.
ANALYTICS_API_USERS:
  "dummy-api-user": "changeme"
# Serialized verbatim (to_nice_yaml) into the service's yaml config file;
# keys are Django settings consumed by analyticsdataserver.
ANALYTICS_API_CONFIG:
  ANALYTICS_DATABASE: 'reports'
  SECRET_KEY: 'Your secret key here'
  TIME_ZONE: 'America/New_York'
  LANGUAGE_CODE: 'en-us'
  # email config
  EMAIL_HOST: 'smtp.example.com'
  EMAIL_HOST_PASSWORD: ""
  EMAIL_HOST_USER: ""
  EMAIL_PORT: 587
  API_AUTH_TOKEN: 'put-your-api-token-here'
  STATICFILES_DIRS: []
  STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
  # db config
  DATABASE_OPTIONS:
    connect_timeout: 10
  DATABASES:
    # rw user
    default:
      ENGINE: 'django.db.backends.mysql'
      NAME: 'analytics-api'
      USER: 'api001'
      PASSWORD: 'password'
      HOST: 'localhost'
      PORT: '3306'
    # read-only user
    reports:
      ENGINE: 'django.db.backends.mysql'
      NAME: 'reports'
      USER: 'reports001'
      PASSWORD: 'password'
      HOST: 'localhost'
      PORT: '3306'
ANALYTICS_API_GUNICORN_WORKERS: "2"
#
# vars are namespace with the module name.
#
# Environment exported for manage.py invocations and the gunicorn wrapper.
analytics_api_environment:
  DJANGO_SETTINGS_MODULE: "analyticsdataserver.settings.production"
  ANALYTICS_API_CFG: "{{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml"
analytics_api_role_name: "analytics-api"
analytics_api_service_name: "analytics-api"
analytics_api_user: "analytics-api"
analytics_api_app_dir: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_home: "{{ COMMON_APP_DIR }}/{{ analytics_api_service_name }}"
analytics_api_venv_base: "{{ analytics_api_home }}/venvs"
analytics_api_venv_dir: "{{ analytics_api_venv_base }}/{{ analytics_api_service_name }}"
analytics_api_venv_bin: "{{ analytics_api_venv_dir }}/bin"
analytics_api_code_dir: "{{ analytics_api_app_dir }}/edx-analytics-data-api"
analytics_api_conf_dir: "{{ analytics_api_home }}"
# gunicorn binds locally; nginx is presumably expected to proxy from
# ANALYTICS_API_NGINX_PORT -- confirm with the nginx role.
analytics_api_gunicorn_host: "127.0.0.1"
analytics_api_gunicorn_port: "8100"
analytics_api_gunicorn_timeout: "300"
analytics_api_django_settings: "production"
analytics_api_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/edx-analytics-data-api"
# Host-key checking is disabled for the deploy-key clone of the private repo.
analytics_api_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ analytics_api_git_identity_file }}"
analytics_api_git_identity_file: "{{ analytics_api_app_dir }}/git-identity"
analytics_api_log_dir: "{{ COMMON_LOG_DIR }}/{{ analytics_api_service_name }}"
analytics_api_requirements_base: "{{ analytics_api_code_dir }}/requirements"
# pip requirement files installed into the venv, in this order.
analytics_api_requirements:
  - base.txt
  - production.txt
  - optional.txt
#
# OS packages
#
analytics_api_debian_pkgs:
  - 'libmysqlclient-dev'
analytics_api_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role analytics-api
#
# Overview:
#
# Restarts the supervisor-managed analytics-api program; notified by deploy
# tasks that change code, requirements, or configuration.
#
- name: "restart the analytics service"
  supervisorctl_local: >
    name={{ analytics_api_service_name }}
    supervisorctl_path={{ supervisor_ctl }}
    config={{ supervisor_cfg }}
    state=restarted
  # Skipped when services are deliberately left stopped -- presumably
  # disable_edx_services is set for image builds; confirm with the common role.
  when: not disable_edx_services
\ No newline at end of file
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Role includes for role analytics-api
#
# Example:
#
# dependencies:
#   - {
#       role: my_role
#       my_role_var0: "foo"
#       my_role_var1: "bar"
#     }
# edx_service is parameterized with this role's names so it provisions the
# shared app/venv/log directory layout and OS packages for analytics-api.
dependencies:
  - role: edx_service
    edx_role_name: "{{ analytics_api_role_name }}"
    edx_service_name: "{{ analytics_api_service_name }}"
  - supervisor
---
# Install a read-only deploy key so the private API repo can be cloned;
# removed again at the end of this file.
- name: install read-only ssh key
  copy: >
    content="{{ ANALYTICS_API_GIT_IDENTITY }}" dest={{ analytics_api_git_identity_file }}
    owner={{ analytics_api_user }} group={{ analytics_api_user }} mode=0600
# Shell environment file sourced by the gunicorn wrapper script.
- name: setup the analytics-api env file
  template: >
    src="edx/app/analytics-api/analytics_api_env.j2"
    dest="{{ analytics_api_app_dir }}/analytics_api_env"
    owner={{ analytics_api_user }}
    group={{ analytics_api_user }}
    mode=0644
- name: checkout code
  git: >
    dest={{ analytics_api_code_dir }} repo={{ analytics_api_source_repo }} version={{ ANALYTICS_API_VERSION }}
    accept_hostkey=yes
    ssh_opts="{{ analytics_api_git_ssh_opts }}"
  # Registered so tag_ec2.yml can record the deployed sha as an instance tag.
  register: analytics_api_code_checkout
  notify: "restart the analytics service"
  sudo_user: "{{ analytics_api_user }}"
# App config rendered from ANALYTICS_API_CONFIG; its path is exported to the
# app via ANALYTICS_API_CFG in analytics_api_environment.
- name: write out app config file
  template: >
    src=edx/app/analytics-api/analytics-api.yaml.j2
    dest={{ COMMON_CFG_DIR }}/{{ analytics_api_service_name }}.yaml
    mode=0644 owner={{ analytics_api_user }} group={{ analytics_api_user }}
  notify: restart the analytics service
- name: install application requirements
  pip: >
    requirements="{{ analytics_api_requirements_base }}/{{ item }}"
    virtualenv="{{ analytics_api_venv_dir }}" state=present
  sudo_user: "{{ analytics_api_user }}"
  notify: restart the analytics service
  with_items: analytics_api_requirements
# Schema changes only run when the play explicitly opts in with migrate_db=yes.
- name: syncdb and migrate
  shell: >
    chdir={{ analytics_api_code_dir }}
    DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
    DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
    {{ analytics_api_venv_bin }}/python ./manage.py syncdb --migrate --noinput
  sudo_user: "{{ analytics_api_user }}"
  environment: "{{ analytics_api_environment }}"
  when: migrate_db is defined and migrate_db|lower == "yes"
- name: run collectstatic
  shell: >
    chdir={{ analytics_api_code_dir }}
    {{ analytics_api_venv_bin }}/python manage.py collectstatic --noinput
  sudo_user: "{{ analytics_api_user }}"
  environment: "{{ analytics_api_environment }}"
# Provision one API token per entry in ANALYTICS_API_USERS (user -> token).
- name: create api users
  shell: >
    chdir={{ analytics_api_code_dir }}
    {{ analytics_api_venv_bin }}/python manage.py set_api_key {{ item.key }} {{ item.value }}
  sudo_user: "{{ analytics_api_user }}"
  environment: "{{ analytics_api_environment }}"
  with_dict: ANALYTICS_API_USERS
# Wrapper script that supervisor invokes; mode 0650 gives the owning
# supervisor_user rw and the web user's group r-x.
- name: write out the supervisior wrapper
  template: >
    src=edx/app/analytics-api/analytics-api.sh.j2
    dest={{ analytics_api_app_dir }}/{{ analytics_api_service_name }}.sh
    mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
  notify: restart the analytics service
- name: write supervisord config
  template: >
    src=edx/app/supervisor/conf.d.available/analytics-api.conf.j2
    dest="{{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf"
    owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
  notify: restart the analytics service
# available -> enabled symlink, mirroring the sites-available/sites-enabled
# convention; skipped (like the update below) when services are disabled.
- name: enable supervisor script
  file: >
    src={{ supervisor_available_dir }}/{{ analytics_api_service_name }}.conf
    dest={{ supervisor_cfg_dir }}/{{ analytics_api_service_name }}.conf
    state=link
    force=yes
  notify: restart the analytics service
  when: not disable_edx_services
- name: update supervisor configuration
  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
  when: not disable_edx_services
# Expose venv executables under COMMON_BIN_DIR with a ".analytics-api" suffix
# (e.g. python.analytics-api) so operators can run them without activating
# the venv; split('.')[0] strips extensions like django-admin.py -> django-admin.
- name: create symlinks from the venv bin dir
  file: >
    src="{{ analytics_api_venv_bin }}/{{ item }}"
    dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
    state=link
  with_items:
    - python
    - pip
    - django-admin.py
- name: create symlinks from the repo dir
  file: >
    src="{{ analytics_api_code_dir }}/{{ item }}"
    dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.analytics-api"
    state=link
  with_items:
    - manage.py
# The deploy key is only needed for the checkout above; don't leave it on disk.
- name: remove read-only ssh key for the content repo
  file: path={{ analytics_api_git_identity_file }} state=absent
- include: tag_ec2.yml tags=deploy
  when: COMMON_TAG_EC2_INSTANCE
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role analytics-api
#
# Overview:
#
# Install the Analytics Data API server, a python
# django application that runs under gunicorn
#
# Dependencies:
#
# Example play:
#
# - name: Deploy Analytics API
#   hosts: all
#   sudo: True
#   gather_facts: True
#   vars:
#     ENABLE_DATADOG: False
#     ENABLE_SPLUNKFORWARDER: False
#     ENABLE_NEWRELIC: False
#   roles:
#     - aws
#     - analytics-api
#
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
# The clone of the private API repo needs a deploy key; fail fast with a
# clear message instead of letting the git task error out mid-deploy.
# (fix: "an private" -> "a private" in the user-facing message)
- fail: msg="You must provide a private key for the analytics repo"
  when: not ANALYTICS_API_GIT_IDENTITY
- include: deploy.yml tags=deploy
---
# Gather EC2 metadata (instance id, placement region) for the tagging task.
- name: get instance information
  action: ec2_facts
# Record the deployed repo and short sha as an EC2 resource tag on this
# instance; analytics_api_code_checkout is registered by the git task in
# deploy.yml, so this is a no-op when the checkout did not run.
- name: tag instance
  ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
  args:
    tags:
      "version:analytics_api" : "{{ analytics_api_source_repo }} {{ analytics_api_code_checkout.after |truncate(7,True,'')}}"
  when: analytics_api_code_checkout.after is defined
#!/usr/bin/env bash
# {{ ansible_managed }}
{# Pick the gunicorn entry point: when New Relic is enabled the server is
   launched through newrelic-admin so the agent wraps the workers. #}
{% if COMMON_ENABLE_NEWRELIC %}
{% set executable = analytics_api_venv_bin + '/newrelic-admin run-program ' + analytics_api_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = analytics_api_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC %}
export NEW_RELIC_APP_NAME="{{ ANALYTICS_API_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
{# analytics_api_env exports DJANGO_SETTINGS_MODULE and ANALYTICS_API_CFG
   (see analytics_api_environment in the role defaults). #}
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
---
# {{ ansible_managed }}
{# The app config file is simply ANALYTICS_API_CONFIG serialized as YAML. #}
{{ ANALYTICS_API_CONFIG | to_nice_yaml }}
{# Renders the shell environment file sourced by the gunicorn wrapper: one
   `export NAME="value"` line per entry in analytics_api_environment.
   NOTE(review): the `if value` guard silently drops falsy values (empty
   string, false, 0) -- presumably intentional; confirm before relying on
   exporting such values through this template. #}
# {{ ansible_managed }}
{% for name,value in analytics_api_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
{# Supervisor program definition for the analytics API gunicorn wrapper. #}
# {{ ansible_managed }}
[program:{{ analytics_api_service_name }}]
# Wrapper script exports the env and execs gunicorn (analytics-api.sh.j2).
command={{ analytics_api_app_dir }}/analytics-api.sh
user={{ common_web_user }}
directory={{ analytics_api_code_dir }}
# supervisord expands %(program_name)s via Python %-formatting, so the `s`
# conversion must directly follow the closing paren. The original
# `%(program_name)-stdout.log` consumed the `s` of "stdout" as the
# conversion character and rendered `analytics-apitdout.log`.
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
# Signal the whole process group so gunicorn workers stop with the master.
killasgroup=true
stopasgroup=true
...@@ -18,7 +18,7 @@ AS_DB_ANALYTICS_HOST: 'localhost' ...@@ -18,7 +18,7 @@ AS_DB_ANALYTICS_HOST: 'localhost'
AS_SERVER_PORT: '9000' AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8' AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO' AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '4' AS_WORKERS: '2'
# add public keys to enable the automator user # add public keys to enable the automator user
# for running manage.py commands # for running manage.py commands
......
...@@ -18,7 +18,7 @@ ANALYTICS_DB_ANALYTICS_HOST: 'localhost' ...@@ -18,7 +18,7 @@ ANALYTICS_DB_ANALYTICS_HOST: 'localhost'
ANALYTICS_SERVER_PORT: '9000' ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8' ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO' ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '4' ANALYTICS_WORKERS: '2'
DATABASES: DATABASES:
default: &databases_default default: &databases_default
......
--- ---
apache_port: 80 apache_ports:
- 80
apache_sites:
- lms
apache_template_dir: '.'
--- ---
- name: restart apache - name: restart apache
service: name=apache2 state=restarted service: name=apache2 state=restarted
tags: deploy
---
# Role metadata: apache builds on the common role (base packages/users).
dependencies:
- common
# Requires nginx package
---
# Parameterized task file: included with site_name (and state) to install or
# remove a single apache vhost. Guarded on apache_role_run, a fact registered
# by the role's main task file after the base apache install.
- name: Copying apache config {{ site_name }}
  template: src={{ item }} dest=/etc/apache2/sites-available/{{ site_name }}
  # First match wins: a playbook-local template overrides the role default.
  first_available_file:
    - "{{ local_dir }}/apache/templates/{{ site_name }}.j2"
    # seems like paths in first_available_file must be relative to the playbooks dir
    - "roles/apache/templates/{{ site_name }}.j2"
  notify: restart apache
  when: apache_role_run is defined
  tags:
    - apache
    - update
# state is supplied by the caller: link enables the site, absent disables it.
- name: Creating apache2 config link {{ site_name }}
  file: src=/etc/apache2/sites-available/{{ site_name }} dest=/etc/apache2/sites-enabled/{{ site_name }} state={{ state }} owner=root group=root
  notify: restart apache
  when: apache_role_run is defined
  tags:
    - apache
    - update
#Installs apache and runs the lms wsgi # Installs apache and runs the lms wsgi by default
--- ---
- name: Installs apache and mod_wsgi from apt - name: Installs apache and mod_wsgi from apt
...@@ -7,30 +7,29 @@ ...@@ -7,30 +7,29 @@
- apache2 - apache2
- libapache2-mod-wsgi - libapache2-mod-wsgi
notify: restart apache notify: restart apache
tags:
- apache
- install
- name: disables default site - name: disables default site
command: a2dissite 000-default command: a2dissite 000-default
notify: restart apache notify: restart apache
tags:
- apache
- install
- name: rewrite apache ports conf - name: rewrite apache ports conf
template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root template: dest=/etc/apache2/ports.conf src=ports.conf.j2 owner=root group=root
notify: restart apache notify: restart apache
tags:
- apache
- install
- name: Register the fact that apache role has run - debug: msg={{ apache_sites }}
command: echo True
register: apache_role_run
tags:
- apache
- install
- name: Copying apache configs for {{ apache_sites }}
template: >
src={{ apache_template_dir }}/{{ item }}.j2
dest=/etc/apache2/sites-available/{{ item }}
owner=root group={{ common_web_user }} mode=0640
notify: restart apache
with_items: apache_sites
- include: apache_site.yml state=link site_name=lms - name: Creating apache2 config links for {{ apache_sites }}
file: >
src=/etc/apache2/sites-available/{{ item }}
dest=/etc/apache2/sites-enabled/{{ item }}
state=link owner=root group=root
notify: restart apache
with_items: apache_sites
NameVirtualHost *:{{ apache_port }} {%- for port in apache_ports -%}
Listen {{ apache_port }} NameVirtualHost *:{{ port }}
Listen {{ port }}
{% endfor %}
...@@ -56,7 +56,7 @@ aws_debian_pkgs: ...@@ -56,7 +56,7 @@ aws_debian_pkgs:
aws_pip_pkgs: aws_pip_pkgs:
- https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz - https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz
- awscli - awscli
- boto==2.20.1 - boto==2.29.1
aws_redhat_pkgs: [] aws_redhat_pkgs: []
aws_s3cmd_version: s3cmd-1.5.0-beta1 aws_s3cmd_version: s3cmd-1.5.0-beta1
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
environment: environment:
GIT_SSH: "{{ certs_git_ssh }}" GIT_SSH: "{{ certs_git_ssh }}"
register: certs_checkout
notify: restart certs notify: restart certs
- name: remove read-only ssh key for the certs repo - name: remove read-only ssh key for the certs repo
...@@ -96,4 +97,7 @@ ...@@ -96,4 +97,7 @@
- python - python
- pip - pip
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: certs_installed=true - set_fact: certs_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:certs" : "{{ CERT_REPO }} {{ certs_checkout.after|truncate(7,True,'') }}"
when: certs_checkout.after is defined
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
# where edX is installed # where edX is installed
# Set global htpasswd credentials # Set global htpasswd credentials
COMMON_ENABLE_BASIC_AUTH: True COMMON_ENABLE_BASIC_AUTH: False
COMMON_HTPASSWD_USER: edx COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx COMMON_HTPASSWD_PASS: edx
...@@ -44,17 +44,18 @@ COMMON_SSH_PASSWORD_AUTH: "no" ...@@ -44,17 +44,18 @@ COMMON_SSH_PASSWORD_AUTH: "no"
# the migrate user is granted table alter privs on all dbs # the migrate user is granted table alter privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only' COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: !!null COMMON_MYSQL_READ_ONLY_PASS: 'password'
COMMON_MYSQL_ADMIN_USER: 'admin' COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: !!null COMMON_MYSQL_ADMIN_PASS: 'password'
COMMON_MYSQL_MIGRATE_USER: 'migrate' COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: !!null COMMON_MYSQL_MIGRATE_PASS: 'password'
COMMON_MONGO_READ_ONLY_USER: 'read_only' COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: !!null COMMON_MONGO_READ_ONLY_PASS: !!null
COMMON_ENABLE_DATADOG: False COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_ENABLE_NEWRELIC: False COMMON_ENABLE_NEWRELIC: False
COMMON_TAG_EC2_INSTANCE: False
common_debian_pkgs: common_debian_pkgs:
- ntp - ntp
- ack-grep - ack-grep
......
...@@ -60,17 +60,17 @@ ...@@ -60,17 +60,17 @@
owner=root group=root mode=644 owner=root group=root mode=644
notify: restart rsyslogd notify: restart rsyslogd
- name: Install logrotate configuration for edX
template: >
dest=/etc/logrotate.d/edx-services
src=etc/logrotate.d/edx_logrotate.j2
owner=root group=root mode=644
# This is in common to keep all logrotation config # This is in common to keep all logrotation config
# in the same role # in the same role
- name: Create hourly subdirectory in logrotate.d - name: Create hourly subdirectory in logrotate.d
file: path=/etc/logrotate.d/hourly state=directory file: path=/etc/logrotate.d/hourly state=directory
- name: Install logrotate configuration for edX
template: >
dest=/etc/logrotate.d/hourly/edx-services
src=etc/logrotate.d/hourly/edx_logrotate.j2
owner=root group=root mode=644
- name: Install logrotate configuration for tracking file - name: Install logrotate configuration for tracking file
template: > template: >
dest=/etc/logrotate.d/hourly/tracking.log dest=/etc/logrotate.d/hourly/tracking.log
...@@ -83,10 +83,6 @@ ...@@ -83,10 +83,6 @@
src=etc/cron.hourly/logrotate src=etc/cron.hourly/logrotate
owner=root group=root mode=555 owner=root group=root mode=555
# This can be removed after new release of edX
- name: Remove old tracking.log config from /etc/logrotate.d
file: path=/etc/logrotate.d/tracking.log state=absent
- name: update /etc/hosts - name: update /etc/hosts
template: src=hosts.j2 dest=/etc/hosts template: src=hosts.j2 dest=/etc/hosts
when: COMMON_HOSTNAME when: COMMON_HOSTNAME
......
******************************************************************* *******************************************************************
* * * _ __ __ *
* _ _| |\ \/ / * * _ _| |\ \/ / *
* / -_) _` | > < * * / -_) _` | > < *
* \___\__,_|/_/\_\ * * \___\__,_|/_/\_\ *
......
...@@ -4,9 +4,13 @@ ...@@ -4,9 +4,13 @@
copytruncate copytruncate
delaycompress delaycompress
dateext dateext
dateformat -%Y%m%d-%s
missingok missingok
notifempty notifempty
daily daily
rotate 90 rotate 90
size 1M size 1M
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
} }
...@@ -40,5 +40,5 @@ edx_ansible_user: "edx-ansible" ...@@ -40,5 +40,5 @@ edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt" edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
# edX configuration repo # edX configuration repo
configuration_version: master configuration_version: release
edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml" edx_ansible_var_file: "{{ edx_ansible_app_dir }}/server-vars.yml"
...@@ -12,7 +12,7 @@ IFS="," ...@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run -v add verbosity to edx_ansible run
-h this -h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code <repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code edx-analytics-data-api
<version> - can be a commit or tag <version> - can be a commit or tag
EO EO
...@@ -49,6 +49,8 @@ repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' ...@@ -49,6 +49,8 @@ repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2'
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'" repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'" repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'" repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'"
if [[ -z $1 || -z $2 ]]; then if [[ -z $1 || -z $2 ]]; then
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
# #
# #
# Tasks for role edx_service # Tasks for role edx_service
# #
# Overview: # Overview:
# #
# This role performs the repetive tasks that most edX roles # This role performs the repetive tasks that most edX roles
...@@ -43,14 +43,35 @@ ...@@ -43,14 +43,35 @@
with_items: with_items:
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}" - "{{ COMMON_APP_DIR }}/{{ edx_service_name }}"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs" - "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/venvs"
- "{{ COMMON_APP_DIR }}/{{ edx_service_name }}/data"
- name: create edx_service data and staticfiles dir
file: >
path="{{ item }}"
state=directory
owner="{{ edx_service_name }}"
group="{{ common_web_group }}"
with_items:
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/data"
- "{{ COMMON_DATA_DIR }}/{{ edx_service_name }}/staticfiles"
- name: create edx_service log dir
file: >
path="{{ item }}"
state=directory
owner="syslog"
group="syslog"
with_items:
- "{{ COMMON_LOG_DIR }}/{{ edx_service_name }}"
# Replace dashes with underscores to support roles that use
# dashes (the role vars will contain underscores)
- name: install a bunch of system packages on which edx_service relies - name: install a bunch of system packages on which edx_service relies
apt: pkg={{ item }} state=present apt: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_debian_pkgs" with_items: "{{ edx_service_name.replace('-', '_') }}_debian_pkgs"
when: ansible_distribution in common_debian_variants when: ansible_distribution in common_debian_variants
- name: install a bunch of system packages on which edx_service relies - name: install a bunch of system packages on which edx_service relies
yum: pkg={{ item }} state=present yum: pkg={{ item }} state=present
with_items: "{{ edx_service_name }}_redhat_pkgs" with_items: "{{ edx_service_name.replace('-', '_') }}_redhat_pkgs"
when: ansible_distribution in common_redhat_variants when: ansible_distribution in common_redhat_variants
\ No newline at end of file
...@@ -11,12 +11,18 @@ ...@@ -11,12 +11,18 @@
# #
# Defaults specified here should not contain # Defaults specified here should not contain
# any secrets or host identifying information. # any secrets or host identifying information.
#
# Variables set to "None" will be converted to None
# when the edxapp config is written to disk.
EDXAPP_LMS_BASE: '' EDXAPP_LMS_BASE: ""
EDXAPP_PREVIEW_LMS_BASE: '' EDXAPP_PREVIEW_LMS_BASE: ""
EDXAPP_CMS_BASE: '' EDXAPP_CMS_BASE: ""
EDXAPP_AWS_ACCESS_KEY_ID: '' # 'None' will be written out as null in
EDXAPP_AWS_SECRET_ACCESS_KEY: '' # the configuration on disk
EDXAPP_AWS_ACCESS_KEY_ID: "None"
EDXAPP_AWS_SECRET_ACCESS_KEY: "None"
EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ] EDXAPP_XQUEUE_BASIC_AUTH: [ "{{ COMMON_HTPASSWD_USER }}", "{{ COMMON_HTPASSWD_PASS }}" ]
EDXAPP_XQUEUE_DJANGO_AUTH: EDXAPP_XQUEUE_DJANGO_AUTH:
username: 'lms' username: 'lms'
...@@ -32,11 +38,9 @@ EDXAPP_MONGO_DB_NAME: 'edxapp' ...@@ -32,11 +38,9 @@ EDXAPP_MONGO_DB_NAME: 'edxapp'
EDXAPP_MYSQL_DB_NAME: 'edxapp' EDXAPP_MYSQL_DB_NAME: 'edxapp'
EDXAPP_MYSQL_USER: 'edxapp001' EDXAPP_MYSQL_USER: 'edxapp001'
EDXAPP_MYSQL_USER_ADMIN: 'root' EDXAPP_MYSQL_USER_ADMIN: 'root'
EDXAPP_MYSQL_USER_MIGRATE: 'migrate'
EDXAPP_MYSQL_PASSWORD: 'password' EDXAPP_MYSQL_PASSWORD: 'password'
EDXAPP_MYSQL_PASSWORD_READ_ONLY: 'password' EDXAPP_MYSQL_PASSWORD_READ_ONLY: 'password'
EDXAPP_MYSQL_PASSWORD_ADMIN: 'password' EDXAPP_MYSQL_PASSWORD_ADMIN: 'password'
EDXAPP_MYSQL_PASSWORD_MIGRATE: 'password'
EDXAPP_MYSQL_REPLICA_DB_NAME: "{{ EDXAPP_MYSQL_DB_NAME }}" EDXAPP_MYSQL_REPLICA_DB_NAME: "{{ EDXAPP_MYSQL_DB_NAME }}"
EDXAPP_MYSQL_REPLICA_USER: "{{ EDXAPP_MYSQL_USER }}" EDXAPP_MYSQL_REPLICA_USER: "{{ EDXAPP_MYSQL_USER }}"
EDXAPP_MYSQL_REPLICA_PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}" EDXAPP_MYSQL_REPLICA_PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}"
...@@ -54,27 +58,36 @@ EDXAPP_MEMCACHE: [ 'localhost:11211' ] ...@@ -54,27 +58,36 @@ EDXAPP_MEMCACHE: [ 'localhost:11211' ]
EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080' EDXAPP_COMMENTS_SERVICE_URL: 'http://localhost:18080'
EDXAPP_COMMENTS_SERVICE_KEY: 'password' EDXAPP_COMMENTS_SERVICE_KEY: 'password'
EDXAPP_EDXAPP_SECRET_KEY: '' EDXAPP_EDXAPP_SECRET_KEY: ""
EDXAPP_OEE_URL: 'http://localhost:18060/' EDXAPP_OEE_URL: 'http://localhost:18060/'
EDXAPP_OEE_USER: 'lms' EDXAPP_OEE_USER: 'lms'
EDXAPP_OEE_PASSWORD: 'password' EDXAPP_OEE_PASSWORD: 'password'
EDXAPP_ANALYTICS_API_KEY: '' EDXAPP_ANALYTICS_API_KEY: ""
EDXAPP_ZENDESK_USER: '' EDXAPP_PAYMENT_SUPPORT_EMAIL: "billing@example.com"
EDXAPP_ZENDESK_API_KEY: '' EDXAPP_ZENDESK_USER: ""
EDXAPP_ZENDESK_URL: ""
EDXAPP_ZENDESK_API_KEY: ""
EDXAPP_CELERY_USER: 'celery' EDXAPP_CELERY_USER: 'celery'
EDXAPP_CELERY_PASSWORD: 'celery' EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_CELERY_BROKER_VHOST: ""
EDXAPP_VIDEO_CDN_URLS:
EXAMPLE_COUNTRY_CODE: "http://example.com/edx/video?s3_url="
EDXAPP_PLATFORM_NAME: 'Your Platform Name Here' EDXAPP_PLATFORM_NAME: 'Your Platform Name Here'
EDXAPP_CAS_SERVER_URL: '' EDXAPP_CAS_SERVER_URL: ""
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: '' EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ""
EDXAPP_CAS_ATTRIBUTE_CALLBACK: '' EDXAPP_CAS_ATTRIBUTE_CALLBACK: ""
EDXAPP_CAS_ATTRIBUTE_PACKAGE: '' EDXAPP_CAS_ATTRIBUTE_PACKAGE: ""
# Enable an end-point that creates a user and logs them in # Enable an end-point that creates a user and logs them in
# Used for performance testing # Used for performance testing
EDXAPP_ENABLE_AUTO_AUTH: false EDXAPP_ENABLE_AUTO_AUTH: false
# Settings for enabling and configuring third party authorization
EDXAPP_ENABLE_THIRD_PARTY_AUTH: false
EDXAPP_THIRD_PARTY_AUTH: "None"
EDXAPP_FEATURES: EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true AUTH_USE_OPENID_PROVIDER: true
...@@ -87,21 +100,22 @@ EDXAPP_FEATURES: ...@@ -87,21 +100,22 @@ EDXAPP_FEATURES:
ENABLE_S3_GRADE_DOWNLOADS: true ENABLE_S3_GRADE_DOWNLOADS: true
USE_CUSTOM_THEME: $edxapp_use_custom_theme USE_CUSTOM_THEME: $edxapp_use_custom_theme
AUTOMATIC_AUTH_FOR_TESTING: $EDXAPP_ENABLE_AUTO_AUTH AUTOMATIC_AUTH_FOR_TESTING: $EDXAPP_ENABLE_AUTO_AUTH
ENABLE_THIRD_PARTY_AUTH: $EDXAPP_ENABLE_THIRD_PARTY_AUTH
EDXAPP_BOOK_URL: '' EDXAPP_BOOK_URL: ""
# This needs to be set to localhost # This needs to be set to localhost
# if xqueue is run on the same server # if xqueue is run on the same server
# as the lms (it's sent in the request) # as the lms (it's sent in the request)
EDXAPP_SITE_NAME: 'localhost' EDXAPP_SITE_NAME: 'localhost'
EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}" EDXAPP_LMS_SITE_NAME: "{{ EDXAPP_SITE_NAME }}"
EDXAPP_CMS_SITE_NAME: 'localhost' EDXAPP_CMS_SITE_NAME: 'localhost'
EDXAPP_MEDIA_URL: '' EDXAPP_MEDIA_URL: ""
EDXAPP_ANALYTICS_SERVER_URL: '' EDXAPP_ANALYTICS_SERVER_URL: ""
EDXAPP_FEEDBACK_SUBMISSION_EMAIL: '' EDXAPP_FEEDBACK_SUBMISSION_EMAIL: ""
EDXAPP_CELERY_BROKER_HOSTNAME: '' EDXAPP_CELERY_BROKER_HOSTNAME: ""
EDXAPP_LOGGING_ENV: 'sandbox' EDXAPP_LOGGING_ENV: 'sandbox'
EDXAPP_SYSLOG_SERVER: '' EDXAPP_SYSLOG_SERVER: ""
EDXAPP_RABBIT_HOSTNAME: 'localhost' EDXAPP_RABBIT_HOSTNAME: 'localhost'
EDXAPP_XML_MAPPINGS: {} EDXAPP_XML_MAPPINGS: {}
...@@ -145,11 +159,11 @@ EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades' ...@@ -145,11 +159,11 @@ EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
# These are the same defaults set in common.py # These are the same defaults set in common.py
EDXAPP_CC_PROCESSOR: EDXAPP_CC_PROCESSOR:
CyberSource: CyberSource:
SHARED_SECRET: '' SHARED_SECRET: ""
MERCHANT_ID: '' MERCHANT_ID: ""
SERIAL_NUMBER: '' SERIAL_NUMBER: ""
ORDERPAGE_VERSION: '7' ORDERPAGE_VERSION: '7'
PURCHASE_ENDPOINT: '' PURCHASE_ENDPOINT: ""
# does not affect verified students # does not affect verified students
EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$'] EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$']
...@@ -185,9 +199,6 @@ EDXAPP_USE_GIT_IDENTITY: false ...@@ -185,9 +199,6 @@ EDXAPP_USE_GIT_IDENTITY: false
# into this var # into this var
EDXAPP_GIT_IDENTITY: !!null EDXAPP_GIT_IDENTITY: !!null
# Configuration for database migration
EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ EDXAPP_MYSQL_DB_NAME }}"
EDXAPP_UPDATE_STATIC_FILES_KEY: false EDXAPP_UPDATE_STATIC_FILES_KEY: false
# Set this to true if you want to install the private pip # Set this to true if you want to install the private pip
# requirements in the edx-platform repo. # requirements in the edx-platform repo.
...@@ -197,10 +208,15 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: false ...@@ -197,10 +208,15 @@ EDXAPP_UPDATE_STATIC_FILES_KEY: false
EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false EDXAPP_INSTALL_PRIVATE_REQUIREMENTS: false
EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "UA-DUMMY" EDXAPP_GOOGLE_ANALYTICS_ACCOUNT: "None"
EDXAPP_PEARSON_TEST_PASSWORD: "" EDXAPP_PEARSON_TEST_PASSWORD: ""
EDXAPP_SEGMENT_IO_LMS: false
EDXAPP_SEGMENT_IO_LMS_KEY: "" EDXAPP_SEGMENT_IO_LMS_KEY: ""
# For the CMS
EDXAPP_SEGMENT_IO_KEY: ""
EDXAPP_SEGMENT_IO: false
EDXAPP_EDX_API_KEY: "" EDXAPP_EDX_API_KEY: ""
# This is the default set in common.py # This is the default set in common.py
EDXAPP_VERIFY_STUDENT: EDXAPP_VERIFY_STUDENT:
...@@ -211,18 +227,43 @@ EDXAPP_BULK_EMAIL_EMAILS_PER_TASK: 500 ...@@ -211,18 +227,43 @@ EDXAPP_BULK_EMAIL_EMAILS_PER_TASK: 500
# If using microsites this should point to the microsite repo # If using microsites this should point to the microsite repo
EDXAPP_MICROSITE_ROOT_DIR: "{{ edxapp_app_dir }}/edx-microsite" EDXAPP_MICROSITE_ROOT_DIR: "{{ edxapp_app_dir }}/edx-microsite"
# this dictionary defines what microsites are configured # this dictionary defines what microsites are configured
EDXAPP_MICROSITE_CONFIGRATION: {} EDXAPP_MICROSITE_CONFIGURATION: {}
# Instructor code that will not be run in the code sandbox # Instructor code that will not be run in the code sandbox
EDXAPP_COURSES_WITH_UNSAFE_CODE: [] EDXAPP_COURSES_WITH_UNSAFE_CODE: []
EDXAPP_SESSION_COOKIE_DOMAIN: "" EDXAPP_SESSION_COOKIE_DOMAIN: ""
EDXAPP_SESSION_COOKIE_NAME: "sessionid"
# XML Course related flags # XML Course related flags
EDXAPP_XML_FROM_GIT: false EDXAPP_XML_FROM_GIT: false
EDXAPP_XML_S3_BUCKET: !!null EDXAPP_XML_S3_BUCKET: !!null
EDXAPP_XML_S3_KEY: !!null EDXAPP_XML_S3_KEY: !!null
EDXAPP_NEWRELIC_LMS_APPNAME: "edX-LMS" EDXAPP_NEWRELIC_LMS_APPNAME: "{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}-edxapp-lms"
EDXAPP_NEWRELIC_CMS_APPNAME: "edX-CMS" EDXAPP_NEWRELIC_CMS_APPNAME: "{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}-edxapp-cms"
EDXAPP_ORA2_FILE_PREFIX: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}/ora2'
EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME: 'edxuploads'
EDXAPP_FILE_UPLOAD_STORAGE_PREFIX: 'submissions_attachments'
EDXAPP_CODE_JAIL_LIMITS:
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# Time in seconds that the jailed process has to run.
REALTIME: 1
# Needs to be non-zero so that jailed code can use it as their temp directory.(1MiB in bytes)
FSIZE: 1048576
EDXAPP_VIRTUAL_UNIVERSITIES: []
EDXAPP_SUBDOMAIN_BRANDING: {}
# Set the number of workers explicitely for lms and cms
# Should be set to
# EDXAPP_WORKERS:
# lms: <num workers>
# cms: <num workers>
EDXAPP_WORKERS: !!null
EDXAPP_ANALYTICS_DATA_TOKEN: ""
EDXAPP_ANALYTICS_DATA_URL: ""
#-------- Everything below this line is internal to the role ------------ #-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings #Use YAML references (& and *) and hash merge <<: to factor out shared settings
...@@ -317,6 +358,7 @@ edxapp_environment: ...@@ -317,6 +358,7 @@ edxapp_environment:
edxapp_generic_auth_config: &edxapp_generic_auth edxapp_generic_auth_config: &edxapp_generic_auth
ANALYTICS_DATA_TOKEN: $EDXAPP_ANALYTICS_DATA_TOKEN
AWS_ACCESS_KEY_ID: $EDXAPP_AWS_ACCESS_KEY_ID AWS_ACCESS_KEY_ID: $EDXAPP_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $EDXAPP_AWS_SECRET_ACCESS_KEY AWS_SECRET_ACCESS_KEY: $EDXAPP_AWS_SECRET_ACCESS_KEY
SECRET_KEY: $EDXAPP_EDXAPP_SECRET_KEY SECRET_KEY: $EDXAPP_EDXAPP_SECRET_KEY
...@@ -392,12 +434,17 @@ edxapp_generic_auth_config: &edxapp_generic_auth ...@@ -392,12 +434,17 @@ edxapp_generic_auth_config: &edxapp_generic_auth
CELERY_BROKER_USER: $EDXAPP_CELERY_USER CELERY_BROKER_USER: $EDXAPP_CELERY_USER
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
GOOGLE_ANALYTICS_ACCOUNT: $EDXAPP_GOOGLE_ANALYTICS_ACCOUNT GOOGLE_ANALYTICS_ACCOUNT: $EDXAPP_GOOGLE_ANALYTICS_ACCOUNT
THIRD_PARTY_AUTH: $EDXAPP_THIRD_PARTY_AUTH
generic_env_config: &edxapp_generic_env generic_env_config: &edxapp_generic_env
ANALYTICS_DATA_URL: $EDXAPP_ANALYTICS_DATA_URL
CELERY_BROKER_VHOST: $EDXAPP_CELERY_BROKER_VHOST
PAYMENT_SUPPORT_EMAIL: $EDXAPP_PAYMENT_SUPPORT_EMAIL
ZENDESK_URL: $EDXAPP_ZENDESK_URL
COURSES_WITH_UNSAFE_CODE: $EDXAPP_COURSES_WITH_UNSAFE_CODE COURSES_WITH_UNSAFE_CODE: $EDXAPP_COURSES_WITH_UNSAFE_CODE
BULK_EMAIL_EMAILS_PER_TASK: $EDXAPP_BULK_EMAIL_EMAILS_PER_TASK BULK_EMAIL_EMAILS_PER_TASK: $EDXAPP_BULK_EMAIL_EMAILS_PER_TASK
MICROSITE_ROOT_DIR: $EDXAPP_MICROSITE_ROOT_DIR MICROSITE_ROOT_DIR: $EDXAPP_MICROSITE_ROOT_DIR
MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGRATION MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGURATION
GRADES_DOWNLOAD: GRADES_DOWNLOAD:
STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE
BUCKET: $EDXAPP_GRADE_BUCKET BUCKET: $EDXAPP_GRADE_BUCKET
...@@ -429,29 +476,31 @@ generic_env_config: &edxapp_generic_env ...@@ -429,29 +476,31 @@ generic_env_config: &edxapp_generic_env
default: &default_generic_cache default: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache' BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key' KEY_FUNCTION: 'util.memcache.safe_key'
KEY_PREFIX: 'sandbox_default' KEY_PREFIX: 'default'
LOCATION: $EDXAPP_MEMCACHE LOCATION: $EDXAPP_MEMCACHE
general: general:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'sandbox_general' KEY_PREFIX: 'general'
mongo_metadata_inheritance: mongo_metadata_inheritance:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'integration_mongo_metadata_inheritance' KEY_PREFIX: 'mongo_metadata_inheritance'
TIMEOUT: 300 TIMEOUT: 300
staticfiles: staticfiles:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'integration_static_files' KEY_PREFIX: "{{ ansible_hostname|default('staticfiles') }}_general"
celery: celery:
<<: *default_generic_cache <<: *default_generic_cache
KEY_PREFIX: 'integration_celery' KEY_PREFIX: 'celery'
TIMEOUT: "7200" TIMEOUT: "7200"
CELERY_BROKER_TRANSPORT: 'amqp' CELERY_BROKER_TRANSPORT: 'amqp'
CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME
COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL
LOGGING_ENV: $EDXAPP_LOGGING_ENV LOGGING_ENV: $EDXAPP_LOGGING_ENV
SESSION_COOKIE_DOMAIN: $EDXAPP_SESSION_COOKIE_DOMAIN SESSION_COOKIE_DOMAIN: $EDXAPP_SESSION_COOKIE_DOMAIN
SESSION_COOKIE_NAME: $EDXAPP_SESSION_COOKIE_NAME
COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY
SEGMENT_IO_LMS: true SEGMENT_IO_LMS: $EDXAPP_SEGMENT_IO_LMS
SEGMENT_IO: $EDXAPP_SEGMENT_IO
THEME_NAME: $edxapp_theme_name THEME_NAME: $edxapp_theme_name
TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL
CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL
...@@ -464,11 +513,17 @@ generic_env_config: &edxapp_generic_env ...@@ -464,11 +513,17 @@ generic_env_config: &edxapp_generic_env
CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS: HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS:
'preview\.': 'draft' 'preview\.': 'draft-preferred'
UNIVERSITY_EMAIL: $EDXAPP_UNIVERSITY_EMAIL UNIVERSITY_EMAIL: $EDXAPP_UNIVERSITY_EMAIL
PRESS_EMAIL: $EDXAPP_PRESS_EMAIL PRESS_EMAIL: $EDXAPP_PRESS_EMAIL
PLATFORM_TWITTER_ACCOUNT: $EDXAPP_PLATFORM_TWITTER_ACCOUNT PLATFORM_TWITTER_ACCOUNT: $EDXAPP_PLATFORM_TWITTER_ACCOUNT
PLATFORM_FACEBOOK_ACCOUNT: $EDXAPP_PLATFORM_FACEBOOK_ACCOUNT PLATFORM_FACEBOOK_ACCOUNT: $EDXAPP_PLATFORM_FACEBOOK_ACCOUNT
ORA2_FILE_PREFIX: $EDXAPP_ORA2_FILE_PREFIX
FILE_UPLOAD_STORAGE_BUCKET_NAME: $EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME
FILE_UPLOAD_STORAGE_PREFIX: $EDXAPP_FILE_UPLOAD_STORAGE_PREFIX
VIRTUAL_UNIVERSITIES: $EDXAPP_VIRTUAL_UNIVERSITIES
SUBDOMAIN_BRANDING: $EDXAPP_SUBDOMAIN_BRANDING
lms_auth_config: lms_auth_config:
<<: *edxapp_generic_auth <<: *edxapp_generic_auth
...@@ -509,21 +564,15 @@ lms_env_config: ...@@ -509,21 +564,15 @@ lms_env_config:
<<: *edxapp_generic_env <<: *edxapp_generic_env
PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY
SITE_NAME: $EDXAPP_LMS_SITE_NAME SITE_NAME: $EDXAPP_LMS_SITE_NAME
VIDEO_CDN_URL: $EDXAPP_VIDEO_CDN_URLS
CODE_JAIL: CODE_JAIL:
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None # from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}' python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
limits: limits: $EDXAPP_CODE_JAIL_LIMITS
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# Time in seconds that the jailed process has to run.
REALTIME: 1
# Needs to be non-zero so that jailed code can use it as their temp directory.(1MiB in bytes)
FSIZE: 1048576
user: '{{ edxapp_sandbox_user }}' user: '{{ edxapp_sandbox_user }}'
cms_auth_config: cms_auth_config:
<<: *edxapp_generic_auth <<: *edxapp_generic_auth
SEGMENT_IO_KEY: $EDXAPP_SEGMENT_IO_KEY
cms_env_config: cms_env_config:
<<: *edxapp_generic_env <<: *edxapp_generic_env
SITE_NAME: $EDXAPP_CMS_SITE_NAME SITE_NAME: $EDXAPP_CMS_SITE_NAME
...@@ -585,8 +634,9 @@ sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base ...@@ -585,8 +634,9 @@ sandbox_base_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/base
sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt" sandbox_local_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/local.txt"
sandbox_post_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/post.txt" sandbox_post_requirements: "{{ edxapp_code_dir }}/requirements/edx-sandbox/post.txt"
edxapp_chrislea_ppa: "ppa:chris-lea/node.js"
edxapp_debian_pkgs: edxapp_debian_pkgs:
- npm
# for compiling the virtualenv # for compiling the virtualenv
# (only needed if wheel files aren't available) # (only needed if wheel files aren't available)
- build-essential - build-essential
...@@ -599,7 +649,6 @@ edxapp_debian_pkgs: ...@@ -599,7 +649,6 @@ edxapp_debian_pkgs:
# libopenblas-base, it will cause # libopenblas-base, it will cause
# problems for numpy # problems for numpy
- gfortran - gfortran
- libatlas3gf-base
- liblapack-dev - liblapack-dev
- g++ - g++
- libxml2-dev - libxml2-dev
...@@ -609,7 +658,7 @@ edxapp_debian_pkgs: ...@@ -609,7 +658,7 @@ edxapp_debian_pkgs:
# misc # misc
- curl - curl
- ipython - ipython
- npm - nodejs
- ntp - ntp
# for shapely # for shapely
- libgeos-dev - libgeos-dev
......
...@@ -29,12 +29,14 @@ ...@@ -29,12 +29,14 @@
# Do A Checkout # Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}} - name: checkout edx-platform repo into {{edxapp_code_dir}}
git: > git: >
dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}} dest={{edxapp_code_dir}}
repo={{edx_platform_repo}}
version={{edx_platform_version}}
accept_hostkey=yes accept_hostkey=yes
register: chkout
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: environment:
GIT_SSH: "{{ edxapp_git_ssh }}" GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_platform_checkout
notify: notify:
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
...@@ -48,12 +50,15 @@ ...@@ -48,12 +50,15 @@
- name: checkout theme - name: checkout theme
git: > git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}} dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}}
repo={{edxapp_theme_source_repo}}
version={{edxapp_theme_version}}
accept_hostkey=yes accept_hostkey=yes
when: edxapp_theme_name != '' when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: environment:
GIT_SSH: "{{ edxapp_git_ssh }}" GIT_SSH: "{{ edxapp_git_ssh }}"
register: edxapp_theme_checkout
notify: notify:
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
...@@ -183,6 +188,25 @@ ...@@ -183,6 +188,25 @@
- "restart edxapp_workers" - "restart edxapp_workers"
when: not inst.stat.exists or new.stat.md5 != inst.stat.md5 when: not inst.stat.exists or new.stat.md5 != inst.stat.md5
# Install the python custom requirements into {{ edxapp_venv_dir }}
- stat: path="{{ custom_requirements_file }}"
register: custom_requirements
sudo_user: "{{ edxapp_user }}"
- name : install python custom-requirements
pip: >
requirements="{{ custom_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
when: custom_requirements.stat.exists and new.stat.md5 != inst.stat.md5
# Install the final python modules into {{ edxapp_venv_dir }} # Install the final python modules into {{ edxapp_venv_dir }}
- name : install python post-post requirements - name : install python post-post requirements
# Need to use shell rather than pip so that we can maintain the context of our current working directory; some # Need to use shell rather than pip so that we can maintain the context of our current working directory; some
...@@ -248,33 +272,6 @@ ...@@ -248,33 +272,6 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=300
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url: url="{{ s3_one_time_url.url }}" dest="/tmp/{{ EDXAPP_XML_S3_KEY|basename }}"
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf /tmp/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# The next few tasks set up the python code sandbox # The next few tasks set up the python code sandbox
# need to disable this profile, otherwise the pip inside the sandbox venv has no permissions # need to disable this profile, otherwise the pip inside the sandbox venv has no permissions
...@@ -355,6 +352,32 @@ ...@@ -355,6 +352,32 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# The next few tasks install xml courses.
# Install the xml courses from an s3 bucket
- name: get s3 one time url
s3: >
bucket="{{ EDXAPP_XML_S3_BUCKET }}"
object="{{ EDXAPP_XML_S3_KEY }}"
mode="geturl"
expiration=30
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: s3_one_time_url
- name: download from one time url
get_url:
url="{{ s3_one_time_url.url }}"
dest="{{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}"
mode=0600
when: not EDXAPP_XML_FROM_GIT and EDXAPP_XML_S3_BUCKET and EDXAPP_XML_S3_KEY
register: download_xml_s3
- name: unzip the data to the data dir
shell: >
tar xzf {{ edxapp_data_dir }}/{{ EDXAPP_XML_S3_KEY|basename }}
chdir="{{ edxapp_data_dir }}"
when: download_xml_s3.changed
# creates the supervisor jobs for the # creates the supervisor jobs for the
# service variants configured, runs # service variants configured, runs
# gather_assets and db migrations # gather_assets and db migrations
...@@ -363,6 +386,10 @@ ...@@ -363,6 +386,10 @@
- service_variant_config - service_variant_config
- deploy - deploy
- include: xml.yml
tags: deploy
when: EDXAPP_XML_FROM_GIT
# call supervisorctl update. this reloads # call supervisorctl update. this reloads
# the supervisorctl config and restarts # the supervisorctl config and restarts
# the services if any of the configurations # the services if any of the configurations
...@@ -417,4 +444,7 @@ ...@@ -417,4 +444,7 @@
file: path={{ edxapp_git_identity }} state=absent file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY when: EDXAPP_USE_GIT_IDENTITY
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: edxapp_installed=true - set_fact: edxapp_installed=true
...@@ -47,8 +47,12 @@ ...@@ -47,8 +47,12 @@
- "{{ edxapp_course_data_dir }}" - "{{ edxapp_course_data_dir }}"
- "{{ edxapp_upload_dir }}" - "{{ edxapp_upload_dir }}"
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ edxapp_chrislea_ppa }}"
- name: install system packages on which LMS and CMS rely - name: install system packages on which LMS and CMS rely
apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present apt: pkg={{','.join(edxapp_debian_pkgs)}} state=present update_cache=yes
notify: notify:
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
......
...@@ -75,24 +75,6 @@ ...@@ -75,24 +75,6 @@
when: celery_worker is defined and not disable_edx_services when: celery_worker is defined and not disable_edx_services
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
# Fake syncdb with migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to.
#
# This is run in cases where you want to test to see if migrations
# work without actually runnning them (when creating AMIs for example).
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Syncdb with migrate when the migrate user is overridden in extra vars # Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate - name: syncdb and migrate
shell: > shell: >
...@@ -107,54 +89,6 @@ ...@@ -107,54 +89,6 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# Syncdb with migrate when the default migrate user is not set,
# in this case use the EDXAPP_MYSQL_USER_MIGRATE user to run migrations
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and not COMMON_MYSQL_MIGRATE_PASS
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Fake migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_only is defined and migrate_only|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Regular migrations
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes"
environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Gather assets using rake if possible # Gather assets using rake if possible
- name: gather {{ item }} static assets with rake - name: gather {{ item }} static assets with rake
......
---
- name: get instance information
action: ec2_facts
- name: tag instance with edx_platform version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edx_platform" : "{{ edx_platform_repo }} {{ edxapp_platform_checkout.after|truncate(7,True,'') }}"
when: edxapp_platform_checkout.after is defined
- name: tag instance with edxapp theme version
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:edxapp_theme" : "{{ edxapp_theme_source_repo }} {{ edxapp_theme_checkout.after|truncate(7,True,'') }}"
when: edxapp_theme_checkout.after is defined
...@@ -20,9 +20,11 @@ ...@@ -20,9 +20,11 @@
shell: > shell: >
executable=/bin/bash executable=/bin/bash
if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then if [[ -d {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static ]]; then
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name }} ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.repo_name}}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }}/static {{ edxapp_course_static_dir }}/{{ item.course}}
else else
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name }} ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.repo_name}}
ln -sf {{ edxapp_course_data_dir }}/{{ item.repo_name }} {{ edxapp_course_static_dir }}/{{ item.course}}
fi fi
with_items: EDXAPP_XML_COURSES with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import" when: item.disposition == "on disk" or item.disposition == "no static import"
...@@ -56,13 +58,8 @@ ...@@ -56,13 +58,8 @@
with_items: EDXAPP_XML_COURSES with_items: EDXAPP_XML_COURSES
when: item.disposition == "import" when: item.disposition == "import"
- name: delete .git repos
file: path="{{ edxapp_course_data_dir }}/{{ item.repo_name }}/.git" state=absent
with_items: EDXAPP_XML_COURSES
when: item.disposition == "on disk" or item.disposition == "no static import"
- name: create an archive of course data and course static dirs - name: create an archive of course data and course static dirs
shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }} shell: tar czf /tmp/static_course_content.tar.gz -C {{ edxapp_data_dir }} --exclude ".git" {{ edxapp_course_data_dir|basename }} {{ edxapp_course_static_dir|basename }}
- name: upload archive to s3 - name: upload archive to s3
s3: > s3: >
......
{% if devstack %}
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% else %}
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python {{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp {{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill {{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill {{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% endif %}
{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %} {% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in cms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do cms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_auth_config | to_nice_json }} {{ cms_auth_config | to_nice_json }}
...@@ -6,11 +6,16 @@ ...@@ -6,11 +6,16 @@
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %} {% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %} {% endif %}
{% if ansible_processor|length > 0 %} {% if EDXAPP_WORKERS -%}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ EDXAPP_WORKERS.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else -%}
{# This is for backwards compatibility, set workers explicitely using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% else %} {% else -%}
command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi command={{ executable }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
{% endif %} {% endif -%}
{% endif -%}
user={{ common_web_user }} user={{ common_web_user }}
directory={{ edxapp_code_dir }} directory={{ edxapp_code_dir }}
......
{% do cms_env_config.update(EDXAPP_ENV_EXTRA) %} {% do cms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %} {% for key, value in cms_env_config.iteritems() %}
{%- do cms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %} {% if value == 'None' %}
{% endif %} {% do cms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ cms_env_config | to_nice_json }} {{ cms_env_config | to_nice_json }}
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %} {% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% for key, value in lms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do lms_auth_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_auth_config | to_nice_json }} {{ lms_auth_config | to_nice_json }}
...@@ -6,11 +6,16 @@ ...@@ -6,11 +6,16 @@
{% set executable = edxapp_venv_dir + '/bin/gunicorn' %} {% set executable = edxapp_venv_dir + '/bin/gunicorn' %}
{% endif %} {% endif %}
{% if ansible_processor|length > 0 %} {% if EDXAPP_WORKERS -%}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ EDXAPP_WORKERS.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else -%}
{# This is for backwards compatibility, set workers explicitely using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %} {% else -%}
command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi command={{ executable }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %} {% endif %}
{% endif %}
user={{ common_web_user }} user={{ common_web_user }}
directory={{ edxapp_code_dir }} directory={{ edxapp_code_dir }}
......
{% do lms_env_config.update(EDXAPP_ENV_EXTRA) %} {% do lms_env_config.update(EDXAPP_ENV_EXTRA) %}
{% if EDXAPP_UPDATE_STATIC_FILES_KEY %} {% for key, value in lms_env_config.iteritems() %}
{%- do lms_env_config['CACHES']['staticfiles'].update({'KEY_PREFIX': edxapp_dynamic_cache_key}) %} {% if value == 'None' %}
{% endif %} {% do lms_env_config.update({key: None }) %}
{% endif %}
{% endfor %}
{{ lms_env_config | to_nice_json }} {{ lms_env_config | to_nice_json }}
...@@ -13,17 +13,12 @@ ...@@ -13,17 +13,12 @@
- name: install packages needed for single server - name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: setup the migration db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER_MIGRATE }}
password={{ EDXAPP_MYSQL_PASSWORD_MIGRATE}}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
- name: setup the edxapp db user - name: setup the edxapp db user
mysql_user: > mysql_user: >
name={{ EDXAPP_MYSQL_USER }} name={{ EDXAPP_MYSQL_USER }}
password={{ EDXAPP_MYSQL_PASSWORD }} password={{ EDXAPP_MYSQL_PASSWORD }}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL' priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
when: EDXAPP_MYSQL_USER is defined
- name: create a database for edxapp - name: create a database for edxapp
mysql_db: > mysql_db: >
...@@ -31,26 +26,27 @@ ...@@ -31,26 +26,27 @@
state=present state=present
encoding=utf8 encoding=utf8
when: EDXAPP_MYSQL_USER is defined when: EDXAPP_MYSQL_USER is defined
- name: setup the xqueue db user - name: setup the xqueue db user
mysql_user: > mysql_user: >
name={{ XQUEUE_MYSQL_USER }} name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }} password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL' priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services when: XQUEUE_MYSQL_USER is defined
- name: create a database for xqueue - name: create a database for xqueue
mysql_db: > mysql_db: >
db=xqueue db=xqueue
state=present state=present
encoding=utf8 encoding=utf8
when: XQUEUE_MYSQL_USER is defined and not disable_edx_services when: XQUEUE_MYSQL_USER is defined
- name: setup the ora db user - name: setup the ora db user
mysql_user: > mysql_user: >
name={{ ORA_MYSQL_USER }} name={{ ORA_MYSQL_USER }}
password={{ ORA_MYSQL_PASSWORD }} password={{ ORA_MYSQL_PASSWORD }}
priv='{{ORA_MYSQL_DB_NAME}}.*:ALL' priv='{{ORA_MYSQL_DB_NAME}}.*:ALL'
when: ORA_MYSQL_USER is defined
- name: create a database for ora - name: create a database for ora
mysql_db: > mysql_db: >
...@@ -59,20 +55,66 @@ ...@@ -59,20 +55,66 @@
encoding=utf8 encoding=utf8
when: ORA_MYSQL_USER is defined when: ORA_MYSQL_USER is defined
- name: setup the discern db user - name: create databases for analytics api
mysql_user: >
name={{ DISCERN_MYSQL_USER }}
password={{ DISCERN_MYSQL_PASSWORD }}
priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL'
when: DISCERN_MYSQL_USER is defined and not disable_edx_services
- name: create a database for discern
mysql_db: > mysql_db: >
db=discern db={{ item }}
state=present state=present
encoding=utf8 encoding=utf8
when: DISCERN_MYSQL_USER is defined and not disable_edx_services when: ANALYTICS_API_CONFIG is defined
with_items:
- "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}"
- name: create api user for the analytics api
mysql_user: >
name=api001
password=password
priv='{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}.*:ALL/reports.*:SELECT'
when: ANALYTICS_API_CONFIG is defined
- name: create read-only reports user for the analytics-api
mysql_user: >
name=reports001
password=password
priv='{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}.*:SELECT'
when: ANALYTICS_API_CONFIG is defined
- name: setup the migration db user
mysql_user: >
name={{ COMMON_MYSQL_MIGRATE_USER }}
password={{ COMMON_MYSQL_MIGRATE_PASS }}
priv='{{ item }}.*:ALL'
append_privs=yes
when: item != 'None'
with_items:
- "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
- "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
- "{{ ORA_MYSQL_DB_NAME|default('None') }}"
- name: setup the migration db user for analytics
mysql_user: >
name={{ COMMON_MYSQL_MIGRATE_USER }}
password={{ COMMON_MYSQL_MIGRATE_PASS }}
priv='{{ item }}.*:ALL'
append_privs=yes
when: ANALYTICS_API_CONFIG is defined
with_items:
- "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}"
- "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}"
- name: setup the read-only db user
mysql_user: >
name={{ COMMON_MYSQL_READ_ONLY_USER }}
password={{ COMMON_MYSQL_READ_ONLY_PASS }}
priv='*.*:ALL'
- name: setup the admin db user
mysql_user: >
name={{ COMMON_MYSQL_ADMIN_USER }}
password={{ COMMON_MYSQL_ADMIN_PASS }}
priv='*.*:CREATE USER'
- name: install memcached - name: install memcached
......
...@@ -16,6 +16,11 @@ path.logs: {{elasticsearch_log_dir}} ...@@ -16,6 +16,11 @@ path.logs: {{elasticsearch_log_dir}}
# #
bootstrap.mlockall: true bootstrap.mlockall: true
# Disable dynamic scripting as it is insecure and we don't use it
# See: http://bouk.co/blog/elasticsearch-rce/
# CVE: CVE-2014-3120
script.disable_dynamic: true
# Unicast discovery allows to explicitly control which nodes will be used # Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present, # to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise. # or to restrict the cluster communication-wise.
...@@ -37,4 +42,4 @@ bootstrap.mlockall: true ...@@ -37,4 +42,4 @@ bootstrap.mlockall: true
discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}'] discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}']
{% endif -%} {% endif -%}
\ No newline at end of file
...@@ -30,7 +30,7 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC ...@@ -30,7 +30,7 @@ FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTIC
# This needs to be a string, set to 'false' to disable # This needs to be a string, set to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true' FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key" FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app" FORUM_NEW_RELIC_APP_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-forum"
FORUM_WORKER_PROCESSES: "4" FORUM_WORKER_PROCESSES: "4"
FORUM_LISTEN_HOST: "0.0.0.0" FORUM_LISTEN_HOST: "0.0.0.0"
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }} dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
accept_hostkey=yes accept_hostkey=yes
sudo_user: "{{ forum_user }}" sudo_user: "{{ forum_user }}"
register: forum_checkout
notify: restart the forum service notify: restart the forum service
# TODO: This is done as the common_web_user # TODO: This is done as the common_web_user
...@@ -66,4 +67,7 @@ ...@@ -66,4 +67,7 @@
- include: test.yml tags=deploy - include: test.yml tags=deploy
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: forum_installed=true - set_fact: forum_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:forum" : "{{ forum_source_repo }} {{ forum_checkout.after|truncate(7,True,'') }}"
when: forum_checkout.after is defined
...@@ -17,29 +17,29 @@ ...@@ -17,29 +17,29 @@
JENKINS_ADMIN_NAME: 'default_jenkins_name' JENKINS_ADMIN_NAME: 'default_jenkins_name'
# A dictionary of AWS credentials to use to make
# a boto file for jenkins.
JENKINS_ADMIN_AWS_CREDENTIALS: !!null
# jenkins_admin also requires other variables that are not defined by default. # jenkins_admin also requires other variables that are not defined by default.
# JENKINS_ADMIN_S3_PROFILE: !!null # JENKINS_ADMIN_S3_PROFILE: !!null
# JENKINS_ADMIN_CONFIGURATION_REPO: !!null
# JENKINS_ADMIN_CONFIGURATION_SECURE_REPO: !!null
#
# # git key to use to checkout secure repos on jenkins and in abbey
# JENKINS_ADMIN_GIT_KEY: !!null
#
# # EC2 Key to use when bringing up the abbey instance in ec2 (aws key-pair)
# JENKINS_ADMIN_EC2_KEY: !!null
jenkins_admin_role_name: jenkins_admin jenkins_admin_role_name: jenkins_admin
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
# #
# OS packages # OS packages
# #
jenkins_admin_debian_repos:
- "deb http://cosmos.cites.illinois.edu/pub/ubuntu/ precise-backports main universe"
jenkins_admin_debian_pkgs: jenkins_admin_debian_pkgs:
# These are copied from the edxapp # These are copied from the edxapp
# role so that we can create virtualenvs # role so that we can create virtualenvs
# on the jenkins server for edxapp # on the jenkins server for edxapp
- npm
# for compiling the virtualenv # for compiling the virtualenv
# (only needed if wheel files aren't available) # (only needed if wheel files aren't available)
- build-essential - build-essential
...@@ -62,7 +62,7 @@ jenkins_admin_debian_pkgs: ...@@ -62,7 +62,7 @@ jenkins_admin_debian_pkgs:
# misc # misc
- curl - curl
- ipython - ipython
- npm - nodejs
- ntp - ntp
# for shapely # for shapely
- libgeos-dev - libgeos-dev
...@@ -77,6 +77,8 @@ jenkins_admin_debian_pkgs: ...@@ -77,6 +77,8 @@ jenkins_admin_debian_pkgs:
- ruby1.9.1 - ruby1.9.1
# for check-migrations # for check-migrations
- mysql-client - mysql-client
# for aws cli scripting
- jq
jenkins_admin_gem_pkgs: jenkins_admin_gem_pkgs:
# for generating status.edx.org # for generating status.edx.org
...@@ -141,4 +143,3 @@ jenkins_admin_plugins: ...@@ -141,4 +143,3 @@ jenkins_admin_plugins:
jenkins_admin_jobs: jenkins_admin_jobs:
- 'backup-jenkins' - 'backup-jenkins'
- 'build-ami'
...@@ -21,29 +21,21 @@ ...@@ -21,29 +21,21 @@
# #
# #
- fail: "JENKINS_ADMIN_S3_PROFILE is not defined." - fail: msg="JENKINS_ADMIN_S3_PROFILE is not defined."
when: JENKINS_ADMIN_S3_PROFILE is not defined when: JENKINS_ADMIN_S3_PROFILE is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.name is not defined." - fail: msg="JENKINS_ADMIN_S3_PROFILE.name is not defined."
when: JENKINS_ADMIN_S3_PROFILE.name is not defined when: JENKINS_ADMIN_S3_PROFILE.name is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.access_key is not defined." - fail: msg="JENKINS_ADMIN_S3_PROFILE.access_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.access_key is not defined when: JENKINS_ADMIN_S3_PROFILE.access_key is not defined
- fail: "JENKINS_ADMIN_S3_PROFILE.secret_key is not defined." - fail: msg="JENKINS_ADMIN_S3_PROFILE.secret_key is not defined."
when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined when: JENKINS_ADMIN_S3_PROFILE.secret_key is not defined
- fail: "JENKINS_ADMIN_CONFIGURATION_REPO is not defined." - name: add admin specific apt repositories
when: JENKINS_ADMIN_CONFIGURATION_REPO is not defined apt_repository: repo="{{ item }}" state=present update_cache=yes
with_items: jenkins_admin_debian_repos
- fail: "JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined."
when: JENKINS_ADMIN_CONFIGURATION_SECURE_REPO is not defined
- fail: "JENKINS_ADMIN_GIT_KEY is not defined."
when: JENKINS_ADMIN_GIT_KEY is not defined
- fail: "JENKINS_ADMIN_EC2_KEY is not defined."
when: JENKINS_ADMIN_EC2_KEY is not defined
# We first download the plugins to a temp directory and include # We first download the plugins to a temp directory and include
# the version in the file name. That way, if we increment # the version in the file name. That way, if we increment
...@@ -72,6 +64,14 @@ ...@@ -72,6 +64,14 @@
group={{ jenkins_group }} group={{ jenkins_group }}
mode=0644 mode=0644
- name: configure the boto profiles for jenkins
template: >
src="./{{ jenkins_home }}/boto.j2"
dest="{{ jenkins_home }}/.boto"
owner="{{ jenkins_user }}"
group="{{ jenkins_group }}"
mode="0600"
- name: create the ssh directory - name: create the ssh directory
file: > file: >
path={{ jenkins_home }}/.ssh path={{ jenkins_home }}/.ssh
...@@ -86,14 +86,6 @@ ...@@ -86,14 +86,6 @@
shell: > shell: >
ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts ssh-keygen -f {{ jenkins_home }}/.ssh/known_hosts -H -F github.com | grep -q found || ssh-keyscan -H github.com > {{ jenkins_home }}/.ssh/known_hosts
- name: drop the secure credentials
copy: >
content="{{ JENKINS_ADMIN_GIT_KEY }}"
dest={{ jenkins_home }}/.ssh/id_rsa
owner={{ jenkins_user }}
group={{ jenkins_group }}
mode=0600
- name: create job directory - name: create job directory
file: > file: >
path="{{ jenkins_home }}/jobs" path="{{ jenkins_home }}/jobs"
...@@ -120,9 +112,12 @@ ...@@ -120,9 +112,12 @@
mode=0644 mode=0644
with_items: jenkins_admin_jobs with_items: jenkins_admin_jobs
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: install system packages for edxapp virtualenvs - name: install system packages for edxapp virtualenvs
apt: pkg={{ item }} state=present apt: pkg={{','.join(jenkins_admin_debian_pkgs)}} state=present update_cache=yes
with_items: jenkins_admin_debian_pkgs
# This is necessary so that ansible can run with # This is necessary so that ansible can run with
# sudo set to True (as the jenkins user) on jenkins # sudo set to True (as the jenkins user) on jenkins
......
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
{% endfor %}
...@@ -27,7 +27,6 @@ mkdir -p $BUILD_ID/jobs ...@@ -27,7 +27,6 @@ mkdir -p $BUILD_ID/jobs
# Copy global configuration files into the workspace # Copy global configuration files into the workspace
cp $JENKINS_HOME/*.xml $BUILD_ID/ cp $JENKINS_HOME/*.xml $BUILD_ID/
# Copy keys and secrets into the workspace # Copy keys and secrets into the workspace
cp $JENKINS_HOME/identity.key $BUILD_ID/
cp $JENKINS_HOME/secret.key $BUILD_ID/ cp $JENKINS_HOME/secret.key $BUILD_ID/
cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/ cp $JENKINS_HOME/secret.key.not-so-secret $BUILD_ID/
cp -r $JENKINS_HOME/secrets $BUILD_ID/ cp -r $JENKINS_HOME/secrets $BUILD_ID/
......
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties>
<hudson.model.ParametersDefinitionProperty>
<parameterDefinitions>
<hudson.model.StringParameterDefinition>
<name>play</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>deployment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>environment</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>refs</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.TextParameterDefinition>
<name>vars</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.TextParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration</name>
<description>The GITREF of configuration to use. Leave blank to default to master.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>configuration_secure</name>
<description>The GITREF of configuration-secure repository to use. Leave blank to default to master.</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition>
<name>base_ami</name>
<description></description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>
<hudson.model.BooleanParameterDefinition>
<name>use_blessed</name>
<description></description>
<defaultValue>true</defaultValue>
</hudson.model.BooleanParameterDefinition>
</parameterDefinitions>
</hudson.model.ParametersDefinitionProperty>
<com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.20">
<autoRebuild>false</autoRebuild>
</com.sonyericsson.rebuild.RebuildSettings>
</properties>
<scm class="org.jenkinsci.plugins.multiplescms.MultiSCM" plugin="multiple-scms@0.2">
<scms>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{ JENKINS_ADMIN_CONFIGURATION_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration</scmName>
</hudson.plugins.git.GitSCM>
<hudson.plugins.git.GitSCM plugin="git@1.5.0">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name></name>
<refspec></refspec>
<url>{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<disableSubmodules>false</disableSubmodules>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<ignoreNotifyCommit>false</ignoreNotifyCommit>
<useShallowClone>false</useShallowClone>
<abortIfNoNewRevs>false</abortIfNoNewRevs>
<cutoffHours></cutoffHours>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>configuration-secure</relativeTargetDir>
<reference></reference>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>true</skipTag>
<includedRegions></includedRegions>
<scmName>configuration-secure</scmName>
</hudson.plugins.git.GitSCM>
</scms>
</scm>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<authToken>MULTIPASS</authToken>
<triggers/>
<concurrentBuild>true</concurrentBuild>
<builders>
<jenkins.plugins.shiningpanda.builders.VirtualenvBuilder plugin="shiningpanda@0.20">
<pythonName>System-CPython-2.7</pythonName>
<home></home>
<clear>false</clear>
<useDistribute>true</useDistribute>
<systemSitePackages>false</systemSitePackages>
<nature>shell</nature>
<command>
#!/bin/bash -x
export jenkins_admin_ec2_key="{{ JENKINS_ADMIN_EC2_KEY }}"
export jenkins_admin_configuration_secure_repo="{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}"
configuration/util/jenkins/build-ami.sh
</command>
<ignoreExitCode>false</ignoreExitCode>
</jenkins.plugins.shiningpanda.builders.VirtualenvBuilder>
<hudson.tasks.Shell>
<command>#!/bin/bash -x
if [[(&quot;$play&quot; == &quot;&quot;)]]; then
echo &quot;No Play Specified. Nothing to Do.&quot;
exit 0
fi
rm /var/tmp/$BUILD_ID-extra-vars.yml
rm /var/tmp/$BUILD_ID-refs.yml</command>
</hudson.tasks.Shell>
</builders>
<publishers/>
</project>
...@@ -4,7 +4,7 @@ jenkins_group: "edx" ...@@ -4,7 +4,7 @@ jenkins_group: "edx"
jenkins_server_name: "jenkins.testeng.edx.org" jenkins_server_name: "jenkins.testeng.edx.org"
jenkins_port: 8080 jenkins_port: 8080
jenkins_version: 1.538 jenkins_version: 1.571
jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_{{ jenkins_version }}_all.deb" jenkins_deb_url: "http://pkg.jenkins-ci.org/debian/binary/jenkins_{{ jenkins_version }}_all.deb"
jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb" jenkins_deb: "jenkins_{{ jenkins_version }}_all.deb"
...@@ -17,7 +17,7 @@ jenkins_plugins: ...@@ -17,7 +17,7 @@ jenkins_plugins:
- { name: "copy-to-slave", version: "1.4.3" } - { name: "copy-to-slave", version: "1.4.3" }
- { name: "credentials", version: "1.8.3" } - { name: "credentials", version: "1.8.3" }
- { name: "dashboard-view", version: "2.9.1" } - { name: "dashboard-view", version: "2.9.1" }
- { name: "ec2", version: "1.19" } - { name: "ec2", version: "1.23" }
- { name: "github", version: "1.8" } - { name: "github", version: "1.8" }
- { name: "github-api", version: "1.44" } - { name: "github-api", version: "1.44" }
- { name: "github-oauth", version: "0.14" } - { name: "github-oauth", version: "0.14" }
...@@ -28,10 +28,12 @@ jenkins_plugins: ...@@ -28,10 +28,12 @@ jenkins_plugins:
- { name: "mailer", version: "1.5" } - { name: "mailer", version: "1.5" }
- { name: "nested-view", version: "1.10" } - { name: "nested-view", version: "1.10" }
- { name: "next-build-number", version: "1.0" } - { name: "next-build-number", version: "1.0" }
- { name: "node-iterator-api", version: "1.5" }
- { name: "notification", version: "1.5" } - { name: "notification", version: "1.5" }
- { name: "pam-auth", version: "1.0" } - { name: "pam-auth", version: "1.0" }
- { name: "parameterized-trigger", version: "2.20" } - { name: "parameterized-trigger", version: "2.20" }
- { name: "postbuild-task", version: "1.8" } - { name: "postbuild-task", version: "1.8" }
- { name: "PrioritySorter", version: "2.8" }
- { name: "sauce-ondemand", version: "1.61" } - { name: "sauce-ondemand", version: "1.61" }
- { name: "s3", version: "0.5" } - { name: "s3", version: "0.5" }
- { name: "ssh-agent", version: "1.3" } - { name: "ssh-agent", version: "1.3" }
...@@ -45,6 +47,7 @@ jenkins_plugins: ...@@ -45,6 +47,7 @@ jenkins_plugins:
- { name: "multiple-scms", version: "0.2" } - { name: "multiple-scms", version: "0.2" }
- { name: "timestamper", version: "1.5.7" } - { name: "timestamper", version: "1.5.7" }
- { name: "thinBackup", version: "1.7.4"} - { name: "thinBackup", version: "1.7.4"}
- { name: "xunit", version: "1.89"}
jenkins_bundled_plugins: jenkins_bundled_plugins:
- "credentials" - "credentials"
......
--- ---
dependencies: dependencies:
- common - common
- role: datadog
COMMON_ENABLE_DATADOG: True
...@@ -128,3 +128,4 @@ ...@@ -128,3 +128,4 @@
notify: start nginx notify: start nginx
- include: datadog.yml tags=datadog - include: datadog.yml tags=datadog
when: COMMON_ENABLE_DATADOG
...@@ -3,6 +3,9 @@ jenkins_user: "jenkins" ...@@ -3,6 +3,9 @@ jenkins_user: "jenkins"
jenkins_group: "jenkins" jenkins_group: "jenkins"
jenkins_home: /home/jenkins jenkins_home: /home/jenkins
# repo for nodejs
jenkins_chrislea_ppa: "ppa:chris-lea/node.js"
# System packages # System packages
jenkins_debian_pkgs: jenkins_debian_pkgs:
- build-essential - build-essential
...@@ -15,7 +18,7 @@ jenkins_debian_pkgs: ...@@ -15,7 +18,7 @@ jenkins_debian_pkgs:
- libxml2-dev - libxml2-dev
- libgeos-dev - libgeos-dev
- libxslt1-dev - libxslt1-dev
- npm - nodejs
- pkg-config - pkg-config
- gettext - gettext
...@@ -69,7 +72,7 @@ jenkins_wheels: ...@@ -69,7 +72,7 @@ jenkins_wheels:
- { pkg: "mongoengine==0.7.10", wheel: "mongoengine-0.7.10-py27-none-any.whl" } - { pkg: "mongoengine==0.7.10", wheel: "mongoengine-0.7.10-py27-none-any.whl" }
- { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" } - { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" }
- { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" } - { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" }
- { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" } - { pkg: "oauthlib==0.6.3", wheel: "oauthlib-0.6.3-py27-none-any.whl" }
- { pkg: "paramiko==1.9.0", wheel: "paramiko-1.9.0-py27-none-any.whl" } - { pkg: "paramiko==1.9.0", wheel: "paramiko-1.9.0-py27-none-any.whl" }
- { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" } - { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "Pillow==1.7.8", wheel: "Pillow-1.7.8-cp27-none-linux_x86_64.whl" } - { pkg: "Pillow==1.7.8", wheel: "Pillow-1.7.8-cp27-none-linux_x86_64.whl" }
...@@ -86,7 +89,7 @@ jenkins_wheels: ...@@ -86,7 +89,7 @@ jenkins_wheels:
- { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" } - { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" }
- { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" } - { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" }
- { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" } - { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" }
- { pkg: "requests==1.2.3", wheel: "requests-1.2.3-py27-none-any.whl" } - { pkg: "requests==2.3.0", wheel: "requests-2.3.0-py27-none-any.whl" }
- { pkg: "scipy==0.11.0", wheel: "scipy-0.11.0-cp27-none-linux_x86_64.whl" } - { pkg: "scipy==0.11.0", wheel: "scipy-0.11.0-cp27-none-linux_x86_64.whl" }
- { pkg: "Shapely==1.2.16", wheel: "Shapely-1.2.16-cp27-none-linux_x86_64.whl" } - { pkg: "Shapely==1.2.16", wheel: "Shapely-1.2.16-cp27-none-linux_x86_64.whl" }
- { pkg: "singledispatch==3.4.0.2", wheel: "singledispatch-3.4.0.2-py27-none-any.whl" } - { pkg: "singledispatch==3.4.0.2", wheel: "singledispatch-3.4.0.2-py27-none-any.whl" }
......
...@@ -26,6 +26,10 @@ ...@@ -26,6 +26,10 @@
owner={{ jenkins_user }} group={{ jenkins_group }} mode=400 owner={{ jenkins_user }} group={{ jenkins_group }} mode=400
ignore_errors: yes ignore_errors: yes
# adding chris-lea nodejs repo
- name: add ppas for current versions of nodejs
apt_repository: repo="{{ jenkins_chrislea_ppa }}"
- name: Install system packages - name: Install system packages
apt: pkg={{','.join(jenkins_debian_pkgs)}} apt: pkg={{','.join(jenkins_debian_pkgs)}}
state=present update_cache=yes state=present update_cache=yes
......
...@@ -41,6 +41,8 @@ ...@@ -41,6 +41,8 @@
group: "{{ security_group }}" group: "{{ security_group }}"
instance_type: "{{ instance_type }}" instance_type: "{{ instance_type }}"
image: "{{ ami }}" image: "{{ ami }}"
vpc_subnet_id: "{{ vpc_subnet_id }}"
assign_public_ip: yes
wait: true wait: true
region: "{{ region }}" region: "{{ region }}"
instance_tags: "{{instance_tags}}" instance_tags: "{{instance_tags}}"
......
...@@ -53,3 +53,10 @@ ...@@ -53,3 +53,10 @@
regexp=". {{ localdev_home }}/share_x11" regexp=". {{ localdev_home }}/share_x11"
line=". {{ localdev_home }}/share_x11" line=". {{ localdev_home }}/share_x11"
state=present state=present
# Create scripts to add paver autocomplete
- name: add paver autocomplete
template:
src=paver_autocomplete dest={{ item.home }}/.paver_autocomplete
owner={{ item.user }} mode=755
with_items: localdev_accounts
...@@ -15,3 +15,5 @@ else ...@@ -15,3 +15,5 @@ else
fi fi
cd "{{ item.home }}/{{ item.repo }}" cd "{{ item.home }}/{{ item.repo }}"
source "{{ item.home }}/.paver_autocomplete"
# Courtesy of Gregory Nicholas
_paver()
{
local cur
COMPREPLY=()
# Variable to hold the current word
cur="${COMP_WORDS[COMP_CWORD]}"
# Build a list of the available tasks from: `paver --help --quiet`
local cmds=$(paver -hq | awk '/^ ([a-zA-Z][a-zA-Z0-9_]+)/ {print $1}')
# Generate possible matches and store them in the
# array variable COMPREPLY
COMPREPLY=($(compgen -W "${cmds}" $cur))
}
# Assign the auto-completion function for our command.
complete -F _paver paver
\ No newline at end of file
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
- name: copy mongodb key file - name: copy mongodb key file
copy: > copy: >
src={{ secure_dir }}/files/mongo_key content="{{ MONGO_CLUSTER_KEY }}"
dest={{ mongo_key_file }} dest={{ mongo_key_file }}
mode=0600 mode=0600
owner=mongodb owner=mongodb
......
mms_agent_version: "2.2.0.70-1"
mms_agent_url: "https://mms.mongodb.com/download/agent/monitoring/mongodb-mms-monitoring-agent_{{ mms_agent_version }}_amd64.deb"
---
- name: restart mms
service: name=mongodb-mms-monitoring-agent state=restarted
---
# mongo_mms
#
# Example play:
#
# roles:
# - mongo_mms
- fail: MMSAPIKEY is required
when: MMSAPIKEY is not defined
# this cruft can be removed in ansible 1.6, which can have apt install local deb files
- name: install required packages
apt: name={{ item }} state=present
with_items:
- gdebi
- name: download mongo mms agent
get_url: >
url="{{ mms_agent_url }}"
dest="/tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
register: download_mms_deb
- name: install mongo mms agent
shell: "gdebi -nq /tmp/mongodb-mms-monitoring-agent-{{ mms_agent_version }}.deb"
when: download_mms_deb.changed
notify: restart mms
- name: add key to monitoring-agent.config
lineinfile: >
dest=/etc/mongodb-mms/monitoring-agent.config
regexp="^mmsApiKey="
line="mmsApiKey={{ MMSAPIKEY }}"
notify: restart mms
- name: start mms service
service: name=mongodb-mms-monitoring-agent state=started
...@@ -16,9 +16,9 @@ ...@@ -16,9 +16,9 @@
# #
newrelic_role_name: newrelic newrelic_role_name: newrelic
NEWRELIC_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free' NEWRELIC_DEBIAN_REPO: 'deb http://apt.newrelic.com/debian/ newrelic non-free'
NEWRELIC_KEY_ID: '548C16BF' NEWRELIC_DEBIAN_KEY_ID: '548C16BF'
NEWRELIC_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_KEY_ID }}.gpg' NEWRELIC_DEBIAN_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_DEBIAN_KEY_ID }}.gpg'
NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE" NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
# #
...@@ -28,4 +28,5 @@ NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE" ...@@ -28,4 +28,5 @@ NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
newrelic_debian_pkgs: newrelic_debian_pkgs:
- newrelic-sysmond - newrelic-sysmond
newrelic_redhat_pkgs: [] newrelic_redhat_pkgs:
- newrelic-sysmond
...@@ -27,18 +27,33 @@ ...@@ -27,18 +27,33 @@
- name: add apt key - name: add apt key
apt_key: > apt_key: >
id="{{ NEWRELIC_KEY_ID }}" url="{{ NEWRELIC_KEY_URL }}" id="{{ NEWRELIC_DEBIAN_KEY_ID }}" url="{{ NEWRELIC_DEBIAN_KEY_URL }}"
state=present state=present
when: ansible_distribution == 'Ubuntu'
- name: Configure the New Relic Servers yum repository
shell: >
rpm -Uvh https://yum.newrelic.com/pub/newrelic/el5/x86_64/newrelic-repo-5-3.noarch.rpm
creates=/etc/yum.repos.d/newrelic.repo
when: ansible_distribution == 'Amazon'
- name: install apt repository - name: install apt repository
apt_repository: repo="{{ NEWRELIC_REPO }}" update_cache=yes apt_repository: repo="{{ NEWRELIC_DEBIAN_REPO }}" update_cache=yes
when: ansible_distribution == 'Ubuntu'
- name: install newrelic agent - name: install newrelic agent (apt)
apt: pkg="newrelic-sysmond" apt: pkg="newrelic-sysmond"
when: ansible_distribution == 'Ubuntu'
- name: Install newrelic related system packages. - name: Install newrelic related system packages for Ubuntu
apt: pkg={{ item }} install_recommends=yes state=present apt: pkg={{ item }} install_recommends=yes state=present
with_items: newrelic_debian_pkgs with_items: newrelic_debian_pkgs
when: ansible_distribution == 'Ubuntu'
- name: Install newrelic related system packages for Amazon
yum: pkg={{ item }} state=present
with_items: newrelic_redhat_pkgs
when: ansible_distribution == 'Amazon'
- name: configure the agent with the license key - name: configure the agent with the license key
shell: > shell: >
......
...@@ -51,6 +51,8 @@ nginx_lms_preview_gunicorn_hosts: ...@@ -51,6 +51,8 @@ nginx_lms_preview_gunicorn_hosts:
- 127.0.0.1 - 127.0.0.1
nginx_cms_gunicorn_hosts: nginx_cms_gunicorn_hosts:
- 127.0.0.1 - 127.0.0.1
nginx_analytics_api_gunicorn_hosts:
- 127.0.0.1
nginx_cfg: nginx_cfg:
# - link - turn on # - link - turn on
......
upstream analytics_api_app_server {
{% for host in nginx_analytics_api_gunicorn_hosts %}
server {{ host }}:{{ analytics_api_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
server {
listen {{ ANALYTICS_API_NGINX_PORT }} default_server;
location ~ ^/static/(?P<file>.*) {
root {{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }};
try_files /staticfiles/$file =404;
}
location / {
{% include "basic-auth.j2" %}
try_files $uri @proxy_to_app;
}
# No basic auth security on the heartbeat url, so that ELB can use it
location /api/v0/status {
try_files $uri @proxy_to_app;
}
{% include "robots.j2" %}
location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://analytics_api_app_server;
}
}
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
satisfy any; satisfy any;
allow 127.0.0.1; allow 127.0.0.1;
allow 10.0.0.0/8;
allow 192.168.0.0/16;
allow 172.16.0.0/12;
deny all; deny all;
auth_basic "Restricted"; auth_basic "Restricted";
......
...@@ -72,36 +72,7 @@ server { ...@@ -72,36 +72,7 @@ server {
} }
{% include "robots.j2" %} {% include "robots.j2" %}
{% include "static-files.j2" %}
# Check security on this
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
# Forward to HTTPS if we're an HTTP request... # Forward to HTTPS if we're an HTTP request...
if ($http_x_forwarded_proto = "http") { if ($http_x_forwarded_proto = "http") {
......
...@@ -69,36 +69,7 @@ server { ...@@ -69,36 +69,7 @@ server {
} }
{% include "robots.j2" %} {% include "robots.j2" %}
{% include "static-files.j2" %}
# Check security on this
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
# Forward to HTTPS if we're an HTTP request... # Forward to HTTPS if we're an HTTP request...
if ($http_x_forwarded_proto = "http") { if ($http_x_forwarded_proto = "http") {
......
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
# return a 403 for static files that shouldn't be
# in the staticfiles directory
location ~ ^/static/(?:.*)(?:\.xml|\.json|README.TXT) {
return 403;
}
# http://www.red-team-design.com/firefox-doesnt-allow-cross-domain-fonts-by-default
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\.(eot|otf|ttf|woff))" {
expires max;
add_header Access-Control-Allow-Origin *;
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files to maximum cache time
location ~ "/static/(?P<collected>.*\.[0-9a-f]{12}\..*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Set django-pipelined files for studio to maximum cache time
location ~ "/static/(?P<collected>[0-9a-f]{7}/.*)" {
expires max;
# Without this try_files, files that have been run through
# django-pipeline return 404s
try_files /staticfiles/$collected /course_static/$collected =404;
}
# Expire other static files immediately (there should be very few / none of these)
expires epoch;
}
...@@ -76,7 +76,7 @@ ORA_AWS_SECRET_ACCESS_KEY: '' ...@@ -76,7 +76,7 @@ ORA_AWS_SECRET_ACCESS_KEY: ''
# Default nginx listen port # Default nginx listen port
# These should be overrided if you want # These should be overrided if you want
# to serve all content on port 80 # to serve all content on port 80
ora_gunicorn_workers: 4 ora_gunicorn_workers: 2
ora_gunicorn_port: 8060 ora_gunicorn_port: 8060
ora_gunicorn_host: 127.0.0.1 ora_gunicorn_host: 127.0.0.1
......
...@@ -12,44 +12,46 @@ ...@@ -12,44 +12,46 @@
# - common # - common
# - oraclejdk # - oraclejdk
- name: check for Oracle Java version {{ oraclejdk_base }}
command: test -d /usr/lib/jvm/{{ oraclejdk_base }}
ignore_errors: true
register: oraclejdk_present
- name: download Oracle Java - name: download Oracle Java
shell: > shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }} curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}
executable=/bin/bash executable=/bin/bash
chdir=/var/tmp chdir=/var/tmp
creates=/var/tmp/{{ oraclejdk_file }} creates=/var/tmp/{{ oraclejdk_file }}
when: oraclejdk_present|failed
- name: install Oracle Java - name: create jvm dir
file: >
path=/usr/lib/jvm
state=directory
owner=root
group=root
- name: untar Oracle Java
shell: > shell: >
mkdir -p /usr/lib/jvm && tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }} tar -C /usr/lib/jvm -zxvf /var/tmp/{{ oraclejdk_file }}
creates=/usr/lib/jvm/{{ oraclejdk_base }}
executable=/bin/bash executable=/bin/bash
creates=/usr/lib/jvm/{{ oraclejdk_base }}
sudo: true
when: oraclejdk_present|failed
- name: create symlink expected by elasticsearch - name: create symlink expected by elasticsearch
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed
- name: update alternatives java - name: update alternatives java
shell: > shell: >
update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/java" 1 update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/java" 1
register: update_alt
changed_when: update_alt.stdout != ""
- name: update alternatives javac - name: update alternatives javac
shell: > shell: >
update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javac" 1 update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javac" 1
register: update_alt
changed_when: update_alt.stdout != ""
- name: update alternatives javaws - name: update alternatives javaws
shell: > shell: >
update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javaws" 1 update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javaws" 1
register: update_alt
changed_when: update_alt.stdout != ""
- name: add JAVA_HOME for Oracle Java - name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755 template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed
...@@ -30,7 +30,10 @@ XQUEUE_MYSQL_USER: 'xqueue001' ...@@ -30,7 +30,10 @@ XQUEUE_MYSQL_USER: 'xqueue001'
XQUEUE_MYSQL_PASSWORD: 'password' XQUEUE_MYSQL_PASSWORD: 'password'
XQUEUE_MYSQL_HOST: 'localhost' XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306' XQUEUE_MYSQL_PORT: '3306'
XQUEUE_NEWRELIC_APPNAME: "edX-xqueue" XQUEUE_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-xqueue"
# Set the number of workers explicitely for xqueue
XQUEUE_WORKERS: !!null
XQUEUE_WORKERS_PER_QUEUE: 12
# Internal vars below this line # Internal vars below this line
############################################# #############################################
...@@ -51,7 +54,7 @@ xqueue_gunicorn_host: 127.0.0.1 ...@@ -51,7 +54,7 @@ xqueue_gunicorn_host: 127.0.0.1
xqueue_env_config: xqueue_env_config:
XQUEUES: $XQUEUE_QUEUES XQUEUES: $XQUEUE_QUEUES
XQUEUE_WORKERS_PER_QUEUE: 12 XQUEUE_WORKERS_PER_QUEUE: $XQUEUE_WORKERS_PER_QUEUE
LOGGING_ENV : $XQUEUE_LOGGING_ENV LOGGING_ENV : $XQUEUE_LOGGING_ENV
SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER
LOG_DIR : "{{ COMMON_DATA_DIR }}/logs/xqueue" LOG_DIR : "{{ COMMON_DATA_DIR }}/logs/xqueue"
...@@ -81,7 +84,6 @@ xqueue_version: 'HEAD' ...@@ -81,7 +84,6 @@ xqueue_version: 'HEAD'
xqueue_pre_requirements_file: "{{ xqueue_code_dir }}/pre-requirements.txt" xqueue_pre_requirements_file: "{{ xqueue_code_dir }}/pre-requirements.txt"
xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt" xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
# These packages are required for the xqueue server, # These packages are required for the xqueue server,
# copied from the LMS role for now since there is a lot # copied from the LMS role for now since there is a lot
# of overlap # of overlap
...@@ -99,7 +101,6 @@ xqueue_debian_pkgs: ...@@ -99,7 +101,6 @@ xqueue_debian_pkgs:
# misc # misc
- curl - curl
- ipython - ipython
- npm
- ntp - ntp
# for shapely # for shapely
- libgeos-dev - libgeos-dev
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }} dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }}
accept_hostkey=yes accept_hostkey=yes
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
register: xqueue_checkout
notify: notify:
- restart xqueue - restart xqueue
...@@ -114,4 +115,7 @@ ...@@ -114,4 +115,7 @@
- python - python
- pip - pip
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
- set_fact: xqueue_installed=true - set_fact: xqueue_installed=true
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xqueue" : "{{ xqueue_source_repo }} {{ xqueue_checkout.after|truncate(7,True,'') }}"
when: xqueue_checkout.after is defined
...@@ -6,11 +6,15 @@ ...@@ -6,11 +6,15 @@
{% set executable = xqueue_venv_bin + '/gunicorn' %} {% set executable = xqueue_venv_bin + '/gunicorn' %}
{% endif %} {% endif %}
{% if XQUEUE_WORKERS -%}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ XQUEUE_WORKERS }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% else -%}
{% if ansible_processor|length > 0 %} {% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% else %} {% else -%}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
{% endif %} {% endif -%}
{% endif -%}
user={{ common_web_user }} user={{ common_web_user }}
directory={{ xqueue_code_dir }} directory={{ xqueue_code_dir }}
......
...@@ -19,6 +19,7 @@ XQWATCHER_COURSES: ...@@ -19,6 +19,7 @@ XQWATCHER_COURSES:
- COURSE: "exampleX-101x" - COURSE: "exampleX-101x"
GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git" GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
GIT_REF: "master" GIT_REF: "master"
PYTHON_REQUIREMENTS: []
QUEUE_NAME: "exampleX-101x" QUEUE_NAME: "exampleX-101x"
QUEUE_CONFIG: QUEUE_CONFIG:
SERVER: "https://xqueue.example.com" SERVER: "https://xqueue.example.com"
...@@ -35,6 +36,7 @@ XQWATCHER_COURSES: ...@@ -35,6 +36,7 @@ XQWATCHER_COURSES:
- COURSE: "exampleX-202x" - COURSE: "exampleX-202x"
GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git" GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
GIT_REF: "master" GIT_REF: "master"
PYTHON_REQUIREMENTS: []
QUEUE_NAME: "exampleX-202x" QUEUE_NAME: "exampleX-202x"
QUEUE_CONFIG: QUEUE_CONFIG:
SERVER: "https://xqueue.example.com" SERVER: "https://xqueue.example.com"
...@@ -56,7 +58,7 @@ XQWATCHER_GIT_IDENTITY: | ...@@ -56,7 +58,7 @@ XQWATCHER_GIT_IDENTITY: |
# depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC # depends upon Newrelic being enabled via COMMON_ENABLE_NEWRELIC
# and a key being provided via NEWRELIC_LICENSE_KEY # and a key being provided via NEWRELIC_LICENSE_KEY
XQWATCHER_NEWRELIC_APPNAME: "your Newrelic appname" XQWATCHER_NEWRELIC_APPNAME: "{{ COMMON_DEPLOYMENT }}-{{ COMMON_ENVIRONMENT }}-xqwatcher"
XQWATCHER_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}" XQWATCHER_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
# #
# #
...@@ -68,12 +70,11 @@ xqwatcher_user: "xqwatcher" ...@@ -68,12 +70,11 @@ xqwatcher_user: "xqwatcher"
xqwatcher_module: "xqueue_watcher" xqwatcher_module: "xqueue_watcher"
xqwatcher_app_dir: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}" xqwatcher_app_dir: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_home: "{{ COMMON_APP_DIR }}/{{ xqwatcher_service_name }}" xqwatcher_app_data: "{{ xqwatcher_app_dir }}/data"
xqwatcher_venv_base: "{{ xqwatcher_home }}/venvs" xqwatcher_venv_base: "{{ xqwatcher_app_dir }}/venvs"
xqwatcher_venv_dir: "{{ xqwatcher_venv_base }}/{{ xqwatcher_service_name }}" xqwatcher_venv_dir: "{{ xqwatcher_venv_base }}/{{ xqwatcher_service_name }}"
xqwatcher_code_dir: "{{ xqwatcher_app_dir }}/src" xqwatcher_code_dir: "{{ xqwatcher_app_dir }}/src"
xqwatcher_conf_dir: "{{ xqwatcher_home }}" xqwatcher_conf_dir: "{{ xqwatcher_app_dir }}"
xqwatcher_data_dir: "{{ xqwatcher_home }}/data"
xqwatcher_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/xqueue-watcher.git" xqwatcher_source_repo: "git@{{ COMMON_GIT_MIRROR }}:edx/xqueue-watcher.git"
xqwatcher_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xqwatcher_git_identity }}" xqwatcher_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xqwatcher_git_identity }}"
...@@ -87,6 +88,7 @@ xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}" ...@@ -87,6 +88,7 @@ xqwatcher_log_dir: "{{ COMMON_LOG_DIR }}/{{ xqwatcher_service_name }}"
# supervisor related config # supervisor related config
# #
xqwatcher_supervisor_app_dir: "{{ xqwatcher_app_dir }}/supervisor" xqwatcher_supervisor_app_dir: "{{ xqwatcher_app_dir }}/supervisor"
xqwatcher_supervisor_http_port: 9003
xqwatcher_supervisor_data_dir: "{{ COMMON_DATA_DIR }}/{{ xqwatcher_service_name }}" xqwatcher_supervisor_data_dir: "{{ COMMON_DATA_DIR }}/{{ xqwatcher_service_name }}"
xqwatcher_supervisor_log_dir: "{{ xqwatcher_log_dir }}" xqwatcher_supervisor_log_dir: "{{ xqwatcher_log_dir }}"
xqwatcher_supervisor_venv_dir: "{{ xqwatcher_venv_base }}/supervisor" xqwatcher_supervisor_venv_dir: "{{ xqwatcher_venv_base }}/supervisor"
......
...@@ -24,4 +24,4 @@ dependencies: ...@@ -24,4 +24,4 @@ dependencies:
supervisor_service_user: "{{ xqwatcher_supervisor_user }}" supervisor_service_user: "{{ xqwatcher_supervisor_user }}"
supervisor_available_dir: "{{ xqwatcher_supervisor_available_dir }}" supervisor_available_dir: "{{ xqwatcher_supervisor_available_dir }}"
supervisor_service: "supervisor.xqwatcher" supervisor_service: "supervisor.xqwatcher"
supervisor_http_bind_port: '9003' supervisor_http_bind_port: "{{ xqwatcher_supervisor_http_port }}"
...@@ -18,17 +18,10 @@ ...@@ -18,17 +18,10 @@
mode=0644 owner=root group=root mode=0644 owner=root group=root
with_items: XQWATCHER_COURSES with_items: XQWATCHER_COURSES
- name: write out sudoers config jail user
template: >
src=etc/sudoers.d/95-jailed-user.j2
dest=/etc/sudoers.d/95-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}
mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES
- name: write out sudoers for watcher - name: write out sudoers for watcher
template: > template: >
src=etc/sudoers.d/95-xqwatcher.j2 src=etc/sudoers.d/95-xqwatcher.j2
dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} dest=/etc/sudoers.d/95-xqwatcher-{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user|replace('.', '') }}
mode=0440 owner=root group=root validate='visudo -c -f %s' mode=0440 owner=root group=root validate='visudo -c -f %s'
with_items: XQWATCHER_COURSES with_items: XQWATCHER_COURSES
...@@ -44,13 +37,13 @@ ...@@ -44,13 +37,13 @@
- name: write out requirements.txt - name: write out requirements.txt
template: > template: >
src=edx/app/xqwatcher/data/requirements.txt.j2 src=edx/app/xqwatcher/data/requirements.txt.j2
dest={{ xqwatcher_data_dir }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt dest={{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt
mode=0440 owner=root group=root mode=0440 owner=root group=root
with_items: XQWATCHER_COURSES with_items: XQWATCHER_COURSES
- name : install course specific python requirements - name : install course specific python requirements
pip: > pip: >
requirements="{{ xqwatcher_data_dir }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt" requirements="{{ xqwatcher_app_data }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}-requirements.txt"
virtualenv="{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" virtualenv="{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
state=present state=present
extra_args="{{ XQWATCHER_PIP_EXTRA_ARGS }}" extra_args="{{ XQWATCHER_PIP_EXTRA_ARGS }}"
...@@ -74,9 +67,10 @@ ...@@ -74,9 +67,10 @@
# environment where untrusted users can submit code # environment where untrusted users can submit code
- name: put code jail into aa-complain - name: put code jail into aa-complain
command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" command: /usr/sbin/aa-complain "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPLAIN when: CODE_JAIL_COMPLAIN|bool
with_items: XQWATCHER_COURSES with_items: XQWATCHER_COURSES
- name: put code sandbox into aa-enforce - name: put code sandbox into aa-enforce
command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}" command: /usr/sbin/aa-enforce "/etc/apparmor.d/code.jail.{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}"
when: CODE_JAIL_COMPAIN is not defined | not CODE_JAIL_COMPLAIN when: not CODE_JAIL_COMPLAIN|bool
\ No newline at end of file with_items: XQWATCHER_COURSES
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
- name: checkout grader code - name: checkout grader code
git: > git: >
dest={{ xqwatcher_data_dir }}/{{ item.COURSE }} repo={{ item.GIT_REPO }} dest={{ xqwatcher_app_data }}/{{ item.COURSE }} repo={{ item.GIT_REPO }}
version={{ item.GIT_REF }} version={{ item.GIT_REF }}
ssh_opts="{{ xqwatcher_git_ssh_opts }}" ssh_opts="{{ xqwatcher_git_ssh_opts }}"
with_items: XQWATCHER_COURSES with_items: XQWATCHER_COURSES
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
dest={{ xqwatcher_code_dir }} repo={{ xqwatcher_source_repo }} version={{ XQWATCHER_VERSION }} dest={{ xqwatcher_code_dir }} repo={{ xqwatcher_source_repo }} version={{ XQWATCHER_VERSION }}
accept_hostkey=yes accept_hostkey=yes
ssh_opts="{{ xqwatcher_git_ssh_opts }}" ssh_opts="{{ xqwatcher_git_ssh_opts }}"
register: xqwatcher_checkout
- name: install application requirements - name: install application requirements
pip: > pip: >
...@@ -39,4 +40,7 @@ ...@@ -39,4 +40,7 @@
- name: update supervisor configuration - name: update supervisor configuration
shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update" shell: "{{ xqwatcher_supervisor_ctl }} -c {{ xqwatcher_supervisor_app_dir }}/supervisord.conf update"
when: not disable_edx_services when: not disable_edx_services
notify: restart xqwatcher notify: restart xqwatcher
\ No newline at end of file
- include: tag_ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
# - COURSE: "exampleX-101x" # - COURSE: "exampleX-101x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git" # GIT_REPO: "git@github.com:foo/graders-exampleX-101x.git"
# GIT_REF: "master" # GIT_REF: "master"
# PYTHON_REQUIREMENTS: []
# QUEUE_NAME: "exampleX-101x" # QUEUE_NAME: "exampleX-101x"
# QUEUE_CONFIG: # QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com" # SERVER: "https://xqueue.example.com"
...@@ -64,6 +65,7 @@ ...@@ -64,6 +65,7 @@
# - COURSE: "exampleX-202x" # - COURSE: "exampleX-202x"
# GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git" # GIT_REPO: "git@github.com:foo/graders-exampleX-202x.git"
# GIT_REF: "master" # GIT_REF: "master"
# PYTHON_REQUIREMENTS: []
# QUEUE_NAME: "exampleX-202x" # QUEUE_NAME: "exampleX-202x"
# QUEUE_CONFIG: # QUEUE_CONFIG:
# SERVER: "https://xqueue.example.com" # SERVER: "https://xqueue.example.com"
...@@ -84,8 +86,6 @@ ...@@ -84,8 +86,6 @@
# -----END RSA PRIVATE KEY----- # -----END RSA PRIVATE KEY-----
# #
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- name: create conf dir - name: create conf dir
file: > file: >
path="{{ xqwatcher_conf_dir }}" path="{{ xqwatcher_conf_dir }}"
...@@ -100,4 +100,13 @@ ...@@ -100,4 +100,13 @@
owner="{{ xqwatcher_user }}" owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}" group="{{ xqwatcher_user }}"
- name: create app data dir
file: >
path="{{ xqwatcher_app_data }}"
state=directory
owner="{{ xqwatcher_user }}"
group="{{ xqwatcher_user }}"
- include: code_jail.yml CODE_JAIL_COMPLAIN=false
- include: deploy.yml tags=deploy - include: deploy.yml tags=deploy
---
- name: get instance information
action: ec2_facts
- name: tag instance
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xqwatcher" : "{{ xqwatcher_source_repo }} {{ xqwatcher_checkout.after|truncate(7,True,'') }}"
when: xqwatcher_checkout.after is defined
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
{% set executable = xqwatcher_venv_dir + '/bin/python' %} {% set executable = xqwatcher_venv_dir + '/bin/python' %}
{% endif %} {% endif %}
[program:xqwatcher_service_name] [program:{{ xqwatcher_service_name }}]
command={{ executable }} -m {{ xqwatcher_module }} -d {{ xqwatcher_conf_dir }} command={{ executable }} -m {{ xqwatcher_module }} -d {{ xqwatcher_conf_dir }}
process_name=%(program_name)s process_name=%(program_name)s
user={{ xqwatcher_user }} user={{ xqwatcher_user }}
......
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python {{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) SETENV:NOPASSWD:{{ xqwatcher_venv_base }}/{{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.name }}/bin/python
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/bin/kill {{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) NOPASSWD:/bin/kill
{{ xqwatcher_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill {{ xqwatcher_user }} ALL=({{ item.QUEUE_CONFIG.HANDLERS[0].CODEJAIL.user }}) NOPASSWD:/usr/bin/pkill
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}} dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
accept_hostkey=yes accept_hostkey=yes
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
register: xserver_checkout
notify: restart xserver notify: restart xserver
- name: install requirements - name: install requirements
...@@ -56,6 +57,7 @@ ...@@ -56,6 +57,7 @@
environment: environment:
GIT_SSH: /tmp/git_ssh.sh GIT_SSH: /tmp/git_ssh.sh
notify: restart xserver notify: restart xserver
register: xserver_grader_checkout
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
- name: remove read-only ssh key for the content repo - name: remove read-only ssh key for the content repo
...@@ -92,3 +94,6 @@ ...@@ -92,3 +94,6 @@
- name: enforce app-armor rules - name: enforce app-armor rules
command: aa-enforce {{ xserver_venv_sandbox_dir }} command: aa-enforce {{ xserver_venv_sandbox_dir }}
- include: ec2.yml tags=deploy
when: COMMON_TAG_EC2_INSTANCE
---
- name: get instance information
action: ec2_facts
- name: tag instance for xserver
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xserver" : "{{ xserver_source_repo }} {{ xserver_checkout.after|truncate(7,True,'') }}"
when: xserver_checkout.after is defined
- name: tag instance for xserver grader
ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
args:
tags:
"version:xserver_grader" : "{{ XSERVER_GRADER_SOURCE }} {{ xserver_grader_checkout.after|truncate(7,True,'') }}"
when: xserver_grader_checkout.after is defined
...@@ -5,6 +5,6 @@ ...@@ -5,6 +5,6 @@
# #
- hosts: all - hosts: all
sudo: True sudo: True
gather_facts: False gather_facts: True
roles: roles:
- "{{role}}" - "{{role}}"
...@@ -26,3 +26,5 @@ ...@@ -26,3 +26,5 @@
- browsers - browsers
- local_dev - local_dev
- demo - demo
- role: analytics-api
when: ANALYTICS_API_GIT_IDENTITY
...@@ -38,4 +38,6 @@ ...@@ -38,4 +38,6 @@
- { role: "xqueue", update_users: True } - { role: "xqueue", update_users: True }
- ora - ora
- certs - certs
- role: analytics-api
when: ANALYTICS_API_GIT_IDENTITY
- edx_ansible - edx_ansible
...@@ -3,7 +3,7 @@ PyYAML==3.11 ...@@ -3,7 +3,7 @@ PyYAML==3.11
Jinja2==2.7.2 Jinja2==2.7.2
MarkupSafe==0.23 MarkupSafe==0.23
argparse==1.2.1 argparse==1.2.1
boto==2.28.0 boto==2.29.1
ecdsa==0.11 ecdsa==0.11
paramiko==1.14.0 paramiko==1.14.0
pycrypto==2.6.1 pycrypto==2.6.1
......
...@@ -58,5 +58,9 @@ if __name__ == '__main__': ...@@ -58,5 +58,9 @@ if __name__ == '__main__':
if disposition.lower() == "on disk": if disposition.lower() == "on disk":
all_xml_mappings[slug] = 'xml' all_xml_mappings[slug] = 'xml'
edxapp_xml_courses = { "EDXAPP_XML_COURSES": all_course_data, "EDXAPP_XML_MAPPINGS": all_xml_mappings } edxapp_xml_courses = {
"EDXAPP_XML_COURSES": all_course_data,
"EDXAPP_XML_MAPPINGS": all_xml_mappings,
"EDXAPP_XML_FROM_GIT": True
}
print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False) print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False)
...@@ -17,6 +17,12 @@ if [[ ! "$(lsb_release -d | cut -f2)" =~ $'Ubuntu 12.04' ]]; then ...@@ -17,6 +17,12 @@ if [[ ! "$(lsb_release -d | cut -f2)" =~ $'Ubuntu 12.04' ]]; then
fi fi
## ##
## Update and Upgrade apt packages
##
sudo apt-get update -y
sudo apt-get upgrade -y
##
## Install system pre-requisites ## Install system pre-requisites
## ##
sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev python-pip python-apt python-dev
......
...@@ -64,7 +64,11 @@ if [[ -z $region ]]; then ...@@ -64,7 +64,11 @@ if [[ -z $region ]]; then
fi fi
if [[ -z $zone ]]; then if [[ -z $zone ]]; then
zone="us-east-1b" zone="us-east-1c"
fi
if [[ -z $vpc_subnet_id ]]; then
vpc_subnet_id="subnet-cd867aba"
fi fi
if [[ -z $elb ]]; then if [[ -z $elb ]]; then
...@@ -81,16 +85,16 @@ fi ...@@ -81,16 +85,16 @@ fi
if [[ -z $ami ]]; then if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-97dbc3fe" ami="ami-f287419a"
elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then
ami="ami-59a4a230" ami="ami-f478849c"
elif [[ $server_type == "ubuntu_14.04(experimental)" ]]; then elif [[ $server_type == "ubuntu_14.04(experimental)" ]]; then
ami="ami-408c7f28" ami="ami-a6926dce"
fi fi
fi fi
if [[ -z $instance_type ]]; then if [[ -z $instance_type ]]; then
instance_type="m1.medium" instance_type="t2.medium"
fi fi
if [[ -z $enable_monitoring ]]; then if [[ -z $enable_monitoring ]]; then
...@@ -131,11 +135,18 @@ EOF ...@@ -131,11 +135,18 @@ EOF
if [[ $basic_auth == "true" ]]; then if [[ $basic_auth == "true" ]]; then
# vars specific to provisioning added to $extra-vars # vars specific to provisioning added to $extra-vars
cat << EOF_AUTH >> $extra_vars_file cat << EOF_AUTH >> $extra_vars_file
COMMON_ENABLE_BASIC_AUTH: True
COMMON_HTPASSWD_USER: $auth_user COMMON_HTPASSWD_USER: $auth_user
COMMON_HTPASSWD_PASS: $auth_pass COMMON_HTPASSWD_PASS: $auth_pass
XQUEUE_BASIC_AUTH_USER: $auth_user XQUEUE_BASIC_AUTH_USER: $auth_user
XQUEUE_BASIC_AUTH_PASSWORD: $auth_pass XQUEUE_BASIC_AUTH_PASSWORD: $auth_pass
EOF_AUTH EOF_AUTH
else
cat << EOF_AUTH >> $extra_vars_file
COMMON_ENABLE_BASIC_AUTH: False
EOF_AUTH
fi fi
if [[ $edx_internal == "true" ]]; then if [[ $edx_internal == "true" ]]; then
...@@ -194,7 +205,7 @@ EOF ...@@ -194,7 +205,7 @@ EOF
# run the tasks to launch an ec2 instance from AMI # run the tasks to launch an ec2 instance from AMI
cat $extra_vars_file cat $extra_vars_file
ansible-playbook edx_provision.yml -i inventory.ini $extra_var_arg --user ubuntu -v ansible-playbook edx_provision.yml -i inventory.ini $extra_var_arg --user ubuntu
if [[ $server_type == "full_edx_installation" ]]; then if [[ $server_type == "full_edx_installation" ]]; then
# additional tasks that need to be run if the # additional tasks that need to be run if the
...@@ -217,12 +228,12 @@ if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scra ...@@ -217,12 +228,12 @@ if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scra
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," $extra_var_arg --user ubuntu ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," $extra_var_arg --user ubuntu
fi fi
if [[ $server_type == "full_edx_installation" ]]; then if [[ $reconfigure != "true" && $server_type == "full_edx_installation" ]]; then
# Run deploy tasks for the roles selected # Run deploy tasks for the roles selected
for i in $roles; do for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars_file cat $extra_vars_file
ansible-playbook ${i}.yml -i "${deploy_host}," $extra_var_arg --user ubuntu --tags deploy -v ansible-playbook ${i}.yml -i "${deploy_host}," $extra_var_arg --user ubuntu --tags deploy
fi fi
done done
fi fi
......
...@@ -29,11 +29,6 @@ if [[ -z "$BUILD_NUMBER" ]]; then ...@@ -29,11 +29,6 @@ if [[ -z "$BUILD_NUMBER" ]]; then
exit -1 exit -1
fi fi
if [[ -z "$refs" ]]; then
echo "refs not specified."
exit -1
fi
if [[ -z "$deployment" ]]; then if [[ -z "$deployment" ]]; then
echo "deployment not specified." echo "deployment not specified."
exit -1 exit -1
...@@ -61,17 +56,13 @@ fi ...@@ -61,17 +56,13 @@ fi
export PYTHONUNBUFFERED=1 export PYTHONUNBUFFERED=1
if [[ -z $configuration ]]; then cd $WORKSPACE/configuration
cd configuration configuration=`git rev-parse --short HEAD`
configuration=`git rev-parse HEAD` cd $WORKSPACE
cd ..
fi
if [[ -z $configuration_secure ]]; then cd $WORKSPACE/configuration-secure
cd configuration-secure configuration_secure=`git rev-parse --short HEAD`
configuration_secure=`git rev-parse HEAD` cd $WORKSPACE
cd ..
fi
base_params="" base_params=""
if [[ -n "$base_ami" ]]; then if [[ -n "$base_ami" ]]; then
...@@ -83,9 +74,10 @@ if [[ "$use_blessed" == "true" ]]; then ...@@ -83,9 +74,10 @@ if [[ "$use_blessed" == "true" ]]; then
blessed_params="--blessed" blessed_params="--blessed"
fi fi
playbookdir_params="" if [[ -e "configuration/playbooks/edx-east/${play}.yml" ]]; then
if [[ ! -z "$playbook_dir" ]]; then playbookdir_params="--playbook-dir configuration/playbooks/edx-east"
playbookdir_params="--playbook-dir $playbook_dir" else
playbookdir_params="--playbook-dir ansible-private"
fi fi
configurationprivate_params="" configurationprivate_params=""
...@@ -96,11 +88,6 @@ if [[ ! -z "$configurationprivaterepo" ]]; then ...@@ -96,11 +88,6 @@ if [[ ! -z "$configurationprivaterepo" ]]; then
fi fi
fi fi
stackname_params=""
if [[ ! -z "$playbook_dir" ]]; then
stackname_params="--playbook-dir $playbook_dir"
fi
hipchat_params="" hipchat_params=""
if [[ ! -z "$hipchat_room_id" ]] && [[ ! -z "$hipchat_api_token" ]]; then if [[ ! -z "$hipchat_room_id" ]] && [[ ! -z "$hipchat_api_token" ]]; then
hipchat_params="--hipchat-room-id $hipchat_room_id --hipchat-api-token $hipchat_api_token" hipchat_params="--hipchat-room-id $hipchat_room_id --hipchat-api-token $hipchat_api_token"
...@@ -116,10 +103,7 @@ pip install -r requirements.txt ...@@ -116,10 +103,7 @@ pip install -r requirements.txt
cd util/vpc-tools/ cd util/vpc-tools/
echo "$refs" > /var/tmp/$BUILD_ID-refs.yml
cat /var/tmp/$BUILD_ID-refs.yml
echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p $play -t c3.large -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $hipchat_params $cleanup_params python -u abbey.py -p $play -t c3.large -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params $playbookdir_params --vars /var/tmp/$BUILD_ID-extra-vars.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo $configurationprivate_params $hipchat_params $cleanup_params
#!/usr/bin/env bash
# A simple wrapper to add ssh keys from
# This assumes that you will be running on one or more servers
# that are tagged with Name: <environment>-<deployment>-<play>
if [[
-z $WORKSPACE ||
-z $environment_tag ||
-z $deployment_tag ||
-z $play ||
-z $first_in ||
-z $public_key ||
-z $serial_count
]]; then
echo "Environment incorrect for this wrapper script"
env
exit 1
fi
cd $WORKSPACE/configuration/playbooks/edx-east
export AWS_PROFILE=$deployment_tag
ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post"
if [[ ! -z "$extra_vars" ]]; then
ansible_extra_vars+=" -e $extra_vars"
fi
if [[ $check_mode == "true" ]]; then
ansible_extra_vars+=" --check"
fi
if [[ ! -z "$run_on_single_ip" ]]; then
ansible_limit+="$run_on_single_ip"
else
if [[ $first_in == "true" ]]; then
ansible_limit+="first_in_"
fi
ansible_limit+="tag_environment_${environment_tag}:&tag_deployment_${deployment_tag}"
fi
ansible_extra_vars+=" -e public_key=$public_key"
export PYTHONUNBUFFERED=1
env
ansible-playbook -v -D -u ubuntu $play -i ./ec2.py $ansible_task_tags --limit $ansible_limit -e@"$WORKSPACE/configuration-secure/ansible/vars/ubuntu-public-keys.yml" $ansible_extra_vars
rm -f $extra_vars_file
...@@ -24,7 +24,9 @@ cd $WORKSPACE/configuration/playbooks/edx-east ...@@ -24,7 +24,9 @@ cd $WORKSPACE/configuration/playbooks/edx-east
ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post" ansible_extra_vars+=" -e serial_count=$serial_count -e elb_pre_post=$elb_pre_post"
if [ ! -z "$extra_vars" ]; then if [ ! -z "$extra_vars" ]; then
ansible_extra_vars+=" -e $extra_vars" for arg in $extra_vars; do
ansible_extra_vars+=" -e $arg"
done
fi fi
if [[ $run_migrations == "true" ]]; then if [[ $run_migrations == "true" ]]; then
......
...@@ -18,7 +18,7 @@ except ImportError: ...@@ -18,7 +18,7 @@ except ImportError:
from pprint import pprint from pprint import pprint
AMI_TIMEOUT = 600 # time to wait for AMIs to complete AMI_TIMEOUT = 1800 # time to wait for AMIs to complete(30 minutes)
EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition
EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks
NUM_TASKS = 5 # number of tasks for time summary report NUM_TASKS = 5 # number of tasks for time summary report
...@@ -76,19 +76,17 @@ def parse_args(): ...@@ -76,19 +76,17 @@ def parse_args():
help="don't cleanup on failures") help="don't cleanup on failures")
parser.add_argument('--vars', metavar="EXTRA_VAR_FILE", parser.add_argument('--vars', metavar="EXTRA_VAR_FILE",
help="path to extra var file", required=False) help="path to extra var file", required=False)
parser.add_argument('--refs', metavar="GIT_REFS_FILE",
help="path to a var file with app git refs", required=False)
parser.add_argument('--configuration-version', required=False, parser.add_argument('--configuration-version', required=False,
help="configuration repo branch(no hashes)", help="configuration repo gitref",
default="master") default="master")
parser.add_argument('--configuration-secure-version', required=False, parser.add_argument('--configuration-secure-version', required=False,
help="configuration-secure repo branch(no hashes)", help="configuration-secure repo gitref",
default="master") default="master")
parser.add_argument('--configuration-secure-repo', required=False, parser.add_argument('--configuration-secure-repo', required=False,
default="git@github.com:edx-ops/prod-secure", default="git@github.com:edx-ops/prod-secure",
help="repo to use for the secure files") help="repo to use for the secure files")
parser.add_argument('--configuration-private-version', required=False, parser.add_argument('--configuration-private-version', required=False,
help="configuration-private repo branch(no hashes)", help="configuration-private repo gitref",
default="master") default="master")
parser.add_argument('--configuration-private-repo', required=False, parser.add_argument('--configuration-private-repo', required=False,
default="git@github.com:edx-ops/ansible-private", default="git@github.com:edx-ops/ansible-private",
...@@ -287,18 +285,13 @@ cat << EOF >> $extra_vars ...@@ -287,18 +285,13 @@ cat << EOF >> $extra_vars
# of all the repositories # of all the repositories
{extra_vars_yml} {extra_vars_yml}
{git_refs_yml}
# abbey will always run fake migrations # abbey will always run fake migrations
# this is so that the application can come # this is so that the application can come
# up healthy # up healthy
fake_migrations: true fake_migrations: true
# Use the build number an the dynamic cache key.
EDXAPP_UPDATE_STATIC_FILES_KEY: true
edxapp_dynamic_cache_key: {deployment}-{environment}-{play}-{cache_id}
disable_edx_services: true disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true
# abbey should never take instances in # abbey should never take instances in
# and out of elbs # and out of elbs
...@@ -369,7 +362,6 @@ rm -rf $base_dir ...@@ -369,7 +362,6 @@ rm -rf $base_dir
identity_contents=identity_contents, identity_contents=identity_contents,
queue_name=run_id, queue_name=run_id,
extra_vars_yml=extra_vars_yml, extra_vars_yml=extra_vars_yml,
git_refs_yml=git_refs_yml,
secure_vars_file=secure_vars_file, secure_vars_file=secure_vars_file,
cache_id=args.cache_id) cache_id=args.cache_id)
...@@ -528,18 +520,21 @@ def create_ami(instance_id, name, description): ...@@ -528,18 +520,21 @@ def create_ami(instance_id, name, description):
time.sleep(AWS_API_WAIT_TIME) time.sleep(AWS_API_WAIT_TIME)
img.add_tag("play", args.play) img.add_tag("play", args.play)
time.sleep(AWS_API_WAIT_TIME) time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_ref", args.configuration_version) conf_tag = "{} {}".format("http://github.com/edx/configuration", args.configuration_version)
time.sleep(AWS_API_WAIT_TIME) img.add_tag("version:configuration", conf_tag)
img.add_tag("configuration_secure_ref", args.configuration_secure_version)
time.sleep(AWS_API_WAIT_TIME) time.sleep(AWS_API_WAIT_TIME)
img.add_tag("configuration_secure_repo", args.configuration_secure_repo) conf_secure_tag = "{} {}".format(args.configuration_secure_repo, args.configuration_secure_version)
img.add_tag("version:configuration_secure", conf_secure_tag)
time.sleep(AWS_API_WAIT_TIME) time.sleep(AWS_API_WAIT_TIME)
img.add_tag("cache_id", args.cache_id) img.add_tag("cache_id", args.cache_id)
time.sleep(AWS_API_WAIT_TIME) time.sleep(AWS_API_WAIT_TIME)
for repo, ref in git_refs.items():
key = "refs:{}".format(repo) # Get versions from the instance.
img.add_tag(key, ref) tags = ec2.get_all_tags(filters={'resource-id': instance_id})
time.sleep(AWS_API_WAIT_TIME) for tag in tags:
if tag.name.startswith('version:'):
img.add_tag(tag.name, tag.value)
time.sleep(AWS_API_WAIT_TIME)
break break
else: else:
time.sleep(1) time.sleep(1)
...@@ -647,6 +642,7 @@ def launch_and_configure(ec2_args): ...@@ -647,6 +642,7 @@ def launch_and_configure(ec2_args):
def send_hipchat_message(message): def send_hipchat_message(message):
print(message)
#If hipchat is configured send the details to the specified room #If hipchat is configured send the details to the specified room
if args.hipchat_api_token and args.hipchat_room_id: if args.hipchat_api_token and args.hipchat_room_id:
import hipchat import hipchat
...@@ -673,14 +669,6 @@ if __name__ == '__main__': ...@@ -673,14 +669,6 @@ if __name__ == '__main__':
extra_vars_yml = "" extra_vars_yml = ""
extra_vars = {} extra_vars = {}
if args.refs:
with open(args.refs) as f:
git_refs_yml = f.read()
git_refs = yaml.load(git_refs_yml)
else:
git_refs_yml = ""
git_refs = {}
if args.secure_vars_file: if args.secure_vars_file:
# explicit path to a single # explicit path to a single
# secure var file # secure var file
...@@ -710,6 +698,7 @@ if __name__ == '__main__': ...@@ -710,6 +698,7 @@ if __name__ == '__main__':
else: else:
base_ami = args.base_ami base_ami = args.base_ami
error_in_abbey_run = False
try: try:
sqs_queue = None sqs_queue = None
instance_id = None instance_id = None
...@@ -749,6 +738,7 @@ if __name__ == '__main__': ...@@ -749,6 +738,7 @@ if __name__ == '__main__':
play=args.play, play=args.play,
exception=repr(e)) exception=repr(e))
send_hipchat_message(message) send_hipchat_message(message)
error_in_abbey_run = True
finally: finally:
print print
if not args.no_cleanup and not args.noop: if not args.no_cleanup and not args.noop:
...@@ -761,3 +751,5 @@ if __name__ == '__main__': ...@@ -761,3 +751,5 @@ if __name__ == '__main__':
# Check to make sure we have an instance id. # Check to make sure we have an instance id.
if instance_id: if instance_id:
ec2.terminate_instances(instance_ids=[instance_id]) ec2.terminate_instances(instance_ids=[instance_id])
if error_in_abbey_run:
exit(1)
#!/usr/bin/env python #!/usr/bin/env python -u
import boto import boto
import boto.route53 import boto.route53
import boto.route53.record import boto.route53.record
...@@ -37,25 +37,6 @@ RDS_SIZES = [ ...@@ -37,25 +37,6 @@ RDS_SIZES = [
'db.m2.4xlarg', 'db.m2.4xlarg',
] ]
# These are the groups for the different
# stack names that will be assigned once
# the corresponding db is cloned
SG_GROUPS = {
'stage-edx': 'sg-d2f623b7',
}
# This group must already be created
# and allows for full access to port
# 3306 from within the vpc.
# This group is assigned temporarily
# for cleaning the db
SG_GROUPS_FULL = {
'stage-edx': 'sg-0abf396f',
}
def parse_args(args=sys.argv[1:]): def parse_args(args=sys.argv[1:]):
stack_names = all_stack_names() stack_names = all_stack_names()
...@@ -64,9 +45,12 @@ def parse_args(args=sys.argv[1:]): ...@@ -64,9 +45,12 @@ def parse_args(args=sys.argv[1:]):
for db in rds.describe_db_instances()['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']] for db in rds.describe_db_instances()['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']]
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter) parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
parser.add_argument('-s', '--stack-name', choices=stack_names, parser.add_argument('--vpc', default=None, action="store_true",
default=None, help='this is for a vpc')
help='Stack name for where you want this RDS instance launched') parser.add_argument('--security-group', default=None,
help='security group name that should be assigned to the new RDS instance (vpc only!)')
parser.add_argument('--subnet', default=None,
help='subnet that should be used for the RDS instance (vpc only!)')
parser.add_argument('-t', '--type', choices=RDS_SIZES, parser.add_argument('-t', '--type', choices=RDS_SIZES,
default='db.m1.small', help='RDS size to create instances of') default='db.m1.small', help='RDS size to create instances of')
parser.add_argument('-d', '--db-source', choices=dbs, parser.add_argument('-d', '--db-source', choices=dbs,
...@@ -86,8 +70,8 @@ def parse_args(args=sys.argv[1:]): ...@@ -86,8 +70,8 @@ def parse_args(args=sys.argv[1:]):
parser.add_argument('--dump', action="store_true", parser.add_argument('--dump', action="store_true",
default=False, default=False,
help="create a sql dump after launching it into the vpc") help="create a sql dump after launching it into the vpc")
parser.add_argument('--secret-var-file', parser.add_argument('-s', '--secret-var-files', action="append", required=True,
help="using a secret var file run ansible against the host to update db users") help="use one or more secret var files to run ansible against the host to update db users")
return parser.parse_args(args) return parser.parse_args(args)
...@@ -99,10 +83,11 @@ def wait_on_db_status(db_name, region='us-east-1', wait_on='available', aws_id=N ...@@ -99,10 +83,11 @@ def wait_on_db_status(db_name, region='us-east-1', wait_on='available', aws_id=N
if len(statuses) > 1: if len(statuses) > 1:
raise Exception("More than one instance returned for {0}".format(db_name)) raise Exception("More than one instance returned for {0}".format(db_name))
if statuses[0]['DBInstanceStatus'] == wait_on: if statuses[0]['DBInstanceStatus'] == wait_on:
print("Status is: {}".format(wait_on))
break break
sys.stdout.write(".") sys.stdout.write("status is {}..\n".format(statuses[0]['DBInstanceStatus']))
sys.stdout.flush() sys.stdout.flush()
time.sleep(2) time.sleep(10)
return return
if __name__ == '__main__': if __name__ == '__main__':
...@@ -119,31 +104,33 @@ if __name__ == '__main__': ...@@ -119,31 +104,33 @@ if __name__ == '__main__':
use_latest_restorable_time=True, use_latest_restorable_time=True,
db_instance_class=args.type, db_instance_class=args.type,
) )
if args.stack_name: if args.vpc:
subnet_name = rds_subnet_group_name_for_stack_name(args.stack_name) restore_args['db_subnet_group_name'] = args.subnet
restore_args['db_subnet_group_name'] = subnet_name
rds.restore_db_instance_to_point_in_time(**restore_args) rds.restore_db_instance_to_point_in_time(**restore_args)
wait_on_db_status(restore_dbid) wait_on_db_status(restore_dbid)
print("Getting db host")
db_host = rds.describe_db_instances(restore_dbid)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['Endpoint']['Address'] db_host = rds.describe_db_instances(restore_dbid)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'][0]['Endpoint']['Address']
if args.password or args.stack_name: modify_args = dict(
modify_args = dict( apply_immediately=True
apply_immediately=True )
) if args.password:
if args.password: modify_args['master_user_password'] = args.password
modify_args['master_user_password'] = args.password
if args.stack_name: if args.vpc:
modify_args['vpc_security_group_ids'] = [SG_GROUPS[args.stack_name], SG_GROUPS_FULL[args.stack_name]] modify_args['vpc_security_group_ids'] = [args.security_group]
else: else:
# dev-edx is the default security group for dbs that # dev-edx is the default security group for dbs that
# are not in the vpc, it allows connections from the various # are not in the vpc, it allows connections from the various
# NAT boxes and from sandboxes # NAT boxes and from sandboxes
modify_args['db_security_groups'] = ['dev-edx'] modify_args['db_security_groups'] = ['dev-edx']
# Update the db immediately # Update the db immediately
rds.modify_db_instance(restore_dbid, **modify_args) print("Updating db instance: {}".format(modify_args))
rds.modify_db_instance(restore_dbid, **modify_args)
print("Waiting 15 seconds before checking to see if db is available")
time.sleep(15)
wait_on_db_status(restore_dbid)
if args.clean_wwc: if args.clean_wwc:
# Run the mysql clean sql file # Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} wwc < {sanitize_wwc_sql_file} """.format( sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} wwc < {sanitize_wwc_sql_file} """.format(
...@@ -162,12 +149,16 @@ if __name__ == '__main__': ...@@ -162,12 +149,16 @@ if __name__ == '__main__':
print("Running {}".format(sanitize_cmd)) print("Running {}".format(sanitize_cmd))
os.system(sanitize_cmd) os.system(sanitize_cmd)
if args.secret_var_file: if args.secret_var_files:
extra_args = ""
for secret_var_file in args.secret_var_files:
extra_args += " -e@{}".format(secret_var_file)
db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, update_edxapp_db_users.yml """ \ db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, update_edxapp_db_users.yml """ \
"""-e @{secret_var_file} -e "edxapp_db_root_user=root edxapp_db_root_pass={root_pass} """ \ """{extra_args} -e "edxapp_db_root_user=root edxapp_db_root_pass={root_pass} """ \
"""EDXAPP_MYSQL_HOST={db_host}" """.format( """EDXAPP_MYSQL_HOST={db_host}" """.format(
root_pass=args.password, root_pass=args.password,
secret_var_file=args.secret_var_file, extra_args=extra_args,
db_host=db_host, db_host=db_host,
play_path=play_path) play_path=play_path)
print("Running {}".format(db_cmd)) print("Running {}".format(db_cmd))
...@@ -181,6 +172,3 @@ if __name__ == '__main__': ...@@ -181,6 +172,3 @@ if __name__ == '__main__':
db_host=db_host) db_host=db_host)
print("Running {}".format(dns_cmd)) print("Running {}".format(dns_cmd))
os.system(dns_cmd) os.system(dns_cmd)
if args.stack_name:
rds.modify_db_instance(restore_dbid, vpc_security_group_ids=[SG_GROUPS[args.stack_name]])
...@@ -31,7 +31,7 @@ BASTION_CONFIG = """Host {jump_box} ...@@ -31,7 +31,7 @@ BASTION_CONFIG = """Host {jump_box}
HOST_CONFIG = """# Instance ID: {instance_id} HOST_CONFIG = """# Instance ID: {instance_id}
Host {name} Host {name}
ProxyCommand ssh {config_file} -W %h:%p {jump_box} ProxyCommand ssh -q {config_file} -W %h:%p {jump_box}
HostName {ip} HostName {ip}
ForwardAgent yes ForwardAgent yes
User {user} User {user}
......
Vagrant.require_version ">= 1.5.3" Vagrant.require_version ">= 1.5.3"
unless Vagrant.has_plugin?("vagrant-vbguest")
raise "Please install the vagrant-vbguest plugin by running `vagrant plugin install vagrant-vbguest`"
end
VAGRANTFILE_API_VERSION = "2" VAGRANTFILE_API_VERSION = "2"
...@@ -6,18 +9,19 @@ MEMORY = 2048 ...@@ -6,18 +9,19 @@ MEMORY = 2048
CPU_COUNT = 2 CPU_COUNT = 2
edx_platform_mount_dir = "edx-platform" edx_platform_mount_dir = "edx-platform"
themes_mount_dir = "themes"
forum_mount_dir = "cs_comments_service" forum_mount_dir = "cs_comments_service"
ora_mount_dir = "ora" ora_mount_dir = "ora"
if ENV['VAGRANT_MOUNT_BASE'] if ENV['VAGRANT_MOUNT_BASE']
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
themes_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + themes_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir
end end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates a devstack from a base Ubuntu 12.04 image for virtualbox # Creates a devstack from a base Ubuntu 12.04 image for virtualbox
...@@ -33,15 +37,33 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -33,15 +37,33 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.ssh.insert_key = true config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
# Enable X11 forwarding so we can interact with GUI applications # Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11'] if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true config.ssh.forward_x11 = true
end
if ENV['VAGRANT_USE_VBOXFS'] == 'true'
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, owner: "forum", group: "www-data"
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, owner: "ora", group: "www-data"
else
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, nfs: true
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, nfs: true
end end
config.vm.provider :virtualbox do |vb| config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s] vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s] vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
...@@ -66,9 +88,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -66,9 +88,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# See: https://github.com/mitchellh/vagrant/issues/1188 # See: https://github.com/mitchellh/vagrant/issues/1188
config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale' config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale'
# Use vagrant-vbguest plugin to make sure Guest Additions are in sync
config.vbguest.auto_reboot = true
config.vbguest.auto_update = true
config.vm.provision :ansible do |ansible| config.vm.provision :ansible do |ansible|
ansible.playbook = "../../../playbooks/vagrant-devstack.yml" ansible.playbook = "../../../playbooks/vagrant-devstack.yml"
ansible.verbose = "vvvv" ansible.verbose = "vvvv"
end end
end end
...@@ -23,7 +23,7 @@ cd /edx/app/edx_ansible/edx_ansible/playbooks ...@@ -23,7 +23,7 @@ cd /edx/app/edx_ansible/edx_ansible/playbooks
# this can cause problems (e.g. looking for templates that no longer exist). # this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration release /edx/bin/update configuration release
ansible-playbook -i localhost, -c local vagrant-devstack.yml -e configuration_version=release ansible-playbook -i localhost, -c local vagrant-devstack.yml --tags=deploy -e configuration_version=release
SCRIPT SCRIPT
edx_platform_mount_dir = "edx-platform" edx_platform_mount_dir = "edx-platform"
...@@ -43,9 +43,8 @@ end ...@@ -43,9 +43,8 @@ end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX devstack VM from an official release # Creates an edX devstack VM from an official release
config.vm.box = "injera-devstack" config.vm.box = "johnnycake-devstack"
config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.box_url = "http://files.edx.org/vagrant-images/20140625-johnnycake-devstack.box"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10" config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000 config.vm.network :forwarded_port, guest: 8000, host: 8000
...@@ -55,15 +54,32 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -55,15 +54,32 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.network :forwarded_port, guest: 9200, host: 9200 config.vm.network :forwarded_port, guest: 9200, host: 9200
config.ssh.insert_key = true config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true
# Enable X11 forwarding so we can interact with GUI applications # Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11'] if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true config.ssh.forward_x11 = true
end end
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true if ENV['VAGRANT_USE_VBOXFS'] == 'true'
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes", :create => true, nfs: true config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, owner: "edxapp", group: "www-data"
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, owner: "forum", group: "www-data"
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, owner: "ora", group: "www-data"
else
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform",
create: true, nfs: true
config.vm.synced_folder "#{themes_mount_dir}", "/edx/app/edxapp/themes",
create: true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service",
create: true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora",
create: true, nfs: true
end
config.vm.provider :virtualbox do |vb| config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s] vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
...@@ -76,8 +92,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -76,8 +92,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
["vmware_fusion", "vmware_workstation"].each do |vmware_provider| ["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override| config.vm.provider vmware_provider do |v, override|
override.vm.box = "injera-devstack-vmware" override.vm.box = "johnnycake-devstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-devstack-vmware.box" override.vm.box_url = "http://files.edx.org/vagrant-images/20140630-johnnycake-devstack-vmware.box"
v.vmx["memsize"] = MEMORY.to_s v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s v.vmx["numvcpus"] = CPU_COUNT.to_s
end end
...@@ -90,5 +106,4 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -90,5 +106,4 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Assume that the base box has the edx_ansible role installed # Assume that the base box has the edx_ansible role installed
# We can then tell the Vagrant instance to update itself. # We can then tell the Vagrant instance to update itself.
config.vm.provision "shell", inline: $script config.vm.provision "shell", inline: $script
end end
...@@ -8,8 +8,8 @@ CPU_COUNT = 2 ...@@ -8,8 +8,8 @@ CPU_COUNT = 2
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX fullstack VM from an official release # Creates an edX fullstack VM from an official release
config.vm.box = "injera-fullstack" config.vm.box = "johnnycake-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-fullstack.box" config.vm.box_url = "http://files.edx.org/vagrant-images/20140625-johnnycake-fullstack.box"
config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = true config.ssh.insert_key = true
...@@ -25,11 +25,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -25,11 +25,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client # http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end end
["vmware_fusion", "vmware_workstation"].each do |vmware_provider| ["vmware_fusion", "vmware_workstation"].each do |vmware_provider|
config.vm.provider vmware_provider do |v, override| config.vm.provider vmware_provider do |v, override|
override.vm.box = "injera-fullstack-vmware" override.vm.box = "johnnycake-fullstack-vmware"
override.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-fullstack-vmware.box" override.vm.box_url = "http://files.edx.org/vagrant-images/20140630-johnnycake-fullstack-vmware.box"
v.vmx["memsize"] = MEMORY.to_s v.vmx["memsize"] = MEMORY.to_s
v.vmx["numvcpus"] = CPU_COUNT.to_s v.vmx["numvcpus"] = CPU_COUNT.to_s
end end
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment