Commit 50041fa8 by John Jarvis

Merge pull request #561 from edx/rc/empanada

Rc/empanada
parents 3e037702 4b80d36d
......@@ -5,7 +5,7 @@
\#*\#
*~
.#*
vagrant/devstack/cs_comments_service
vagrant/devstack/edx-platform
vagrant/release/*/devstack/cs_comments_service
vagrant/release/*/devstack/edx-platform
vagrant/*/devstack/edx-platform
vagrant/*/devstack/cs_comments_service
vagrant/*/devstack/ora
......@@ -16,7 +16,7 @@
"Description":"Name of an existing EC2 KeyPair to enable SSH access to the web server",
"Default":"deployment"
},
"InstanceType":{
"EdxappInstanceType":{
"Description":"WebServer EC2 instance type",
"Type":"String",
"Default":"m1.small",
......@@ -131,6 +131,52 @@
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"XserverInstanceType":{
"Description":"Xserver EC2 instance type",
"Type":"String",
"Default":"m1.small",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"XqueueInstanceType":{
"Description":"Xqueue EC2 instance type",
"Type":"String",
"Default":"m1.small",
"AllowedValues":[
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
],
"ConstraintDescription":"must be a valid EC2 instance type."
},
"SSHLocation":{
"Description":"The IP address range that can be used to SSH to the EC2 instances",
"Type":"String",
......@@ -393,7 +439,8 @@
"Forum02": { "CIDR":"10.0.81.0/24" },
"Mongo01": { "CIDR":"10.0.90.0/24" },
"Mongo02": { "CIDR":"10.0.91.0/24" },
"Mongo03": { "CIDR":"10.0.92.0/24" }
"Mongo03": { "CIDR":"10.0.92.0/24" },
"Admin": { "CIDR":"10.0.200.0/24" }
},
"MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
......@@ -460,6 +507,38 @@
}
}
},
"AdminSubnet":{
"Type":"AWS::EC2::Subnet",
"Properties":{
"VpcId":{
"Ref":"EdxVPC"
},
"CidrBlock":{
"Fn::FindInMap":[
"SubnetConfig",
"Admin",
"CIDR"
]
},
"AvailabilityZone":{
"Fn::FindInMap":[
"MapRegionsToAvailZones",
{ "Ref":"AWS::Region" },
"AZone0"
]
},
"Tags":[
{
"Key":"Application",
"Value":"admin"
},
{
"Key":"Network",
"Value":"Private"
}
]
}
},
"EdxappSubnet01":{
"Type":"AWS::EC2::Subnet",
"Properties":{
......@@ -1974,6 +2053,12 @@
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"11371",
"ToPort":"11371",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"icmp",
"FromPort":"-1",
"ToPort":"-1",
......@@ -2018,6 +2103,12 @@
"FromPort":"10016",
"ToPort":"10016",
"CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":"11371",
"ToPort":"11371",
"CidrIp":"0.0.0.0/0"
}
]
}
......@@ -2430,7 +2521,7 @@
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"InstanceType"
"Ref":"EdxappInstanceType"
},
"Arch"
]
......@@ -2477,7 +2568,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"EdxappInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -2544,8 +2635,12 @@
"LaunchConfigurationName":{
"Ref":"EdxappServer"
},
"MinSize":"2",
"MaxSize":"2",
"MinSize":{
"Ref":"EdxappDesiredCapacity"
},
"MaxSize":{
"Ref":"EdxappDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"EdxappDesiredCapacity"
},
......@@ -2781,7 +2876,7 @@
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"InstanceType"
"Ref":"XqueueInstanceType"
},
"Arch"
]
......@@ -2824,7 +2919,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"XqueueInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -2891,8 +2986,12 @@
"LaunchConfigurationName":{
"Ref":"XqueueServer"
},
"MinSize":"2",
"MaxSize":"2",
"MinSize":{
"Ref":"XqueueDesiredCapacity"
},
"MaxSize":{
"Ref":"XqueueDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"XqueueDesiredCapacity"
},
......@@ -3154,7 +3253,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"RabbitInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -3221,8 +3320,12 @@
"LaunchConfigurationName":{
"Ref":"RabbitMQServer"
},
"MinSize":"2",
"MaxSize":"2",
"MinSize":{
"Ref":"RabbitMQDesiredCapacity"
},
"MaxSize":{
"Ref":"RabbitMQDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"RabbitMQDesiredCapacity"
},
......@@ -3477,7 +3580,7 @@
"Fn::FindInMap":[
"AWSInstanceType2Arch",
{
"Ref":"InstanceType"
"Ref":"XserverInstanceType"
},
"Arch"
]
......@@ -3520,7 +3623,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"XserverInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -3587,8 +3690,12 @@
"LaunchConfigurationName":{
"Ref":"XServer"
},
"MinSize":"2",
"MaxSize":"2",
"MinSize":{
"Ref":"XServerDesiredCapacity"
},
"MaxSize":{
"Ref":"XServerDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"XServerDesiredCapacity"
},
......@@ -3979,7 +4086,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"WorkerInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -4046,8 +4153,12 @@
"LaunchConfigurationName":{
"Ref":"WorkerServer"
},
"MinSize":"2",
"MaxSize":"2",
"MinSize":{
"Ref":"WorkerDesiredCapacity"
},
"MaxSize":{
"Ref":"WorkerDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"WorkerDesiredCapacity"
}
......@@ -4224,7 +4335,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"ForumInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -4291,8 +4402,12 @@
"LaunchConfigurationName":{
"Ref":"ForumServer"
},
"MinSize":"2",
"MaxSize":"2",
"MinSize":{
"Ref":"ForumDesiredCapacity"
},
"MaxSize":{
"Ref":"ForumDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"ForumDesiredCapacity"
},
......@@ -4595,7 +4710,7 @@
"Ref":"KeyName"
},
"InstanceType":{
"Ref":"InstanceType"
"Ref":"MongoInstanceType"
},
"BlockDeviceMappings":[
{
......@@ -4683,8 +4798,12 @@
"LaunchConfigurationName":{
"Ref":"MongoServer"
},
"MinSize":"3",
"MaxSize":"3",
"MinSize":{
"Ref":"MongoDesiredCapacity"
},
"MaxSize":{
"Ref":"MongoDesiredCapacity"
},
"DesiredCapacity":{
"Ref":"MongoDesiredCapacity"
}
......@@ -4864,52 +4983,6 @@
"Value":{
"Ref":"EdxappServerSecurityGroup"
}
},
"DatabaseConfigurationString":{
"Description":"JDBC connection string for database",
"Value":{
"Fn::Join":[
"",
[
"'DATABASES': {\n",
" 'default': {\n",
" 'ENGINE': 'django.db.backends.mysql',\n",
" 'NAME': '",
{
"Ref":"DBName"
},
"',\n",
" 'USER': '",
{
"Ref":"DBUsername"
},
"',\n",
" 'PASSWORD': '",
{
"Ref":"DBPassword"
},
"',\n",
" 'HOST': '",
{
"Fn::GetAtt":[
"EdxDB",
"Endpoint.Address"
]
},
"',\n",
" 'PORT': '",
{
"Fn::GetAtt":[
"EdxDB",
"Endpoint.Port"
]
},
"'\n",
" }\n",
"}\n"
]
]
}
}
}
}
# Copyright 2013 John Jarvis <john@jarv.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import json
try:
import boto.sqs
from boto.exception import NoAuthHandlerFound
except ImportError:
print "Boto is required for the sqs_notify callback plugin"
raise
class CallbackModule(object):
    """
    This Ansible callback plugin sends task events
    to SQS.
    The following vars must be set in the environment:
    ANSIBLE_ENABLE_SQS - enables the callback module
    SQS_REGION - AWS region to connect to
    SQS_MSG_PREFIX - Additional data that will be put
    on the queue (optional)
    The following events are put on the queue
    - FAILURE events
    - OK events
    - TASK events
    - START events
    """
    def __init__(self):
        # Wall-clock time the playbook run started; every queued
        # message carries a 'TS' offset relative to this moment.
        self.start_time = time.time()

        if 'ANSIBLE_ENABLE_SQS' in os.environ:
            self.enable_sqs = True
            # SQS_REGION and SQS_NAME are mandatory once the plugin is
            # enabled; a missing one aborts the whole ansible run.
            if not 'SQS_REGION' in os.environ:
                print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \
                      'not defined in environment'
                sys.exit(1)
            self.region = os.environ['SQS_REGION']
            try:
                # Credentials come from the standard boto credential
                # chain (env vars, ~/.boto, instance role).
                self.sqs = boto.sqs.connect_to_region(self.region)
            except NoAuthHandlerFound:
                print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \
                      'to AWS due invalid credentials'
                sys.exit(1)
            if not 'SQS_NAME' in os.environ:
                print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \
                      'defined in environment'
                sys.exit(1)
            self.name = os.environ['SQS_NAME']
            # create_queue returns the existing queue if one with this
            # name already exists, so this is safe to call every run.
            self.queue = self.sqs.create_queue(self.name)
            if 'SQS_MSG_PREFIX' in os.environ:
                self.prefix = os.environ['SQS_MSG_PREFIX']
            else:
                self.prefix = ''
            # msg_type ('TASK', 'OK', ...) -> time it was last sent;
            # used below to compute per-task timing deltas.
            self.last_seen_ts = {}
        else:
            self.enable_sqs = False

    def runner_on_failed(self, host, res, ignore_errors=False):
        # Queue a FAILURE event unless the task's failure is ignored.
        if self.enable_sqs:
            if not ignore_errors:
                self._send_queue_message(res, 'FAILURE')

    def runner_on_ok(self, host, res):
        # Queue an OK event for every successful task result.
        if self.enable_sqs:
            # don't send the setup results
            if res['invocation']['module_name'] != "setup":
                self._send_queue_message(res, 'OK')

    def playbook_on_task_start(self, name, is_conditional):
        # Announce each task by name so queue consumers can correlate
        # the OK/FAILURE result that follows it.
        if self.enable_sqs:
            self._send_queue_message(name, 'TASK')

    def playbook_on_play_start(self, pattern):
        # Queue a START event carrying the play's host pattern.
        if self.enable_sqs:
            self._send_queue_message(pattern, 'START')

    def playbook_on_stats(self, stats):
        # Queue an end-of-run STATS summary: total elapsed seconds plus
        # the per-category host maps that Ansible's stats object tracks.
        if self.enable_sqs:
            d = {}
            delta = time.time() - self.start_time
            d['delta'] = delta
            for s in ['changed', 'failures', 'ok', 'processed', 'skipped']:
                d[s] = getattr(stats, s)
            self._send_queue_message(d, 'STATS')

    def _send_queue_message(self, msg, msg_type):
        # Wrap msg in an envelope {msg_type: msg, 'TS': ..., 'PREFIX': ...}
        # and push it to the SQS queue as a JSON string.
        if self.enable_sqs:
            from_start = time.time() - self.start_time
            payload = {msg_type: msg}
            payload['TS'] = from_start
            payload['PREFIX'] = self.prefix
            # update the last seen timestamp for
            # the message type
            self.last_seen_ts[msg_type] = time.time()
            if msg_type in ['OK', 'FAILURE']:
                # report the delta between the OK/FAILURE and
                # last TASK
                if 'TASK' in self.last_seen_ts:
                    from_task = \
                        self.last_seen_ts[msg_type] - self.last_seen_ts['TASK']
                    payload['delta'] = from_task
                # NOTE(review): assumes msg is the task result dict here
                # (it is for OK/FAILURE events) — stdout/stderr keys are
                # only present for some modules.
                for output in ['stderr', 'stdout']:
                    if output in payload[msg_type]:
                        # only keep the last 1000 characters
                        # of stderr and stdout
                        if len(payload[msg_type][output]) > 1000:
                            payload[msg_type][output] = "(clipping) ... " \
                                + payload[msg_type][output][-1000:]
            self.sqs.send_message(self.queue, json.dumps(payload))
---
# This playbook demonstrates how to use the ansible cloudformation module to launch an AWS CloudFormation stack.
#
# This module requires that the boto python library is installed, and that you have your AWS credentials
# in $HOME/.boto
#The thought here is to bring up a bare infrastructure with CloudFormation, but use ansible to configure it.
#I generally do this in 2 different playbook runs as to allow the ec2.py inventory to be updated.
#This module also uses "complex arguments" which were introduced in ansible 1.1 allowing you to specify the
#Cloudformation template parameters
#This example launches a 3 node AutoScale group, with a security group, and an InstanceProfile with root permissions.
#If a stack does not exist, it will be created. If it does exist and the template file has changed, the stack will be updated.
#If the parameters are different, the stack will also be updated.
#CloudFormation stacks can take a while to provision; if you are curious about the status, use the AWS
#web console or one of the CloudFormation CLI's.
#Example update -- try first launching the stack with 3 as the ClusterSize. After it is launched, change it to 4
#and run the playbook again.
- name: provision stack
hosts: localhost
connection: local
gather_facts: false
# Launch the cloudformation-example.json template. Register the output.
tasks:
- name: edX configuration
cloudformation: >
stack_name="$name" state=present
region=$region disable_rollback=false
template=../cloudformation_templates/edx-server-multi-instance.json
args:
template_parameters:
KeyName: $key
InstanceType: m1.small
GroupTag: $group
register: stack
- name: show stack outputs
debug: msg="My stack outputs are ${stack.stack_outputs}"
../callback_plugins
\ No newline at end of file
......@@ -2,7 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- certs
......@@ -2,6 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- gh_users
- common
# Creates a cname for a sandbox ec2 instance
- name: Creates a CNAME
hosts: all
gather_facts: False
tasks:
- name: Add DNS name
route53:
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ sandbox }}"
# Creates a single user on a server
# Example: ansible-playbook -i "jarv.m.sandbox.edx.org," ./create_user.yml -e "user=jarv"
- name: Create a single user
hosts: all
sudo: True
gather_facts: False
pre_tasks:
- fail: msg="You must pass a user into this play"
when: not user
- set_fact:
gh_users:
- "{{ user }}"
roles:
- gh_users
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: False
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
roles:
- common
- role: nginx
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
......@@ -2,7 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- devpi
......@@ -2,7 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- discern
---
# dummy var file
# This file is needed as a fall through
# for vars_files
dummy_var: True
- name: Deploy ansible
hosts: all
sudo: True
gather_facts: True
roles:
- edx_ansible
......@@ -7,7 +7,6 @@
migrate_db: "yes"
openid_workaround: True
roles:
- common
- role: nginx
nginx_sites:
- cms
......@@ -16,7 +15,6 @@
- xqueue
- xserver
- edxlocal
- supervisor
- mongo
- edxapp
- role: demo
......
......@@ -14,8 +14,6 @@
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
......@@ -26,7 +24,7 @@
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'sarina/install-datadog'
edx_platform_version: 'sarina/install-datadog'
- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_worker
sudo: True
vars_files:
......@@ -34,8 +32,6 @@
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
......@@ -45,14 +41,13 @@
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'sarina/install-datadog'
edx_platform_version: 'sarina/install-datadog'
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xserver
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - common
# - nginx
# - xserver
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_rabbitmq
......@@ -62,7 +57,6 @@
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - common
# - rabbitmq
#- hosts: tag_aws_cloudformation_stack-name_dev2:&tag_group_xqueue
# sudo: True
......@@ -70,6 +64,5 @@
# - "{{ secure_dir }}/vars/dev/dev2.yml"
# - "{{ secure_dir }}/vars/users.yml"
# roles:
# - common
# - nginx
# - xqueue
......@@ -6,8 +6,6 @@
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
......@@ -16,7 +14,7 @@
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'release'
edx_platform_version: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_worker
sudo: True
......@@ -25,8 +23,6 @@
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- datadog
- role: nginx
nginx_sites:
......@@ -36,7 +32,7 @@
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'release'
edx_platform_version: 'release'
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xserver
sudo: True
......@@ -44,8 +40,6 @@
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xserver
......@@ -58,8 +52,6 @@
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- rabbitmq
- splunkforwarder
- hosts: tag_aws_cloudformation_stack-name_feanilsandbox:&tag_role_xqueue
......@@ -68,8 +60,6 @@
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- xqueue
......@@ -81,6 +71,5 @@
- "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- role: 'mongo'
mongo_clustered: true
......@@ -4,8 +4,6 @@
sudo: True
gather_facts: False
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- devpi
......
......@@ -6,8 +6,6 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -22,8 +20,6 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -38,8 +34,6 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -54,8 +48,6 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -71,8 +63,6 @@
gather_facts: True
vars:
roles:
- common
- supervisor
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......
......@@ -15,6 +15,7 @@
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
terminate_instance: true
instance_profile_name: sandbox
- name: Configure instance(s)
hosts: launched
......@@ -26,9 +27,41 @@
path=/var/log/cloud-init.log
timeout=15
search_regex="final-message"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
roles:
# rerun common to set the hostname
# rerun common to set the hostname, nginx to set basic auth
- common
- role: nginx
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
# gh_users hash must be passed
# in as a -e variable
- gh_users
post_tasks:
- name: get instance id for elb registration
local_action:
module: ec2_lookup
region: us-east-1
tags:
Name: "{{ name_tag }}"
register: ec2_info
when: elb
sudo: False
- name: register instance into an elb if one was provided
local_action:
module: ec2_elb
region: "{{ region }}"
instance_id: "{{ ec2_info.instance_ids[0] }}"
state: present
ec2_elbs:
- "{{ elb }}"
when: elb
sudo: False
---
# This playbook is to configure
# the official edX sandbox instance
# sandbox.edx.org
- name: Configure instance(s)
hosts: tag_Name_edx-sandbox
sudo: True
gather_facts: True
vars:
migrate_db: "yes"
mysql5_workaround: True
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxlocal
- mongo
- edxapp
- rabbitmq
- oraclejdk
- elasticsearch
- { role: 'edxapp', celery_worker: True }
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_dir: "{{ forum_home }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
- forum
......@@ -2,31 +2,29 @@
- hosts: first_in_tag_role_mongo
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
roles:
- common
- gh_users
- role: 'mongo'
mongo_create_users: yes
#- hosts: tag_role_mongo:!first_in_tag_role_mongo
# sudo: True
# vars_files:
# - "{{ secure_dir }}/vars/dev/feanilsandbox.yml"
# - "{{ secure_dir }}/vars/users.yml"
# - "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
# - "{{ secure_dir }}/vars/common/common.yml"
# roles:
# - common
# - gh_users
# - mongo
- hosts: first_in_tag_role_edxapp
sudo: True
serial: 1
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- gh_users
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
......@@ -34,21 +32,18 @@
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
migrate_db: 'yes'
migrate_db: '{{ RUN_EDXAPP_MIGRATION }}'
openid_workaround: 'yes'
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_edxapp:!first_in_tag_role_edxapp
sudo: True
serial: 1
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- gh_users
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
......@@ -56,18 +51,15 @@
- lms-preview
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_worker
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- gh_users
- datadog
- supervisor
- role: nginx
nginx_sites:
- lms
......@@ -76,17 +68,14 @@
- role: 'edxapp'
edxapp_lms_env: 'lms.envs.load_test'
celery_worker: True
edx_platform_commit: 'HEAD'
- splunkforwarder
- hosts: tag_role_xserver
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- gh_users
- role: nginx
nginx_sites:
- xserver
......@@ -96,38 +85,32 @@
serial: 1
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- gh_users
- rabbitmq
- splunkforwarder
- hosts: first_in_tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- gh_users
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
migrate_db: 'yes'
migrate_db: '{{ RUN_XQUEUE_MIGRATION }}'
- splunkforwarder
- hosts: tag_role_xqueue:!first_in_tag_role_xqueue
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- gh_users
- role: nginx
nginx_sites:
- xqueue
......@@ -136,12 +119,10 @@
- hosts: tag_role_forum
sudo: True
vars_files:
- "{{ secure_dir }}/vars/dev/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/{{ENVIRONMENT}}/{{CLOUDFORMATION_STACK_NAME}}.yml"
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/users.yml"
roles:
- common
- supervisor
- gh_users
- oraclejdk
- elasticsearch
- forum
......@@ -2,7 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- edxapp
......@@ -2,7 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- forum
......@@ -9,4 +9,6 @@
vars:
COMMON_DATA_DIR: "/mnt"
roles:
- common
- gh_users
- jenkins_master
......@@ -6,6 +6,8 @@
hosts: jenkins_worker
sudo: True
gather_facts: True
vars:
mongo_enable_journal: False
roles:
- common
- edxlocal
......
# ansible-playbook -i ec2.py --limit="tag_group_grader:&tag_environment_stage" legacy_ora.yml -e "COMMON_ENV_TYPE=stage secure_dir=/path/to/secure/dir"
- name: Deploy legacy_ora
hosts: all
sudo: True
gather_facts: True
vars:
ora_app_dir: '/opt/wwc'
ora_user: 'www-data'
serial: 1
roles:
- legacy_ora
......@@ -5,8 +5,6 @@
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/mlapi_prod_users.yml"
roles:
- common
- supervisor
- discern
sudo: True
- hosts:
......
......@@ -5,8 +5,6 @@
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/mlapi_sandbox_users.yml"
roles:
- common
- supervisor
- discern
sudo: True
- hosts:
......
......@@ -5,8 +5,6 @@
- "{{ secure_dir }}/vars/users.yml"
- "{{ secure_dir }}/vars/mlapi_stage_users.yml"
roles:
- common
- supervisor
- discern
sudo: True
- hosts:
......
......@@ -2,7 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- ora
......@@ -2,5 +2,7 @@
hosts: all
sudo: True
gather_facts: False
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- rabbitmq
- name: Deploy worker
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- role: edxapp
celery_worker: True
......@@ -2,8 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- role: xqueue
tags: ['xqueue']
......@@ -2,8 +2,7 @@
hosts: all
sudo: True
gather_facts: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- common
- supervisor
- role: xserver
tags: ['xserver']
Readme
------
This directory has the live playbooks that we use here at Stanford to
maintain our instance of OpenEdX at [class.stanford.edu][c]. We check
it in to this public repo since we think that others might benefit from
seeing how we are configured.
[c]: https://class.stanford.edu/
That said, we haven't documented things in here well, so we have no
expectation that others will be able to make enough sense of this to
give us useful contributions back. Generally a PR affecting files in
here will be ignored / rejected.
This README is a useful proximate place to keep commands. But it is
a public repo so we shouldn't store anything confidential in here.
Other install docs:
- Giulio's install doc [here][1].
[1]: https://docs.google.com/document/d/1ZDx51Jxa-zffyeKvHmTp_tIskLW9D9NRg9NytPTbnrA/edit#heading=h.iggugvghbcpf
Ansible Commands - Prod
-----------------------
Generally we do installs as the "ubuntu" user. You want to make
sure that the stanford-deploy-20130415 ssh key is in your ssh agent.
ANSIBLE_EC2_INI=ec2.ini ansible-playbook prod-log.yml -u ubuntu -c ssh -i ./ec2.py
Ansible Commands - Stage
------------------------
Verify that you're doing something reasonable:
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py --list-hosts
Then run it for real:
ANSIBLE_CONFIG=stage-ansible.cfg ANSIBLE_EC2_INI=ec2.ini ansible-playbook stage-app.yml -u ubuntu -c ssh -i ./ec2.py
- hosts: tag_Name_log10_prod
sudo: True
vars_files:
- "{{ secure_dir }}/vars/users.yml"
vars:
secure_dir: '../../../configuration-secure/ansible'
local_dir: '../../../configuration-secure/ansible/local'
roles:
- common
- name: Configure stage instance(s)
hosts: notifier_stage
sudo: True
vars_files:
- "{{ secure_dir }}/vars/stage/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure loadtest instance(s)
hosts: notifier_loadtest
sudo: True
vars_files:
- "{{ secure_dir }}/vars/loadtest/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure stage edge instance(s)
hosts: notifier_edge_stage
sudo: True
vars_files:
- "{{ secure_dir }}/vars/edge_stage/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure prod instance(s)
hosts: notifier_prod
sudo: True
vars_files:
- "{{ secure_dir }}/vars/prod/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Configure edge prod instance(s)
hosts: notifier_edge_prod
sudo: True
vars_files:
- "{{ secure_dir }}/vars/edge_prod/notifier.yml"
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
vars:
roles:
- common
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
virtualenv_name: "notifier"
- notifier
- name: Create sandbox instance
hosts: localhost
connection: local
gather_facts: False
vars:
keypair: continuous-integration
instance_type: m1.small
security_group: sandbox
image: ami-d0f89fb9
region: us-east-1
instance_tags: '{"disposable": "true"}'
roles:
- launch_instance
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
vars:
migrate_db: "yes"
openid_workaround: True
ansible_ssh_private_key_file: /var/lib/jenkins/continuous-integration.pem
vars_files:
- "{{ secure_dir }}/vars/edxapp_ref_users.yml"
- "{{ secure_dir }}/vars/edxapp_sandbox.yml"
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xserver
- xqueue
- edxlocal
- mongo
- edxapp
- xqueue
- xserver
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: ec2
state: 'absent'
instance_ids: ${ec2.instance_ids}
# This playbook is to configure
# the official edX sandbox instance
# sandbox.edx.org
#
# On the machine you want to configure run the following
# command from the configuration/playbooks directory:
# ansible-playbook -c local --limit "localhost:127.0.0.1" /path/to/configuration/playbooks/edx_sandbox.yml -i "localhost,"
#
# To use different default ports for lms-preview, cms and to set the lms_base and lms_preview_base,
# for the following configuration:
# studio listening on port 80 - studio.example.com
# lms listening on port 80 - example.com
# lms-preview listening on port 80 - preview.example.com
#
# ansible-playbook -c local --limit "localhost:127.0.0.1" path/to/configuration/playbooks/edx_sandbox.yml -i "localhost," -e "EDXAPP_CMS_NGINX_PORT=80 EDXAPP_LMS_PREVIEW_NGINX_PORT=80 EDXAPP_LMS_BASE=example.com EDXAPP_PREVIEW_LMS_BASE=preview.example.com"
#
---
# Example sandbox configuration
# for single server community
# installs
- name: Configure instance(s)
hosts: localhost
hosts: all
sudo: True
gather_facts: True
vars:
migrate_db: "yes"
openid_workaround: True
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master'
roles:
- common
- supervisor
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- xqueue
- lms
- ora
- xqueue
- edxlocal
- mongo
- edxapp
- demo
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- { role: 'edxapp', celery_worker: True }
- oraclejdk
......@@ -41,3 +31,5 @@
- forum
- { role: "xqueue", update_users: True }
- ora
- discern
- edx_ansible
- hosts: tag_Group_edxapp_ref
sudo: True
vars_files:
- "{{ secure_dir }}/vars/edxapp_ref_vars.yml"
- "{{ secure_dir }}/vars/edxapp_ref_users.yml"
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- gunicorn
- edxapp
- ruby
- npm
# run this role last
- in_production
# ansible-playbook -v --user=ubuntu edxapp_rolling_example.yml -i ./ec2.py --private-key=/path/to/deployment.pem
- hosts: tag_Group_anothermulti
serial: 2
vars_files:
- "{{ secure_dir }}/vars/edxapp_stage_vars.yml"
- "{{ secure_dir }}/vars/users.yml"
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Removing instance from the ELB
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- common
- role: nginx
nginx_sites:
- lms
- cms
- lms-preview
- edxapp
- ruby
post_tasks:
- name: Adding instance back to the ELB
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ ec2_elbs }}"
state: 'present'
[jenkins_test]
jenkins-test.sandbox.edx.org
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible parses this YAML block to render module docs; the 'tags'
# option previously used the misspelled key 'desription', which broke
# that parsing.  Fixed below.
DOCUMENTATION = '''
---
module: vpc_lookup
short_description: returns a list of subnet Ids using tags as criteria
description:
    - Returns a list of subnet Ids for a given set of tags that identify one or more VPCs
version_added: "1.5"
options:
  region:
    description:
      - The AWS region to use. Must be specified if ec2_url
        is not used. If not specified then the value of the
        EC2_REGION environment variable, if any, is used.
    required: false
    default: null
    aliases: [ 'aws_region', 'ec2_region' ]
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of
        the AWS_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the
        AWS_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]
  tags:
    description:
      - tags to lookup
    required: false
    default: null
    type: dict
    aliases: []
requirements: [ "boto" ]
author: John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Return all instances that match the tag "Name: foo"
- local_action:
module: vpc_lookup
tags:
Name: foo
'''
import sys

# Regions accepted for the `region` parameter; enforced via the
# AnsibleModule `choices` validation in main().
AWS_REGIONS = ['ap-northeast-1',
               'ap-southeast-1',
               'ap-southeast-2',
               'eu-west-1',
               'sa-east-1',
               'us-east-1',
               'us-west-1',
               'us-west-2']

try:
    from boto.vpc import VPCConnection
    from boto.vpc import connect_to_region
except ImportError:
    # Ansible modules report failure as key=value pairs on stdout;
    # boto is a hard requirement for this module.
    print "failed=True msg='boto required for this module'"
    sys.exit(1)
def main():
    """Module entry point: look up subnet and VPC ids by tag filters.

    Exits via module.exit_json with `subnet_ids` and `vpc_ids` lists, or
    via module.fail_json when no region is given or the connection fails.
    """
    module = AnsibleModule(
        argument_spec=dict(
            region=dict(choices=AWS_REGIONS),
            aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
                                no_log=True),
            aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
            tags=dict(default=None, type='dict'),
        )
    )

    # `tags` defaults to null; treat the missing case as "no filters"
    # instead of crashing on None.iteritems().
    tags = module.params.get('tags') or {}
    aws_secret_key = module.params.get('aws_secret_key')
    aws_access_key = module.params.get('aws_access_key')
    region = module.params.get('region')

    # Region is mandatory: connect to its VPC endpoint with the supplied
    # credentials and use that single connection for every lookup below.
    # (Previously subnets were fetched through a fresh, unauthenticated,
    # region-less VPCConnection(), ignoring the module's credentials.)
    if region:
        try:
            vpc_conn = connect_to_region(
                region,
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key)
        except Exception as e:
            # Broad catch on purpose: naming boto.exception.* here would
            # raise a NameError, since only boto.vpc names are imported.
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    # Build one EC2 filter dict of the form {'tag:<key>': <value>, ...}.
    filters = dict(('tag:' + tag, value)
                   for tag, value in tags.iteritems())

    subnet_ids = [subnet.id
                  for subnet in vpc_conn.get_all_subnets(filters=filters)]
    vpc_ids = [vpc.id
               for vpc in vpc_conn.get_all_vpcs(filters=filters)]

    module.exit_json(changed=False, subnet_ids=subnet_ids, vpc_ids=vpc_ids)
# this is magic, see lib/ansible/module_common.py -- Ansible replaces the
# marker below with the shared AnsibleModule boilerplate before execution.
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()
---
dependencies:
- {
role: automated,
......
......@@ -15,7 +15,7 @@
#
- name: certs | restart certs
supervisorctl: >
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
dependencies:
- supervisor
......@@ -68,7 +68,7 @@
changed_when: supervisor_update.stdout != ""
- name: certs | ensure certs has started
supervisorctl: >
supervisorctl_local: >
name=certs
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
......@@ -57,7 +57,7 @@
- name: certs | create certs gpg dir
file: >
path="{{ certs_gpg_dir }}" state=directory
owner="{{ certs_user }}" group="{{ certs_user }}"
owner="{{ common_web_user }}"
mode=0700
notify: certs | restart certs
......@@ -65,7 +65,7 @@
copy: >
src={{ CERTS_LOCAL_PRIVATE_KEY }}
dest={{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
owner={{ certs_user }} mode=0600
owner={{ common_web_user }} mode=0600
notify: certs | restart certs
register: certs_gpg_key
......@@ -73,7 +73,7 @@
- name: certs | load the gpg key
shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import {{ certs_app_dir }}/{{ CERTS_LOCAL_PRIVATE_KEY|basename }}
sudo_user: "{{ certs_user }}"
sudo_user: "{{ common_web_user }}"
when: certs_gpg_key.changed
notify: certs | restart certs
......
......@@ -22,6 +22,7 @@ COMMON_GIT_MIRROR: 'github.com'
COMMON_HOSTNAME: !!null
common_debian_pkgs:
- ntp
- ack-grep
- lynx-cur
- logrotate
......@@ -44,3 +45,7 @@ common_web_group: www-data
common_log_user: syslog
common_git_ppa: "ppa:git-core/ppa"
# Skip supervisor tasks
# Useful when supervisor is not installed (local dev)
devstack: False
......@@ -15,11 +15,6 @@
- "{{ COMMON_BIN_DIR }}"
- "{{ COMMON_CFG_DIR }}"
- name: common | Create common log directory
file: >
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
# Need to install python-pycurl to use Ansible's apt_repository module
- name: common | Install python-pycurl
apt: pkg=python-pycurl state=present update_cache=yes
......@@ -36,6 +31,11 @@
pkg={{','.join(common_debian_pkgs)}} install_recommends=yes
state=present update_cache=yes
- name: common | Create common log directory
file: >
path={{ COMMON_LOG_DIR }} state=directory owner=syslog
group=syslog mode=0755
- name: common | upload sudo config for key forwarding as root
copy: >
src=ssh_key_forward dest=/etc/sudoers.d/ssh_key_forward
......
......@@ -15,50 +15,43 @@
# - datadog
#
- name: datadog | add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
- name: datadog | install debian needed pkgs
apt: pkg={{ item }}
with_items: datadog_debian_pkgs
tags:
- datadog
- ubuntu
when: ansible_distribution in common_debian_variants
- name: datadog | install apt repository
shell: echo 'deb http://apt.datadoghq.com/ unstable main' > /etc/apt/sources.list.d/datadog-source.list
- name: datadog | add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
tags:
- datadog
- ubuntu
when: ansible_distribution in common_debian_variants
- name: datadog | add yum repo
copy:
src=etc/yum.repo.d/datdog.repo
dest=/etc/yum.repo.d/datdog.repo
- name: datadog | install apt repository
apt_repository: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes
tags:
- datadog
- redhat
when_string: ansible_distribution in common_redhat_variants
- name: datadog | install datadog agent
apt: pkg="datadog-agent" update_cache=yes
apt: pkg="datadog-agent"
tags:
- datadog
- ubuntu
when: ansible_distribution in common_debian_variants
- name: datadog | bootstrap config
shell: cp /etc/dd-agent/datadog.conf.example /etc/dd-agent/datadog.conf creates=/etc/dd-agent/datadog.conf
tags:
- datadog
# quoting intentional, missing space after line=api_key: also
# ansible wasn't handling the double quoted yaml properly
# otherwise.
- name: datadog | update api-key
lineinfile:
lineinfile: >
dest="/etc/dd-agent/datadog.conf"
"regexp=^api_key:.*"
"line=api_key:{{ common_dd_api_key }}"
regexp="^api_key:.*"
line="api_key:{{ datadog_api_key }}"
notify:
- datadog | restart the datadog service
tags:
- datadog
- name: datadog | ensure started and enabled
service: name=datadog-agent state=started enabled=yes
tags:
- datadog
......@@ -39,3 +39,11 @@
when: demo_checkout.changed
tags: deploy
- name: demo | seed the forums for the demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
tags: deploy
......@@ -12,7 +12,7 @@
#
---
- name: devpi | restart devpi
supervisorctl: >
supervisorctl_local: >
state=restarted
supervisorctl_path={{ devpi_supervisor_ctl }}
config={{ devpi_supervisor_cfg }}
......
......@@ -107,7 +107,7 @@
tags: deploy
- name: devpi | ensure devpi is started
supervisorctl: >
supervisorctl_local: >
state=started
supervisorctl_path={{ devpi_supervisor_ctl }}
config={{ devpi_supervisor_cfg }}
......
DISCERN_NGINX_PORT: 18070
DISCERN_BASIC_AUTH: False
DISCERN_MEMCACHE: [ 'localhost:11211' ]
DISCERN_AWS_ACCESS_KEY_ID: ""
DISCERN_AWS_SECRET_ACCESS_KEY: ""
......@@ -32,10 +31,12 @@ discern_ease_pre_requirements_file: "{{ discern_ease_code_dir }}/pre-requirement
discern_ease_post_requirements_file: "{{ discern_ease_code_dir }}/requirements.txt"
discern_nltk_data_dir: "{{ discern_data_dir}}/nltk_data"
discern_nltk_download_url: http://edx-static.s3.amazonaws.com/nltk/nltk-data-20131113.tar.gz
discern_nltk_tmp_file: "{{ discern_data_dir }}/nltk.tmp.tar.tz"
discern_source_repo: https://github.com/edx/discern.git
discern_settings: discern.aws
discern_branch: master
discern_version: master
discern_gunicorn_port: 8070
discern_gunicorn_host: 127.0.0.1
......
---
- name: discern | restart discern
supervisorctl: >
supervisorctl_local: >
name=discern
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
......@@ -25,7 +25,7 @@
- deploy
- name: discern | git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_branch }}
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
......@@ -41,7 +41,7 @@
- deploy
#Numpy has to be a pre-requirement in order for scipy to build
- name : install python pre-requirements for discern and ease
- name : discern | install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
......@@ -52,7 +52,7 @@
tags:
- deploy
- name : install python requirements for discern and ease
- name : discern | install python requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
sudo_user: "{{ discern_user }}"
notify:
......@@ -71,16 +71,22 @@
tags:
- deploy
#Needed for the ease package to work
- name: discern | install nltk data using rendered shell script
shell: >
{{ discern_venv_dir }}/bin/python -m nltk.downloader -d {{ discern_nltk_data_dir }} all
- name: discern | download and install nltk
shell: |
set -e
curl -o {{ discern_nltk_tmp_file }} {{ discern_nltk_download_url }}
tar zxf {{ discern_nltk_tmp_file }}
rm -f {{ discern_nltk_tmp_file }}
touch {{ discern_nltk_download_url|basename }}-installed
creates={{ discern_data_dir }}/{{ discern_nltk_download_url|basename }}-installed
chdir={{ discern_data_dir }}
sudo_user: "{{ discern_user }}"
notify:
- discern | restart discern
tags:
- deploy
#Run this instead of using the ansible module because the ansible module only support syncdb of these three, and does not
#support virtualenvs as of this comment
- name: discern | django syncdb migrate and collectstatic for discern
......@@ -121,7 +127,7 @@
tags: deploy
- name: discern | ensure discern, discern_celery has started
supervisorctl: >
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role edx_ansible, an edx_ansible role to install edx_ansible
#
#
# OS packages
#
edx_ansible_debian_pkgs:
- python-pip
- python-apt
- git-core
- build-essential
- python-dev
- libxml2-dev
- libxslt1-dev
- curl
edx_ansible_app_dir: "{{ COMMON_APP_DIR }}/edx_ansible"
edx_ansible_code_dir: "{{ edx_ansible_app_dir }}/edx_ansible"
edx_ansible_data_dir: "{{ COMMON_DATA_DIR }}/edx_ansible"
edx_ansible_venvs_dir: "{{ edx_ansible_app_dir }}/venvs"
edx_ansible_venv_dir: "{{ edx_ansible_venvs_dir }}/edx_ansible"
edx_ansible_venv_bin: "{{ edx_ansible_venv_dir }}/bin"
edx_ansible_user: "edx-ansible"
edx_ansible_source_repo: https://github.com/edx/configuration.git
edx_ansible_requirements_file: "{{ edx_ansible_code_dir }}/requirements.txt"
# edX configuration repo
configuration_version: master
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role edx_ansible
dependencies:
- supervisor
---
- name: edx_ansible | git checkout edx_ansible repo into edx_ansible_code_dir
git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
sudo_user: "{{ edx_ansible_user }}"
tags: deploy
- name : edx_ansible | install edx_ansible venv requirements
pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present
sudo_user: "{{ edx_ansible_user }}"
tags: deploy
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Tasks for role edx_ansible
#
# Overview:
#
# This is an edx_ansible role that installs edx_ansible :)
# The purpose is to install edx_ansible on a server so
# that it can be updated locally.
#
# This role will also drop some helper scripts
# for running edx_ansible tasks
#

- name: edx_ansible | create application user
  user: >
    name="{{ edx_ansible_user }}"
    home="{{ edx_ansible_app_dir }}"
    createhome=no
    shell=/bin/false

- name: edx_ansible | create edx_ansible app and venv dir
  file: >
    path="{{ item }}"
    state=directory
    owner="{{ edx_ansible_user }}"
    group="{{ common_web_group }}"
  with_items:
    - "{{ edx_ansible_app_dir }}"
    - "{{ edx_ansible_venvs_dir }}"

- name: edx_ansible | install a bunch of system packages on which edx_ansible relies
  apt: pkg={{','.join(edx_ansible_debian_pkgs)}} state=present

- include: deploy.yml

# mode is given with a leading zero so it is read as octal 0755,
# not decimal 755.
- name: edx_ansible | create update script
  template: >
    dest={{ edx_ansible_app_dir}}/update
    src=update.j2 owner={{ edx_ansible_user }} group={{ edx_ansible_user }} mode=0755

# Task name prefix fixed from "edxapp |" to match this role's convention.
- name: edx_ansible | create a symlink for update.sh
  file: >
    src={{ edx_ansible_app_dir }}/update
    dest={{ COMMON_BIN_DIR }}/update
    state=link
#!/usr/bin/env bash
# This script runs edx_ansible locally to update a single repo
# to a given version.
#
# Usage: update [-v] [-h] <repo> <version>

set -e

usage() {
    SAVE_IFS=$IFS
    IFS=","
    cat<<EO

    Usage: $PROG <repo> <version>
        -v  add verbosity to edx_ansible run
        -h  this

    <repo> - must be one of [${!repos_to_cmd[*]}]
    <version> - can be a commit or tag
EO
    IFS=$SAVE_IFS
}

PROG=${0##*/}

# Parse options *before* building the repo table: the table below expands
# $2 (the requested version) at assignment time, so the positional
# parameters must already be shifted past any flags.  The original script
# built the table first, which baked the repo name in as the version
# whenever -v was passed.
verbose=""
show_help=""
while getopts "vh" opt; do
    case $opt in
        v)
            verbose="-vvvv"
            ;;
        h)
            show_help=1
            ;;
    esac
done
shift $((OPTIND-1))

declare -A repos_to_cmd
edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy"

repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["forums"]="$edx_ansible_cmd forums.yml -e 'forum_version=$2'"
# Fixed: this entry previously ran forums.yml for the xserver repo.
repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"

if [[ -n $show_help ]]; then
    usage
    exit 0
fi

if [[ -z $1 || -z $2 ]]; then
    echo
    echo "ERROR: You must specify a repo and commit"
    usage
    exit 1
fi

if [[ -z ${repos_to_cmd[$1]} ]]; then
    echo
    echo "ERROR: Invalid repo name"
    usage
    exit 1
fi

cd {{ edx_ansible_code_dir }}/playbooks/edx-east
eval "sudo ${repos_to_cmd["$1"]} $verbose"
......@@ -45,8 +45,6 @@ EDXAPP_COMMENTS_SERVICE_KEY: 'password'
EDXAPP_EDXAPP_SECRET_KEY: ''
EDXAPP_PEARSON_TEST_PASWORD: ''
EDXAPP_OEE_URL: 'http://localhost:18060/'
EDXAPP_OEE_USER: 'lms'
EDXAPP_OEE_PASSWORD: 'password'
......@@ -59,15 +57,15 @@ EDXAPP_CELERY_PASSWORD: 'celery'
EDXAPP_PLATFORM_NAME: 'edX'
EDXAPP_MITX_FEATURES:
EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true
CERTIFICATES_ENABLED: true
ENABLE_DISCUSSION_SERVICE: true
ENABLE_INSTRUCTOR_ANALYTICS: true
ENABLE_PEARSON_HACK_TEST: false
SUBDOMAIN_BRANDING: false
SUBDOMAIN_COURSE_LISTINGS: false
PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE
ENABLE_S3_GRADE_DOWNLOADS: true
EDXAPP_BOOK_URL: ''
# This needs to be set to localhost
......@@ -88,24 +86,32 @@ EDXAPP_LMS_NGINX_PORT: 18000
EDXAPP_LMS_PREVIEW_NGINX_PORT: 18020
EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_LMS_BASIC_AUTH: False
EDXAPP_CMS_BASIC_AUTH: False
EDXAPP_LMS_PREVIEW_BASIC_AUTH: False
EDXAPP_LANG: 'en_US.UTF-8'
EDXAPP_TIME_ZONE: 'America/New_York'
EDXAPP_TECH_SUPPORT_EMAIL: ''
EDXAPP_CONTACT_EMAIL: ''
EDXAPP_BUGS_EMAIL: ''
EDXAPP_DEFAULT_FROM_EMAIL: ''
EDXAPP_DEFAULT_FEEDBACK_EMAIL: ''
EDXAPP_SERVER_EMAIL: ''
EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL: ''
EDXAPP_TECH_SUPPORT_EMAIL: 'technical@example.com'
EDXAPP_CONTACT_EMAIL: 'info@example.com'
EDXAPP_BUGS_EMAIL: 'bugs@example.com'
EDXAPP_DEFAULT_FROM_EMAIL: 'registration@example.com'
EDXAPP_DEFAULT_FEEDBACK_EMAIL: 'feedback@example.com'
EDXAPP_DEFAULT_SERVER_EMAIL: 'devops@example.com'
EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL: 'no-reply@example.com'
EDXAPP_ENV_EXTRA: {}
EDXAPP_AUTH_EXTRA: {}
EDXAPP_MKTG_URL_LINK_MAP: {}
# Sets the url for static files
# Override this var to use a CDN
# Example: xxxxx.cloudfront.net/static/
EDXAPP_STATIC_URL_BASE: "/static/"
# Settings for Grade downloads
EDXAPP_GRADE_STORAGE_TYPE: 'localfs'
EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
# Configure rake tasks in edx-platform to skip Python/Ruby/Node installation
EDXAPP_NO_PREREQ_INSTALL: 1
#-------- Everything below this line is internal to the role ------------
......@@ -181,7 +187,7 @@ edxapp_all_req_files:
edxapp_environment:
LANG: $EDXAPP_LANG
NO_PREREQ_INSTALL: 1
NO_PREREQ_INSTALL: $EDXAPP_NO_PREREQ_INSTALL
SKIP_WS_MIGRATIONS: 1
RBENV_ROOT: $edxapp_rbenv_root
GEM_HOME: $edxapp_gem_root
......@@ -228,7 +234,7 @@ edxapp_generic_auth_config: &edxapp_generic_auth
host: $EDXAPP_MONGO_HOSTS
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
render_template: 'mitxmako.shortcuts.render_to_string'
render_template: 'edxmako.shortcuts.render_to_string'
# Needed for the CMS to be able to run update_templates
user: $EDXAPP_MONGO_USER
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
......@@ -244,7 +250,6 @@ edxapp_generic_auth_config: &edxapp_generic_auth
PASSWORD: $EDXAPP_MYSQL_PASSWORD
HOST: $EDXAPP_MYSQL_HOST
PORT: $EDXAPP_MYSQL_PORT
PEARSON_TEST_PASSWORD: $EDXAPP_PEARSON_TEST_PASSWORD
OPEN_ENDED_GRADING_INTERFACE:
url: $EDXAPP_OEE_URL
password: $EDXAPP_OEE_PASSWORD
......@@ -259,6 +264,11 @@ edxapp_generic_auth_config: &edxapp_generic_auth
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
generic_env_config: &edxapp_generic_env
GRADES_DOWNLOAD:
STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE
BUCKET: $EDXAPP_GRADE_BUCKET
ROOT_PATH: $EDXAPP_GRADE_ROOT_PATH
STATIC_URL_BASE: $EDXAPP_STATIC_URL_BASE
STATIC_ROOT_BASE: $edxapp_staticfile_dir
LMS_BASE: $EDXAPP_LMS_BASE
CMS_BASE: $EDXAPP_CMS_BASE
......@@ -268,7 +278,7 @@ generic_env_config: &edxapp_generic_env
LOCAL_LOGLEVEL: $EDXAPP_LOG_LEVEL
# default email backed set to local SMTP
EMAIL_BACKEND: $EDXAPP_EMAIL_BACKEND
MITX_FEATURES: $EDXAPP_MITX_FEATURES
FEATURES: $EDXAPP_FEATURES
WIKI_ENABLED: true
SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
SITE_NAME: $EDXAPP_SITE_NAME
......@@ -278,6 +288,8 @@ generic_env_config: &edxapp_generic_env
FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
TIME_ZONE: $EDXAPP_TIME_ZONE
MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP
# repo root for courses
GITHUB_REPO_ROOT: $edxapp_course_data_dir
CACHES:
default: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
......@@ -336,7 +348,7 @@ lms_auth_config:
host: $EDXAPP_MONGO_HOSTS
db: $EDXAPP_MONGO_DB_NAME
collection: 'modulestore'
render_template: 'mitxmako.shortcuts.render_to_string'
render_template: 'edxmako.shortcuts.render_to_string'
user: $EDXAPP_MONGO_USER
password: $EDXAPP_MONGO_PASSWORD
port: $EDXAPP_MONGO_PORT
......@@ -401,9 +413,9 @@ edxapp_theme_version: 'HEAD'
# make this the public URL instead of writable
edx_platform_repo: "https://{{ COMMON_GIT_MIRROR }}/edx/edx-platform.git"
# `edx_platform_commit` can be anything that git recognizes as a commit
# `edx_platform_version` can be anything that git recognizes as a commit
# reference, including a tag, a branch name, or a commit hash
edx_platform_commit: 'release'
edx_platform_version: 'release'
local_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/local.txt"
pre_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/pre.txt"
post_requirements_file: "{{ edxapp_code_dir }}/requirements/edx/post.txt"
......@@ -446,6 +458,8 @@ edxapp_debian_pkgs:
- ntp
# for shapely
- libgeos-dev
# i18n
- gettext
# Ruby Specific Vars
edxapp_ruby_version: "1.9.3-p374"
......@@ -457,6 +471,3 @@ edxapp_cms_variant: cms
# Worker Settings
worker_django_settings_module: 'aws'
# Skip supervisor tasks
# Useful when supervisor is not installed (local dev)
devstack: False
---
- name: edxapp | restart edxapp
supervisorctl: >
supervisorctl_local: >
state=restarted
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
when: not devstack
when: celery_worker is not defined and not devstack
sudo_user: "{{ supervisor_service_user }}"
with_items: service_variants_enabled
tags: deploy
- name: edxapp | restart edxapp_workers
supervisorctl: >
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
- role: rbenv
rbenv_user: "{{ edxapp_user }}"
rbenv_dir: "{{ edxapp_app_dir }}"
......
- name: edxapp | setup the edxapp env
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
template: >
src=edxapp_env.j2 dest={{ edxapp_app_dir }}/edxapp_env
owner={{ edxapp_user }} group={{ common_web_user }}
mode=0644
tags: deploy
# Do A Checkout
- name: edxapp | checkout edx-platform repo into {{edxapp_code_dir}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_commit}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
register: chkout
sudo_user: "{{ edxapp_user }}"
notify:
......@@ -201,8 +211,7 @@
# https://code.launchpad.net/~wligtenberg/django-openid-auth/mysql_fix/+merge/22726
# This is necessary for when syncdb is run and the django_openid_auth module is installed,
# not sure if this fix will ever get merged
# We should never do this in production
- name: edxapp | openid workaround - NOT FOR PRODUCTION
- name: edxapp | openid workaround
shell: sed -i -e 's/claimed_id = models.TextField(max_length=2047, unique=True/claimed_id = models.TextField(max_length=2047/' {{ edxapp_venv_dir }}/lib/python2.7/site-packages/django_openid_auth/models.py
when: openid_workaround is defined
sudo_user: "{{ edxapp_user }}"
......@@ -231,18 +240,18 @@
tags: deploy
- name: edxapp | ensure edxapp has started
supervisorctl: >
supervisorctl_local: >
state=started
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
name="edxapp:{{ item }}"
sudo_user: "{{ supervisor_service_user }}"
when: not devstack
when: celery_worker is not defined and not devstack
with_items: service_variants_enabled
tags: deploy
- name: edxapp | ensure edxapp_workers has started
supervisorctl: >
supervisorctl_local: >
name="edxapp_worker:{{ item.service_variant }}_{{ item.queue }}_{{ item.concurrency }}"
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
......@@ -68,15 +68,6 @@
mode=0750
with_items: service_variants_enabled
- name: edxapp | setup the edxapp env
notify:
- "edxapp | restart edxapp"
- "edxapp | restart edxapp_workers"
template: >
src=edxapp_env.j2 dest={{ edxapp_app_dir }}/edxapp_env
owner={{ edxapp_user }} group={{ common_web_user }}
mode=0644
- include: deploy.yml
- name: edxapp | create a symlink for venv python
......
......@@ -63,7 +63,7 @@
executable=/bin/bash
chdir={{ edxapp_code_dir }}
sudo_user: "{{ edxapp_user }}"
when: celery_worker is not defined
when: celery_worker is not defined and not devstack and item != "lms-preview"
with_items: service_variants_enabled
notify:
- "edxapp | restart edxapp"
......
# gunicorn
description "gunicorn server"
author "Calen Pennington <cpennington@mitx.mit.edu>"
start on started edxapp
stop on stopped edxapp
respawn
respawn limit 3 30
env PID=/var/tmp/lms.pid
[program:lms-preview]
{% if ansible_processor|length > 0 %}
env WORKERS={{ ansible_processor|length * worker_core_mult.lms_preview }}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% else %}
env WORKERS={{ worker_core_mult.lms_preview }}
command={{ edxapp_venv_dir }}/bin/gunicorn --preload -b {{ edxapp_lms_preview_gunicorn_host }}:{{ edxapp_lms_preview_gunicorn_port }} -w {{ worker_core_mult.lms_preview }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
{% endif %}
env PORT={{edxapp_lms_preview_gunicorn_port}}
env ADDRESS={{edxapp_lms_preview_gunicorn_host}}
env LANG=en_US.UTF-8
env DJANGO_SETTINGS_MODULE=lms.envs.aws
env SERVICE_VARIANT="lms-preview"
chdir {{edxapp_code_dir}}
setuid www-data
exec {{edxapp_venv_dir}}/bin/gunicorn --preload -b $ADDRESS:$PORT -w $WORKERS --timeout=300 --pythonpath={{edxapp_code_dir}} lms.wsgi
post-start script
while true
do
if $(curl -s -i localhost:$PORT/heartbeat | egrep -q '200 OK'); then
break;
else
sleep 1;
fi
done
end script
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment=PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms-preview"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
stopasgroup=true
......@@ -57,7 +57,3 @@ forum_services:
- {service: "mongo", host: "{{ FORUM_MONGO_HOST }}", port: "28017"}
- {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9200"}
- {service: "elasticsearch", host: "{{ forum_elasticsearch_host }}", port: "9300"}
# Skip supervisor tasks
# Used in local dev where supervisor isn't installed
devstack: False
---
- name: forum | restart the forum service
supervisorctl: >
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
- role: rbenv
# TODO: setting the rbenv ownership to
# the common_web_user is a workaround
......
......@@ -49,7 +49,7 @@
tags: deploy
- name: forum | ensure forum is started
supervisorctl: >
supervisorctl_local: >
name=forum
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
gluster_primary_ip: 127.0.0.1
gluster_peers:
gluster_volumes:
- path: /mnt/gfsv0
name: gfsv0
replicas: 2
cache_size: 128MB
security: "*"
mount_location: /mnt/data
---
# Install and configure simple glusterFS shared storage

- name: gluster | all | Install common packages
  apt: name={{ item }} state=present
  with_items:
    - glusterfs-client
    - glusterfs-common
    - nfs-common
  tags: gluster

- name: gluster | all | Install server packages
  apt: name=glusterfs-server state=present
  # Fixed: the membership test was missing the `in` operator, leaving a
  # malformed expression (compare the identical condition below).
  when: >
    "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
  tags: gluster

- name: gluster | all | enable server
  service: name=glusterfs-server state=started enabled=yes
  when: >
    "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
  tags: gluster

# Ignoring error below so that we can move the data folder and have it be a link
- name: gluster | all | create folders
  file: path={{ item.path }} state=directory
  with_items: gluster_volumes
  when: >
    "{{ ansible_default_ipv4.address }}" in "{{ gluster_peers|join(' ') }}"
  ignore_errors: yes
  tags: gluster

- name: gluster | primary | create peers
  command: gluster peer probe {{ item }}
  with_items: gluster_peers
  when: ansible_default_ipv4.address == gluster_primary_ip
  tags: gluster

- name: gluster | primary | create volumes
  command: gluster volume create {{ item.name }} replica {{ item.replicas }} transport tcp {% for server in gluster_peers %}{{ server }}:{{ item.path }} {% endfor %}
  with_items: gluster_volumes
  when: ansible_default_ipv4.address == gluster_primary_ip
  ignore_errors: yes # There should be better error checking here
  tags: gluster

- name: gluster | primary | start volumes
  command: gluster volume start {{ item.name }}
  with_items: gluster_volumes
  when: ansible_default_ipv4.address == gluster_primary_ip
  ignore_errors: yes # There should be better error checking here
  tags: gluster

- name: gluster | primary | set security
  command: gluster volume set {{ item.name }} auth.allow {{ item.security }}
  with_items: gluster_volumes
  when: ansible_default_ipv4.address == gluster_primary_ip
  tags: gluster

- name: gluster | primary | set performance cache
  command: gluster volume set {{ item.name }} performance.cache-size {{ item.cache_size }}
  with_items: gluster_volumes
  when: ansible_default_ipv4.address == gluster_primary_ip
  tags: gluster

- name: gluster | all | mount volume
  mount: >
    name={{ item.mount_location }}
    src={{ gluster_primary_ip }}:{{ item.name }}
    fstype=glusterfs
    state=mounted
    opts=defaults,_netdev
  with_items: gluster_volumes
  tags: gluster

# This is required due to an annoying bug in Ubuntu and gluster where it
# tries to mount the volume before the network stack is up and cannot
# resolve 127.0.0.1
- name: gluster | all | sleep mount
  lineinfile: >
    dest=/etc/rc.local
    line='sleep 5; /bin/mount -a'
    regexp='sleep 5; /bin/mount -a'
    insertbefore='exit 0'
  tags: gluster
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role haproxy
#
#
# vars are namespace with the module name.
#
haproxy_role_name: haproxy
#
# OS packages
#
haproxy_debian_pkgs: []
haproxy_redhat_pkgs: []
pkgs:
haproxy:
state: installed
haproxy_template_dir: .
haproxy_extra_global_config: ''
haproxy_default_config: |
log global
mode http
option httplog
option dontlognull
option redispatch
retries 3
maxconn 2000
contimeout 5000
clitimeout 50000
srvtimeout 50000
# Sample rabbitmq load balance config
# but this should likely get overidden with your
# desired applications
haproxy_applications:
- |
listen rabbitmq 127.0.0.1:5672
mode tcp
balance roundrobin
option tcplog
option tcpka
server rabbit01 172.23.128.10:5672 check inter 5000 rise 2 fall 3
server rabbit02 172.23.129.10:5672 backup check inter 5000 rise 2 fall 3
server rabbit03 172.23.130.10:5672 backup check inter 5000 rise 2 fall 3
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role haproxy
#
# Overview:
#
#
- name: haproxy | restart haproxy
service: name=haproxy state=restarted
- name: haproxy | reload haproxy
service: name=haproxy state=reloaded
- name: haproxy | restart rsyslog
service: name=rsyslog state=restarted
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role haproxy
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role haproxy
#
# Overview:
# Installs and configures haproxy for load balancing.
# HAProxy doesn't currently support included configuration
# so it allows for a configuration template to be overridden
# with a variable
# Install the haproxy package at the state given by pkgs.haproxy.state.
- name: haproxy | Install haproxy
  apt: pkg=haproxy state={{ pkgs.haproxy.state }}
  notify: haproxy | restart haproxy
# Render the main config. The template directory is a variable so a
# calling play can supply its own haproxy.cfg.j2.
- name: haproxy | Server configuration file
  template: >
    src={{ haproxy_template_dir }}/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
    owner=root group=root mode=0644
  notify: haproxy | reload haproxy
# Set ENABLED=1 in /etc/default/haproxy so the init script will run it.
- name: haproxy | Enabled in default
  lineinfile: dest=/etc/default/haproxy regexp=^ENABLED=.$ line=ENABLED=1
  notify: haproxy | restart haproxy
# Rotate haproxy's log files.
- name: haproxy | install logrotate
  template: src=haproxy.logrotate.j2 dest=/etc/logrotate.d/haproxy mode=0644
# Route haproxy's syslog output into its own log files.
- name: haproxy | install rsyslog conf
  template: src=haproxy.rsyslog.j2 dest=/etc/rsyslog.d/haproxy.conf mode=0644
  notify: haproxy | restart rsyslog
# Ensure the service is running even when no handler fired this run.
- name: haproxy | make sure haproxy has started
  service: name=haproxy state=started
# this config needs haproxy-1.1.28 or haproxy-1.2.1
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
#log loghost local0 info
maxconn 4096
#chroot /usr/share/haproxy
user haproxy
group haproxy
daemon
#debug
#quiet
{{ haproxy_extra_global_config }}
defaults
{{ haproxy_default_config }}
{%- for app in haproxy_applications -%}
{{ app }}
{%- endfor -%}
{{ COMMON_LOG_DIR }}/haproxy/*.log {
weekly
missingok
rotate 7
compress
delaycompress
notifempty
create 640 root adm
sharedscripts
postrotate
/etc/init.d/haproxy reload > /dev/null
endscript
}
if ($programname == 'haproxy' and $syslogseverity-text == 'info') then -{{ COMMON_LOG_DIR }}/haproxy/haproxy-info.log
& ~
if ($programname == 'haproxy' and $syslogseverity-text == 'notice') then -{{ COMMON_LOG_DIR }}/haproxy/haproxy-notice.log
& ~
......@@ -68,3 +68,6 @@ jenkins_debian_pkgs:
- maven
- daemon
- python-pycurl
# Extra packages needed for a specific jenkins instance.
JENKINS_EXTRA_PKGS: []
......@@ -7,6 +7,13 @@
tags:
- jenkins
- name: jenkins_master | install jenkins extra system packages
apt:
pkg={{','.join(JENKINS_EXTRA_PKGS)}}
state=present update_cache=yes
tags:
- jenkins
- name: jenkins_master | create jenkins group
group: name={{ jenkins_group }} state=present
......
......@@ -10,5 +10,4 @@
- include: system.yml
- include: python.yml
- include: browsers.yml
- include: jscover.yml
......@@ -18,15 +18,25 @@
- name: terminating single instance
local_action:
module: ec2
module: ec2_local
state: 'absent'
region: "{{ region }}"
instance_ids: ${tag_lookup.instance_ids}
when: terminate_instance == true and tag_lookup.instance_ids|length == 1
- name: deregister instance from an an elb if it was in one
local_action:
module: ec2_elb
region: "{{ region }}"
instance_id: "{{ tag_lookup.instance_ids[0] }}"
ec2_elbs:
- "{{ elb }}"
state: absent
when: terminate_instance == true and elb and tag_lookup.instance_ids|length == 1
- name: launch_ec2 | Launch ec2 instance
local_action:
module: ec2
module: ec2_local
keypair: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
......@@ -35,6 +45,8 @@
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
root_ebs_size: "{{ root_ebs_size }}"
zone: "{{ zone }}"
instance_profile_name: "{{ instance_profile_name }}"
register: ec2
- name: launch_ec2 | Add DNS name
......@@ -90,5 +102,3 @@
delay=60
timeout=320
with_items: "{{ ec2.instances }}"
#
# Update config for a legacy ora installation.
#
# This role requires that ora_app_dir and ora_user both be defined.
# There is no default for them.
#
# Guard clauses: fail fast with a helpful message when a required
# variable was not supplied by the calling play.
- fail: msg="ora_app_dir not defined. eg. /edx/app/ora, /opt/wwc"
  when: ora_app_dir is not defined
- fail: msg="ora_user not defined. eg. ora, www-data"
  when: ora_user is not defined
- fail: msg="COMMON_ENV_TYPE not defined. eg. stage, prod"
  when: COMMON_ENV_TYPE is not defined
- fail: msg="secure_dir not defined. This is a path to the secure ora config file."
  when: secure_dir is not defined
# Copy the pre-rendered application config out of the secure repo;
# the result is registered so we only restart on actual changes.
- name: legacy_ora | create ora application config
  copy:
    src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json
    dest={{ora_app_dir}}/env.json
  sudo_user: "{{ ora_user }}"
  register: env_state
  tags:
  - deploy
# Copy the pre-rendered auth/credentials file out of the secure repo.
- name: legacy_ora | create ora auth file
  copy:
    src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json
    dest={{ora_app_dir}}/auth.json
  sudo_user: "{{ ora_user }}"
  register: auth_state
  tags:
  - deploy
# Restart ORA Services
# Only bounce the services when one of the config files changed.
- name: legacy_ora | restart edx-ora
  service:
    name=edx-ora
    state=restarted
  when: env_state.changed or auth_state.changed
- name: legacy_ora | restart edx-ora-celery
  service:
    name=edx-ora-celery
    state=restarted
  when: env_state.changed or auth_state.changed
#! /usr/bin/env bash
# {{ ansible_managed }}
source "{{ item.home }}/{{ item.env }}"
# If X11 forwarding is enabled, then use the DISPLAY value
# already set and use the X11 session cookie
if [ -n "$DISPLAY" ]; then
export XAUTHORITY="{{ localdev_xauthority }}"
# Otherwise, configure the display to use the virtual frame buffer
else
export DISPLAY="{{ localdev_xvfb_display }}"
fi
# Default to the code repository
cd "{{ item.home }}/{{ item.repo }}"
---
localdev_user: "vagrant"
localdev_home: "/home/vagrant"
# X11 session cookie shared with the application accounts.
localdev_xauthority: "{{ localdev_home }}/.Xauthority"
# Virtual framebuffer display used when no X11 forwarding is active.
localdev_xvfb_display: ":1"
# Accounts that get a login shell script, bashrc and gitconfig.
# NOTE(review): the first two entries below have no env/repo keys,
# which the app_bashrc.j2 template dereferences (item.env, item.repo);
# this looks like a merge artifact — confirm they should be removed.
localdev_accounts:
  - { user: "{{ edxapp_user}}", home: "{{ edxapp_app_dir }}" }
  - { user: "{{ forum_user }}", home: "{{ forum_app_dir }}" }
  - { user: "{{ edxapp_user}}", home: "{{ edxapp_app_dir }}",
    env: "edxapp_env", repo: "edx-platform" }
  - { user: "{{ forum_user }}",home: "{{ forum_app_dir }}",
    env: "forum_env", repo: "cs_comments_service" }
  - { user: "{{ ora_user }}", home: "{{ ora_app_dir }}",
    env: "ora_env", repo: "ora" }
localdev_env:
  DISPLAY: "{{ browser_xvfb_display }}"
# Helpful system packages for local dev
local_dev_pkgs:
  - vim
  - emacs
  - xorg
  - openbox
......@@ -7,12 +7,14 @@
- name: local_dev | set login shell for app accounts
user: name={{ item.user }} shell="/bin/bash"
with_items: "{{ localdev_accounts }}"
tags: deploy
# Ensure forum user has permissions to access .gem and .rbenv
# This is a little twisty: the forum role sets the owner and group to www-data
# So we add the forum user to the www-data group and give group write permissions
- name: local_dev | add forum user to www-data group
user: name={{ forum_user }} groups={{ common_web_group }} append=yes
tags: deploy
- name: local_dev | set forum rbenv and gem permissions
file:
......@@ -20,11 +22,12 @@
with_items:
- "{{ forum_app_dir }}/.gem"
- "{{ forum_app_dir }}/.rbenv"
tags: deploy
# Create scripts to configure environment
- name: local_dev | create login scripts
template:
src={{ item.user }}_bashrc.j2 dest={{ item.home }}/.bashrc
src=app_bashrc.j2 dest={{ item.home }}/.bashrc
owner={{ item.user }} mode=755
with_items: "{{ localdev_accounts }}"
......@@ -35,3 +38,21 @@
src=gitconfig dest={{ item.home }}/.gitconfig
owner={{ item.user }} mode=700
with_items: "{{ localdev_accounts }}"
# Configure X11 for application users
- name: local_dev | preserve DISPLAY for sudo
copy:
src=x11_display dest=/etc/sudoers.d/x11_display
owner=root group=root mode=0440
- name: local_dev | login share X11 auth to app users
template:
src=share_x11.j2 dest={{ localdev_home }}/share_x11
owner={{ localdev_user }} mode=0700
- name: local_dev | update bashrc with X11 share script
lineinfile:
dest={{ localdev_home }}/.bashrc
regexp=". {{ localdev_home }}/share_x11"
line=". {{ localdev_home }}/share_x11"
state=present
#! /usr/bin/env bash
# {{ ansible_managed }}
# Load the account's service environment file (e.g. ora_env).
source "{{ item.home }}/{{ item.env }}"
# If X11 forwarding is enabled, then use the DISPLAY value
# already set and use the X11 session cookie
if [ -n "$DISPLAY" ]; then
    export XAUTHORITY="{{ localdev_xauthority }}"
# Otherwise, configure the display to use the virtual frame buffer
else
    export DISPLAY="{{ localdev_xvfb_display }}"
fi
# Start the login shell in the account's code checkout.
cd "{{ item.home }}/{{ item.repo }}"
#! /usr/bin/env bash
# {{ ansible_managed }}
source "{{ edxapp_app_dir }}/edxapp_env"
export DISPLAY="{{ browser_xvfb_display }}"
cd $HOME/edx-platform
#! /usr/bin/env bash
# {{ ansible_managed }}
source "{{ forum_app_dir }}/forum_env"
export DISPLAY="{{ browser_xvfb_display }}"
cd $HOME/cs_comments_service
#!/usr/bin/env bash
# Change permissions on the X11 session cookie
# so application users can use the same X11 session.
# This is very insecure and should *only* be used for local VMs.
if [ -f {{ localdev_xauthority }} ]; then
chmod og+r {{ localdev_xauthority }}
fi
......@@ -35,3 +35,7 @@ mongo_create_users: !!null
# cloudformation stack, this group name can be used to pull out
# the name of the stack the mongo server resides in.
mongo_aws_stack_name: "tag_aws_cloudformation_stack-name_"
# In environments that do not require durability (devstack / Jenkins)
# you can disable the journal to reduce disk usage
mongo_enable_journal: True
---
dependencies:
- common
......@@ -19,7 +19,12 @@ bind_ip = {{ MONGO_BIND_IP }}
port = {{ mongo_port }}
# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
{% if mongo_enable_journal %}
journal=true
{% else %}
journal=false
nojournal=true
{% endif %}
{% if MONGO_CLUSTERED %}
keyFile = {{ mongo_key_file }}
......
# Variables for nginx role
---
# Set global htaccess for nginx
NGINX_HTPASSWD_USER: !!null
NGINX_HTPASSWD_PASS: !!null
nginx_app_dir: "{{ COMMON_APP_DIR }}/nginx"
nginx_data_dir: "{{ COMMON_DATA_DIR }}/nginx"
......@@ -8,10 +11,11 @@ nginx_log_dir: "{{ COMMON_LOG_DIR }}/nginx"
nginx_sites_available_dir: "{{ nginx_app_dir }}/sites-available"
nginx_sites_enabled_dir: "{{ nginx_app_dir }}/sites-enabled"
nginx_user: root
nginx_htpasswd_file: "{{ nginx_app_dir }}/nginx.htpasswd"
pkgs:
nginx:
state: installed
nginx_debian_pkgs:
- nginx
- python-passlib
nginx_xserver_gunicorn_hosts:
- 127.0.0.1
......@@ -36,7 +40,3 @@ nginx_cfg:
# nginx configuration
version_html: "{{ nginx_app_dir }}/versions.html"
version_json: "{{ nginx_app_dir }}/versions.json"
# default htpasswd contents set to edx/edx
# this value can be overridden in vars/secure/<group>.yml
htpasswd: |
edx:$apr1$2gWcIvlc$Nu7b/KTwd5HoIDEkSPNUk/
---
dependencies:
- common
......@@ -25,8 +25,8 @@
- "{{ nginx_log_dir }}"
notify: nginx | restart nginx
- name: nginx | Install nginx
apt: pkg=nginx state={{ pkgs.nginx.state }}
- name: nginx | Install nginx packages
apt: pkg={{','.join(nginx_debian_pkgs)}} state=present
notify: nginx | restart nginx
- name: nginx | Server configuration file
......@@ -63,10 +63,12 @@
notify: nginx | reload nginx
with_items: nginx_sites
- name: nginx | Write out default htpasswd file
copy: >
content={{ nginx_cfg.htpasswd }} dest={{ nginx_app_dir }}/nginx.htpasswd
owner=www-data group=www-data mode=0600
- name: nginx | Write out htpasswd file
htpasswd: >
name={{ NGINX_HTPASSWD_USER }}
password={{ NGINX_HTPASSWD_PASS }}
path={{ nginx_htpasswd_file }}
when: NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS
- name: nginx | Create nginx log file location (just in case)
file: >
......
{% if NGINX_HTPASSWD_USER and NGINX_HTPASSWD_PASS %}
satisfy any;
allow 127.0.0.1;
deny all;
auth_basic "Restricted";
auth_basic_user_file {{ nginx_app_dir }}/nginx.htpasswd;
auth_basic_user_file {{ nginx_htpasswd_file }};
index index.html
proxy_set_header X-Forwarded-Proto https;
{% endif %}
......@@ -32,9 +32,7 @@ server {
}
location / {
{% if EDXAPP_CMS_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_cms_app;
}
......
......@@ -20,9 +20,7 @@ server {
}
location / {
{% if DISCERN_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
......
# Put in place by ansible
{{ nginx_log_dir }}/access.log {
{{ nginx_log_dir }}/*access.log {
create 0640 www-data adm
compress
delaycompress
......
# Put in place by ansible
{{ nginx_log_dir }}/error.log {
{{ nginx_log_dir }}/*error.log {
create 0640 www-data adm
compress
delaycompress
......
......@@ -30,9 +30,7 @@ server {
location / {
{% if EDXAPP_LMS_PREVIEW_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_lms-preview_app;
}
......
......@@ -31,10 +31,7 @@ server {
}
location / {
{% if EDXAPP_LMS_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_lms_app;
}
......
......@@ -36,6 +36,11 @@ http {
log_format p_combined '$http_x_forwarded_for - $remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent $request_time '
'"$http_referer" "$http_user_agent"';
log_format ssl_combined '$remote_addr - $ssl_client_s_dn [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
access_log {{ nginx_log_dir }}/access.log p_combined;
error_log {{ nginx_log_dir }}/error.log;
......
......@@ -9,9 +9,7 @@ server {
location / {
{% if ORA_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_app;
}
......
......@@ -8,9 +8,7 @@ server {
listen {{ XQUEUE_NGINX_PORT }} default_server;
location / {
{% if XQUEUE_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_app;
}
......
......@@ -18,9 +18,7 @@ server {
listen {{ XSERVER_NGINX_PORT }} default_server;
location / {
{% if XSERVER_BASIC_AUTH %}
{% include "basic-auth.j2" %}
{% endif %}
try_files $uri @proxy_to_app;
}
......
......@@ -14,6 +14,14 @@ NOTIFIER_LOG_LEVEL: "INFO"
NOTIFIER_RSYSLOG_ENABLED: "yes"
NOTIFIER_DIGEST_TASK_INTERVAL: "1440"
NOTIFIER_DIGEST_EMAIL_SENDER: "notifications@example.com"
NOTIFIER_DIGEST_EMAIL_SUBJECT: "Daily Discussion Digest"
NOTIFIER_DIGEST_EMAIL_TITLE: "Discussion Digest"
NOTIFIER_DIGEST_EMAIL_DESCRIPTION: "A digest of unread content from course discussions you are following."
NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS: ""
NOTIFIER_LANGUAGE: ""
NOTIFIER_ENV: "Development"
NOTIFIER_EMAIL_BACKEND: "console"
......@@ -23,7 +31,6 @@ NOTIFIER_EMAIL_USER: ""
NOTIFIER_EMAIL_PASS: ""
NOTIFIER_EMAIL_USE_TLS: "False"
NOTIFIER_EMAIL_DOMAIN: "notifications.edx.org"
NOTIFIER_EMAIL_REWRITE_RECIPIENT: ""
NOTIFIER_LMS_URL_BASE: "http://localhost:8000"
......@@ -34,8 +41,8 @@ NOTIFIER_COMMENT_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
NOTIFIER_USER_SERVICE_BASE: "http://localhost:8000"
NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: "guido"
NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: "vanrossum"
NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: !!null
NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: !!null
NOTIFIER_CELERY_BROKER_URL: "django://"
NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor"
......@@ -61,6 +68,12 @@ notifier_debian_pkgs:
# the env variable for the supervisor job definition.
#
notifier_env_vars:
FORUM_DIGEST_EMAIL_SENDER: $NOTIFIER_DIGEST_EMAIL_SENDER
FORUM_DIGEST_EMAIL_SUBJECT: $NOTIFIER_DIGEST_EMAIL_SUBJECT
FORUM_DIGEST_EMAIL_TITLE: $NOTIFIER_DIGEST_EMAIL_TITLE
FORUM_DIGEST_EMAIL_DESCRIPTION: $NOTIFIER_DIGEST_EMAIL_DESCRIPTION
EMAIL_SENDER_POSTAL_ADDRESS: $NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS
NOTIFIER_LANGUAGE: $NOTIFIER_LANGUAGE
NOTIFIER_ENV: $NOTIFIER_ENV
NOTIFIER_DB_DIR: $NOTIFIER_DB_DIR
EMAIL_BACKEND: $NOTIFIER_EMAIL_BACKEND
......@@ -69,7 +82,6 @@ notifier_env_vars:
EMAIL_HOST_USER: $NOTIFIER_EMAIL_USER
EMAIL_HOST_PASSWORD: $NOTIFIER_EMAIL_PASS
EMAIL_USE_TLS: $NOTIFIER_EMAIL_USE_TLS
EMAIL_DOMAIN: $NOTIFIER_EMAIL_DOMAIN
EMAIL_REWRITE_RECIPIENT: $NOTIFIER_EMAIL_REWRITE_RECIPIENT
LMS_URL_BASE: $NOTIFIER_LMS_URL_BASE
SECRET_KEY: $NOTIFIER_LMS_SECRET_KEY
......
---
- name: notifier | restart notifier-scheduler
supervisorctl: >
supervisorctl_local: >
name=notifier-scheduler
state=restarted
config={{ supervisor_cfg }}
......@@ -9,7 +9,7 @@
tags: deploy
- name: notifier | restart notifier-celery-workers
supervisorctl: >
supervisorctl_local: >
name=notifier-celery-workers
state=restarted
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
# vars for the ORA role
---
ORA_NGINX_PORT: 18060
ORA_BASIC_AUTH: False
ora_app_dir: "{{ COMMON_APP_DIR }}/ora"
ora_code_dir: "{{ ora_app_dir }}/ora"
......@@ -10,17 +9,21 @@ ora_venvs_dir: "{{ ora_app_dir }}/venvs"
ora_venv_dir: "{{ ora_venvs_dir }}/ora"
ora_venv_bin: "{{ ora_venv_dir }}/bin"
ora_user: "ora"
ora_deploy_path: "{{ ora_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
ora_nltk_data_dir: "{{ ora_data_dir}}/nltk_data"
ora_nltk_download_url: http://edx-static.s3.amazonaws.com/nltk/nltk-data-20131113.tar.gz
ora_nltk_tmp_file: "{{ ora_data_dir }}/nltk.tmp.tar.tz"
ora_source_repo: https://github.com/edx/edx-ora.git
ora_version: 'HEAD'
ora_version: 'master'
ora_pre_requirements_file: "{{ ora_code_dir }}/pre-requirements.txt"
ora_post_requirements_file: "{{ ora_code_dir }}/requirements.txt"
ora_ease_venv_dir: "{{ ora_venv_dir }}"
ora_ease_code_dir: "{{ ora_app_dir }}/ease"
ora_ease_source_repo: https://github.com/edx/ease.git
ora_ease_version: 'HEAD'
ora_ease_version: 'master'
ora_ease_pre_requirements_file: "{{ ora_ease_code_dir }}/pre-requirements.txt"
ora_ease_post_requirements_file: "{{ ora_ease_code_dir }}/requirements.txt"
......@@ -31,12 +34,12 @@ ORA_TIME_BETWEEN_XQUEUE_PULLS: 5
ORA_TIME_BETWEEN_EXPIRED_CHECKS: 1800
ORA_GRADER_SETTINGS_DIRECTORY: "grader_settings/"
ORA_MAX_NUMBER_OF_TIMES_TO_RETRY_GRADING: 10
ORA_MIN_TO_USE_ML: 100
ORA_MIN_TO_USE_ML: 4
ORA_ML_PATH: "machine-learning/"
ORA_ML_MODEL_PATH: "ml_models/"
ORA_TIME_BETWEEN_ML_CREATOR_CHECKS: 300
ORA_TIME_BETWEEN_ML_GRADER_CHECKS: 5
ORA_MIN_TO_USE_PEER: 10
ORA_MIN_TO_USE_PEER: 4
ORA_PEER_GRADER_COUNT: 3
ORA_PEER_GRADER_MINIMUM_TO_CALIBRATE: 3
ORA_PEER_GRADER_MAXIMUM_TO_CALIBRATE: 6
......@@ -55,6 +58,10 @@ ORA_USERS:
"lms": "password"
ORA_XQUEUE_URL: "http://localhost:18040"
ORA_XQUEUE_DJANGO_USER: "lms"
ORA_XQUEUE_DJANGO_PASSWORD: "password"
ORA_XQUEUE_BASIC_AUTH_USER: "edx"
ORA_XQUEUE_BASIC_AUTH_PASSWORD: "edx"
ORA_DJANGO_USER: "lms"
ORA_DJANGO_PASSWORD: "password"
......@@ -112,9 +119,9 @@ ora_auth_config:
USERS: $ORA_USERS
XQUEUE_INTERFACE:
django_auth:
username: $XQUEUE_DJANGO_USER
password: $XQUEUE_DJANGO_PASSWORD
basic_auth: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD]
username: $ORA_XQUEUE_DJANGO_USER
password: $ORA_XQUEUE_DJANGO_PASSWORD
basic_auth: [ $ORA_XQUEUE_BASIC_AUTH_USER, $ORA_XQUEUE_BASIC_AUTH_PASSWORD ]
url: $ORA_XQUEUE_URL
GRADING_CONTROLLER_INTERFACE:
django_auth:
......@@ -132,6 +139,12 @@ ora_auth_config:
AWS_ACCESS_KEY_ID: $ORA_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $ORA_AWS_SECRET_ACCESS_KEY
ora_environment:
SERVICE_VARIANT: ora
NLTK_DATA: $ora_nltk_data_dir
LANG: $ORA_LANG
PATH: $ora_deploy_path
ora_debian_pkgs:
- python-software-properties
- pkg-config
......
---
- name: ora | restart ora
supervisorctl: >
supervisorctl_local: >
name=ora
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not devstack
tags: deploy
- name: ora | restart ora_celery
supervisorctl: >
supervisorctl_local: >
name=ora_celery
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not devstack
tags: deploy
---
dependencies:
- supervisor
......@@ -6,6 +6,7 @@
- ora | restart ora
- ora | restart ora_celery
with_items: ['ora', 'ora_celery']
when: not devstack
tags:
- deploy
......@@ -23,6 +24,17 @@
tags:
- deploy
- name: ora | setup the ora env
notify:
- "ora | restart ora"
- "ora | restart ora_celery"
template: >
src=ora_env.j2 dest={{ ora_app_dir }}/ora_env
owner={{ ora_user }} group={{ common_web_user }}
mode=0644
tags:
- deploy
# Do A Checkout
- name: ora | git checkout ora repo into {{ ora_app_dir }}
git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }}
......@@ -94,21 +106,24 @@
- name: ora | update supervisor configuration
shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
register: supervisor_update
when: not devstack
changed_when: supervisor_update.stdout != ""
tags: deploy
- name: ora | ensure ora is started
supervisorctl: >
supervisorctl_local: >
name=ora
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
tags: deploy
- name: ora | ensure ora_celery is started
supervisorctl: >
supervisorctl_local: >
name=ora_celery
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=started
when: not devstack
tags: deploy
......@@ -48,20 +48,15 @@
tags:
- deploy
#Needed for the ease package to work
- name: ora | check for the existence of nltk data taggers/maxent_treebank_pos_tagger/english.pickle
shell: "[ -f {{ ora_nltk_data_dir }}/taggers/maxent_treebank_pos_tagger/english.pickle ] && echo 'Found' || echo ''"
register: ora_nltk_data_installed
notify:
- ora | restart ora
- ora | restart ora_celery
tags:
- deploy
- name: ora | install nltk data using rendered shell script
command: "{{ora_ease_venv_dir}}/bin/python -m nltk.downloader -d {{ ora_nltk_data_dir }} all"
when: ora_nltk_data_installed.stdout != "Found"
- name: ora | download and install nltk
shell: |
set -e
curl -o {{ ora_nltk_tmp_file }} {{ ora_nltk_download_url }}
tar zxf {{ ora_nltk_tmp_file }}
rm -f {{ ora_nltk_tmp_file }}
touch {{ ora_nltk_download_url|basename }}-installed
creates={{ ora_data_dir }}/{{ ora_nltk_download_url|basename }}-installed
chdir={{ ora_data_dir }}
sudo_user: "{{ common_web_user }}"
notify:
- ora | restart ora
......
......@@ -56,5 +56,3 @@
with_items:
- python
- pip
......@@ -5,7 +5,7 @@ command={{ ora_venv_bin }}/python {{ ora_code_dir }}/manage.py celeryd --logleve
user={{ common_web_user }}
directory={{ ora_code_dir }}
environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora
environment=DJANGO_SETTINGS_MODULE=edx_ora.aws,SERVICE_VARIANT=ora,NLTK_DATA={{ ora_nltk_data_dir }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
......
# {{ ansible_managed }}
{% for name,value in ora_environment.items() %}
{%- if value %}
export {{ name }}="{{ value }}"
{% endif %}
{% endfor %}
......@@ -21,7 +21,7 @@
# in case there are lingering processes, ignore errors
# silently
- name: rabbitmq | send sigterm to any running rabbitmq processes
shell: killall beam rabbitmq-server epmd || true
shell: pkill -u rabbitmq || true
# Defaulting to /var/lib/rabbitmq
- name: rabbitmq | create cookie directory
......
......@@ -10,6 +10,7 @@ rbenv_bin: "{{ rbenv_dir }}/.rbenv/bin"
rbenv_shims: "{{ rbenv_root }}/shims"
rbenv_path: "{{ rbenv_bin }}:{{ rbenv_shims }}:{{ rbenv_gem_bin }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
rbenv_debian_pkgs:
- curl
- build-essential
- libcurl4-openssl-dev
- libreadline-dev
......
......@@ -2,7 +2,6 @@
# when the role is included
---
XQUEUE_NGINX_PORT: 18040
XQUEUE_BASIC_AUTH: False
xqueue_app_dir: "{{ COMMON_APP_DIR }}/xqueue"
xqueue_code_dir: "{{ xqueue_app_dir }}/xqueue"
......
- name: xqueue | restart xqueue
supervisorctl: >
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
......@@ -34,7 +34,7 @@
# Install the python pre requirements into {{ xqueue_venv_dir }}
- name : install python pre-requirements
- name : xqueue | install python pre-requirements
pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}"
notify:
......@@ -43,7 +43,7 @@
- deploy
# Install the python post requirements into {{ xqueue_venv_dir }}
- name : install python post-requirements
- name : xqueue | install python post-requirements
pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
sudo_user: "{{ xqueue_user }}"
notify:
......@@ -83,7 +83,7 @@
tags: deploy
- name: xqueue | ensure xqueue, consumer is running
supervisorctl: >
supervisorctl_local: >
name={{ item }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
......@@ -2,7 +2,6 @@
---
XSERVER_NGINX_PORT: 18050
XSERVER_BASIC_AUTH: False
XSERVER_RUN_URL: ''
XSERVER_LOGGING_ENV: 'sandbox'
......
......@@ -15,7 +15,7 @@
#
- name: xserver | restart xserver
supervisorctl: >
supervisorctl_local: >
name=xserver
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
---
dependencies:
- supervisor
......@@ -76,7 +76,7 @@
tags: deploy
- name: xserver | ensure xserver is started
supervisorctl: >
supervisorctl_local: >
name=xserver
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
......
- include: edxapp.yml
# ansible reads $ANSIBLE_CONFIG, ansible.cfg, ~/.ansible.cfg or /etc/ansible/ansible.cfg
[defaults]
# Always have these for using the configuration repo
jinja2_extensions=jinja2.ext.do
hash_behaviour=merge
# These are environment-specific defaults
forks=10
#forks=1
log_path=stage-edx-ansible.log
transport=ssh
hostfile=./ec2.py
extra_vars='key=deployment name=edx-stage group=edx-stage region=us-west-1'
user=ubuntu
[ssh_connection]
# example from https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg
#ssh_args=-o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
ssh_args=-F stage-ssh-config
scp_if_ssh=True
#### edx-stage VPC
Host 54.241.183.3
#Host ec2-54-241-183-3.us-west-1.compute.amazonaws.com
#Host vpc-jumpbox
#HostName ec2-54-241-183-3.us-west-1.compute.amazonaws.com
HostName 54.241.183.3
User ubuntu
ForwardAgent yes
Host *.us-west-1.compute.internal
User ubuntu
ForwardAgent yes
#ProxyCommand ssh -W %h:%p ec2-54-241-183-3.us-west-1.compute.amazonaws.com
#ProxyCommand ssh -W %h:%p vpc-jumpbox
ProxyCommand ssh -W %h:%p ubuntu@54.241.183.3
Host *
ForwardAgent yes
SendEnv LANG LC_*
HashKnownHosts yes
GSSAPIAuthentication yes
GSSAPIDelegateCredentials no
# Devstack play: provisions a full local development stack (edxapp,
# forum, ora, mongo, elasticsearch) plus browser/X11 tooling.
- name: Configure instance(s)
  # NOTE(review): duplicate "hosts" keys — looks like a merge artifact;
  # most YAML parsers keep the later value ("all"). Confirm whether the
  # "vagrant" line should be dropped.
  hosts: vagrant
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    migrate_db: "yes"
    openid_workaround: True
    devstack: True
    # NOTE(review): edx_platform_commit vs edx_platform_version looks
    # like an old/new pair from a variable rename — confirm which the
    # edxapp role actually consumes.
    edx_platform_commit: 'master'
    edx_platform_version: 'master'
    # Devstack does not need durability; disabling the mongo journal
    # reduces disk usage (see mongo role defaults).
    mongo_enable_journal: False
    EDXAPP_NO_PREREQ_INSTALL: 0
  vars_files:
    - "group_vars/all"
  roles:
    - common
    - edx_ansible
    - edxlocal
    - mongo
    - edxapp
    - oraclejdk
    - elasticsearch
    - forum
    - ora
    - browsers
    - local_dev
- name: Configure instance(s)
hosts: vagrant
hosts: all
sudo: True
gather_facts: True
vars:
migrate_db: "yes"
openid_workaround: True
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_commit: 'master'
edx_platform_version: 'master'
vars_files:
- "group_vars/all"
roles:
- edx_ansible
- gh_users
- common
- role: nginx
nginx_sites:
- cms
......@@ -19,7 +19,6 @@
- ora
- xqueue
- edxlocal
- supervisor
- mongo
- edxapp
- demo
......@@ -31,3 +30,4 @@
- { role: "xqueue", update_users: True }
- ora
- discern
- edx_ansible
# Vagrant play: single VM running LMS, CMS and LMS-preview behind
# nginx, with local mongo/mysql and supervisor-managed processes.
- name: Configure instance(s)
  hosts: vagrant
  sudo: True
  gather_facts: True
  vars:
    migrate_db: "yes"
    openid_workaround: True
  vars_files:
    - "group_vars/all"
  roles:
    - common
    - role: nginx
      nginx_sites:
      - lms
      - cms
      - lms-preview
    - edxlocal
    - supervisor
    - mongo
    # LMS served on 18030; the XML (legacy course) site on port 80.
    - { role: 'edxapp', EDXAPP_LMS_NGINX_PORT: 18030, EDXAPP_LMS_XML_NGINX_PORT: 80 }
# Vagrant play: single VM with edxapp plus a second edxapp instance
# configured as a celery worker, and the forum with its dependencies
# (oraclejdk, elasticsearch).
- name: Configure instance(s)
  hosts: vagrant
  sudo: True
  gather_facts: True
  vars:
    migrate_db: "yes"
    openid_workaround: True
  vars_files:
    - "group_vars/all"
  roles:
    - common
    - role: nginx
      nginx_sites:
      - lms
      - cms
      - lms-preview
    - edxlocal
    - supervisor
    - mongo
    - edxapp
    - oraclejdk
    - elasticsearch
    # Second edxapp pass runs the celery worker processes.
    - { role: 'edxapp', celery_worker: True }
    - forum
Fabric==1.5.1
Jinja2==2.6
Jinja2==2.7.1
MarkupSafe==0.18
PyYAML==3.10
WebOb==1.2.3
ansible==1.3.2
argparse==1.2.1
beautifulsoup4==4.1.3
boto==2.10.0
cloudformation==0.0.0
decorator==3.4.0
distribute==0.6.30
docopt==0.6.1
dogapi==1.2.3
ipython==0.13.1
jenkinsapi==0.1.11
lxml==3.1beta1
newrelic==1.10.2.38
path.py==3.0.1
pingdom==0.2.0
pycrypto==2.6
pyparsing==1.5.6
pyrelic==0.2.0
python-dateutil==2.1
requests==1.1.0
schema==0.1.1
simplejson==3.3.0
simples3==1.0-alpha
six==1.2.0
-e git+https://github.com/bos/statprof.py.git@a17f7923b102c9039763583be9e377e8422e8f5f#egg=statprof-dev
ujson==1.30
ecdsa==0.10
paramiko==1.12.0
pycrypto==2.6.1
wsgiref==0.1.2
ansible==1.3.2
GitPython==0.3.2.RC1
pymongo==2.4.1
......@@ -18,6 +18,7 @@
# - environment
# - name_tag
export PYTHONUNBUFFERED=1
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $WORKSPACE ]]; then
......@@ -27,6 +28,10 @@ else
source "$WORKSPACE/util/jenkins/ascii-convert.sh"
fi
if [[ -z $static_url_base ]]; then
static_url_base="/static"
fi
if [[ -z $github_username ]]; then
github_username=$BUILD_USER_ID
fi
......@@ -38,6 +43,18 @@ fi
extra_vars="/var/tmp/extra-vars-$$.yml"
# Default the AWS region when the Jenkins job did not supply one.
if [[ -z $region ]]; then
  # Fix: the canonical region name is "us-east-1"; the previous default
  # "us-east1" is not a valid AWS region and breaks every ec2/boto call
  # (note the zone default below is "us-east-1b").
  region="us-east-1"
fi
if [[ -z $zone ]]; then
zone="us-east-1b"
fi
if [[ -z $elb ]]; then
elb="false"
fi
if [[ -z $dns_name ]]; then
dns_name=$github_username
fi
......@@ -75,6 +92,15 @@ fi
cd playbooks/edx-east
if [[ $basic_auth == "true" ]]; then
# vars specific to provisioning added to $extra-vars
cat << EOF_AUTH >> $extra_vars
NGINX_HTPASSWD_USER: $auth_user
NGINX_HTPASSWD_PASS: $auth_pass
EOF_AUTH
fi
if [[ $recreate == "true" ]]; then
# vars specific to provisioning added to $extra-vars
cat << EOF >> $extra_vars
......@@ -84,6 +110,7 @@ instance_type: $instance_type
security_group: $security_group
ami: $ami
region: $region
zone: $zone
instance_tags: '{"environment": "$environment", "github_username": "$github_username", "Name": "$name_tag", "source": "jenkins", "owner": "$BUILD_USER"}'
root_ebs_size: $root_ebs_size
name_tag: $name_tag
......@@ -92,7 +119,9 @@ gh_users:
dns_zone: $dns_zone
rabbitmq_refresh: True
GH_USERS_PROMPT: '[$name_tag] '
elb: $elb
EOF
cat $extra_vars
# run the tasks to launch an ec2 instance from AMI
ansible-playbook edx_provision.yml -i inventory.ini -e "@${extra_vars}" --user ubuntu
......@@ -100,7 +129,7 @@ EOF
if [[ $server_type == "full_edx_installation" ]]; then
# additional tasks that need to be run if the
# entire edx stack is brought up from an AMI
ansible-playbook deploy_rabbitmq.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
ansible-playbook rabbitmq.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
ansible-playbook restart_supervisor.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu
fi
fi
......@@ -124,7 +153,7 @@ fi
# Run deploy tasks for the roles selected
for i in "${!deploy[@]}"; do
if [[ ${deploy[$i]} == "true" ]]; then
ansible-playbook deploy_${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy
ansible-playbook ${i}.yml -i "${deploy_host}," -e "@${extra_vars}" --user ubuntu --tags deploy
fi
done
......
......@@ -8,8 +8,8 @@ EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_LMS_NGINX_PORT: 80
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
EDXAPP_SITE_NAME: ${deploy_host}
COMMON_PYPI_MIRROR_URL: 'https://pypi.edx.org/root/pypi/+simple/'
COMMON_GIT_MIRROR: 'git.edx.org'
XSERVER_GRADER_DIR: "{{ xserver_data_dir }}/data/content-mit-600x~2012_Fall"
XSERVER_GRADER_SOURCE: "git@github.com:/MITx/6.00x.git"
XSERVER_LOCAL_GIT_IDENTITY: /var/lib/jenkins/git-identity-edx-pull
......@@ -19,14 +19,23 @@ CERTS_AWS_ID: $(cat /var/lib/jenkins/certs-aws-id)
CERTS_BUCKET: "verify-test.edx.org"
migrate_db: "yes"
openid_workaround: True
edx_platform_commit: $edxapp_version
edx_platform_version: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
xserver_version: $xserver_version
ora_version: $ora_version
ease_version: $ease_version
certs_version: $certs_version
discern_version: $discern_version
rabbitmq_ip: "127.0.0.1"
rabbitmq_refresh: True
COMMON_HOSTNAME: edx-server
EDXAPP_STATIC_URL_BASE: $static_url_base
# Settings for Grade downloads
EDXAPP_GRADE_STORAGE_TYPE: 's3'
EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: 'sandbox'
EOF
import argparse
import json
import logging as log
import pickle
import requests
import yaml
from datetime import datetime
from git import Repo
from os import path
from pprint import pformat
from pymongo import MongoClient, DESCENDING
from stage_release import uri_from
def releases(repo):
    """
    Yield each ref on ``repo`` that is a remote release-candidate
    branch, i.e. any ref whose name begins with ``origin/rc/``.
    """
    rc_prefix = 'origin/rc/'
    for candidate in repo.refs:
        if candidate.name.startswith(rc_prefix):
            yield candidate
def candidates_since(repo, time):
    """
    Yield the release-candidate refs of ``repo`` whose most recent
    commit landed strictly after ``time`` (a naive UTC datetime).
    """
    for candidate in releases(repo):
        committed_at = datetime.utcfromtimestamp(
            candidate.commit.committed_date)
        # Strictly newer than the cutoff => new or updated RC.
        if committed_at > time:
            yield candidate
def stage_release(url, token, repo, rc):
    """
    Submit a Jenkins job to stage a new release for the new rc of the repo.

    Arguments:
        url: the Jenkins build-trigger endpoint to POST to.
        token: the Jenkins job token, sent as a form field.
        repo: name of the repository the release candidate belongs to.
        rc: the git ref object for the release-candidate branch.

    Raises:
        Exception: if Jenkins does not answer with 201 Created.
    """
    # Jenkins "build with parameters" payload: tick the <repo>_REF checkbox
    # and pass the rc's commit sha as the ref to build.
    params = []
    params.append({'name': "{}_REF".format(repo), 'value': True})
    params.append({'name': repo, 'value': rc.commit.hexsha})
    build_params = {'parameter': params}

    log.info("New rc found {}, staging new release.".format(rc.name))
    # BUG FIX: the token must be a form field ({"token": token}); the
    # original code passed a set literal {"token", token}, so the token
    # was never transmitted in a usable form.
    r = requests.post(url,
                      data={"token": token},
                      params={"json": json.dumps(build_params)})
    if r.status_code != 201:
        msg = "Failed to submit request with params: {}"
        raise Exception(msg.format(pformat(build_params)))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Monitor git repos for new rc branches.")
    parser.add_argument('-c', '--config', required=True,
                        help="Config file.")
    parser.add_argument('-p', '--pickle', default="data.pickle",
                        help="Pickle of persistent data.")

    args = parser.parse_args()
    config = yaml.safe_load(open(args.config))

    # Load the state persisted by the previous run, if any.
    # Pickle files must be read/written in binary mode.
    if path.exists(args.pickle):
        with open(args.pickle, 'rb') as pickle_file:
            data = pickle.load(pickle_file)
    else:
        data = {}

    # Persist the time of this check.  On the very first run there is no
    # baseline, so we start from "now" — nothing is reported until the
    # next invocation.
    if 'last_check' not in data:
        last_check = datetime.utcnow()
    else:
        last_check = data['last_check']
    data['last_check'] = datetime.utcnow()

    # Find plays that are affected by this repo.
    repos_with_changes = {}
    for repo in config['repos']:
        # Check for new rc candidates.
        for rc in candidates_since(Repo(repo), last_check):
            # Notify stage-release to build for the new repo.
            stage_release(config['abbey_url'], config['abbey_token'], repo, rc)

    with open(args.pickle, 'wb') as pickle_file:
        pickle.dump(data, pickle_file)
"""
Take in a YAML file with the basic data of all the things we could
deploy and command line hashes for the repos that we want to deploy
right now.
Example Config YAML file:
---
DOC_STORE_CONFIG:
hosts: [ list, of, mongo, hosts]
port: #
db: 'db'
user: 'jenkins'
password: 'password'
configuration_repo: "/path/to/configuration/repo"
configuration_secure_repo: "/path/to/configuration-secure"
repos:
edxapp:
plays:
- edxapp
- worker
xqueue:
plays:
- xqueue
6.00x:
plays:
- xserver
xserver:
plays:
- xserver
deployments:
edx:
- stage
- prod
edge:
- stage
- prod
loadtest:
- stage
# A jenkins URL to post requests for building AMIs
abbey_url: "http://...."
abbey_token: "API_TOKEN"
---
"""
import argparse
import json
import yaml
import logging as log
from datetime import datetime
from git import Repo
from pprint import pformat
from pymongo import MongoClient, DESCENDING
log.basicConfig(level=log.DEBUG)
def uri_from(doc_store_config):
    """
    Build a ``mongodb://`` connection URI from a DOC_STORE_CONFIG dict.

    Expected keys: 'user', 'password', 'hosts' (list of hostnames),
    'port' (applied to every host) and 'db'.

    Example input::

        DOC_STORE_CONFIG:
          hosts: ['host1.com', 'host2.com']
          port: 10012
          db: 'devops'
          user: 'username'
          password: 'password'
    """
    port = doc_store_config['port']
    # Every host shares the single configured port.
    host_list = ",".join(
        "{}:{}".format(host, port) for host in doc_store_config['hosts'])
    return "mongodb://{user}:{password}@{hosts}/{db}".format(
        user=doc_store_config['user'],
        password=doc_store_config['password'],
        hosts=host_list,
        db=doc_store_config['db'])
def prepare_release(args):
    """
    Build and store a new release document in Mongo for the given
    deployment, then ask abbey (Jenkins) to bake AMIs for every
    play/environment that does not already have one.

    args is the argparse namespace from __main__: .config, .user,
    .deployment, .release_id and positional REPOS ('name=sha' pairs).
    """
    config = yaml.safe_load(open(args.config))
    client = MongoClient(uri_from(config['DOC_STORE_CONFIG']))
    db = client[config['DOC_STORE_CONFIG']['db']]

    # Get configuration repo versions (HEAD sha of the local checkouts).
    config_repo_ver = Repo(config['configuration_repo']).commit().hexsha
    config_secure_ver = Repo(config['configuration_secure_repo']).commit().hexsha

    # Parse the vars: each positional arg is 'repo=ref'.
    var_array = map(lambda key_value: key_value.split('='), args.REPOS)
    update_repos = { item[0]:item[1] for item in var_array }
    log.info("Update repos: {}".format(pformat(update_repos)))

    # Skeleton of the release document; plays are filled in below.
    release = {}
    now = datetime.utcnow()
    release['_id'] = args.release_id
    release['date_created'] = now
    release['date_modified'] = now
    release['build_status'] = 'Unknown'
    release['build_user'] = args.user

    # One collection per deployment (edx, edge, loadtest, ...).
    release_coll = db[args.deployment]
    # Seed from the most recent successful release so plays that are not
    # being updated keep their previous vars/amis.
    releases = release_coll.find({'build_status': 'Succeeded'}).sort('_id', DESCENDING)
    all_plays = {}

    try:
        last_successful = releases.next()
        all_plays = last_successful['plays']
    except StopIteration:
        # No successful builds.
        log.warn("No Previously successful builds.")

    # For all repos that were updated
    for repo, ref in update_repos.items():
        # e.g. 'edx-platform' -> 'edx_platform_version'
        var_name = "{}_version".format(repo.replace('-','_'))
        if repo not in config['repos']:
            raise Exception("No info for repo with name '{}'".format(repo))

        # For any play that uses the updated repo
        # NOTE(review): per the module docstring, config['repos'][<repo>] is
        # a dict like {'plays': [...]}, so iterating it yields the key
        # 'plays' rather than the play names — confirm against a real
        # config file (config['repos'][repo]['plays'] may be intended).
        for play in config['repos'][repo]:
            if play not in all_plays:
                all_plays[play] = {}
            if 'vars' not in all_plays[play]:
                all_plays[play]['vars'] = {}
            all_plays[play]['vars'][var_name] = ref
            # Configuration to use to build these AMIs
            all_plays[play]['configuration_ref'] = config_repo_ver
            all_plays[play]['configuration_secure_ref'] = config_secure_ver

            # Set amis to None for all envs of this deployment
            all_plays[play]['amis'] = {}
            for env in config['deployments'][args.deployment]:
                # Check the AMIs collection to see if an ami already exist
                # for this configuration.
                potential_ami = ami_for(db, env,
                                        args.deployment,
                                        play, config_repo_ver,
                                        config_secure_ver,
                                        ref)
                if potential_ami:
                    all_plays[play]['amis'][env] = potential_ami['_id']
                else:
                    all_plays[play]['amis'][env] = None

    release['plays'] = all_plays
    release_coll.insert(release)
    # All plays that need new AMIs have been updated.
    notify_abbey(config['abbey_url'], config['abbey_token'], args.deployment, all_plays, args.release_id)
def ami_for(db, env, deployment, play, configuration,
            configuration_secure, ansible_vars):
    """
    Look up a previously-built AMI document in the ``amis`` collection
    that matches this exact build signature, or None if there is none.
    """
    # An AMI is reusable only when every input that went into baking it
    # is identical: env/deployment/play plus both configuration repo
    # versions and the ansible vars.
    return db.amis.find_one(dict(
        env=env,
        deployment=deployment,
        play=play,
        configuration_ref=configuration,
        configuration_secure_ref=configuration_secure,
        vars=ansible_vars,
    ))
import requests
def notify_abbey(abbey_url, abbey_token, deployment, all_plays, release_id):
    """
    POST a Jenkins build request to abbey for every (play, environment)
    pair whose 'amis' entry is still None, i.e. that needs a fresh AMI.

    Raises Exception if abbey answers anything other than 201 Created.
    """
    for play_name, play in all_plays.items():
        for env, ami in play['amis'].items():
            if ami is not None:
                # An AMI already exists for this env — nothing to bake.
                continue
            build_params = {
                'parameter': [
                    {'name': 'play', 'value': play_name},
                    {'name': 'deployment', 'value': deployment},
                    {'name': 'environment', 'value': env},
                    {'name': 'vars',
                     'value': yaml.dump(play['vars'], default_flow_style=False)},
                    {'name': 'release_id', 'value': release_id},
                ],
            }
            log.info("Need ami for {}".format(pformat(build_params)))

            r = requests.post(abbey_url,
                              data={"token": abbey_token},
                              params={"json": json.dumps(build_params)})
            log.info("Sent request got {}".format(r))
            if r.status_code != 201:
                # Something went wrong.
                msg = "Failed to submit request with params: {}"
                raise Exception(msg.format(pformat(build_params)))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Prepare a new release.")
    parser.add_argument('-c', '--config', required=True, help="Configuration for deploys")
    parser.add_argument('-u', '--user', required=True, help="User staging the release.")
    msg = "The deployment to build for eg. edx, edge, loadtest"
    parser.add_argument('-d', '--deployment', required=True, help=msg)
    parser.add_argument('-r', '--release-id', required=True, help="Id of Release.")
    # FIX: corrected the typo "spcae" and the unbalanced parenthesis in the
    # original help text.
    parser.add_argument('REPOS', nargs='+',
                        help="Any number of var=value (no space around '=')" + \
                             " e.g. 'edxapp=3233bac xqueue=92832ab'")
    args = parser.parse_args()
    log.debug(args)
    prepare_release(args)
#!/usr/bin/env python -u
import sys
from argparse import ArgumentParser
import time
import json
try:
import boto.ec2
import boto.sqs
from boto.vpc import VPCConnection
from boto.exception import NoAuthHandlerFound
from boto.sqs.message import RawMessage
except ImportError:
print "boto required for script"
sys.exit(1)
AMI_TIMEOUT = 600 # time to wait for AMIs to complete
EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition
EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks
NUM_TASKS = 5 # number of tasks for time summary report
class Unbuffered:
    """
    Minimal stream proxy that forces a flush after every write so output
    appears immediately; equivalent to running with PYTHONUNBUFFERED set.
    """
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Write, then flush right away so nothing sits in the buffer.
        self.stream.write(data)
        self.stream.flush()

    def __getattr__(self, attr):
        # Anything we don't define (fileno, isatty, close, ...) falls
        # through to the wrapped stream.
        return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
def parse_args():
    """
    Build and parse the command line for an abbey AMI-bake run.

    Returns the argparse namespace; --play, --deployment and
    --environment are required, everything else has a default.
    """
    parser = ArgumentParser()
    parser.add_argument('--noop', action='store_true',
                        help="don't actually run the cmds",
                        default=False)
    parser.add_argument('--secure-vars', required=False,
                        metavar="SECURE_VAR_FILE",
                        help="path to secure-vars, defaults to "
                             "../../../configuration-secure/ansible/"
                             "vars/DEPLOYMENT/ENVIRONMENT.yml")
    parser.add_argument('--stack-name',
                        help="defaults to DEPLOYMENT-ENVIRONMENT",
                        metavar="STACK_NAME",
                        required=False)
    parser.add_argument('-p', '--play',
                        help='play name without the yml extension',
                        metavar="PLAY", required=True)
    parser.add_argument('-d', '--deployment', metavar="DEPLOYMENT",
                        required=True)
    parser.add_argument('-e', '--environment', metavar="ENVIRONMENT",
                        required=True)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="turn on verbosity")
    parser.add_argument('--no-cleanup', action='store_true',
                        help="don't cleanup on failures")
    parser.add_argument('--vars', metavar="EXTRA_VAR_FILE",
                        help="path to extra var file", required=False)
    parser.add_argument('-a', '--application', required=False,
                        help="Application for subnet, defaults to admin",
                        default="admin")
    parser.add_argument('--configuration-version', required=False,
                        help="configuration repo version",
                        default="master")
    parser.add_argument('--configuration-secure-version', required=False,
                        help="configuration-secure repo version",
                        default="master")
    parser.add_argument('-j', '--jenkins-build', required=False,
                        help="jenkins build number to update")
    parser.add_argument('-b', '--base-ami', required=False,
                        help="ami to use as a base ami",
                        default="ami-0568456c")
    parser.add_argument('-i', '--identity', required=False,
                        help="path to identity file for pulling "
                             "down configuration-secure",
                        default=None)
    parser.add_argument('-r', '--region', required=False,
                        default="us-east-1",
                        help="aws region")
    parser.add_argument('-k', '--keypair', required=False,
                        default="deployment",
                        help="AWS keypair to use for instance")
    parser.add_argument('-t', '--instance-type', required=False,
                        default="m1.large",
                        help="instance type to launch")
    parser.add_argument("--security-group", required=False,
                        default="abbey", help="Security group to use")
    parser.add_argument("--role-name", required=False,
                        default="abbey",
                        help="IAM role name to use (must exist)")
    # BUG FIX: without type=int a CLI-supplied value arrives as a string,
    # and the "(now - max(...)) > args.msg_delay" comparison in
    # poll_sqs_ansible() misbehaves.  The default (5) was already an int.
    parser.add_argument("--msg-delay", required=False,
                        default=5,
                        type=int,
                        help="How long to delay message display from sqs "
                             "to ensure ordering")

    return parser.parse_args()
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data

    Reads the module-level globals ``ec2``, ``args``, ``stack_name``,
    ``run_id`` and ``extra_vars_yml`` set up in __main__.  Exits the
    process if the security group or a unique subnet cannot be found.
    """
    # Resolve the security group name to its id.
    security_group_id = None
    grp_details = ec2.get_all_security_groups()
    for grp in grp_details:
        if grp.name == args.security_group:
            security_group_id = grp.id
            break
    if not security_group_id:
        print "Unable to lookup id for security group {}".format(
            args.security_group)
        sys.exit(1)

    # The instance must land in the stack's subnet tagged with the
    # requested Application (default "admin"); exactly one is expected.
    vpc = VPCConnection()
    subnet = vpc.get_all_subnets(
        filters={
            'tag:aws:cloudformation:stack-name': stack_name,
            'tag:Application': args.application}
    )
    if len(subnet) != 1:
        sys.stderr.write("ERROR: Expected 1 admin subnet, got {}\n".format(
            len(subnet)))
        sys.exit(1)
    subnet_id = subnet[0].id

    # If an identity file is given, its contents are embedded in the
    # user-data so the instance can clone configuration-secure over ssh.
    # config_secure is the *string* 'true'/'false' because it is spliced
    # into a bash script below.
    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_file = f.read()
    else:
        config_secure = 'false'
        identity_file = "dummy"

    # Bash user-data executed on first boot: installs prerequisites,
    # clones configuration (and optionally configuration-secure), then
    # runs the requested play locally, streaming ansible events to SQS.
    # The doubled backslashes are real backslash line-continuations in
    # the rendered script.  Do not edit the string body casually — it is
    # passed to the instance verbatim after .format().
    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
secure_vars_file="$base_dir/configuration-secure\\
/ansible/vars/$environment/$environment-$deployment.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/configuration/playbooks/edx-east"
git_repo="https://github.com/edx/configuration"
git_repo_secure="git@github.com:edx/configuration-secure"
if $config_secure; then
git_cmd="env GIT_SSH=$git_ssh git"
else
git_cmd="git"
fi
ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
echo "Installing pkg dependencies"
/usr/bin/apt-get update
/usr/bin/apt-get install -y git python-pip python-apt \\
git-core build-essential python-dev libxml2-dev \\
libxslt-dev curl --force-yes
fi
rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir
cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF
chmod 755 $git_ssh
if $config_secure; then
cat << EOF > $secure_identity
{identity_file}
EOF
fi
cat << EOF >> $extra_vars
{extra_vars_yml}
secure_vars: $secure_vars_file
EOF
chmod 400 $secure_identity
$git_cmd clone -b $configuration_version $git_repo
if $config_secure; then
$git_cmd clone -b $configuration_secure_version \\
$git_repo_secure
fi
cd $base_dir/configuration
sudo pip install -r requirements.txt
cd $playbook_dir
ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$extra_vars
rm -rf $base_dir
""".format(
        configuration_version=args.configuration_version,
        configuration_secure_version=args.configuration_secure_version,
        environment=args.environment,
        deployment=args.deployment,
        play=args.play,
        config_secure=config_secure,
        identity_file=identity_file,
        queue_name=run_id,
        extra_vars_yml=extra_vars_yml)

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': args.base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
    }

    return ec2_args
def poll_sqs_ansible():
    """
    Prints events to the console and
    blocks until a final STATS ansible
    event is read off of SQS.
    SQS does not guarantee FIFO, for that
    reason there is a buffer that will delay
    messages before they are printed to the
    console.
    Returns length of the ansible run.

    Reads the module-level globals ``sqs_queue`` and ``args``.  Returns a
    tuple of (total ansible seconds, task_report list).  Raises on an
    ansible FAILURE event.
    """
    oldest_msg_ts = 0
    buf = []                  # out-of-order buffer of parsed messages
    task_report = []          # list of tasks for reporting
    last_task = None
    while True:
        # Drain everything currently visible on the queue.
        messages = []
        while True:
            # get all available messages on the queue
            msgs = sqs_queue.get_messages(attributes='All')
            if not msgs:
                break
            messages.extend(msgs)
        for message in messages:
            # SQS timestamps are in milliseconds; convert to seconds.
            recv_ts = float(
                message.attributes['ApproximateFirstReceiveTimestamp']) * .001
            sent_ts = float(message.attributes['SentTimestamp']) * .001
            try:
                msg_info = {
                    'msg': json.loads(message.get_body()),
                    'sent_ts': sent_ts,
                    'recv_ts': recv_ts,
                }
                buf.append(msg_info)
            except ValueError as e:
                # Non-JSON payload — report it but keep polling.
                print "!!! ERROR !!! unable to parse queue message, " \
                      "expecting valid json: {} : {}".format(
                          message.get_body(), e)
            if not oldest_msg_ts or recv_ts < oldest_msg_ts:
                oldest_msg_ts = recv_ts
            sqs_queue.delete_message(message)
        now = int(time.time())
        if buf:
            # Only display a message once it has aged past --msg-delay,
            # giving stragglers a chance to arrive and be sorted in.
            if (now - max([msg['recv_ts'] for msg in buf])) > args.msg_delay:
                # sort by TS instead of recv_ts
                # because the sqs timestamp is not as
                # accurate
                buf.sort(key=lambda k: k['msg']['TS'])
                to_disp = buf.pop(0)
                # Each ansible event carries exactly one of the keys
                # START / TASK / OK / FAILURE / STATS.
                if 'START' in to_disp['msg']:
                    print '\n{:0>2.0f}:{:0>5.2f} {} : Starting "{}"'.format(
                        to_disp['msg']['TS'] / 60,
                        to_disp['msg']['TS'] % 60,
                        to_disp['msg']['PREFIX'],
                        to_disp['msg']['START']),
                elif 'TASK' in to_disp['msg']:
                    print "\n{:0>2.0f}:{:0>5.2f} {} : {}".format(
                        to_disp['msg']['TS'] / 60,
                        to_disp['msg']['TS'] % 60,
                        to_disp['msg']['PREFIX'],
                        to_disp['msg']['TASK']),
                    last_task = to_disp['msg']['TASK']
                elif 'OK' in to_disp['msg']:
                    if args.verbose:
                        print "\n"
                        for key, value in to_disp['msg']['OK'].iteritems():
                            print " {:<15}{}".format(key, value)
                    else:
                        if to_disp['msg']['OK']['changed']:
                            changed = "*OK*"
                        else:
                            changed = "OK"
                        print " {}".format(changed),
                    task_report.append({
                        'TASK': last_task,
                        'INVOCATION': to_disp['msg']['OK']['invocation'],
                        'DELTA': to_disp['msg']['delta'],
                    })
                elif 'FAILURE' in to_disp['msg']:
                    print " !!!! FAILURE !!!!",
                    for key, value in to_disp['msg']['FAILURE'].iteritems():
                        print " {:<15}{}".format(key, value)
                    raise Exception("Failed Ansible run")
                elif 'STATS' in to_disp['msg']:
                    # Final event: the run is complete.
                    print "\n{:0>2.0f}:{:0>5.2f} {} : COMPLETE".format(
                        to_disp['msg']['TS'] / 60,
                        to_disp['msg']['TS'] % 60,
                        to_disp['msg']['PREFIX'])
                    return (to_disp['msg']['TS'], task_report)
        if not messages:
            # wait 1 second between sqs polls
            time.sleep(1)
def create_ami(instance_id, name, description):
    """
    Snapshot the given EC2 instance into an AMI (without rebooting it)
    and block until the image reaches the 'available' state or
    AMI_TIMEOUT iterations have elapsed.

    Uses the module-level ``ec2`` connection.  Returns the new image id;
    raises on timeout or an unexpected EC2 error code.
    """
    params = {'instance_id': instance_id,
              'name': name,
              'description': description,
              'no_reboot': True}
    image_id = ec2.create_image(**params)
    for _ in xrange(AMI_TIMEOUT):
        try:
            img = ec2.get_image(image_id)
            if img.state == 'available':
                break
            else:
                time.sleep(1)
        except boto.exception.EC2ResponseError as e:
            # The image id is not immediately visible after create_image;
            # NotFound here just means "not registered yet" — keep waiting.
            if e.error_code == 'InvalidAMIID.NotFound':
                time.sleep(1)
            else:
                raise Exception("Unexpected error code: {}".format(
                    e.error_code))
            # NOTE(review): this second sleep makes the NotFound path wait
            # twice per iteration — looks unintentional, but is harmless.
            time.sleep(1)
    else:
        # for/else: the loop ran to exhaustion without break-ing.
        raise Exception("Timeout waiting for AMI to finish")

    return image_id
if __name__ == '__main__':

    args = parse_args()

    run_summary = []          # (stage name, seconds) pairs for the report
    start_time = time.time()

    # Extra ansible vars: either the supplied file or an empty YAML doc.
    if args.vars:
        with open(args.vars) as f:
            extra_vars_yml = f.read()
    else:
        extra_vars_yml = "---\n"

    # Path to the secure-vars file, relative to the playbook dir on the
    # baked instance.
    if args.secure_vars:
        secure_vars = args.secure_vars
    else:
        secure_vars = "../../../configuration-secure/" \
                      "ansible/vars/{}/{}.yml".format(
                          args.deployment, args.environment)
    if args.stack_name:
        stack_name = args.stack_name
    else:
        stack_name = "{}-{}".format(args.environment, args.deployment)

    try:
        sqs = boto.sqs.connect_to_region(args.region)
        ec2 = boto.ec2.connect_to_region(args.region)
    except NoAuthHandlerFound:
        print 'You must be able to connect to sqs and ec2 to use this script'
        sys.exit(1)

    try:
        # Initialized up-front so the finally-block can test what needs
        # cleaning up even if we fail early.
        sqs_queue = None
        instance_id = None

        # Unique id for this bake; also used as the SQS queue name and
        # the resulting AMI name.
        run_id = "abbey-{}-{}-{}".format(
            args.environment, args.deployment, int(time.time() * 100))

        ec2_args = create_instance_args()

        print "{:<40}".format(
            "Creating SQS queue and launching instance for {}:".format(run_id))
        print
        for k, v in ec2_args.iteritems():
            if k != 'user_data':
                print " {:<25}{}".format(k, v)
        print

        sqs_queue = sqs.create_queue(run_id)
        sqs_queue.set_message_class(RawMessage)
        res = ec2.run_instances(**ec2_args)
        inst = res.instances[0]
        instance_id = inst.id

        # Stage 1: wait for the instance to reach 'running'.
        print "{:<40}".format("Waiting for running status:"),
        status_start = time.time()
        for _ in xrange(EC2_RUN_TIMEOUT):
            res = ec2.get_all_instances(instance_ids=[instance_id])
            if res[0].instances[0].state == 'running':
                status_delta = time.time() - status_start
                run_summary.append(('EC2 Launch', status_delta))
                print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
                    status_delta / 60,
                    status_delta % 60)
                break
            else:
                time.sleep(1)
        else:
            raise Exception("Timeout waiting for running status: {} ".format(
                instance_id))

        # Stage 2: wait for EC2 system status checks to pass.
        print "{:<40}".format("Waiting for system status:"),
        system_start = time.time()
        for _ in xrange(EC2_STATUS_TIMEOUT):
            status = ec2.get_all_instance_status(inst.id)
            if status[0].system_status.status == u'ok':
                system_delta = time.time() - system_start
                run_summary.append(('EC2 Status Checks', system_delta))
                print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
                    system_delta / 60,
                    system_delta % 60)
                break
            else:
                time.sleep(1)
        else:
            raise Exception("Timeout waiting for status checks: {} ".format(
                instance_id))

        # Stage 3: the user-data script runs ansible on the instance and
        # streams its events to our SQS queue; block until it finishes.
        user_start = time.time()
        print
        print "{:<40}".format(
            "Waiting for user-data, polling sqs for Ansible events:")
        (ansible_delta, task_report) = poll_sqs_ansible()
        user_pre_ansible = time.time() - user_start - ansible_delta
        run_summary.append(('Ansible run', ansible_delta))

        print
        print "{} longest Ansible tasks (seconds):".format(NUM_TASKS)
        for task in sorted(
                task_report, reverse=True,
                key=lambda k: k['DELTA'])[:NUM_TASKS]:
            print "{:0>3.0f} {}".format(task['DELTA'], task['TASK'])
            print " - {}".format(task['INVOCATION'])
        print

        # Stage 4: snapshot the provisioned instance into an AMI.
        print "{:<40}".format("Creating AMI:"),
        ami_start = time.time()
        ami = create_ami(instance_id, run_id, run_id)
        ami_delta = time.time() - ami_start
        print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
            ami_delta / 60,
            ami_delta % 60)
        run_summary.append(('AMI Build', ami_delta))

        # Final timing report; 'Other' covers time not attributed to a
        # measured stage.
        total_time = time.time() - start_time
        all_stages = sum(run[1] for run in run_summary)
        if total_time - all_stages > 0:
            run_summary.append(('Other', total_time - all_stages))
        run_summary.append(('Total', total_time))

        print
        print "Summary:\n"
        for run in run_summary:
            print "{:<30} {:0>2.0f}:{:0>5.2f}".format(
                run[0], run[1] / 60, run[1] % 60)
        print "AMI: {}".format(ami)
    finally:
        # Always tear down the queue and instance unless --no-cleanup.
        print
        if not args.no_cleanup:
            if sqs_queue:
                print "Cleaning up - Removing SQS queue - {}".format(run_id)
                sqs.delete_queue(sqs_queue)
            if instance_id:
                print "Cleaning up - Terminating instance ID - {}".format(
                    instance_id)
                ec2.terminate_instances(instance_ids=[instance_id])
import argparse
import boto
import yaml
from os.path import basename
from time import sleep
from pprint import pprint
FAILURE_STATES = [
'CREATE_FAILED',
......@@ -28,28 +31,39 @@ def upload_file(file_path, bucket_name, key_name):
key.key = key_name
key.set_contents_from_filename(file_path)
url = 'https://s3.amazonaws.com/{}/{}'.format(bucket_name, key_name)
key.set_acl('public-read')
url = key.generate_url(300, query_auth=False)
return url
def create_stack(stack_name, template, region='us-east-1', blocking=True, temp_bucket='edx-sandbox-devops'):
def create_stack(stack_name, template, region='us-east-1', blocking=True,
temp_bucket='edx-sandbox-devops', parameters=[],
update=False):
cfn = boto.connect_cloudformation()
# Upload the template to s3
key_name = 'cloudformation/auto/{}_{}'.format(stack_name, basename(template))
key_pattern = 'devops/cloudformation/auto/{}_{}'
key_name = key_pattern.format(stack_name, basename(template))
template_url = upload_file(template, temp_bucket, key_name)
# Reference the stack.
try:
if update:
stack_id = cfn.update_stack(stack_name,
template_url=template_url,
capabilities=['CAPABILITY_IAM'],
tags={'autostack':'true'},
parameters=parameters)
else:
stack_id = cfn.create_stack(stack_name,
template_url=template_url,
capabilities=['CAPABILITY_IAM'],
tags={'autostack':'true'},
parameters=[('KeyName', 'continuous-integration')])
parameters=parameters)
except Exception as e:
print(e.message)
raise e
status = None
while blocking:
sleep(5)
......@@ -65,6 +79,9 @@ def create_stack(stack_name, template, region='us-east-1', blocking=True, temp_b
return stack_id
def cfn_params_from(filename):
    """
    Load CloudFormation stack parameters from a YAML file and return
    them as a list of (key, value) tuples, the shape boto expects.
    """
    with open(filename) as param_file:
        params_dict = yaml.safe_load(param_file)
    return list(params_dict.items())
if __name__ == '__main__':
description = 'Create a cloudformation stack from a template.'
......@@ -73,6 +90,9 @@ if __name__ == '__main__':
msg = 'Name for the cloudformation stack.'
parser.add_argument('-n', '--stackname', required=True, help=msg)
msg = 'Pass this argument if we are updating an existing stack.'
parser.add_argument('-u', '--update', action='store_true')
msg = 'Name of the bucket to use for temporarily uploading the \
template.'
parser.add_argument('-b', '--bucketname', default="edx-sandbox-devops",
......@@ -84,11 +104,16 @@ if __name__ == '__main__':
msg = 'The AWS region to build this stack in.'
parser.add_argument('-r', '--region', default='us-east-1', help=msg)
msg = 'YAML file containing stack build parameters'
parser.add_argument('-p', '--parameters', help=msg)
args = parser.parse_args()
stack_name = args.stackname
template = args.template
region = args.region
bucket_name = args.bucketname
parameters = cfn_params_from(args.parameters)
update = args.update
create_stack(stack_name, template, region, bucket_name)
create_stack(stack_name, template, region, temp_bucket=bucket_name, parameters=parameters, update=update)
print('Stack({}) created.'.format(stack_name))
......@@ -104,6 +104,9 @@ if __name__ == "__main__":
parser.add_argument('-n', '--stackname',
help="The name of the cloudformation stack.",
required=True)
parser.add_argument('-z', '--parent-zone',
help="The parent zone under which the dns for this vpc resides.")
args = parser.parse_args()
stack_name = args.stackname
......@@ -118,7 +121,10 @@ if __name__ == "__main__":
}
# Create a zone for the stack.
zone_name = "{}.vpc.edx.org".format(stack_name)
parent_zone = 'vpc.edx.org'
if args.parent_zone:
parent_zone = args.parent_zone
zone_name = "{}.{}".format(stack_name, parent_zone)
zone = get_or_create_hosted_zone(zone_name)
......
Vagrant
=======
Vagrant instances for local development and testing.
- Vagrant stacks in ``base`` create new base boxes from scratch.
- Vagrant stacks in ``release`` download a base box with most requirements already installed. The instances then update themselves with the latest versions of the application code.
If you are a developer or designer, you should use the ``release`` stacks.
There are two versions of the stack:
- ``fullstack`` is a production-like configuration running all the services on a single server. https://github.com/edx/configuration/wiki/edX-Production-Stack
- ``devstack`` is designed for local development. Although it uses the same system requirements as in production, it simplifies certain settings to make development more convenient. https://github.com/edx/configuration/wiki/edX-Developer-Stack
......@@ -3,18 +3,20 @@ CPU_COUNT = 2
edx_platform_mount_dir = "edx-platform"
forum_mount_dir = "cs_comments_service"
ora_mount_dir = "ora"
if ENV['VAGRANT_MOUNT_BASE']
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir
end
Vagrant.configure("2") do |config|
# Creates a devstack from a base Ubuntu 12.04 image
# Creates a devstack from a base Ubuntu 12.04 image
config.vm.box = "precise64"
config.vm.box_url = "http://files.vagrantup.com/precise64.box"
......@@ -25,17 +27,27 @@ Vagrant.configure("2") do |config|
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
config.hostsupdater.aliases = ["preview.localhost"]
# Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true
end
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
# Allow DNS to work for Ubuntu 12.10 host
# http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
config.vm.provision :ansible do |ansible|
ansible.playbook = "../../playbooks/vagrant-devstack.yml"
ansible.inventory_path = "../../playbooks/vagrant/inventory.ini"
ansible.playbook = "../../../playbooks/vagrant-devstack.yml"
ansible.inventory_path = "../../../playbooks/vagrant/inventory.ini"
ansible.verbose = "extra"
end
end
../../../playbooks/ansible.cfg
\ No newline at end of file
......@@ -12,12 +12,16 @@ Vagrant.configure("2") do |config|
# You can adjust this to the amount of CPUs your system has available
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
# Allow DNS to work for Ubuntu 12.10 host
# http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
config.vm.provision :ansible do |ansible|
# point Vagrant at the location of your playbook you want to run
ansible.playbook = "../../playbooks/vagrant-fullstack.yml"
ansible.inventory_path = "../../playbooks/vagrant/inventory.ini"
ansible.playbook = "../../../playbooks/vagrant-fullstack.yml"
ansible.inventory_path = "../../../playbooks/vagrant/inventory.ini"
ansible.verbose = "extra"
end
end
../../../playbooks/ansible.cfg
\ No newline at end of file
devstack
========
Vagrant instance for local development.
Overview
--------
``devstack`` is a Vagrant instance designed for local development. The instance:
- Uses the same system requirements as production. This allows developers to discover and fix system configuration issues early in development.
- Simplifies certain production settings to make development more convenient. For example, it disables ``nginx`` and ``gunicorn`` in favor of ``runserver`` for Django development.
The ``devstack`` instance is designed to run code and tests, but you can do most development in the host environment:
- Git repositories are shared with the host system, so you can use your preferred text editor/IDE.
- You can load pages served by the running Vagrant instance.
Wiki documentation - https://github.com/edx/configuration/wiki/edX-Developer-Stack
../../playbooks/ansible.cfg
\ No newline at end of file
fullstack
=========
Vagrant instance for a production-like configuration, running all services on a single server.
Installation instructions - https://github.com/edx/configuration/wiki#installing-edx-using-a-pre-built-vagrant-fullstack-image
../../playbooks/ansible.cfg
\ No newline at end of file
# VM sizing for the devstack release image.
MEMORY = 2048
CPU_COUNT = 2

# Provisioning script run inside the guest: refreshes the configuration
# checkout and re-runs the vagrant-devstack playbook locally.  It assumes
# the base box already ships /edx/app/edx_ansible.
# NOTE: the heredoc body is executed on the guest verbatim — keep it as-is.
$script = <<SCRIPT
if [ ! -d /edx/app/edx_ansible ]; then
echo "Error: Base box is missing provisioning scripts." 1>&2
exit 1
fi
export PYTHONUNBUFFERED=1
source /edx/app/edx_ansible/venvs/edx_ansible/bin/activate
cd /edx/app/edx_ansible/edx_ansible/playbooks
# Need to ensure that the configuration repo is updated
# The vagrant-devstack.yml playbook will also do this, but only
# after loading the playbooks into memory. If these are out of date,
# this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration master
ansible-playbook -i localhost, -c local vagrant-devstack.yml
SCRIPT
edx_platform_mount_dir = "edx-platform"
forum_mount_dir = "cs_comments_service"
ora_mount_dir = "ora"
if ENV['VAGRANT_MOUNT_BASE']
edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
ora_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + ora_mount_dir
end
Vagrant.configure("2") do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "empanada-devstack"
config.vm.box_url = "http://edx-static.s3.amazonaws.com/vagrant-images/20131219-empanada-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
config.vm.provider :virtualbox do |vb|
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
# Allow DNS to work for Ubuntu 12.10 host
# http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
# Assume that the base box has the edx_ansible role installed
# We can then tell the Vagrant instance to update itself.
config.vm.provision "shell", inline: $script
end
MEMORY = 2048
CPU_COUNT = 2

# Host-side checkout locations shared into the VM.  Relative to this
# Vagrantfile by default; set VAGRANT_MOUNT_BASE to re-root them under a
# common base directory.
edx_platform_mount_dir = "edx-platform"
forum_mount_dir = "cs_comments_service"

if ENV['VAGRANT_MOUNT_BASE']
  # BUGFIX: the two assignments previously appended each other's directory
  # name (edx-platform got "cs_comments_service" appended and vice versa),
  # so the wrong checkouts were mounted.  Append each variable's own dir.
  edx_platform_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + edx_platform_mount_dir
  forum_mount_dir = ENV['VAGRANT_MOUNT_BASE'] + "/" + forum_mount_dir
end

Vagrant.configure("2") do |config|
  config.vm.box = "edx-devstack"
  config.vm.box_url = "http://edx-static.s3.amazonaws.com/vagrant-images/20131113-dosa-devstack.box"

  config.vm.network :private_network, ip: "192.168.33.10"
  config.vm.network :forwarded_port, guest: 8000, host: 8000
  config.vm.network :forwarded_port, guest: 8001, host: 8001
  config.vm.network :forwarded_port, guest: 4567, host: 4567

  # NFS-share each service checkout into its install location inside the VM.
  config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
  config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true

  # Requires the vagrant-hostsupdater plugin.
  config.hostsupdater.aliases = ["preview.localhost"]

  config.vm.provider :virtualbox do |vb|
    vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
    vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
  end

  # NOTE(review): these two locals are never referenced below (the ansible
  # block does not pass them as extra_vars) — confirm whether they are
  # placeholders for a future extra_vars hook or can be deleted.
  edxapp_version = "master"
  forum_version = "master"

  config.vm.provision :ansible do |ansible|
    ansible.playbook = "../../../../playbooks/vagrant-devstack.yml"
    ansible.inventory_path = "../../../../playbooks/vagrant/inventory.ini"
    ansible.tags = "deploy"
    ansible.verbose = "extra"
  end
end
../../../../playbooks/ansible.cfg
\ No newline at end of file
MEMORY = 2048
CPU_COUNT = 2

# Boots an edX "fullstack" VM (all services on one box) from the Dosa
# release image and provisions it with the fullstack Ansible playbook.
Vagrant.configure("2") do |config|
  config.vm.box = "edx-fullstack"
  config.vm.box_url = "http://edx-static.s3.amazonaws.com/vagrant-images/20131113-dosa-fullstack.box"

  config.vm.network :private_network, ip: "192.168.33.10"

  config.vm.provider :virtualbox do |vbox|
    vbox.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
    # You can adjust this to the amount of CPUs your system has available
    vbox.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
  end

  config.vm.provision :ansible do |ans|
    # point Vagrant at the location of your playbook you want to run
    ans.playbook = "../../../../playbooks/vagrant-fullstack.yml"
    ans.inventory_path = "../../../../playbooks/vagrant/inventory.ini"
    ans.verbose = "extra"
  end
end
../../../../playbooks/ansible.cfg
\ No newline at end of file
MEMORY = 2048
CPU_COUNT = 2

# Boots an edX fullstack VM from the official Empanada release image.
# No provisioner runs here: the box ships fully provisioned.
Vagrant.configure("2") do |config|
  # Creates an edX fullstack VM from an official release
  config.vm.box = "empanada"
  config.vm.box_url = "http://edx-static.s3.amazonaws.com/vagrant-images/20131218-empanada-fullstack.box"

  config.vm.network :private_network, ip: "192.168.33.10"

  # Requires the vagrant-hostsupdater plugin.
  config.hostsupdater.aliases = ["preview.localhost"]

  config.vm.provider :virtualbox do |vbox|
    vbox.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
    vbox.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
    # Allow DNS to work for Ubuntu 12.10 host
    # http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
    vbox.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
  end
end
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment