Commit 690038ac by John Jarvis

Merge pull request #319 from edx/jarv/provision

Jarv/provision
parents 4cb25800 7edc4201
@@ -6,3 +6,4 @@
 jinja2_extensions=jinja2.ext.do
 hash_behaviour=merge
+host_key_checking = False
- name: Configure instance(s)
hosts: all
sudo: True
gather_facts: True
vars:
openid_workaround: True
roles:
- edxapp
 - name: Configure instance(s)
-  hosts: tag_Name_edx-continuous-integration
+  hosts: all
   sudo: True
   gather_facts: True
   vars:
- name: Create ec2 instance
hosts: localhost
connection: local
gather_facts: False
roles:
- role: launch_ec2
keypair: "{{ keypair }}"
instance_type: "{{ instance_type }}"
security_group: "{{ security_group }}"
ami_image: "{{ ami }}"
region: "{{ region }}"
instance_tags: "{{ instance_tags }}"
root_ebs_size: "{{ root_ebs_size }}"
dns_name: "{{ dns_name }}"
dns_zone: "{{ dns_zone }}"
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
roles:
# gh_users hash must be passed
# in as a -e variable
- gh_users
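For reference, a minimal sketch of the extra-vars file that could supply the gh_users hash this play expects (the file name and usernames are illustrative, not part of the commit; the format follows the gh_users role documented further down):
---
# passed on the command line, e.g.: ansible-playbook edx_provision.yml -e "@extra_vars.yml"
gh_users:
  - user: a_github_username
    groups:
      - adm
  - user: another_github_username
    groups: !!null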
../inventory.ini
\ No newline at end of file
../library
\ No newline at end of file
@@ -2,8 +2,9 @@
 # This should only have variables
 # that are applicable to all edX roles
+storage_base_dir: /mnt
 app_base_dir: /opt/wwc
-log_base_dir: /mnt/logs
+log_base_dir: "{{ storage_base_dir }}/logs"
 venv_dir: /opt/edx
 os_name: ubuntu
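Purely as illustration (not introduced by this commit): because the roles touched here derive their storage paths from the new storage_base_dir variable, a single override relocates all of them, for example:
---
# hypothetical extra-vars file
storage_base_dir: /edx/var   # log_base_dir then resolves to /edx/var/logs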
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2
short_description: create or terminate an instance in ec2, return instanceid
description:
- Creates or terminates ec2 instances. When creating instances, it can optionally wait for them to reach the 'running' state. This module has a dependency on python-boto >= 2.5
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
required: true
default: null
aliases: ['keypair']
id:
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group:
description:
- security group (or list of groups) to use with the instance
required: false
default: null
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
required: false
default: null
aliases: []
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
required: false
default: null
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
image:
description:
- I(emi) (or I(ami)) to use for the instance
required: true
default: null
aliases: []
kernel:
description:
- kernel I(eki) to use for the instance
required: false
default: null
aliases: []
ramdisk:
description:
- ramdisk I(eri) to use for the instance
required: false
default: null
aliases: []
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
ec2_url:
description:
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
count:
description:
- number of instances to launch
required: False
default: 1
aliases: []
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: null
aliases: []
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
aliases: []
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
required: false
default: null
aliases: []
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
required: false
default: null
aliases: []
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
required: false
default: null
aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids:
version_added: "1.3"
description:
- list of instance ids, currently only used when state='absent'
required: false
default: null
aliases: []
state:
version_added: "1.3"
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
root_ebs_size:
version_added: "1.4"
description:
- size of the root volume in gigabytes
required: false
default: null
aliases: []
requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade, John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example
- local_action:
module: ec2
keypair: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
# Basic provisioning example with setting the root volume size to 50GB
- local_action:
module: ec2
keypair: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
root_ebs_size: 50
# Advanced example with tagging and CloudWatch
- local_action:
module: ec2
keypair: mykey
group: databases
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags: '{"db":"postgres"}'
monitoring: yes
# Multiple groups example
- local_action:
module: ec2
keypair: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags: '{"db":"postgres"}'
monitoring: yes
# VPC example
- local_action:
module: ec2
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
# Launch instances, run some tasks,
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
keypair: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }}
register: ec2
- name: Add new instance to host group
local_action: add_host hostname={{ item.public_ip }} groupname=launched
with_items: ec2.instances
- name: Wait for SSH to come up
local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: ec2.instances
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: ec2
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
'''
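# Illustration only (not one of the upstream examples above): the "id" option
# documented earlier is passed to EC2 as a client token, so re-running a play
# with the same id will not launch duplicate instances. A sketch with
# placeholder values:
#
#   - local_action:
#       module: ec2
#       keypair: mykey
#       group: webserver
#       instance_type: m1.small
#       image: ami-6e649707
#       id: sandbox-launch-001
#       count: 1
#       wait: yes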
import sys
import time
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
import boto.ec2
from boto.exception import EC2ResponseError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def get_instance_info(inst):
"""
Retrieves instance information from an instance
ID and returns it as a dictionary
"""
instance_info = {'id': inst.id,
'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name,
'public_ip': inst.ip_address,
'dns_name': inst.dns_name,
'public_dns_name': inst.public_dns_name,
'state_code': inst.state_code,
'architecture': inst.architecture,
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
'instance_type': inst.instance_type,
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
return instance_info
def boto_supports_profile_name_arg(ec2):
"""
Check if the Boto library supports the instance_profile_name argument (added in Boto 2.5.0).
ec2: authenticated ec2 connection object
Returns:
True if the Boto library accepts the instance_profile_name argument, else False
"""
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in run_instances_method.func_code.co_varnames
def create_instances(module, ec2):
"""
Creates new instances
module : AnsibleModule object
ec2: authenticated ec2 connection object
Returns:
A list of dictionaries with instance information
about the instances that were launched
"""
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
image = module.params.get('image')
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
root_ebs_size = module.params.get('root_ebs_size')
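# When root_ebs_size is given, the block below builds a block device mapping
# that points /dev/sda1 (the root device) at an EBS volume of the requested size.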
if root_ebs_size:
dev_sda1 = boto.ec2.blockdevicemapping.EBSBlockDeviceType()
dev_sda1.size = root_ebs_size
bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
bdm['/dev/sda1'] = dev_sda1
else:
bdm = None
# group_id and group_name are exclusive of each other
if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
sys.exit(1)
try:
# Here we try to lookup the group id from the security group name - if group is set.
if group_name:
grp_details = ec2.get_all_security_groups()
if type(group_name) == list:
# look up the id for each security group name in the list
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
elif type(group_name) == str:
for grp in grp_details:
if str(group_name) in str(grp):
group_id = [str(grp.id)]
group_name = [group_name]
# Now we try to lookup the group id testing if group exists.
elif group_id:
#wrap the group_id in a list if it's not one already
if type(group_id) == str:
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
grp_item = grp_details[0]
group_name = [grp_item.name]
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
# Look up any instances that match our run id.
running_instances = []
count_remaining = int(count)
if id != None:
filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
# min_count and max_count are both set to the remaining count, so the launch request is explicit: launch exactly that many instances or fail.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'client_token': id,
'min_count': count_remaining,
'max_count': count_remaining,
'monitoring_enabled': monitoring,
'placement': zone,
'placement_group': placement_group,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'subnet_id': vpc_subnet_id,
'private_ip_address': private_ip,
'user_data': user_data,
'block_device_map': bdm}
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
res = ec2.run_instances(**params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
instids = [ i.id for i in res.instances ]
while True:
try:
res.connection.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between start and get an instance
continue
else:
module.fail_json(msg = str(e))
if instance_tags:
try:
ec2.create_tags(instids, module.from_json(instance_tags))
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
this_res = []
num_running = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_running < len(instids):
res_list = res.connection.get_all_instances(instids)
if len(res_list) > 0:
this_res = res_list[0]
num_running = len([ i for i in this_res.instances if i.state=='running' ])
else:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
for inst in this_res.instances:
running_instances.append(inst)
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
"""
Terminates a list of instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: a list of instance ids to terminate
Returns a dictionary of instance information
about the instances terminated.
If none of the instances to be terminated are running,
"changed" will be set to False.
"""
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError as e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
return (changed, instance_dict_array, terminated_instance_ids)
def main():
module = AnsibleModule(
argument_spec = dict(
key_name = dict(aliases = ['keypair']),
id = dict(),
group = dict(type='list'),
group_id = dict(type='list'),
region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
image = dict(),
kernel = dict(),
count = dict(default='1'),
monitoring = dict(choices=BOOLEANS, default=False),
ramdisk = dict(),
wait = dict(choices=BOOLEANS, default=False),
wait_timeout = dict(default=300),
ec2_url = dict(),
aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key = dict(aliases=['ec2_access_key', 'access_key']),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(),
vpc_subnet_id = dict(),
private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list'),
state = dict(default='present'),
root_ebs_size = dict(default=None),
)
)
ec2_url = module.params.get('ec2_url')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
# allow eucarc environment variables to be used if ansible vars aren't set
if not ec2_url and 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not aws_secret_key:
if 'AWS_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
aws_secret_key = os.environ['EC2_SECRET_KEY']
if not aws_access_key:
if 'AWS_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
aws_access_key = os.environ['EC2_ACCESS_KEY']
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
# If we specified an ec2_url then try connecting to it
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
if module.params.get('state') == 'absent':
instance_ids = module.params.get('instance_ids')
if not isinstance(instance_ids, list):
module.fail_json(msg='termination_list needs to be a list of instances to terminate')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('key_name'):
module.fail_json(msg='key_name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
@@ -10,6 +10,14 @@
     - pre_install
     - update
+- name: common | Create the base directory for storage
+  file: >
+    path={{ storage_base_dir }}
+    state=directory
+    owner=root
+    group=root
+    mode=0755
 - name: common | Create application root
   # In the future consider making group edx r/t adm
   file: path={{ app_base_dir }} state=directory owner=root group=adm mode=2775
@@ -154,7 +154,7 @@ generic_env_config: &edxapp_generic_env
   WIKI_ENABLED: true
   SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
   SITE_NAME: $EDXAPP_SITE_NAME
-  LOG_DIR: '/mnt/logs/edx'
+  LOG_DIR: "{{ storage_base_dir }}/logs/edx"
   MEDIA_URL: $EDXAPP_MEDIA_URL
   ANALYTICS_SERVER_URL: $EDXAPP_ANALYTICS_SERVER_URL
   FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
@@ -293,75 +293,32 @@ sandbox_post_requirements: "{{ edx_platform_code_dir }}/requirements/edx-sandbo
 install_sandbox_reqs_into_regular_venv: true
 lms_debian_pkgs:
-  - apparmor-utils
-  - aspell
+  # for compiling the virtualenv
+  # (only needed if wheel files aren't available)
   - build-essential
-  - curl
-  - dvipng
-  - fabric
-  - g++
-  - gcc
-  - gfortran
-  - ghostscript
-  - github-cli
-  - graphviz
+  - s3cmd
+  - pkg-config
   - graphviz-dev
-  - gunicorn
-  - inoticoming
-  - ipython
-  - libcrypt-ssleay-perl
-  - libcurl4-openssl-dev
-  - libdigest-sha-perl
-  - libfreetype6-dev
-  - libgeos-dev
-  - libgraphviz-dev
-  - libjpeg8-dev
-  - liblapack-dev
-  - liblwp-protocol-https-perl
+  - graphviz
   - libmysqlclient-dev
-  - libnet-amazon-ec2-perl
-  - libpng12-dev
-  - libreadline-dev
-  - libreadline6-dev
-  - libssl-dev
-  - libswitch-perl
-  - libwww-perl
-  - libxml++2.6-dev
+  # for scipy, do not install
+  # libopenblas-base, it will cause
+  # problems for numpy
+  - gfortran
+  - libatlas3gf-base
+  - liblapack-dev
+  - g++
   - libxml2-dev
-  - libxml2-utils
   - libxslt1-dev
-  - lynx-cur
-  - maven2
-  - mongodb-clients
-  - mysql-client
+  # apparmor
+  - apparmor-utils
+  # misc
+  - curl
+  - ipython
   - npm
   - ntp
-  - openjdk-7-jdk
-  - openjdk-7-jre
-  - pep8
-  - perl
-  - pkg-config
-  - postfix
-  - pylint
-  - python-boto
-  - python-coverage-test-runner
-  - python-django-nose
-  - python-jenkins
-  - python-nose
-  - python-nosexcover
-  - python-numpy
-  - python-pip
-  - python-scipy
-  - rake
-  - reprepro
-  - rsyslog
-  - rubygems
-  - sqlite3
-  - super
-  - vagrant
-  - yui-compressor
-  - zip
-  - zlib1g-dev
+  # for shapely
+  - libgeos-dev
 # Ruby Specific Vars
 ruby_base: /opt/www
@@ -5,6 +5,7 @@
 ---
 - name: create cms application config
   template: src=cms.env.json.j2 dest=$app_base_dir/cms.env.json mode=640 owner=www-data group=adm
+  notify: restart edxapp
   tags:
   - cms-env
   - cms
@@ -12,6 +13,7 @@
 - name: create cms auth file
   template: src=cms.auth.json.j2 dest=$app_base_dir/cms.auth.json mode=640 owner=www-data group=adm
+  notify: restart edxapp
   tags:
   - cms-env
   - cms
@@ -5,12 +5,14 @@
 ---
 - name: create lms application config
   template: src=lms-preview.env.json.j2 dest=$app_base_dir/lms-preview.env.json mode=640 owner=www-data group=adm
+  notify: restart edxapp
   tags:
   - lms-preview
   - lms-preview-env
 - name: create lms auth file
   template: src=lms-preview.auth.json.j2 dest=$app_base_dir/lms-preview.auth.json mode=640 owner=www-data group=adm
+  notify: restart edxapp
   tags:
   - lms-preview
   - lms-preview-env
 ---
 - name: create lms application config
   template: src=lms.env.json.j2 dest=$app_base_dir/lms.env.json mode=640 owner=www-data group=adm
+  notify: restart edxapp
   tags:
   - lms
   - lms-env
@@ -8,6 +9,7 @@
 - name: create lms auth file
   template: src=lms.auth.json.j2 dest=$app_base_dir/lms.auth.json mode=640 owner=www-data group=adm
+  notify: restart edxapp
   tags:
   - lms
   - lms-env
---
edxlocal_debian_pkgs:
- pymongo
- python-mysqldb
- mysql-server-5.5
- postfix
@@ -11,14 +11,8 @@
 # http://downloads.mysql.com/archives/mysql-5.1/mysql-5.1.62.tar.gz
 #
 ---
-- name: edxlocal | install python-pymongo (req for ansible)
-  pip: name=pymongo
-- name: edxlocal | install python-mysqldb (req for ansible)
-  apt: pkg=python-mysqldb state=present
-- name: edxlocal | install mysql server and recommends
-  apt: pkg=mysql-server-5.5 state=present install_recommends=yes
+- name: edxlocal| install packages needed for single server
+  apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
 - name: edxlocal | create a database for edxapp
   mysql_db: >
@@ -38,17 +32,32 @@
     state=present
     encoding=utf8
+- name: edxlocal | add the mongodb signing key
+  apt_key: >
+    id=7F0CEB10
+    url=http://docs.mongodb.org/10gen-gpg-key.asc
+    state=present
+- name: edxlocal | add the mongodb repo to the sources list
+  apt_repository: >
+    repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen'
+    state=present
 - name: edxlocal | install mongo server and recommends
-  apt: pkg=mongodb-server state=present install_recommends=yes
+  apt: >
+    pkg=mongodb-10gen
+    state=present
+    install_recommends=yes
+    update_cache=yes
 - name: edxlocal | stop mongo service
   service: name=mongodb state=stopped
-- name: edxlocal | move mongodb to /mnt
-  command: mv /var/lib/mongodb /mnt/. creates=/mnt/mongodb
+- name: edxlocal | move mongodb to {{ storage_base_dir }}
+  command: mv /var/lib/mongodb {{ storage_base_dir }}/. creates={{ storage_base_dir }}/mongodb
 - name: edxlocal | create mongodb symlink
-  file: src=/mnt/mongodb dest=/var/lib/mongodb state=link
+  file: src={{ storage_base_dir }}/mongodb dest=/var/lib/mongodb state=link
 - name: edxlocal | start mongo service
   service: name=mongodb state=started
@@ -71,4 +80,4 @@
     state=present
 - name: edxlocal | install memcached
   apt: pkg=memcached state=present
\ No newline at end of file
---
# gh_users
#
# Creates OS accounts for users based on their github credential.
# Takes a list gh_users as a parameter which is a list of users
#
# roles:
# - role: gh_users
# gh_users:
# - user: github_admin_username
# groups:
# - adm
# - user: another_github_username
# groups: !!null
- fail: msg="gh_users list must be defined for this parameterized role"
when: not gh_users
- name: gh_users | create local user for github user
user:
name={{ item.user }}
groups={{ ",".join(item.groups) }}
shell=/bin/bash
with_items: gh_users
- name: gh_users | create .ssh directory
file:
path=/home/{{ item.user }}/.ssh state=directory mode=0700
owner={{ item.user }} group={{ item.user }}
with_items: gh_users
- name: gh_users | copy github key[s] to .ssh/authorized_keys
get_url:
url=https://github.com/{{ item.user }}.keys
dest=/home/{{ item.user }}/.ssh/authorized_keys mode=0600
owner={{ item.user }} group={{ item.user }}
with_items: gh_users
-jenkins_home: /mnt/jenkins
+jenkins_home: "{{ storage_base_dir }}/jenkins"
 jenkins_user: "jenkins"
 jenkins_group: "edx"
 jenkins_server_name: "jenkins.testeng.edx.org"
@@ -34,7 +34,7 @@
   file: path={{ jenkins_home }} recurse=yes state=directory
         owner={{ jenkins_user }} group={{ jenkins_group }}
-# Symlink /var/lib/jenkins to /mnt/jenkins
+# Symlink /var/lib/jenkins to {{ storage_base_dir }}/jenkins
 # since Jenkins will expect its files to be in /var/lib/jenkins
 - name: jenkins_master | Symlink /var/lib/jenkins
   file: src={{ jenkins_home }} dest=/var/lib/jenkins state=link
 ---
-jenkins_workspace: /mnt/jenkins
+jenkins_workspace: "{{ storage_base_dir }}/jenkins"
 jenkins_phantomjs_url: https://phantomjs.googlecode.com/files/phantomjs-1.9.1-linux-x86_64.tar.bz2
 jenkins_phantomjs_archive: phantomjs-1.9.1-linux-x86_64.tar.bz2
 jenkins_phantomjs_folder: phantomjs-1.9.1-linux-x86_64
@@ -48,10 +48,10 @@ jscover_url: "http://superb-dca2.dl.sourceforge.net/project/jscover/JSCover-1.0.
 jscover_version: "1.0.2"
 # Mongo config
-mongo_dir: "/mnt/mongodb"
-mongo_log_dir: "/mnt/logs/mongodb"
+mongo_dir: "{{ storage_base_dir }}/mongodb"
+mongo_log_dir: "{{ storage_base_dir }}/logs/mongodb"
 # URL of S3 bucket containing pre-compiled Python packages
 python_pkg_url: "https://s3.amazonaws.com/jenkins.python_pkgs"
-python_download_dir: "/mnt/python_pkgs"
-python_virtualenv: "/mnt/venv"
+python_download_dir: "{{ storage_base_dir }}/python_pkgs"
+python_virtualenv: "{{ storage_base_dir }}/venv"
 ---
-# Configure Mongo to use /mnt so we don't
+# Configure Mongo to use {{ storage_base_dir }} so we don't
 # run out of disk space
 - name: jenkins_worker | Stop mongo service
   service: name=mongodb state=stopped
# Launches an ec2 instance and blocks until the instance is up
# adds it to the host group
- name: launch_ec2 | Launch ec2 instance
local_action:
module: ec2
keypair: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ ami }}"
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
root_ebs_size: "{{ root_ebs_size }}"
register: ec2
- name: launch_ec2 | Add DNS name
local_action:
module: route53
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name studio
local_action:
module: route53
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "studio.{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add DNS name preview
local_action:
module: route53
overwrite: yes
command: create
zone: "{{ dns_zone }}"
type: CNAME
ttl: 300
record: "preview.{{ dns_name }}.{{ dns_zone }}"
value: "{{ item.public_dns_name }}"
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Add new instance to host group
local_action: >
add_host
hostname={{ item.public_ip }}
groupname=launched
with_items: "{{ ec2.instances }}"
- name: launch_ec2 | Wait for SSH to come up
local_action: >
wait_for
host={{ item.public_dns_name }}
state=started
port=22
delay=60
timeout=320
with_items: "{{ ec2.instances }}"
---
instance_tags: '{"from_ansible": "true"}'
# Launches an ec2 instance and blocks until the instance is up
# adds it to the host group
- name: launch_instance | Launch instance
local_action:
module: ec2
keypair: "{{keypair}}"
group: "{{security_group}}"
instance_type: "{{instance_type}}"
image: "{{image}}"
wait: true
region: "{{region}}"
instance_tags: "{{instance_tags}}"
register: ec2
- name: launch_instance | Add new instance to host group
local_action: add_host hostname=${item.public_ip} groupname=launched
with_items: ${ec2.instances}
- name: launch_instance | Wait for SSH to come up
local_action: wait_for host=${item.public_dns_name} port=22 delay=60 timeout=320 state=started
with_items: ${ec2.instances}
@@ -12,12 +12,12 @@
   service: name=mongodb state=stopped
   tags: mongo
-- name: mongo | move mongodb to /mnt
-  command: mv /var/lib/mongodb /mnt/. creates=/mnt/mongodb
+- name: mongo | move mongodb to {{ storage_base_dir }}
+  command: mv /var/lib/mongodb {{ storage_base_dir }}/. creates={{ storage_base_dir }}/mongodb
   tags: mongo
 - name: mongo | create mongodb symlink
-  file: src=/mnt/mongodb dest=/var/lib/mongodb state=link
+  file: src={{ storage_base_dir }}/mongodb dest=/var/lib/mongodb state=link
   tags: mongo
 - name: mongo | copy configuration template
@@ -38,7 +38,7 @@ NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: "guido"
 NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: "vanrossum"
 NOTIFIER_CELERY_BROKER_URL: "django://"
-NOTIFIER_SUPERVISOR_LOG_DEST: "/mnt/logs/supervisor"
+NOTIFIER_SUPERVISOR_LOG_DEST: "{{ storage_base_dir }}/logs/supervisor"
 NOTIFER_REQUESTS_CA_BUNDLE: "/etc/ssl/certs/ca-certificates.crt"
@@ -23,7 +23,7 @@
 # Assuming the following config
 #
 # my_role_s3fs_mounts:
-#   - { bucket: "my_bucket", mount_point: "/mnt/s3/my_bucket", owner: "root", group: "adm", mode: "0755" }
+#   - { bucket: "my_bucket", mount_point: "{{ storage_base_dir }}/s3/my_bucket", owner: "root", group: "adm", mode: "0755" }
 #
 # The role would need to include tasks like the following
 #
@@ -89,4 +89,4 @@
   shell:
     /usr/bin/make install
     chdir={{ s3fs_temp_dir }}/{{ s3fs_unzip_dest }}
\ No newline at end of file
@@ -45,7 +45,7 @@ xqueue_env_config:
   XQUEUE_WORKERS_PER_QUEUE: 12
   LOGGING_ENV : $XQUEUE_LOGGING_ENV
   SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER
-  LOG_DIR : '/mnt/logs/xqueue'
+  LOG_DIR : "{{ storage_base_dir }}/logs/xqueue"
   RABBIT_HOST : $XQUEUE_RABBIT_HOSTNAME
   S3_BUCKET : $XQUEUE_S3_BUCKET
   S3_PATH_PREFIX: $XQUEUE_S3_PATH_PREFIX
@@ -76,75 +76,22 @@ xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
 # copied from the LMS role for now since there is a lot
 # of overlap
 xqueue_debian_pkgs:
-  - apparmor-utils
-  - aspell
+  # for compiling the virtualenv
+  # (only needed if wheel files aren't available)
   - build-essential
-  - curl
-  - dvipng
-  - fabric
-  - g++
-  - gcc
-  - gfortran
-  - ghostscript
-  - git
-  - github-cli
-  - graphviz
+  - s3cmd
+  - pkg-config
   - graphviz-dev
-  - gunicorn
-  - inoticoming
-  - ipython
-  - libcrypt-ssleay-perl
-  - libcurl4-openssl-dev
-  - libdigest-sha-perl
-  - libfreetype6-dev
-  - libgeos-dev
-  - libgraphviz-dev
-  - libjpeg8-dev
-  - liblapack-dev
-  - liblwp-protocol-https-perl
+  - graphviz
   - libmysqlclient-dev
-  - libnet-amazon-ec2-perl
-  - libpng12-dev
-  - libreadline-dev
-  - libreadline6-dev
-  - libssl-dev
-  - libswitch-perl
-  - libwww-perl
-  - libxml++2.6-dev
-  - libxml2-dev
-  - libxml2-utils
-  - libxslt1-dev
-  - maven2
-  - mongodb-clients
-  - mysql-client
+  # apparmor
+  - apparmor-utils
+  # misc
+  - curl
+  - ipython
   - npm
   - ntp
-  - openjdk-7-jdk
-  - openjdk-7-jre
-  - pep8
-  - perl
-  - pkg-config
-  - postfix
-  - pylint
-  - python-boto
-  - python-coverage-test-runner
-  - python-django-nose
-  - python-jenkins
-  - python-nose
-  - python-nosexcover
-  - python-numpy
-  - python-pip
-  - python-scipy
-  - python-mysqldb
-  - rake
-  - reprepro
-  - rsyslog
-  - rubygems
-  - sqlite3
-  - super
-  - vagrant
-  - yui-compressor
-  - zip
-  - zlib1g-dev
+  # for shapely
+  - libgeos-dev
 # Needed to be able to create the xqueue mysqldb.
   - python-mysqldb
@@ -5,12 +5,17 @@ RABBIT_RUN_URL: ''
 RABBIT_GRADER_ROOT: ''
 RABBIT_LOGGING_ENV: 'sandbox'
 RABBIT_SYSLOG_SERVER: ''
+# by default do not check out the content
+# repo needed on the xserver for grading
+# python submissions, TODO: replace with an open
+# source repo
+XSERVER_GRADER_CHECKOUT: False
 xserver_env_config:
   RUN_URL: $RABBIT_RUN_URL
   GRADER_ROOT: $RABBIT_GRADER_ROOT
   LOGGING_ENV: $RABBIT_LOGGING_ENV
-  LOG_DIR: '/mnt/logs/xserver'
+  LOG_DIR: "{{ storage_base_dir }}/logs/xserver"
   SYSLOG_SERVER: $RABBIT_SYSLOG_SERVER
   SANDBOX_PYTHON: '/opt/edx_apparmor_sandbox/bin/python'
@@ -32,7 +32,7 @@
   git: dest={{xserver_grader_dir}} repo={{xserver_grader_source}} version={{xserver_grader_version}}
   environment:
     GIT_SSH: /tmp/git_ssh.sh
-  when: c_skip_grader_checkout is not defined or c_skip_grader_checkout==False
+  when: XSERVER_GRADER_CHECKOUT
   tags:
   - deploy
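For context, a sketch of the extra vars a deployment with access to a grader repository might pass to turn the checkout back on; the repository URL is a placeholder (not from this commit), while the variable names come from the git task above:
---
XSERVER_GRADER_CHECKOUT: True
xserver_grader_source: "git@github.com:example-org/xserver-graders.git"
xserver_grader_version: master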
#!/usr/bin/env bash
# Ansible configuration/deploy wrapper script that
# assumes the following parameters set
# as environment variables
#
# - dns_name - REQUIRED
# - dns_zone
# - edxapp_version
# - forum_version
# - xqueue_version
# - xserver_version
# - ora_version
# - ease_version
# - deploy
# - keypair
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
if [[ -z $dns_name ]]; then
echo "The hostname is required to know what machine to configure"
exit 1
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
extra_vars="/var/tmp/extra-vars-$$.yml"
cat << EOF > $extra_vars
---
EDXAPP_PREVIEW_LMS_BASE: preview.${dns_name}.${dns_zone}
EDXAPP_LMS_BASE: ${dns_name}.${dns_zone}
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
XSERVER_GRADER_CHECKOUT: False
c_skip_grader_checkout: True
edx_platform_commit: $edxapp_version
forum_version: $forum_version
xqueue_version: $xqueue_version
xserver_version: $xserver_version
ora_version: $ora_version
ease_version: $ease_version
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
EOF
cat $extra_vars
cd playbooks/edx-east
./ec2.py --refresh
ansible-playbook -vvv $playbook -i ./ec2.py -e "@$extra_vars" --user ubuntu --tags deploy
#!/usr/bin/env bash
# Ansible provisioning wrapper script that
# assumes the following parameters set
# as environment variables
#
# - github_username
# - server_type
# - instance_type
# - region
# - aws_account
# - keypair
# - ami
# - root_ebs_size
# - security_group
# - dns_zone
# - dns_name
# - environment
# - name_tag
export BOTO_CONFIG=/var/lib/jenkins/${aws_account}.boto
function ascii_convert {
echo $1 | iconv -f utf8 -t ascii//TRANSLIT//IGNORE
}
# remove non-ascii chars from build user vars
BUILD_USER_LAST_NAME=$(ascii_convert $BUILD_USER_LAST_NAME)
BUILD_USER_FIRST_NAME=$(ascii_convert $BUILD_USER_FIRST_NAME)
BUILD_USER_ID=$(ascii_convert $BUILD_USER_ID)
BUILD_USER=$(ascii_convert $BUILD_USER)
if [[ -z $github_username ]]; then
github_username=$BUILD_USER_ID
fi
if [[ ! -f $BOTO_CONFIG ]]; then
echo "AWS credentials not found for $aws_account"
exit 1
fi
extra_vars="/var/tmp/extra-vars-$$.yml"
if [[ -z $dns_name ]]; then
dns_name=$github_username
fi
if [[ -z $name_tag ]]; then
name_tag=${github_username}-${environment}
fi
if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-c97727a0"
elif [[ $server_type == "ubuntu_12.04" ]]; then
ami="ami-d0f89fb9"
fi
fi
if [[ -z $instance_type ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
instance_type="m1.medium"
elif [[ $server_type == "ubuntu_12.04" ]]; then
instance_type="m1.small"
fi
fi
cat << EOF > $extra_vars
---
EDXAPP_PREVIEW_LMS_BASE: preview.${dns_name}.${dns_zone}
EDXAPP_LMS_BASE: ${dns_name}.${dns_zone}
EDXAPP_LMS_PREVIEW_NGINX_PORT: 80
EDXAPP_CMS_NGINX_PORT: 80
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
dns_name: $dns_name
keypair: $keypair
instance_type: $instance_type
security_group: $security_group
ami: $ami
region: $region
instance_tags: '{"environment": "$environment", "github_username": "$github_username", "Name": "$name_tag", "source": "jenkins", "owner": "$BUILD_USER"}'
root_ebs_size: $root_ebs_size
gh_users:
- user: jarv
groups:
- adm
- user: feanil
groups:
- adm
- user: e0d
groups:
- adm
- user: ${github_username}
groups:
- adm
dns_zone: $dns_zone
EOF
cat $extra_vars
cd playbooks/edx-east
# run the tasks to launch an ec2 instance from AMI
ansible-playbook -vvvv edx_provision.yml -i inventory.ini -e "@${extra_vars}" --user ubuntu
# run tasks to update application config files for the sandbox hostname
if [[ $server_type == "full_edx_installation" ]]; then
ansible-playbook -vvvv edx_continuous_integration.yml -i "${dns_name}.${dns_zone}," -e "@${extra_vars}" --user ubuntu --tags "lms-env,cms-env,lms-preview-env"
fi
rm -f "$extra_vars"
@@ -18,7 +18,7 @@ Vagrant.configure("2") do |config|
     # point Vagrant at the location of your playbook you want to run
     ansible.playbook = "../../playbooks/vagrant-fullstack.yml"
     ansible.inventory_path = "../../playbooks/vagrant/inventory.ini"
-    ansible.extra_vars = { c_skip_grader_checkout: 'True' }
+    ansible.extra_vars = { XSERVER_GRADER_CHECKOUT: 'False' }
     ansible.verbose = "extra"
   end
 end