Commit 7bc2c211 by Feanil Patel

Merge pull request #775 from edx/feanil/release-gugelhupf

Feanil/release gugelhupf
parents f18833c7 416ac5c9
# This is a utility play to initialize the mysql dbs for the following
# roles:
# - edxapp
# - xqueue
# - ora
# - discern
#
# The mysql root user MUST be passed in as extra vars for
# at least one of the databases.
#
# The environment and deployment must be passed in as COMMON_ENVIRONMENT
# and COMMON_DEPLOYMENT. These two vars should be set in the secret
# var file for the corresponding VPC stack.
#
# Example invocation:
#
# Create the databases for edxapp and xqueue:
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root xqueue_db_root_user=root"
#
#
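# To create all four databases in one run, the remaining root users can be
# passed the same way (illustrative invocation; adjust the root user names
# to your environment):
#
# ansible-playbook -i localhost, create_db_users.yml -e@/path/to/secrets.yml -e "edxapp_db_root_user=root xqueue_db_root_user=root ora_db_root_user=root discern_db_root_user=root"
#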
- name: Create all databases on the edX stack
hosts: all
gather_facts: False
vars:
# These should be set to the root user for the
# db; if left 'None' the database will be skipped
edxapp_db_root_user: 'None'
xqueue_db_root_user: 'None'
ora_db_root_user: 'None'
discern_db_root_user: 'None'
vars_prompt:
# passwords use vars_prompt so they aren't in the
# bash history
- name: "edxapp_db_root_pass"
prompt: "Password for edxapp root mysql user (enter to skip)"
default: "None"
private: True
- name: "xqueue_db_root_pass"
prompt: "Password for xqueue root mysql user (enter to skip)"
default: "None"
private: True
- name: "ora_db_root_pass"
prompt: "Password for ora root mysql user (enter to skip)"
default: "None"
private: True
- name: "discern_db_root_pass"
prompt: "Password for discern root mysql user (enter to skip)"
default: "None"
private: True
tasks:
- fail: msg="COMMON_ENVIRONMENT and COMMON_DEPLOYMENT need to be defined to use this play"
when: COMMON_ENVIRONMENT is not defined or COMMON_DEPLOYMENT is not defined
- name: create mysql databases for the edX stack
mysql_db: >
db={{ item[0] }}{{ item[1].db_name }}
state=present
login_host={{ item[1].db_host }}
login_user={{ item[1].db_user }}
login_password={{ item[1].db_pass }}
encoding=utf8
when: item[1].db_user != 'None'
with_nested:
- ['{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_', '']
-
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ edxapp_db_root_pass }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ xqueue_db_root_pass }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ ora_db_root_pass }}"
- name: assign mysql user permissions for db user
mysql_user:
name: "{{ item.db_user_to_modify }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
# The second call to mysql_user needs to have append_privs set to
# yes, otherwise it will overwrite the privileges granted by the previous run.
# This means that both tasks will report changed on every ansible
# run
- name: assign mysql user permissions for db test user
mysql_user:
append_privs: yes
name: "{{ item.db_user_to_modify }}"
priv: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ item.db_name }}.*:ALL"
password: "{{ item.db_user_to_modify_pass }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
host: '%'
when: item.db_user != 'None'
with_items:
# These defaults are needed, otherwise ansible will throw
# variable undefined errors when they are not defined
# in secret vars
- db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user|default('None') }}"
db_pass: "{{ edxapp_db_root_pass|default('None') }}"
db_user_to_modify: "{{ EDXAPP_MYSQL_USER }}"
db_user_to_modify_pass: "{{ EDXAPP_MYSQL_PASSWORD }}"
- db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_user: "{{ xqueue_db_root_user|default('None') }}"
db_pass: "{{ xqueue_db_root_pass|default('None') }}"
db_user_to_modify: "{{ XQUEUE_MYSQL_USER }}"
db_user_to_modify_pass: "{{ XQUEUE_MYSQL_PASSWORD }}"
- db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_user: "{{ ora_db_root_user|default('None') }}"
db_pass: "{{ ora_db_root_pass|default('None') }}"
db_user_to_modify: "{{ ORA_MYSQL_USER }}"
db_user_to_modify_pass: "{{ ORA_MYSQL_PASSWORD }}"
- name: Deploy demo course
hosts: all
sudo: True
gather_facts: True
vars:
enable_datadog: True
enable_splunkforwarder: True
vars_files:
- ["{{ secure_vars }}", "dummy.yml"]
roles:
- demo
- role: datadog
when: enable_datadog
- role: splunkforwarder
when: enable_splunkforwarder
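# The datadog and splunkforwarder roles are gated on the flags above, so a run
# can skip them by overriding the vars (illustrative invocation; the playbook
# name is assumed):
#
# ansible-playbook -i <inventory> demo.yml -e '{"enable_datadog": false, "enable_splunkforwarder": false}'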
- hosts: tag_play_commoncluster:&tag_environment_stage:&tag_deployment_edx
sudo: True
vars_files:
- "{{ secure_dir }}/vars/common/common.yml"
- "{{ secure_dir }}/vars/stage/stage-edx.yml"
roles:
- common
- gh_users
- oraclejdk
- elasticsearch
......@@ -13,14 +13,19 @@ COMMON_LOG_DIR: "{{ COMMON_DATA_DIR }}/log"
COMMON_BIN_DIR: "{{ COMMON_BASE_DIR }}/bin"
COMMON_CFG_DIR: "{{ COMMON_BASE_DIR }}/etc"
COMMON_ENV_NAME: 'default_env'
COMMON_ENV_TYPE: 'default_type'
COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
COMMON_HOSTNAME: !!null
# Set to true to customize DNS search domains
COMMON_CUSTOM_DHCLIENT_CONFIG: false
# uncomment and specify your domains.
# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal","example.com"]
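# For example, a secure var file might enable a custom search list like this
# (domains below are placeholders):
#
# COMMON_CUSTOM_DHCLIENT_CONFIG: true
# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal", "example.com"]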
common_debian_pkgs:
- ntp
- ack-grep
......
......@@ -70,3 +70,7 @@
shell: >
hostname -F /etc/hostname
when: COMMON_HOSTNAME and (etc_hosts.changed or etc_hostname.changed)
- name: update /etc/dhcp/dhclient.conf
template: src=etc/dhcp/dhclient.conf.j2 dest=/etc/dhcp/dhclient.conf
when: COMMON_CUSTOM_DHCLIENT_CONFIG
\ No newline at end of file
# {{ ansible_managed }}
# Configuration file for /sbin/dhclient, which is included in Debian's
# dhcp3-client package.
#
# This is a sample configuration file for dhclient. See dhclient.conf's
# man page for more information about the syntax of this file
# and a more comprehensive list of the parameters understood by
# dhclient.
#
# Normally, if the DHCP server provides reasonable information and does
# not leave anything out (like the domain name, for example), then
# few changes must be made to this file, if any.
#
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
send host-name "<hostname>";
#send dhcp-client-identifier 1:0:a0:24:ab:fb:9c;
#send dhcp-lease-time 3600;
#supersede domain-name "fugue.com home.vix.com";
#prepend domain-name-servers 127.0.0.1;
request subnet-mask, broadcast-address, time-offset, routers,
domain-name, domain-name-servers, domain-search, host-name,
netbios-name-servers, netbios-scope, interface-mtu,
rfc3442-classless-static-routes, ntp-servers,
dhcp6.domain-search, dhcp6.fqdn,
dhcp6.name-servers, dhcp6.sntp-servers;
#require subnet-mask, domain-name-servers;
#timeout 60;
#retry 60;
#reboot 10;
#select-timeout 5;
#initial-interval 2;
#script "/etc/dhcp3/dhclient-script";
#media "-link0 -link1 -link2", "link0 link1";
#reject 192.33.137.209;
#alias {
# interface "eth0";
# fixed-address 192.5.5.213;
# option subnet-mask 255.255.255.255;
#}
#lease {
# interface "eth0";
# fixed-address 192.33.137.200;
# medium "link0 link1";
# option host-name "andare.swiftmedia.com";
# option subnet-mask 255.255.255.0;
# option broadcast-address 192.33.137.255;
# option routers 192.33.137.250;
# option domain-name-servers 127.0.0.1;
# renew 2 2000/1/12 00:00:01;
# rebind 2 2000/1/12 00:00:01;
# expire 2 2000/1/12 00:00:01;
#}
interface "eth0" {
prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{search}}"{%- if not loop.last -%},{%- endif -%}
{%- endfor -%};
}
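# For reference, with COMMON_DHCLIENT_DNS_SEARCH set to
# ["ec2.internal", "example.com"] the block above renders roughly as:
#
#   interface "eth0" {
#   prepend domain-search "ec2.internal","example.com";
#   }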
......@@ -26,3 +26,8 @@ demo_test_users:
- email: 'verified@example.com'
mode: verified
password: edx
demo_edxapp_user: 'edxapp'
demo_edxapp_venv_bin: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/venvs/{{demo_edxapp_user}}/bin'
demo_edxapp_course_data_dir: '{{COMMON_DATA_DIR}}/{{demo_edxapp_user}}/data'
demo_edxapp_code_dir: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/edx-platform'
---
dependencies:
- common
......@@ -2,41 +2,41 @@
- name: check out the demo course
git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
sudo_user: "{{ edxapp_user }}"
sudo_user: "{{ demo_edxapp_user }}"
register: demo_checkout
- name: import demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ edxapp_code_dir }}
{{ demo_edxapp_venv_bin }}/python ./manage.py cms --settings=aws import {{ demo_edxapp_course_data_dir }} {{ demo_code_dir }}
chdir={{ demo_edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: create some test users and enroll them in the course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e {{ item.email }} -p {{ item.password }} -m {{ item.mode }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
with_items: demo_test_users
when: demo_checkout.changed
- name: create staff user
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms create_user -e staff@example.com -p edx -s -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
sudo_user: "{{ common_web_user }}"
when: demo_checkout.changed
- name: add test users to the certificate whitelist
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws --service-variant lms cert_whitelist -a {{ item.email }} -c {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
- name: seed the forums for the demo course
shell: >
{{ edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ edxapp_code_dir }}
{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings=aws seed_permissions_roles {{ demo_course_id }}
chdir={{ demo_edxapp_code_dir }}
with_items: demo_test_users
when: demo_checkout.changed
......@@ -33,6 +33,6 @@
- name: create demo app and data dirs
file: >
path="{{ demo_app_dir }}" state=directory
owner="{{ edxapp_user }}" group="{{ common_web_group }}"
owner="{{ demo_edxapp_user }}" group="{{ common_web_group }}"
- include: deploy.yml tags=deploy
......@@ -6,8 +6,8 @@ DISCERN_BROKER_URL: ""
DISCERN_RESULT_BACKEND: ""
DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID: ""
DISCERN_MYSQL_DB_NAME: 'discern'
DISCERN_MYSQL_USER: 'root'
DISCERN_MYSQL_PASSWORD: ''
DISCERN_MYSQL_USER: 'discern001'
DISCERN_MYSQL_PASSWORD: 'password'
DISCERN_MYSQL_HOST: 'localhost'
DISCERN_MYSQL_PORT: '3306'
DISCERN_LANG: "en_US.UTF-8"
......
......@@ -30,8 +30,8 @@ EDXAPP_MONGO_USER: 'edxapp'
EDXAPP_MONGO_DB_NAME: 'edxapp'
EDXAPP_MYSQL_DB_NAME: 'edxapp'
EDXAPP_MYSQL_USER: 'root'
EDXAPP_MYSQL_PASSWORD: ''
EDXAPP_MYSQL_USER: 'edxapp001'
EDXAPP_MYSQL_PASSWORD: 'password'
EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
......@@ -131,6 +131,13 @@ EDXAPP_SANDBOX_ENFORCE: true
EDXAPP_INCLUDE_AUTOMATOR_ROLE: false
EDXAPP_AUTOMATOR_AUTHORIZED_KEYS: []
EDXAPP_USE_GIT_IDENTITY: false
# Example: "{{ secure_dir }}/files/git-identity"
EDXAPP_LOCAL_GIT_IDENTITY: !!null
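# To enable an authenticated checkout of the platform, a secure var file might
# set the pair of vars like this (the key path shown is illustrative):
# EDXAPP_USE_GIT_IDENTITY: true
# EDXAPP_LOCAL_GIT_IDENTITY: "{{ secure_dir }}/files/git-identity"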
# Configuration for database migration
EDXAPP_TEST_MIGRATE_DB_NAME: "{{ COMMON_ENVIRONMENT }}_{{ COMMON_DEPLOYMENT }}_test_{{ EDXAPP_MYSQL_DB_NAME }}"
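# For example, with COMMON_ENVIRONMENT 'default_env', COMMON_DEPLOYMENT
# 'default_deployment' and EDXAPP_MYSQL_DB_NAME 'edxapp' this resolves to
# 'default_env_default_deployment_test_edxapp'.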
#-------- Everything below this line is internal to the role ------------
# Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -154,6 +161,8 @@ edxapp_staticfile_dir: "{{ edxapp_data_dir }}/staticfiles"
edxapp_course_data_dir: "{{ edxapp_data_dir }}/data"
edxapp_upload_dir: "{{ edxapp_data_dir }}/uploads"
edxapp_theme_dir: "{{ edxapp_data_dir }}/themes"
edxapp_git_identity: "{{ edxapp_app_dir }}/{{ EDXAPP_LOCAL_GIT_IDENTITY|basename }}"
edxapp_git_ssh: "/tmp/edxapp_git_ssh.sh"
edxapp_pypi_local_mirror: "http://localhost:{{ devpi_port }}/root/pypi/+simple"
edxapp_workers:
- queue: low
......@@ -493,6 +502,10 @@ edxapp_debian_pkgs:
- libgeos-dev
# i18n
- gettext
# Pillow (PIL Fork) Dependencies
# Needed by the CMS to manipulate images.
- libjpeg8-dev
- libpng12-dev
# Ruby Specific Vars
edxapp_ruby_version: "1.9.3-p374"
......@@ -509,4 +522,4 @@ worker_django_settings_module: 'aws'
# commands listed here will be symlinked to ~/bin/ for
# the automator user.
edxapp_automated_rbash_links:
- /usr/bin/sudo
\ No newline at end of file
- /usr/bin/sudo
......@@ -7,11 +7,32 @@
owner={{ edxapp_user }} group={{ common_web_user }}
mode=0644
# Optional auth for git
- name: create ssh script for git (not authenticated)
template: >
src=git_ssh_noauth.sh.j2 dest={{ edxapp_git_ssh }}
owner={{ edxapp_user }} mode=750
when: not EDXAPP_USE_GIT_IDENTITY
- name: create ssh script for git (authenticated)
template: >
src=git_ssh_auth.sh.j2 dest={{ edxapp_git_ssh }}
owner={{ edxapp_user }} mode=750
when: EDXAPP_USE_GIT_IDENTITY
- name: install read-only ssh key
copy: >
src={{ EDXAPP_LOCAL_GIT_IDENTITY }} dest={{ edxapp_git_identity }}
force=yes owner={{ edxapp_user }} mode=0600
when: EDXAPP_USE_GIT_IDENTITY
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
register: chkout
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
......@@ -27,10 +48,16 @@
git: dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
environment:
GIT_SSH: "{{ edxapp_git_ssh }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
- name: remove read-only ssh key
file: path={{ edxapp_git_identity }} state=absent
when: EDXAPP_USE_GIT_IDENTITY
- name: create checksum for requirements, package.json and Gemfile
shell: >
/usr/bin/md5sum {{ " ".join(edxapp_chksum_req_files) }} 2>/dev/null > /var/tmp/edxapp.req.new
......@@ -237,7 +264,6 @@
# root access.
- name: give other read permissions to the virtualenv
command: chmod -R o+r "{{ edxapp_venv_dir }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
......
......@@ -44,35 +44,75 @@
when: celery_worker is defined and not devstack
sudo_user: "{{ supervisor_user }}"
# Gather assets using rake if possible
- name: gather {{ item }} static assets with rake
# Fake syncdb with migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to
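# A fake migration run might be triggered like this (illustrative invocation;
# the playbook name is assumed):
#
#   ansible-playbook -i <inventory> edxapp.yml -e "migrate_db=yes fake_migrations=true"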
- name: syncdb and migrate
shell: >
SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets:aws
executable=/bin/bash
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ edxapp_user }}"
when: celery_worker is not defined and not devstack and item != "lms-preview"
with_items: service_variants_enabled
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
environment: "{{ edxapp_environment }}"
# Regular syncdb with migrate
- name: syncdb and migrate
shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin}}/django-admin.py syncdb --migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes"
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes"
environment:
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Fake migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which
# the default application user has full write access to
- name: db migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is defined and migrate_only is defined and migrate_only|lower == "yes"
sudo_user: "{{ edxapp_user }}"
environment:
DB_MIGRATION_NAME: "{{ EDXAPP_TEST_MIGRATE_DB_NAME }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Regular migrations
- name: db migrate
shell: SERVICE_VARIANT=lms {{ edxapp_venv_bin }}/django-admin.py migrate --noinput --settings=lms.envs.aws --pythonpath={{ edxapp_code_dir }}
when: migrate_only is defined and migrate_only|lower == "yes"
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_only is defined and migrate_only|lower == "yes"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Gather assets using rake if possible
- name: gather {{ item }} static assets with rake
shell: >
SERVICE_VARIANT={{ item }} rake {{ item }}:gather_assets:aws
executable=/bin/bash
chdir={{ edxapp_code_dir }}
sudo_user: "{{ edxapp_user }}"
when: celery_worker is not defined and not devstack and item != "lms-preview"
with_items: service_variants_enabled
notify:
- "restart edxapp"
- "restart edxapp_workers"
environment: "{{ edxapp_environment }}"
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ edxapp_git_identity }} "$@"
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no "$@"
......@@ -13,29 +13,60 @@
- name: install packages needed for single server
apt: pkg={{','.join(edxlocal_debian_pkgs)}} install_recommends=yes state=present
- name: setup the edxapp db user
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
password={{ EDXAPP_MYSQL_PASSWORD }}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
- name: create a database for edxapp
mysql_db: >
db=edxapp
state=present
encoding=utf8
when: EDXAPP_MYSQL_USER is defined
- name: setup the xqueue db user
mysql_user: >
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
when: XQUEUE_MYSQL_USER is defined and not devstack
- name: create a database for xqueue
mysql_db: >
db=xqueue
state=present
encoding=utf8
when: XQUEUE_MYSQL_USER is defined and not devstack
- name: setup the ora db user
mysql_user: >
name={{ ORA_MYSQL_USER }}
password={{ ORA_MYSQL_PASSWORD }}
priv='{{ORA_MYSQL_DB_NAME}}.*:ALL'
- name: create a database for ora
mysql_db: >
db=ora
state=present
encoding=utf8
when: ORA_MYSQL_USER is defined
- name: setup the discern db user
mysql_user: >
name={{ DISCERN_MYSQL_USER }}
password={{ DISCERN_MYSQL_PASSWORD }}
priv='{{DISCERN_MYSQL_DB_NAME}}.*:ALL'
when: DISCERN_MYSQL_USER is defined and not devstack
- name: create a database for discern
mysql_db: >
db=discern
state=present
encoding=utf8
when: DISCERN_MYSQL_USER is defined and not devstack
- name: install memcached
......
---
elasticsearch_version: "0.90.2"
elasticsearch_sha: "397227ce37f616734f39f6e93539c9eaa82bec9"
elasticsearch_app_dir: "{{ COMMON_APP_DIR }}/elasticsearch"
elasticsearch_data_dir: "{{ COMMON_DATA_DIR }}/elasticsearch"
elasticsearch_log_dir: "{{ COMMON_LOG_DIR }}/elasticsearch"
elasticsearch_cfg_dir: "{{ COMMON_CFG_DIR }}/elasticsearch"
elasticsearch_version: "0.90.11"
elasticsearch_sha: "8e81388d0ba7e427b42514d96e25ba6499024c24"
elasticsearch_file: "elasticsearch-{{ elasticsearch_version }}.deb"
elasticsearch_url: "https://download.elasticsearch.org/elasticsearch/elasticsearch/{{ elasticsearch_file }}"
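# With the version above this resolves to:
# https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.11.deb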
elasticsearch_user: "elasticsearch"
elasticsearch_group: "elasticsearch"
#
# Defaults for a single server installation.
ELASTICSEARCH_CLUSTERED: true
ELASTICSEARCH_HEAP_SIZE: "512m"
\ No newline at end of file
......@@ -8,29 +8,79 @@
# * oraclejdk
#
# Example play:
#
# This role can be used to do a single-server or clustered
# installation of the elasticsearch service. When a cluster
# is being installed, two things are important to know:
# the ELASTICSEARCH_CLUSTERED var must be true, and all hosts
# targeted by your play will become cluster peers (Elasticsearch
# will determine which of them should be the master).
#
# Ansible provides handy set operators for use in the
# plays host declaration, as seen in the following example.
#
# - hosts: tag_role_elasticsearch:&tag_environment_stage
# roles:
# - common
# - oraclejdk
# - elasticsearch
#
# roles:
# - common
# - oraclejdk
# - elasticsearch
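# ELASTICSEARCH_CLUSTERED can also be supplied at run time as an extra var
# (illustrative invocation; playbook name assumed):
#
#   ansible-playbook -i <inventory> elasticsearch.yml -e "ELASTICSEARCH_CLUSTERED=true"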
- name: download elasticsearch
get_url: >
url={{ elasticsearch_url }}
dest=/var/tmp/{{ elasticsearch_file }}
force=no
register: elasticsearch_reinstall
- name: install elasticsearch from local package
shell: >
dpkg -i /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb
executable=/bin/bash
creates=/usr/share/elasticsearch/bin/elasticsearch
tags:
- elasticsearch
- install
dpkg -i --force-confold /var/tmp/elasticsearch-{{ elasticsearch_version }}.deb
executable=/bin/bash
when: elasticsearch_reinstall.changed
- name: create directories
file: >
path="{{ item }}"
state=directory
owner="{{ elasticsearch_user }}"
group="{{ elasticsearch_group }}"
with_items:
- "{{ elasticsearch_data_dir }}"
- "{{ elasticsearch_log_dir }}"
- "{{ elasticsearch_cfg_dir }}"
- name: update elasticsearch defaults
template: >
src=etc/default/elasticsearch.j2 dest=/etc/default/elasticsearch
when: ELASTICSEARCH_CLUSTERED
- name: drop the elasticsearch config
template: >
src=edx/etc/elasticsearch/elasticsearch.yml.j2 dest={{ elasticsearch_cfg_dir }}/elasticsearch.yml
mode=0744
when: ELASTICSEARCH_CLUSTERED
- name: drop the elasticsearch logging config
template: >
src=edx/etc/elasticsearch/logging.yml.j2 dest={{ elasticsearch_cfg_dir }}/logging.yml
mode=0744
when: ELASTICSEARCH_CLUSTERED
# Plugin installation fails hard when the plugin already
# exists, which is problematic when the plugin is upgraded.
- name: check if the bigdesk plugin is installed
stat: path=/usr/share/elasticsearch/plugins/bigdesk
register: bigdesk
- name: install bigdesk plugin
shell: >
/usr/share/elasticsearch/bin/plugin -install lukas-vlcek/bigdesk/2.2.0
when: bigdesk.stat.isdir is not defined
- name: Ensure elasticsearch is enabled and started
service: name=elasticsearch state=started enabled=yes
tags:
- elasticsearch
- install
service: name=elasticsearch state=restarted enabled=yes
\ No newline at end of file
# {{ ansible_managed }}
# Path to directory where to store index data allocated for this node.
#
path.data: {{elasticsearch_data_dir}}
# Path to log files:
#
path.logs: {{elasticsearch_log_dir}}
# ElasticSearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
#
# Set this property to true to lock the memory:
#
bootstrap.mlockall: true
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
# discovery.zen.ping.multicast.enabled: false
#
# 2. Configure an initial list of master nodes in the cluster
# to perform discovery when new nodes (master or data) are started:
#
# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
{%- if ELASTICSEARCH_CLUSTERED -%}
{%- set hosts= [] -%}
{%- for host in hostvars.keys() -%}
{% do hosts.append(host) %}
{%- endfor %}
discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}']
{% endif -%}
\ No newline at end of file
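# For reference, if the play targeted two hosts at 10.0.0.10 and 10.0.0.11
# (illustrative addresses), the clustered block above would render roughly as:
#
#   discovery.zen.ping.unicast.hosts: ['10.0.0.10','10.0.0.11']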
# you can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
# log action execution errors for easier debugging
action: DEBUG
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
# peer shard recovery
#indices.recovery: DEBUG
# discovery
#discovery: TRACE
index.search.slowlog: TRACE, index_search_slow_log_file
index.indexing.slowlog: TRACE, index_indexing_slow_log_file
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_index_search_slowlog.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_indexing_slow_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
\ No newline at end of file
# {{ ansible_managed }}
# Run ElasticSearch as this user ID and group ID
#ES_USER=elasticsearch
#ES_GROUP=elasticsearch
# Heap Size (defaults to 256m min, 1g max)
ES_HEAP_SIZE={{ ELASTICSEARCH_HEAP_SIZE }}
# Heap new generation
#ES_HEAP_NEWSIZE=
# max direct memory
#ES_DIRECT_SIZE=
# Maximum number of open files, defaults to 65535.
#MAX_OPEN_FILES=65535
# Maximum locked memory size. Set to "unlimited" if you use the
# bootstrap.mlockall option in elasticsearch.yml. You must also set
# ES_HEAP_SIZE.
#MAX_LOCKED_MEMORY=unlimited
# ElasticSearch log directory
LOG_DIR={{ elasticsearch_log_dir }}
# ElasticSearch data directory
DATA_DIR={{ COMMON_DATA_DIR }}
# ElasticSearch work directory
#WORK_DIR=/tmp/elasticsearch
# ElasticSearch configuration directory
CONF_DIR={{ elasticsearch_cfg_dir }}
# ElasticSearch configuration file (elasticsearch.yml)
CONF_FILE=${CONF_DIR}/elasticsearch.yml
# Additional Java OPTS
#ES_JAVA_OPTS=
......@@ -28,6 +28,7 @@ FORUM_ELASTICSEARCH_PORT: "9200"
FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}"
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_APP_NAME: "forum-newrelic-app"
FORUM_WORKER_PROCESSES: "4"
forum_environment:
RBENV_ROOT: "{{ forum_rbenv_root }}"
......@@ -41,12 +42,22 @@ forum_environment:
MONGOHQ_URL: "{{ FORUM_MONGO_URL }}"
HOME: "{{ forum_app_dir }}"
NEW_RELIC_APP_NAME: "{{ FORUM_NEW_RELIC_APP_NAME }}"
NEW_RELIC_LICENSE_KEY: " {{ FORUM_NEW_RELIC_LICENSE_KEY }}"
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
DATA_DIR: "{{ forum_data_dir }}"
forum_user: "forum"
forum_ruby_version: "1.9.3-p448"
forum_source_repo: "https://github.com/edx/cs_comments_service.git"
forum_version: "HEAD"
# Currently we are installing a branch of the comments service
# that configures unicorn to listen on a unix socket and get the
# worker count configuration from the environment. We are not
# merging it to the comments service master yet, as it has
# some incompatibilities with our Heroku deployments.
#
# https://github.com/edx/cs_comments_service/pull/83
#
forum_version: "e0d/unicorn-config"
forum_unicorn_port: "4567"
#
......@@ -59,5 +70,5 @@ forum_unicorn_port: "4567"
# connectivity to Mongo is also tested, but separately.
#
forum_services:
- {service: "sinatra", host: "localhost", port: "{{ forum_unicorn_port }}"}
- {service: "elasticsearch", host: "{{ FORUM_ELASTICSEARCH_HOST }}", port: "{{ FORUM_ELASTICSEARCH_PORT }}"}
\ No newline at end of file
- {service: "elasticsearch", host: "{{ FORUM_ELASTICSEARCH_HOST }}", port: "{{ FORUM_ELASTICSEARCH_PORT }}"}
......@@ -42,5 +42,10 @@
notify:
- restart the forum service
- include: deploy.yml tags=deploy
- name: create {{ forum_data_dir }}
file: >
path={{ forum_data_dir }} state=directory
owner="{{ common_web_user }}" group="{{ common_web_group }}"
mode=0777
- include: deploy.yml tags=deploy
\ No newline at end of file
......@@ -8,4 +8,4 @@
- name: test that mongo replica set members are listening
wait_for: port={{ FORUM_MONGO_PORT }} host={{ item }} timeout=30
with_items: FORUM_MONGO_HOSTS
when: not devstack
when: not devstack
\ No newline at end of file
#!/bin/bash
source {{ forum_app_dir }}/forum_env
cd {{ forum_code_dir }}
{% if devstack %}
{{ forum_rbenv_shims }}/ruby app.rb
{% else %}
{{ forum_gem_bin }}/unicorn -c config/unicorn.rb
{% endif %}
......@@ -18,14 +18,14 @@
- name: create ora application config
copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.env.json
src={{secure_dir}}/files/{{COMMON_ENVIRONMENT}}/legacy_ora/ora.env.json
dest={{ora_app_dir}}/env.json
sudo_user: "{{ ora_user }}"
register: env_state
- name: create ora auth file
copy:
src={{secure_dir}}/files/{{COMMON_ENV_TYPE}}/legacy_ora/ora.auth.json
src={{secure_dir}}/files/{{COMMON_ENVIRONMENT}}/legacy_ora/ora.auth.json
dest={{ora_app_dir}}/auth.json
sudo_user: "{{ ora_user }}"
register: auth_state
......
......@@ -26,7 +26,7 @@ server {
{% endif %}
server_name studio.*;
server_name ~^((stage|prod)-)?studio\..*;
access_log {{ nginx_log_dir }}/access.log;
error_log {{ nginx_log_dir }}/error.log error;
......
#
# {{ ansible_managed }}
#
{# This prevents the injected comment from eating the server
directive. There's probably a better way of doing this,
but I don't know it currently.
#}
{% raw %}
{% endraw %}
{%- if "forum" in nginx_default_sites -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
{% if devstack %}
{# Connects to WEBrick on port 4567 typically. Appropriate for development deployments #}
upstream forum_app_server {
server localhost:{{ forum_unicorn_port }} fail_timeout=0;
}
{% else %}
{# Connects to unicorn over a unix socket. Appropriate for production deployments #}
server localhost:{{ forum_unicorn_port }} fail_timeout=0;
upstream forum_app_server {
server unix:{{ forum_data_dir }}/forum.sock fail_timeout=0;
}
{% endif %}
server {
......
......@@ -69,8 +69,8 @@ ORA_DJANGO_PASSWORD: "password"
ORA_URL: "http://localhost:18060"
ORA_MYSQL_DB_NAME: 'ora'
ORA_MYSQL_USER: 'root'
ORA_MYSQL_PASSWORD: ''
ORA_MYSQL_USER: 'ora001'
ORA_MYSQL_PASSWORD: 'password'
ORA_MYSQL_HOST: 'localhost'
ORA_MYSQL_PORT: '3306'
......
......@@ -38,6 +38,18 @@
file: src=/usr/lib/jvm/{{ oraclejdk_base }} dest={{ oraclejdk_link }} state=link
when: oraclejdk_present|failed
- name: update alternatives java
shell: >
update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/java" 1
- name: update alternatives javac
shell: >
update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javac" 1
- name: update alternatives javaws
shell: >
update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/lib/jvm/{{ oraclejdk_base }}/bin/javaws" 1
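# The registration can be verified by hand afterwards (manual check, not part
# of this play): update-alternatives --display java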
- name: add JAVA_HOME for Oracle Java
template: src=java.sh.j2 dest=/etc/profile.d/java.sh owner=root group=root mode=0755
when: oraclejdk_present|failed
#Variables for rabbitmq
---
rabbit_app_dir: "{{ COMMON_APP_DIR }}/rabbitmq"
rabbit_data_dir: "{{ COMMON_DATA_DIR }}/rabbitmq"
rabbit_log_dir: "{{ COMMON_LOG_DIR }}/rabbitmq"
rabbit_cfg_dir: "{{ COMMON_CFG_DIR }}/rabbitmq"
# Environment specific vars
RABBIT_ERLANG_COOKIE: 'DEFAULT_COOKIE'
RABBIT_USERS:
......@@ -42,11 +47,6 @@ rabbitmq_auth_config:
erlang_cookie: $RABBIT_ERLANG_COOKIE
admins: $RABBIT_USERS
# If the system is running out of an Amazon Web Services
# cloudformation stack, this group name can be used to pull out
# the name of the stack the rabbit server resides in.
rabbitmq_aws_stack_name: "tag_aws_cloudformation_stack-name_"
rabbitmq_clustered_hosts: []
rabbitmq_plugins:
......
......@@ -48,7 +48,8 @@
- name: add rabbitmq cluster configuration
template: >
src=rabbitmq.config.j2 dest={{rabbitmq_config_dir}}/rabbitmq.config
src=etc/rabbitmq/rabbitmq.config.j2
dest={{rabbitmq_config_dir}}/rabbitmq.config
owner=root group=root mode=0644
register: cluster_configuration
......
{# Get the list of hosts that are in the same stack as the current machine
and also a rabbitmq machine.
#}
% {{ ansible_managed }}
{% if RABBITMQ_CLUSTERED -%}
{%- set hosts= [] -%}
{%- if RABBITMQ_CLUSTERED -%}
{%- set hosts= [] -%}
{%- set all_rabbit_hosts = [] -%}
{%- do all_rabbit_hosts.extend(groups.tag_role_rabbitmq) -%}
{%- do all_rabbit_hosts.extend(groups.tag_group_rabbitmq) -%}
{%- for name in group_names -%}
{%- if name.startswith(rabbitmq_aws_stack_name) -%}
{%- for host in all_rabbit_hosts -%}
{%- if host in groups[name] -%}
{% do hosts.append("rabbit@ip-" + host.replace('.','-')) %}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- endfor -%}
{%- for host in hostvars.keys() -%}
{% do hosts.append("rabbit@ip-" + host.replace('.','-')) %}
{%- endfor %}
[{rabbit,
[{cluster_nodes, {['{{ hosts|join("\',\'") }}'], disc}}]}].
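% For reference, with two rabbit hosts at 10.0.0.10 and 10.0.0.11
% (illustrative addresses), the template above renders roughly as:
%
%   [{rabbit,
%     [{cluster_nodes, {['rabbit@ip-10-0-0-10','rabbit@ip-10-0-0-11'], disc}}]}].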
......
......@@ -25,15 +25,15 @@ SPLUNKFORWARDER_PASSWORD: !!null
SPLUNKFORWARDER_LOG_ITEMS:
- directory: '{{ COMMON_LOG_DIR }}'
recursive: true
index: '{{COMMON_ENV_TYPE}}-{{COMMON_ENV_NAME}}'
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'edx'
- directory: '/var/log'
recursive: true
index: '{{COMMON_ENV_TYPE}}-{{COMMON_ENV_NAME}}'
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'syslog'
- directory: '{{ COMMON_LOG_DIR }}/nginx'
recursive: true
index: '{{COMMON_ENV_TYPE}}-{{COMMON_ENV_NAME}}'
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
sourcetype: 'nginx'
#
......
......@@ -41,8 +41,8 @@ XQUEUE_RABBITMQ_HOSTNAME: 'localhost'
XQUEUE_LANG: 'en_US.UTF-8'
XQUEUE_MYSQL_DB_NAME: 'xqueue'
XQUEUE_MYSQL_USER: 'root'
XQUEUE_MYSQL_PASSWORD: ''
XQUEUE_MYSQL_USER: 'xqueue001'
XQUEUE_MYSQL_PASSWORD: 'password'
XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306'
......@@ -73,7 +73,6 @@ xqueue_auth_config:
RABBITMQ_USER: $XQUEUE_RABBITMQ_USER
RABBITMQ_PASS: $XQUEUE_RABBITMQ_PASS
xqueue_create_db: 'yes'
xqueue_source_repo: https://github.com/edx/xqueue.git
xqueue_version: 'HEAD'
xqueue_pre_requirements_file: "{{ xqueue_code_dir }}/pre-requirements.txt"
......
......@@ -32,18 +32,6 @@
notify:
- restart xqueue
- name: create xqueue db
mysql_db: >
name={{xqueue_auth_config.DATABASES.default.NAME}}
login_host={{xqueue_auth_config.DATABASES.default.HOST}}
login_user={{xqueue_auth_config.DATABASES.default.USER}}
login_password={{xqueue_auth_config.DATABASES.default.PASSWORD}}
state=present
encoding=utf8
notify:
- restart xqueue
when: xqueue_create_db is defined and xqueue_create_db|lower == "yes"
- include: deploy.yml tags=deploy
......
......@@ -33,5 +33,4 @@
- forum
- { role: "xqueue", update_users: True }
- ora
- discern
- edx_ansible
......@@ -75,9 +75,9 @@ fi
if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-0dd1ef64"
ami="ami-bd6b6ed4"
elif [[ $server_type == "ubuntu_12.04" ]]; then
ami="ami-d0f89fb9"
ami="ami-a73264ce"
fi
fi
......@@ -186,7 +186,7 @@ EOF
fi
declare -A deploy
roles="edxapp forum xqueue xserver ora discern certs"
roles="edxapp forum xqueue xserver ora discern certs demo"
for role in $roles; do
deploy[$role]=${!role}
done
......
......@@ -42,13 +42,19 @@ deployments:
# A jenkins URL to post requests for building AMIs
abbey_url: "http://...."
abbey_token: "API_TOKEN"
# A mapping of plays to base AMIs
base_amis: {}
# The default AMI to use if there isn't one specific to your plays.
default_base_ami: ''
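# For example (AMI ids below are placeholders):
#   base_amis:
#     edxapp: ami-00000000
#   default_base_ami: 'ami-11111111'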
---
"""
import argparse
import json
import yaml
import logging as log
import requests
from datetime import datetime
from git import Repo
from pprint import pformat
......@@ -151,10 +157,12 @@ def prepare_release(args):
all_plays[play]['amis'][env] = None
release['plays'] = all_plays
if not args.noop:
if args.noop:
print("Would insert into release collection: {}".format(pformat(release)))
else:
release_coll.insert(release)
# All plays that need new AMIs have been updated.
notify_abbey(config['abbey_url'], config['abbey_token'], args.deployment,
notify_abbey(config, args.deployment,
all_plays, args.release_id, mongo_uri, config_repo_ver,
config_secure_ver, args.noop)
......@@ -172,34 +180,39 @@ def ami_for(db, env, deployment, play, configuration,
return db.amis.find_one(ami_signature)
import requests
def notify_abbey(abbey_url, abbey_token, deployment, all_plays, release_id,
def notify_abbey(config, deployment, all_plays, release_id,
mongo_uri, configuration_ref, configuration_secure_ref, noop=False):
abbey_url = config['abbey_url']
base_amis = config['base_amis']
default_base = config['default_base_ami']
for play_name, play in all_plays.items():
for env, ami in play['amis'].items():
if ami is None:
params = []
params.append({ 'name': 'play', 'value': play_name})
params.append({ 'name': 'deployment', 'value': deployment})
params.append({ 'name': 'environment', 'value': env})
params.append({ 'name': 'vars', 'value': yaml.safe_dump(play['vars'], default_flow_style=False)})
params.append({ 'name': 'release_id', 'value': release_id})
params.append({ 'name': 'mongo_uri', 'value': mongo_uri})
params.append({ 'name': 'configuration', 'value': configuration_ref})
params.append({ 'name': 'configuration_secure', 'value': configuration_secure_ref})
build_params = {'parameter': params}
log.info("Need ami for {}".format(pformat(build_params)))
if not noop:
r = requests.post(abbey_url,
data={"token": abbey_token},
params={"json": json.dumps(build_params)})
params = {}
params['play'] = play_name
params['deployment'] = deployment
params['environment'] = env
params['vars'] = yaml.safe_dump(play['vars'], default_flow_style=False)
params['release_id'] = release_id
params['mongo_uri'] = mongo_uri
params['configuration'] = configuration_ref
params['configuration_secure'] = configuration_secure_ref
params['base_ami'] = base_amis.get(play_name, default_base)
log.info("Need ami for {}".format(pformat(params)))
if noop:
r = requests.Request('POST', abbey_url, params=params)
url = r.prepare().url
print("Would have posted: {}".format(url))
else:
r = requests.post(abbey_url, params=params)
log.info("Sent request got {}".format(r))
if r.status_code != 201:
if r.status_code != 200:
# Something went wrong.
msg = "Failed to submit request with params: {}"
raise Exception(msg.format(pformat(build_params)))
raise Exception(msg.format(pformat(params)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepare a new release.")
......
......@@ -58,7 +58,7 @@ class MongoConnection:
'deployment': args.deployment,
'configuration_ref': args.configuration_version,
'configuration_secure_ref': args.configuration_secure_version,
'vars': extra_vars,
'vars': git_refs,
}
try:
self.mongo_ami.insert(query)
......@@ -142,6 +142,8 @@ def parse_args():
help="don't cleanup on failures")
parser.add_argument('--vars', metavar="EXTRA_VAR_FILE",
help="path to extra var file", required=False)
parser.add_argument('--refs', metavar="GIT_REFS_FILE",
help="path to a var file with app git refs", required=False)
parser.add_argument('-a', '--application', required=False,
help="Application for subnet, defaults to admin",
default="admin")
......@@ -314,8 +316,31 @@ EOF
fi
cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}
{git_refs_yml}
# path to local checkout of
# the secure repo
secure_vars: $secure_vars_file
# The private key used for pulling down
# private edx-platform repos is the same
# identity as the github user that has
# access to the secure vars repo.
# EDXAPP_USE_GIT_IDENTITY needs to be set
# to true in the extra vars for this
# variable to be used.
EDXAPP_LOCAL_GIT_IDENTITY: $secure_identity
# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true
EOF
chmod 400 $secure_identity
......@@ -355,6 +380,7 @@ rm -rf $base_dir
identity_file=identity_file,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
git_refs_yml=git_refs_yml,
secure_vars=secure_vars)
ec2_args = {
......@@ -618,9 +644,17 @@ if __name__ == '__main__':
extra_vars_yml = f.read()
extra_vars = yaml.load(extra_vars_yml)
else:
extra_vars_yml = "---\n"
extra_vars_yml = ""
extra_vars = {}
if args.refs:
with open(args.refs) as f:
git_refs_yml = f.read()
git_refs = yaml.load(git_refs_yml)
else:
git_refs_yml = ""
git_refs = {}
if args.secure_vars:
secure_vars = args.secure_vars
else:
......
......@@ -149,6 +149,7 @@ def update_elb_rds_dns(zone):
stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
if elb.vpc_id == vpc_id]
for elb in stack_elbs:
for inst in elb.instances:
instance = ec2_con.get_all_instances(
......@@ -160,13 +161,12 @@ def update_elb_rds_dns(zone):
else:
# deprecated, for backwards compatibility
play_tag = instance.tags['role']
play_tag = instance.tags['role']
fqdn = "{}-{}.{}".format(env_tag, play_tag, zone_name)
add_or_update_record(zone, fqdn, 'CNAME', 600, [elb.dns_name])
if play_tag == 'edxapp':
# create courses and studio CNAME records for edxapp
for name in ['courses', 'studio']:
fqdn = "{}.{}".format(name, zone_name)
fqdn = "{}-{}.{}".format(env_tag, name, zone_name)
add_or_update_record(zone, fqdn, 'CNAME',
600, [elb.dns_name])
break # only need the first instance for tag info
......
......@@ -29,8 +29,6 @@ Vagrant.configure("2") do |config|
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
config.hostsupdater.aliases = ["preview.localhost"]
# Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11']
config.ssh.forward_x11 = true
......
......@@ -34,8 +34,8 @@ end
Vagrant.configure("2") do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "focaccia-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140130-focaccia-devstack.box"
config.vm.box = "gugelhupf-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140210-gugelhupf-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
......
......@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "facaccia"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140130-focaccia-fullstack.box"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140210-gugelhupf-fullstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.hostsupdater.aliases = ["preview.localhost"]
......