Commit f76556d4 by Edward Zarecor

Merge branch 'master' into e0d/inventory

parents e4b8b7ae 000af8ca
*
!playbooks/
!docker/
!docker/build/*/ansible_overrides.yml
!docker/build/*/*.sh
!docker/plays/
!util/install/
......@@ -11,6 +11,8 @@ vagrant/*/*/cs_comments_service
vagrant/*/*/ora
vagrant/*/*/analytics_api
vagrant/*/*/insights
vagrant/*/*/ecommerce
vagrant/*/*/programs
vagrant_ansible_inventory_default
### OS X artifacts
......@@ -18,3 +20,10 @@ vagrant_ansible_inventory_default
.AppleDouble
:2e_*
:2e#
## Make artifacts
.build
playbooks/edx-east/travis-test.yml
## Local virtualenv
/venv
# Travis CI configuration file for running tests
language: python

branches:
  only:
    - master

python:
  - "2.7"

services:
  - docker

addons:
  apt:
    packages:
      - nodejs
      - python-demjson

before_install:
  - sudo apt-get -y update
  - sudo apt-get -y install -o Dpkg::Options::="--force-confold" docker-engine

install:
  - "sudo apt-get install -y nodejs python-demjson"
  - "pip install --allow-all-external -r requirements.txt"
  - "pip install --allow-all-external demjson"

# Each matrix entry runs one make target; the yml/json/jinja syntax checks and
# the edx-east role check that used to live inline in `script:` are now
# Makefile targets (test.syntax, test.edx_east_roles).
env:
  - MAKE_TARGET=test.syntax
  - MAKE_TARGET=test.edx_east_roles
  - MAKE_TARGET=docker.test.shard SHARD=0 SHARDS=3
  - MAKE_TARGET=docker.test.shard SHARD=1 SHARDS=3
  - MAKE_TARGET=docker.test.shard SHARD=2 SHARDS=3

# NOTE(review): the original file declared `script:` twice (a long inline
# shell version followed by this make-based one). YAML parsers keep only the
# last duplicate key, so the inline version was dead code; it has been
# removed and only the effective, make-based script is kept.
script:
  - docker --version
  - make --version
  - travis_wait 50 make --keep-going $MAKE_TARGET SHARD=$SHARD SHARDS=$SHARDS
......@@ -49,3 +49,6 @@ Xiang Junfu <xiangjf.fnst@cn.fujitsu.com>
Sarina Canelake <sarina@edx.org>
Steven Burch <stv@stanford.edu>
Dan Powell <dan@abakas.com>
Omar Al-Ithawi <oithawi@qrf.org>
David Adams <dcadams@stanford.edu>
Florian Haas <florian@hastexo.com>
- Role: rabbitmq
- Removed the RABBITMQ_CLUSTERED var and related tooling. The goal of the var was to be able to set up a cluster in the aws environment without having to know all the IPs of the cluster beforehand. It relied on the `hostvars` ansible variable to work correctly which it no longer does in 1.9. This may get fixed in the future but for now, the "magic" setup doesn't work.
- Changed `rabbitmq_clustered_hosts` to RABBITMQ_CLUSTERED_HOSTS.
- Role: edxapp
- Removed SUBDOMAIN_BRANDING and SUBDOMAIN_COURSE_LISTINGS variables
- Role: ora
- Remove the ora1 role as support for it was deprecated in Cypress.
- Removed dependencies on ora throughout the playbooks / vagrantfiles.
- Role: edxapp
- Removed XmlModuleStore from the default list of modulestores for the LMS.
- EDXAPP_XML_MAPPINGS variable no longer exists by default and is not used by the edxapp role.
......
SHELL := /bin/bash
.PHONY: help requirements clean build test pkg
include *.mk
help:
@echo ''
@echo 'Makefile for the edX Configuration'
@echo ''
@echo 'Usage:'
@echo ' make requirements install requirements'
@echo ' make test run all tests'
@echo ' make build build everything'
@echo ' make pkg package everything'
@echo ' make clean remove build by-products'
@echo ''
@echo ' Docker:'
@echo ' $$image: any dockerhub image'
@echo ' $$container: any container defined in docker/build/$$container/Dockerfile'
@echo ''
@echo ' make $(docker_pull)$$image pull $$image from dockerhub'
@echo ''
@echo ' make $(docker_build)$$container build $$container'
@echo ' make $(docker_test)$$container test that $$container will build'
@echo ' make $(docker_pkg)$$container package $$container for a push to dockerhub'
@echo ' make $(docker_push)$$container push $$container to dockerhub '
@echo ''
@echo ' make docker.build build all defined docker containers (based on dockerhub base images)'
@echo ' make docker.test test all defined docker containers'
@echo ' make docker.pkg package all defined docker containers (using local base images)'
@echo ' make docker.push push all defined docker containers'
@echo ''
@echo ' Tests:'
@echo ' test.syntax Run all syntax tests'
@echo ' test.syntax.json Run syntax tests on .json files'
@echo ' test.syntax.yml Run syntax tests on .yml files'
@echo ' test.syntax.jinja Run syntax tests on .j2 files'
@echo ' test.edx_east_roles Run validation on edx-east roles'
@echo ''
requirements:
pip install -qr pre-requirements.txt --exists-action w
pip install -qr requirements.txt --exists-action w
......@@ -6,6 +6,8 @@ The goal of the edx/configuration project is to provide a simple, but
flexible, way for anyone to stand up an instance of Open edX that is
fully configured and ready-to-go.
Before getting started, please look at the [Open EdX Deployment options](https://open.edx.org/deployment-options), to see which method for deploying OpenEdX is right for you.
Building the platform takes place in two phases:
* Infrastructure provisioning
......@@ -17,6 +19,9 @@ and are free to use one, but not the other. The provisioning phase
stands-up the required resources and tags them with role identifiers
so that the configuration tool can come in and complete the job.
__Note__: The Cloudformation templates used for infrastructure provisioning
are no longer maintained. We are working to move to a more modern and flexible tool.
The reference platform is provisioned using an Amazon
[CloudFormation](http://aws.amazon.com/cloudformation/) template.
When the stack has been fully created you will have a new AWS Virtual
......@@ -28,11 +33,9 @@ The configuration phase is managed by [Ansible](http://ansible.com/).
We have provided a number of playbooks that will configure each of
the edX services.
This project is a re-write of the current edX provisioning and
configuration tools, we will be migrating features to this project
over time, so expect frequent changes.
__Important__:
The edX configuration scripts need to be run as root on your servers and will make changes to service configurations including, but not limited to, sshd, dhclient, sudo, apparmor and syslogd. Our scripts are made available as we use them and they implement our best practices. We strongly recommend that you review everything that these scripts will do before running them against your servers. We also recommend against running them against servers that are hosting other applications. No warranty is expressed or implied.
For more information including installation instructions please see the [Configuration Wiki](https://github.com/edx/configuration/wiki).
For more information including installation instructions please see the [OpenEdX Wiki](https://openedx.atlassian.net/wiki/display/OpenOPS/Open+edX+Operations+Home).
For info on any large recent changes please see the [change log](https://github.com/edx/configuration/blob/master/CHANGELOG.md).
# Make fragment that builds, tests, packages, and pushes the Docker images
# defined by docker/build/<image>/Dockerfile.
.PHONY: docker.build docker.test docker.pkg
# Sharding parameters: SHARD selects which slice of the image list this
# invocation handles out of SHARDS total slices (used by CI to parallelize).
SHARD=0
SHARDS=1
# Every Dockerfile under docker/build/ defines one image; extract the names.
dockerfiles:=$(shell ls docker/build/*/Dockerfile)
images:=$(patsubst docker/build/%/Dockerfile,%,$(dockerfiles))
# Prefixes used to form per-image phony targets, e.g. docker.build.edxapp.
docker_build=docker.build.
docker_test=docker.test.
docker_pkg=docker.pkg.
docker_push=docker.push.
# N.B. / is used as a separator so that % will match the /
# in something like 'edxops/trusty-common:latest'
# Also, make can't handle ':' in filenames, so we instead use '@'
# which means the same thing to docker
docker_pull=docker.pull/
build: docker.build
test: docker.test
pkg: docker.pkg
clean:
rm -rf .build
# Test only the images whose position in the list falls in this shard.
docker.test.shard: $(foreach image,$(shell echo $(images) | tr ' ' '\n' | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))
# Aggregate targets fan out to one per-image target each.
docker.build: $(foreach image,$(images),$(docker_build)$(image))
docker.test: $(foreach image,$(images),$(docker_test)$(image))
docker.pkg: $(foreach image,$(images),$(docker_pkg)$(image))
docker.push: $(foreach image,$(images),$(docker_push)$(image))
# Pull a base image from dockerhub; '@' in $* stands in for ':' (see above).
$(docker_pull)%:
docker pull $(subst @,:,$*)
# Build an image from its checked-in Dockerfile (deliberately untagged).
$(docker_build)%: docker/build/%/Dockerfile
docker build -f $< .
# Build the generated test/pkg variants of each Dockerfile (see rules below).
$(docker_test)%: .build/%/Dockerfile.test
docker build -t $*:test -f $< .
$(docker_pkg)%: .build/%/Dockerfile.pkg
docker build -t $*:latest -f $< .
$(docker_push)%: $(docker_pkg)%
docker tag -f $*:latest edxops/$*:latest
docker push edxops/$*:latest
# Generate a dependency fragment wiring each image to its base image's
# pull/test/pkg targets, depending on whether the base is an edxops image.
.build/%/Dockerfile.d: docker/build/%/Dockerfile Makefile
@mkdir -p .build/$*
$(eval FROM=$(shell grep "^\s*FROM" $< | sed -E "s/FROM //" | sed -E "s/:/@/g"))
$(eval EDXOPS_FROM=$(shell echo "$(FROM)" | sed -E "s#edxops/([^@]+)(@.*)?#\1#"))
@echo "$(docker_build)$*: $(docker_pull)$(FROM)" > $@
@if [ "$(EDXOPS_FROM)" != "$(FROM)" ]; then \
echo "$(docker_test)$*: $(docker_test)$(EDXOPS_FROM:@%=)" >> $@; \
echo "$(docker_pkg)$*: $(docker_pkg)$(EDXOPS_FROM:@%=)" >> $@; \
else \
echo "$(docker_test)$*: $(docker_pull)$(FROM)" >> $@; \
echo "$(docker_pkg)$*: $(docker_pull)$(FROM)" >> $@; \
fi
# Rewrite "FROM edxops/<base>" to "FROM <base>:test" so test/pkg builds stack
# on the locally built base image rather than the dockerhub one.
.build/%/Dockerfile.test: docker/build/%/Dockerfile Makefile
@mkdir -p .build/$*
@sed -E "s#FROM edxops/([^:]+)(:\S*)?#FROM \1:test#" $< > $@
.build/%/Dockerfile.pkg: docker/build/%/Dockerfile Makefile
@mkdir -p .build/$*
@sed -E "s#FROM edxops/([^:]+)(:\S*)?#FROM \1:test#" $< > $@
-include $(foreach image,$(images),.build/$(image)/Dockerfile.d)
# Docker Support
## Introduction
Docker support for edX services is volatile and experimental.
We welcome interested testers and contributors. If you are
interested in participating, please join us on Slack at
https://openedx.slack.com/messages/docker.
We do not and may never run these images in production.
They are not currently suitable for production use.
## Tooling
`Dockerfile`s for individual services should be placed in
`docker/build/<service>`. There should be an accompanying `ansible_overrides.yml`
which specifies any docker-specific configuration values.
Once the `Dockerfile` has been created, it can be built and published
using a set of make commands.
```shell
make docker.build.<service> # Build the service container (but don't tag it)
# By convention, this will build the container using
# the currently checked-out configuration repository,
# and will build on top of the most-recently available
# base container image from dockerhub.
make docker.test.<service> # Test that the Dockerfile for <service> will build.
# This will rebuild any edx-specific containers that
# the Dockerfile depends on as well, in case there
# are failures as a result of changes to the base image.
make docker.pkg.<service> # Package <service> for publishing to Dockerhub. This
# will also package and tag pre-requisite service containers.
make docker.push.<service> # Push <service> to Dockerhub as latest.
```
## Conventions
In order to facilitate development, Dockerfiles should be based on
one of the `edxops/<ubuntu version>-common` base images, and should
`COPY . /edx/app/edx_ansible/edx_ansible` in order to load your local
ansible plays into the image. The actual work of configuring the image
should be done by executing ansible (rather than explicit steps in the
Dockerfile), unless those steps are docker specific. Devstack-specific
steps can be tagged with the `devstack:install` tag in order that they
only run when building a devstack image.
The user used in the `Dockerfile` should be `root`.
FROM edxops/precise-common:latest
MAINTAINER edxops

RUN apt-get update

# Load the local configuration checkout into the image and run the
# analytics_api play against it with the docker-specific overrides.
# (The original file COPY'd the overrides file twice; once is enough.)
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/analytics_api/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook analytics_api.yml -i '127.0.0.1,' -c local -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" -e@/ansible_overrides.yml

WORKDIR /edx/app/
# Exec-form CMD must split the command into separate array elements; packing
# the whole command line into one string makes docker look up that entire
# string as a single executable path, which fails at container start.
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
EXPOSE 443 80
../../plays/analytics_api.yml
\ No newline at end of file
---
DOCKER_TLD: "edx"
ANALYTICS_API_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ANALYTICS_API_DEFAULT_DB_NAME }}'
USER: 'api001'
PASSWORD: 'password'
HOST: 'db.{{ DOCKER_TLD }}'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ANALYTICS_API_REPORTS_DB_NAME }}'
USER: 'reports001'
PASSWORD: 'password'
HOST: "db.{{ DOCKER_TLD }}"
PORT: '3306'
---
course_discovery_gunicorn_host: 0.0.0.0
COURSE_DISCOVERY_MYSQL: 'db'
COURSE_DISCOVERY_DJANGO_SETTINGS_MODULE: 'course_discovery.settings.devstack'
COURSE_DISCOVERY_ELASTICSEARCH_HOST: 'es'
COURSE_DISCOVERY_MYSQL_MATCHER: '%'
# To build this Dockerfile:
#
# From the root of configuration:
#
# docker build -f docker/build/credentials/Dockerfile .
#
# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible
# with the currently checked-out configuration repo.
FROM edxops/trusty-common:latest
MAINTAINER edxops
ARG CREDENTIALS_VERSION=master
ARG REPO_OWNER=edx
ADD . /edx/app/edx_ansible/edx_ansible
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
COPY docker/build/credentials/ansible_overrides.yml /
RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook credentials.yml \
-c local -i '127.0.0.1,' \
-t 'install,assets,devstack:install' \
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="CREDENTIALS_VERSION=$CREDENTIALS_VERSION" \
--extra-vars="COMMON_GIT_PATH=$REPO_OWNER"
USER root
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
---
credentials_gunicorn_host: 127.0.0.1
CREDENTIALS_MYSQL: 'db'
CREDENTIALS_DJANGO_SETTINGS_MODULE: 'credentials.settings.devstack'
CREDENTIALS_MYSQL_MATCHER: '%'
# To build this Dockerfile:
#
# From the root of configuration:
#
# docker build -f docker/build/discovery/Dockerfile .
#
# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible
# with the currently checked-out configuration repo.
FROM edxops/trusty-common:latest
MAINTAINER edxops
ENV DISCOVERY_VERSION=master
ENV REPO_OWNER=edx
ADD . /edx/app/edx_ansible/edx_ansible
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
COPY docker/build/discovery/ansible_overrides.yml /
RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook discovery.yml \
-c local -i '127.0.0.1,' \
-t 'install,assets,devstack:install' \
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="DISCOVERY_VERSION=$DISCOVERY_VERSION" \
--extra-vars="COMMON_GIT_PATH=$REPO_OWNER"
USER root
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
---
discovery_gunicorn_host: 0.0.0.0
DISCOVERY_MYSQL: 'db'
DISCOVERY_DJANGO_SETTINGS_MODULE: 'course_discovery.settings.devstack'
DISCOVERY_ELASTICSEARCH_HOST: 'es'
FROM edxops/precise-common:latest
MAINTAINER edxops
USER root

RUN apt-get update

# Load the local configuration checkout and run the edxapp play with the
# docker-specific overrides; Python sandboxing is disabled inside the
# container. (The original file COPY'd the overrides file twice; once is
# enough and avoids a redundant image layer.)
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/edxapp/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook edxapp.yml -i '127.0.0.1,' -c local -e "EDXAPP_PYTHON_SANDBOX=false" -t "install:base,install:configuration,install:app-requirements,install:code" -e@/ansible_overrides.yml

WORKDIR /edx/app
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
EXPOSE 8000 8010
---
DOCKER_TLD: "edx"
# prevents Travis from giving up on the build
COMMON_PIP_VERBOSITY: "-vvvv"
EDXAPP_MYSQL_HOST: "db.{{ DOCKER_TLD }}"
EDXAPP_MONGO_HOSTS:
- "mongo.{{ DOCKER_TLD }}"
FROM edxops/precise-common:latest
MAINTAINER edxops
WORKDIR /edx/app/edx_ansible
WORKDIR /edx/app/edx_ansible/edx_ansible/playbooks/edx-east
RUN sudo git checkout e0d/docker-latest
RUN sudo git reset --hard origin/e0d/docker-latest
RUN sudo git pull
RUN sudo ansible-playbook elasticsearch-docker.yml -c local
USER root
WORKDIR /etc/elasticsearch
CMD ["/usr/share/elasticsearch/bin/elasticsearch","-f"]
EXPOSE 9200 9300
FROM edxops/precise-common:latest
MAINTAINER edxops
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/forums/ansible_overrides.yml /
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook forum.yml \
-i '127.0.0.1,' -c local \
-t "install:base,install:configuration,install:app-requirements,install:code" \
-e@/ansible_overrides.yml
WORKDIR /edx/app
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
EXPOSE 4567
\ No newline at end of file
---
FLOCK_TLD: "edx"
FORUM_MONGO_HOSTS:
- mongo.{{ FLOCK_TLD }}
FORUM_ELASTICSEARCH_HOST: "es.{{ FLOCK_TLD }}"
forum_source_repo: "https://github.com/open-craft/cs_comments_service.git"
forum_version: "mongoid5"
FORUM_USE_TCP: "true"
FORUM_RACK_ENV: "staging"
FORUM_SINATRA_ENV: "staging"
\ No newline at end of file
# Build using: docker build -f Dockerfile.gocd-agent -t gocd-agent .
# FROM edxops/precise-common:latest
FROM gocd/gocd-agent:16.2.1
LABEL version="0.01" \
description="This custom go-agent docker file installs additional requirements for the edx pipeline"
RUN apt-get update && apt-get install -y -q \
python \
python-dev \
python-distribute \
python-pip
# TODO: replace this with a pip install command so we can version this properly
RUN git clone https://github.com/edx/tubular.git /opt/tubular
RUN pip install -r /opt/tubular/requirements.txt
RUN cd /opt/tubular;python setup.py install
\ No newline at end of file
##Usage
Start the container with this:
```docker run -ti -e GO_SERVER=your.go.server.ip_or_host gocd/gocd-agent```
If you need to start a few GoCD agents together, you can of course use the shell to do that. Start a few agents in the background, like this:
```for each in 1 2 3; do docker run -d --link angry_feynman:go-server gocd/gocd-agent; done```
##Getting into the container
Sometimes, you need a shell inside the container (to create test repositories, etc). docker provides an easy way to do that:
```docker exec -i -t CONTAINER-ID /bin/bash```
To check the agent logs, you can do this:
```docker exec -i -t CONTAINER-ID tail -f /var/log/go-agent/go-agent.log```
##Agent Configuration
The go-agent expects its configuration to be found at ```/var/lib/go-agent/config/```. Sharing the
configuration between containers is done by mounting a volume at this location that contains any configuration files
necessary.
**Example docker run command:**
```docker run -ti -v /tmp/go-agent/conf:/var/lib/go-agent/config -e GO_SERVER=gocd.sandbox.edx.org 718d75c467c0 bash```
[How to setup auto registration for remote agents](https://docs.go.cd/current/advanced_usage/agent_auto_register.html)
FROM edxops/precise-common:latest
MAINTAINER edxops

# Load the local configuration checkout and run the insights play with the
# docker-specific overrides.
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/insights/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook insights.yml \
    -i '127.0.0.1,' -c local \
    -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" \
    -e@/ansible_overrides.yml

# Exec-form CMD must split the command into separate array elements; the
# original packed the whole command line into one string, which docker treats
# as a single executable path and fails to exec at container start.
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
EXPOSE 8110 18110
---
DOCKER_TLD: "edx"
INSIGHTS_LMS_BASE: "http://lms.{{ DOCKER_TLD }}:8000"
INSIGHTS_CMS_BASE: "http://cms.{{ DOCKER_TLD }}:8010"
INSIGHTS_BASE_URL: "http://insights.{{ DOCKER_TLD }}:8110"
INSIGHTS_MEMCACHE:
- "memcache.{{ DOCKER_TLD }}:11211"
ANALYTICS_API_ENDPOINT: "http://analtyicsapi.{{ DOCKER_TLD }}:8100/api/v0"
INSIGHTS_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ INSIGHTS_DATABASE_NAME }}'
USER: 'rosencrantz'
PASSWORD: 'secret'
HOST: "db.{{ DOCKER_TLD }}"
PORT: '3306'
- name: Deploy Insights
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common_vars
- docker
- insights
FROM edxops/precise-common:latest
MAINTAINER edxops
USER root
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/nginx/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook nginx.yml -c local \
-i '127.0.0.1,' \
-e@roles/edxapp/defaults/main.yml \
-e@roles/xqueue/defaults/main.yml \
-e@roles/certs/defaults/main.yml \
-e@roles/forum/defaults/main.yml
RUN echo "\ndaemon off;" >> /etc/nginx/nginx.conf
WORKDIR /etc/nginx
CMD ["/usr/sbin/nginx"]
EXPOSE 18000 48000 18010 48010 18020
FROM ubuntu:precise
MAINTAINER edxops
ENV ANSIBLE_REPO="https://github.com/edx/ansible"
ENV CONFIGURATION_REPO="https://github.com/edx/configuration.git"
ENV CONFIGURATION_VERSION="master"
ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh
RUN chmod +x /tmp/ansible-bootstrap.sh
RUN /tmp/ansible-bootstrap.sh
FROM ubuntu:trusty
MAINTAINER edxops
ENV ANSIBLE_REPO="https://github.com/edx/ansible"
ENV CONFIGURATION_REPO="https://github.com/edx/configuration.git"
ENV CONFIGURATION_VERSION="master"
ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh
RUN chmod +x /tmp/ansible-bootstrap.sh
RUN /tmp/ansible-bootstrap.sh
FROM edxops/precise-common:latest
MAINTAINER edxops
USER root
RUN apt-get update
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/xqueue/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook xqueue.yml -i '127.0.0.1,' -c local -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" -e@/ansible_overrides.yml
COPY docker/build/xqueue/docker-run.sh /
ENTRYPOINT ["/docker-run.sh"]
EXPOSE 8110 18110
---
DOCKER_TLD: "xqueue"
CONFIGURATION_REPO: "https://github.com/edx/configuration.git"
CONFIGURATION_VERSION: "hack2015/docker"
XQUEUE_SYSLOG_SERVER: "localhost"
XQUEUE_RABBITMQ_HOSTNAME: "rabbit.{{ DOCKER_TLD }}"
XQUEUE_MYSQL_HOST: "db.{{ DOCKER_TLD }}"
#!/bin/bash
set -e
/usr/sbin/rsyslogd
/edx/app/supervisor/venvs/supervisor/bin/supervisord --nodaemon --configuration /edx/app/supervisor/supervisord.conf
FROM edxops/trusty-common:v3
MAINTAINER edxops
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/xqwatcher/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook xqwatcher.yml \
-i '127.0.0.1,' -c local \
-t "install:base,install:configuration,install:system-requirements,install:app-requirements,install:code" \
-e@/ansible_overrides.yml
WORKDIR /edx/app
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
---
FLOCK_TLD: "edx"
# Note that this is currently a non-working exemplar configuration,
# there isn't a grader provided in the edx-demo-course yet.
XQWATCHER_COURSES:
- COURSE: "demo"
GIT_REPO: "https://github.com/edx/edx-demo-course"
GIT_REF: "master"
QUEUE_NAME: "test-pull"
QUEUE_CONFIG:
SERVER: "http://xqueue.{{ FLOCK_TLD }}"
CONNECTIONS: 2
AUTH: ["lms", "password"]
HANDLERS:
- HANDLER: "xqueue_watcher.jailedgrader.JailedGrader"
CODEJAIL:
name: "demo"
python_bin: "{{ xqwatcher_venv_base }}/demo/bin/python"
user: "demo"
KWARGS:
grader_root: "../data/edx-demo-course/graders/"
PYTHON_REQUIREMENTS:
- { name: "numpy", version: "1.6.2" }
- { name: "lxml", version: "2.3.6" }
#
# Single Docker Compose cluster that will eventually start
# all edX services in a single flock of coordinated containers
#
# This work is currently experimental and a number of services
# are missing entirely. Containers that are present will not
# currently work without manual steps. We are working on
# addressing that.
#
# When running compose you must pass in two environment variables
#
# DOCKER_EDX_ROOT which points to the directory into which you checkout
# your edX source code. For example, assuming the following directory
# structure under /home/me
#
# |-- edx-src
# | |-- discovery
# | |-- cs_comments_service
# | |-- edx_discovery
# | |-- edx-platform
# | |-- xqueue
# you would define DOCKER_EDX_ROOT="/home/me/edx-src"
#
# DOCKER_DATA_ROOT is the location on your host machine where Docker
# guests can access your local filesystem for storing persistent data
# files, say MongoDB or MySQL data files.
#
db:
  container_name: db
  image: mysql:5.6
  environment:
    # Compose passes list-form environment values literally: quoting the
    # value inside the scalar (e.g. MYSQL_PASSWORD='password') embeds the
    # quote characters in the value itself, so values are written bare.
    - MYSQL_ROOT_PASSWORD=password
    #- MYSQL_DATABASE=''
    - MYSQL_USER=migrate
    - MYSQL_PASSWORD=password
  volumes:
    - ${DOCKER_DATA_ROOT}/mysql/data:/data
  ports:
    # Port mappings are quoted so YAML never mis-types digit:digit scalars.
    - "3306:3306"

mongo:
  container_name: mongo
  image: mongo:3.0
  volumes:
    - ${DOCKER_DATA_ROOT}/mongo/data:/data
  ports:
    - "27017:27017"

# Need to build our own for ES 0.9
es:
  container_name: es
  image: edxops/elasticsearch:v1
  volumes:
    - ${DOCKER_DATA_ROOT}/elasticsearch/data:/data
  ports:
    - "9100:9100"
    - "9200:9200"
    - "9300:9300"

memcache:
  container_name: memcache
  image: memcached:1.4.24
  volumes:
    - ${DOCKER_DATA_ROOT}/memcache/data:/data
  ports:
    - "11211:11211"

nginx:
  container_name: nginx
  image: edxops/nginx:v1
  ports:
    - "80:80"
    - "443:443"

rabbitmq:
  container_name: rabbitmq
  image: rabbitmq:3.5.3
  volumes:
    - ${DOCKER_DATA_ROOT}/rabbitmq/data:/data
  ports:
    - "5672:5672"

forums:
  container_name: forums
  # Image built from the opencraft fork as it fixes
  # an auth bug. Update when the change merges
  # upstream
  image: edxops/forums:opencraft-v2
  volumes:
    - ${DOCKER_EDX_ROOT}/cs_comments_service:/edx/app/forum/cs_comments_service
  ports:
    - "4567:4567"

xqueue:
  container_name: xqueue
  image: edxops/xqueue:v1
  ports:
    - "8040:8040"
    - "18040:18040"
  volumes:
    - ${DOCKER_EDX_ROOT}/xqueue:/edx/app/edxapp/xqueue

lms:
  container_name: lms
  image: edxops/edxapp:v2
  ports:
    - "8000:8000"
    - "18000:18000"
  volumes:
    - ${DOCKER_EDX_ROOT}/edx-platform:/edx/app/edxapp/edx-platform

cms:
  container_name: cms
  image: edxops/edxapp:v2
  ports:
    - "8010:8010"
    - "18010:18010"
  volumes:
    - ${DOCKER_EDX_ROOT}/edx-platform:/edx/app/edxapp/edx-platform
# Run the analytics_api role (plus shared docker setup) on all hosts.
- name: Deploy Analytics API
  hosts: all
  # `sudo: True` is the deprecated pre-1.9 spelling of privilege escalation;
  # `become` is the supported equivalent and matches newer plays in the repo.
  become: true
  gather_facts: True
  vars:
    serial_count: 1
  serial: "{{ serial_count }}"
  roles:
    - common_vars
    - docker
    - analytics_api
[defaults]
jinja2_extensions=jinja2.ext.do
roles_path=../playbooks/roles
library=../playbooks/library
roles_path=../plays:../../playbooks/roles
library=../../playbooks/library
- name: Deploy Credentials
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- nginx
- docker
- role: credentials
nginx_default_sites:
- credentials
- name: Deploy Discovery
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- nginx
- role: discovery
nginx_default_sites:
- discovery
\ No newline at end of file
- name: Deploy edxapp
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common_vars
- docker
- edxapp
- name: Deploy forum
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common_vars
- docker
- forum
- name: Deploy Insights
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- docker
- insights
../../playbooks/library/
\ No newline at end of file
- name: Deploy nginx
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common_vars
- docker
- role: nginx
nginx_sites:
- lms
- cms
- xqueue
- certs
- forum
nginx_default_sites:
- lms
nginx_extra_sites: "{{ NGINX_EDXAPP_EXTRA_SITES }}"
nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}"
nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}"
../../playbooks/roles/
\ No newline at end of file
- name: Deploy xqueue
hosts: all
sudo: True
gather_facts: True
roles:
- common_vars
- docker
- xqueue
- name: Deploy xqwatcher
hosts: all
sudo: True
gather_facts: True
roles:
- docker
- xqwatcher
- name: Configure instance(s)
hosts: all
sudo: True
roles:
- jenkins_analytics
import os
import datetime
import time
import logging
import datadog
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("dd").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
"""
Originally written by 'Jharrod LaFon'
#https://github.com/jlafon/ansible-profile/blob/master/callback_plugins/profile_tasks.py
"""
class CallbackModule(object):
    """
    Ansible callback plugin that records the wall-clock duration of each
    task and of the whole playbook, logs the ten slowest tasks, and (when
    DATADOG_API_KEY is set in the environment) ships the timings to Datadog.
    """

    def __init__(self):
        # Maps task name -> start timestamp (float) while the task runs,
        # then -> (end_timestamp, elapsed_seconds) once the next task starts.
        self.stats = {}
        self.current_task = None
        self.playbook_name = None
        self.datadog_api_key = os.getenv('DATADOG_API_KEY')
        self.datadog_api_initialized = False

        if self.datadog_api_key:
            datadog.initialize(api_key=self.datadog_api_key,
                               app_key=None)
            self.datadog_api_initialized = True

    def clean_tag_value(self, value):
        # Datadog tag hygiene: " | " separators become ".", remaining spaces
        # become "-", everything lowercased.
        return value.replace(" | ", ".").replace(" ", "-").lower()

    def playbook_on_play_start(self, pattern):
        # `self.play` is injected by Ansible before this callback fires.
        self.playbook_name, _ = os.path.splitext(
            os.path.basename(self.play.playbook.filename)
        )

    def playbook_on_task_start(self, name, is_conditional):
        """
        Logs the start of each task
        """
        if self.current_task is not None:
            # Record the running time of the last executed task
            self.stats[self.current_task] = (time.time(), time.time() - self.stats[self.current_task])

        # Record the start time of the current task
        self.current_task = name
        self.stats[self.current_task] = time.time()

    def playbook_on_stats(self, stats):
        """
        Prints the timing of each task and total time to
        run the complete playbook
        """
        # Record the timing of the very last task, we use it here, because we
        # don't have stop task function by default
        if self.current_task is not None:
            self.stats[self.current_task] = (time.time(), time.time() - self.stats[self.current_task])

        # Sort the tasks by their running time
        results = sorted(self.stats.items(),
                         key=lambda value: value[1][1], reverse=True)

        # Total time to run the complete playbook
        total_seconds = sum([x[1][1] for x in self.stats.items()])

        # send the metric to datadog
        if self.datadog_api_initialized:
            datadog_tasks_metrics = []
            for name, points in results:
                datadog_tasks_metrics.append({'metric': 'edx.ansible.task_duration',
                                              'date_happened': points[0],
                                              'points': points[1],
                                              'tags': ['task:{0}'.format(self.clean_tag_value(name)),
                                                       'playbook:{0}'.format(self.clean_tag_value(self.playbook_name))
                                                       ]
                                              }
                                             )
            try:
                datadog.api.Metric.send(datadog_tasks_metrics)
                datadog.api.Metric.send(metric="edx.ansible.playbook_duration",
                                        date_happened=time.time(),
                                        points=total_seconds,
                                        tags=["playbook:{0}".format(self.clean_tag_value(self.playbook_name))]
                                        )
            except Exception as ex:
                # BaseException.message was removed in Python 3; log the
                # exception object itself instead of the legacy attribute.
                logger.error(ex)

        # Log the time of each task
        for name, elapsed in results[:10]:
            logger.info(
                "{0:-<80}{1:->8}".format(
                    '{0} '.format(name),
                    ' {0:.02f}s'.format(elapsed[1]),
                )
            )

        logger.info("\nPlaybook {0} finished: {1}, {2} total tasks. {3} elapsed. \n".format(
            self.playbook_name,
            time.asctime(),
            len(self.stats.items()),
            datetime.timedelta(seconds=(int(total_seconds)))
        )
        )
# Spawn an instance from an AMI and then report if any packages need to be upgraded
#
# Usage:
# ansible-playbook check_package_upgrades.yml -i localhost, -e 'packages="PKG1 PKG2 ..."' -e 'ami=ami-xxxxxxxx'
# -e 'key_name=KEY' -e 'security_group=sg-xxxxxxxx' -e 'subnet_id=subnet-xxxxxxxx'
#
# Required arguments:
# -e 'packages="PKG1 ...": space-separated list of packages to check
# -e 'ami=ami-xxxxxxxx': AMI ID to use for the instance
# -e 'key_name=KEY': private ssh key to use for the instance
# -e 'security_group=sg-xxxxxxxx': security group to use for the instance
# -e subnet_id=subnet-xxxxxxxx': subnet to use for the instance
#
# Relevant optional arguments:
# -e 'script_path=PATH': path to the apt_check_upgrades.py script
# -e 'report_dest_path=PATH': path to which the resulting report will be written.
# --private-key=PATH_TO_PRIVATE_KEY_FILE: ssh key to use when connecting to the new host
# -e 'key_name': AWS key to use for the new instance. This key must be available locally
# either as an ssh profile or as specified with the above option.
# -e 'profile=PROFILE': AWS profile to use for AWS API calls
# -e 'region=REGION': AWS region to make the instance in
# -e 'security_group_id=sg-xxxxxxxx': security group to attach to the new instance
# -e 'subnet_id=subnet-xxxxxxxx': subnet to make the new instance in
# -e 'instance_type=INSTANCE.TYPE': instance type to use
#Get an AMI ID from an E-D-P:
#edc=CHANGEME
#lconfig=$(aws autoscaling describe-auto-scaling-groups |
# jq -r ".AutoScalingGroups[] | select(.Tags[] | select(.Key == \"Name\").Value == \"$edc\").LaunchConfigurationName")
#if [ $(echo $lconfig | wc -l) -ne 1 ]; then
#  echo "More than 1 ASG found for E-D-P: $edc"
# exit 1
#else
# ami=$(aws autoscaling describe-launch-configurations --launch-configuration-names $lconfig |
# jq -r '.LaunchConfigurations[].ImageId')
#fi
# Spin up a short-lived EC2 instance from the requested AMI so a
# follow-up play can inspect its installed packages.
- name: Launch instance for checking packages
  hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # Required -e arguments; intentionally null until supplied.
    ami: null
    profile: null
    security_group_id: null
    subnet_id: null
    key_name: null
    # Optional overrides.
    region: 'us-east-1'
    instance_type: 't2.large'
  tasks:
    - name: Launch instance
      ec2:
        image: "{{ ami }}"
        instance_type: "{{ instance_type }}"
        profile: "{{ profile }}"
        region: "{{ region }}"
        group_id: "{{ security_group_id }}"
        vpc_subnet_id: "{{ subnet_id }}"
        key_name: "{{ key_name }}"
        wait: true
        instance_tags:
          Name: temp-package-checker
        volumes:
          - device_name: /dev/sda1
            volume_size: 50
            delete_on_termination: true
      register: instance
    # Block until sshd is reachable before handing off to later plays.
    - name: Wait for instance to be ready
      wait_for:
        port: 22
        host: "{{ instance.instances.0.private_ip }}"
    # Expose the new instance to subsequent plays via an in-memory group.
    - name: Add new instance to host group
      add_host:
        hostname: "{{ instance.instances.0.private_ip }}"
        id: "{{ instance.instances.0.id }}"
        groups: instance_group
        ansible_ssh_user: ubuntu
# On the freshly launched instance: refresh apt metadata, ship the
# apt_check_upgrades.py script over, run it against the requested
# packages, and fetch the YAML report back to the control machine.
- name: Check for package upgrades
  hosts: instance_group
  become: true
  vars:
    # Required (-e): space-separated list of packages to check.
    packages: null
    # Local path to the apt_check_upgrades.py script to upload.
    script_path: ./apt_check_upgrades.py
    # Local destination the report is fetched to.
    report_dest_path: .
  tasks:
    - name: Update apt cache
      apt:
        update_cache: true
    - name: Install pyyaml to allow for yaml script output
      pip:
        name: pyyaml
        state: present
    - name: Transfer package-checking script
      copy:
        src: "{{ script_path }}"
        dest: /tmp/apt_check_upgrades.py
        # Quoted so YAML does not reinterpret the octal literal.
        mode: "0700"
    - name: Run package-checking script
      shell: /tmp/apt_check_upgrades.py -y {{ packages }} > /tmp/upgrade_results.yml
    - name: Retrieve results
      fetch:
        src: /tmp/upgrade_results.yml
        dest: "{{ report_dest_path }}"
        flat: true
# Tear down the temporary package-checker instance created by the
# first play of this playbook.
- name: Clean up instance
  hosts: localhost
  connection: local
  vars:
    region: 'us-east-1'
  tasks:
    - name: Terminate instance
      ec2:
        region: "{{ region }}"
        state: absent
        instance_ids: "{{ hostvars[groups['instance_group'][0]]['id'] }}"
......@@ -6,6 +6,7 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- aide
- role: datadog
when: COMMON_ENABLE_DATADOG
......
# Configure an admin instance with jenkins and asgard.
# Usage: ansible-playbook alton.yml -i <admin-host>, -e <secure-repo>/admin/edx_admin.yml -e <secure-repo>/admin/admin.yml
- name: Configure instance(s)
hosts: all
sudo: True
......@@ -7,4 +8,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- alton
......@@ -9,6 +9,7 @@
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
- mysql
- edxlocal
- analytics_api
......
......@@ -8,10 +8,10 @@
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'analytics-api'
roles:
- aws
- role: nginx
nginx_sites:
- analytics_api
- aws
- analytics_api
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -3,6 +3,7 @@
sudo: True
gather_facts: True
roles:
- aws
- antivirus
- role: datadog
when: COMMON_ENABLE_DATADOG
......
# Deploy Asqatasun; the mysql role runs before the asqatasun role.
- name: Deploy Asqatasun
  hosts: all
  gather_facts: true
  sudo: true
  roles:
    - mysql
    - asqatasun
......@@ -6,4 +6,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- aws
#
# Requires MySQL-python be installed for system python
# This play will create databases and user for an application.
# It can be run like so:
#
# ansible-playbook -i 'localhost,' create_analytics_reports_dbs.yml -e@./db.yml
# ansible-playbook -c local -i 'localhost,' create_dbs_and_users.yml -e@./db.yml
#
# where the content of db.yml contains the following dictionaries
#
......@@ -44,14 +43,12 @@
- name: Create databases and users
hosts: all
connection: local
gather_facts: False
tasks:
# Install required library, currently this needs to be available
# to system python.
- name: install python mysqldb module
pip: name={{item}} state=present
sudo: yes
with_items:
- MySQL-python
......
# Usage: AWS_PROFILE=myprofile ansible-playbook create_csmh_db.yml -i localhost, -e 'from_db=my-rds-identifier rds_name=env-dep-edxapphistory'
# Create a read replica of an existing RDS instance to serve as the
# separate edxapp StudentModuleHistory database.
- name: Create new edxapp history RDS instance
  hosts: all
  connection: local
  gather_facts: false
  vars:
    # Required (-e): source RDS identifier and a name for the replica.
    from_db: null
    rds_name: null
    region: us-east-1
    instance_type: db.m4.large
    # NOTE(review): env/app are not referenced in this play -- confirm
    # they are still needed.
    env: null
    app: edxapp
  tasks:
    # Fail fast when a required extra var was not supplied.
    # NOTE(review): the curly braces inside `when` are redundant and warn
    # on newer Ansible; the expression evaluates the variable named by item.
    - name: Validate arguments
      fail:
        msg: "One or more arguments were not set correctly: {{ item }}"
      when: not {{ item }}
      with_items:
        - from_db
        - rds_name
    # Create the replica of from_db and block (up to 15 minutes) until
    # it reports available.
    - name: Create edxapp history RDS instance
      rds:
        command: replicate
        instance_name: "{{ rds_name }}"
        source_instance: "{{ from_db }}"
        region: "{{ region }}"
        instance_type: "{{ instance_type }}"
        publicly_accessible: no
        wait: yes
        wait_timeout: 900
      register: created_db
# Deploy the edX Credentials service fronted by nginx.
# The ENABLE_* vars default the per-play monitoring toggles off; the
# datadog/splunkforwarder/newrelic roles are gated on the externally
# supplied COMMON_ENABLE_* flags.
- name: Deploy edX Credentials Service
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    ENABLE_DATADOG: False
    ENABLE_SPLUNKFORWARDER: False
    ENABLE_NEWRELIC: False
    CLUSTER_NAME: 'credentials'
  roles:
    # nginx serves credentials as both a named and the default site.
    - role: nginx
      nginx_sites:
        - credentials
      nginx_default_sites:
        - credentials
    - aws
    - credentials
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
......@@ -4,16 +4,15 @@
gather_facts: False
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
roles:
- common
- aws
- role: nginx
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
nginx_default_sites:
......
......@@ -6,4 +6,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- devpi
# Deploy the edX Course Discovery service fronted by nginx.
- name: Deploy edX Course Discovery Service
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    # Per-play monitoring defaults; the monitoring roles below are
    # gated on the externally supplied COMMON_ENABLE_* flags.
    ENABLE_DATADOG: False
    ENABLE_SPLUNKFORWARDER: False
    ENABLE_NEWRELIC: False
    CLUSTER_NAME: 'discovery'
  roles:
    - aws
    # NOTE(review): only nginx_default_sites is set here (no nginx_sites),
    # unlike sibling IDA plays -- confirm this is intentional.
    - role: nginx
      nginx_default_sites:
        - discovery
    - discovery
    - role: datadog
      when: COMMON_ENABLE_DATADOG
    - role: splunkforwarder
      when: COMMON_ENABLE_SPLUNKFORWARDER
    - role: newrelic
      when: COMMON_ENABLE_NEWRELIC
......@@ -8,12 +8,12 @@
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'ecommerce'
roles:
- aws
- role: nginx
nginx_sites:
- ecommerce
nginx_default_sites:
- ecommerce
- aws
- ecommerce
- role: datadog
when: COMMON_ENABLE_DATADOG
......
- name: Deploy the edx_ansible role
- name: Deploy the edx_ansible on AWS
hosts: all
sudo: True
gather_facts: True
......@@ -6,4 +6,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- edx_ansible
......@@ -18,6 +18,7 @@
- analytics_api
- ecommerce
- programs
- credentials
nginx_default_sites:
- lms
- mysql
......@@ -40,6 +41,7 @@
- analytics_api
- ecommerce
- programs
- credentials
- oauth_client_setup
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -14,6 +14,7 @@
- "{{ secure_dir }}/vars/edx_jenkins_tests.yml"
roles:
- common
- aws
- role: nginx
nginx_sites:
- lms
......@@ -21,7 +22,6 @@
- lms-preview
- xqueue
- xserver
- ora
nginx_default_sites:
- lms
- mysql
......@@ -30,5 +30,4 @@
- edxapp
- xqueue
- xserver
- ora
- rabbitmq
......@@ -6,6 +6,7 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- aws
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -20,6 +21,7 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- aws
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -34,6 +36,7 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- aws
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -48,6 +51,7 @@
- "{{ secure_dir }}/vars/users.yml"
gather_facts: True
roles:
- aws
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......@@ -63,6 +67,7 @@
gather_facts: True
vars:
roles:
- aws
- role: virtualenv
virtualenv_user: "notifier"
virtualenv_user_home: "/opt/wwc/notifier"
......
......@@ -53,19 +53,18 @@
search_regex="final-message"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles:
# rerun common to set the hostname, nginx to set basic auth
- common
- aws
- edx-sandbox
- role: nginx
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
- forum
......
# Usage: ansible-playbook -i localhost, edx_service.yml -e@<PATH TO>/edx-secure/cloud_migrations/edx_service.yml -e@<PATH TO>/<DEPLOYMENT>-secure/cloud_migrations/vpcs/<ENVIRONMENT>-<DEPLOYMENT>.yml -e@<PATH TO>/edx-secure/cloud_migrations/idas/<CLUSTER>.yml
---
- name: Build application artifacts
......@@ -174,6 +175,7 @@
- name: Setup ELB DNS
route53:
profile: "{{ profile }}"
command: "create"
zone: "{{ dns_zone_name }}"
record: "{{ item.elb.name }}.{{ dns_zone_name }}"
......@@ -201,6 +203,7 @@
instance_type: "{{ service_config.instance_type }}"
instance_profile_name: "{{ instance_profile_name }}"
volumes: "{{ service_config.volumes }}"
instance_monitoring: "{{ service_config.detailed_monitoring }}"
when: auto_scaling_service
#
......@@ -307,9 +310,7 @@
region: "{{ aws_region }}"
wait: "yes"
group_id:
#Apply the ELB security group to the instances so they can talk to teach other
- "{{ service_sec_group.group_id }}"
- "{{ elb_sec_group.group_id }}"
key_name: "{{ service_config.key_name }}"
vpc_subnet_id: "{{ created_service_subnets.results[item | int % created_service_subnets.results | length].subnet_id }}"
instance_type: "{{ service_config.instance_type }}"
......@@ -318,6 +319,7 @@
instance_profile_name: "{{ instance_profile_name }}"
volumes: "{{ service_config.volumes }}"
ebs_optimized: "{{ service_config.ebs_optimized }}"
monitoring: "{{ detailed_monitoring }}"
with_sequence: count={% if not auto_scaling_service %}{{ (create_instances | int - potential_existing_instances.instances|length) | default(created_service_subnets.results | length) }}{% else %}0{% endif %}
when: not auto_scaling_service and (potential_existing_instances.instances|length < create_instances | int)
register: created_instances
......
......@@ -3,28 +3,33 @@
sudo: False
gather_facts: False
vars:
db_dry_run: "--db-dry-run"
syncdb: false
db_dry_run: "--list"
roles:
- edxapp
tasks:
- name: syncdb
- name: migrate lms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} syncdb --noinput --settings=aws_migrate
python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws_migrate
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
when: syncdb
# Migrate any database in the config, but not the read_replica
when: item != 'read_replica'
with_items:
- lms
- cms
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: migrate
- "{{ lms_auth_config.DATABASES.keys() }}"
tags:
- always
- name: migrate cms
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} migrate --noinput {{ db_dry_run }} --settings=aws_migrate
python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws_migrate
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
# Migrate any database in the config, but not the read_replica
when: item != 'read_replica'
with_items:
- lms
- cms
- "{{ cms_auth_config.DATABASES.keys() }}"
tags:
- always
......@@ -25,6 +25,7 @@
when: elb_pre_post
roles:
- common
- aws
- oraclejdk
- elasticsearch
post_tasks:
......
......@@ -6,4 +6,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- flower
......@@ -8,10 +8,10 @@
ENABLE_NEWRELIC: True
CLUSTER_NAME: 'insights'
roles:
- aws
- role: nginx
nginx_sites:
- insights
- aws
- insights
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -7,4 +7,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- jenkins_admin
......@@ -10,7 +10,7 @@
COMMON_DATA_DIR: "/mnt2"
COMMON_ENABLE_DATADOG: True
COMMON_ENABLE_SPLUNKFORWARDER: True
jenkins_jvm_args: "-Djava.awt.headless=true -Xmx7168m -XX:MaxPermSize=512m"
jenkins_jvm_args: "-Djava.awt.headless=true -Xmx8192m -XX:MaxPermSize=512m"
SPLUNKFORWARDER_LOG_ITEMS:
- source: '/var/lib/jenkins/jobs/*/builds/*/junitResult.xml'
......@@ -46,7 +46,7 @@
followSymlink: false
roles:
- common
- aws
- role: datadog
when: COMMON_ENABLE_DATADOG
- jenkins_master
......
# Configure an instance with the tool jenkins.
# Install the tools_jenkins role on the target hosts.
- name: Configure Jenkins instance(s)
  hosts: all
  gather_facts: true
  sudo: true
  vars:
    serial_count: 1
  # Roll out one host at a time by default; override serial_count to widen.
  serial: "{{ serial_count }}"
  roles:
    - aws
    - tools_jenkins
......@@ -13,11 +13,11 @@
serial: "{{ serial_count }}"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles:
- aws
- mysql
- edxlocal
- mongo
......
......@@ -13,9 +13,9 @@
serial: "{{ serial_count }}"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles:
- aws
- jenkins_worker
......@@ -14,7 +14,7 @@
- name: stop certs service
service: name="certificates" state="stopped"
- name: checkout code
git: >
git_2_0_1: >
repo="{{ repo_url }}"
dest="{{ repo_path }}"
version="{{ certificates_version }}"
......
# Install the locust role (load testing) on the target hosts.
- name: Deploy Locust
  hosts: all
  gather_facts: true
  sudo: true
  roles:
    - aws
    - locust
......@@ -6,6 +6,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- aws
- minos
......@@ -3,6 +3,7 @@
sudo: True
gather_facts: True
roles:
- aws
- mongo
- role: datadog
when: COMMON_ENABLE_DATADOG
......
# Manages a mongo cluster.
# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG
# as used by mongo_replica_set in the mongo_3_0 role.
#
# If you are initializing a cluster, your command might look like:
# ansible-playbook mongo_3_0.yml -i 10.1.1.1,10.2.2.2,10.3.3.3 -e@/path/to/edx.yml -e@/path/to/ed.yml
# If you just want to deploy an updated replica set config, you can run
# ansible-playbook mongo_3_0.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set
#
# ADDING A NEW CLUSTER MEMBER
# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory
# ansible-playbook mongo_3_0.yml -i 10.1.1.1,10.2.2.2,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml
- name: Deploy MongoDB
  hosts: all
  gather_facts: true
  sudo: true
  vars:
    serial_count: 3
  # Apply to at most three cluster members at a time; override with
  # -e serial_count=N.
  serial: "{{ serial_count }}"
  roles:
    - aws
    - mongo_3_0
# Install a MySQL server via the mysql role.
- name: Deploy MySQL
  hosts: all
  gather_facts: true
  sudo: true
  roles:
    - mysql
......@@ -7,10 +7,10 @@
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: True
roles:
- aws
- role: nginx
nginx_sites:
- edx_notes_api
- aws
- edx_notes_api
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -8,5 +8,6 @@
- "roles/insights/defaults/main.yml"
- "roles/ecommerce/defaults/main.yml"
- "roles/programs/defaults/main.yml"
- "roles/credentials/defaults/main.yml"
roles:
- oauth_client_setup
......@@ -32,7 +32,7 @@
tags:
- deploy
- name: syncdb and migrate
- name: migrate
shell: >
{{ edxapp_venv_dir }}/bin/python manage.py lms migrate --settings=aws
chdir={{ edxapp_code_dir }}
......
......@@ -8,12 +8,12 @@
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'programs'
roles:
- aws
- role: nginx
nginx_sites:
- programs
nginx_default_sites:
- programs
- aws
- programs
- role: datadog
when: COMMON_ENABLE_DATADOG
......
# Step 2 of migrating to the MySQL separate-database StudentModuleHistory backend
# Step 1 is in create_edxapp_history_db.yml
#
# Usage: AWS_PROFILE=myprofile ansible-playbook promote_csmh_db.yml -i localhost, -e 'rds_name=env-dep-csm admin_password=SUPERSECRET'
#NB: should this do tags?
- name: Promote new edxapp history RDS instance
  hosts: all
  connection: local
  gather_facts: false
  vars:
    # Required (-e): instance to promote and its new master password.
    rds_name:
    region: us-east-1
    admin_password:
    # Backup/maintenance settings applied by the final modify task.
    backup_retention_days: 30
    backup_window: 02:00-03:00
    maint_window: Mon:00:00-Mon:01:15
  tasks:
    # Fail fast when a required extra var was not supplied.
    # NOTE(review): the curly braces inside `when` are redundant and warn
    # on newer Ansible; the expression evaluates the variable named by item.
    - name: Validate arguments
      fail:
        msg: "One or more arguments were not set correctly: {{ item }}"
      when: not {{ item }}
      with_items:
        - rds_name
        - admin_password
    # Exits 1 (failing the task) when the locally installed awscli is
    # older than 1.9.9; version comparison is done with sort -V.
    - name: Validate boto version >= 1.9.9
      shell: |
        version=$(aws --version 2>&1 | sed -r 's|.*aws-cli/([0-9]+\.[0-9]+\.[0-9]).*|\1|')
        if [ $version != '1.9.9' ]; then
          cmp=$(echo -e "$version\n1.9.9" | sort -rV | head -n1)
          [ $cmp = "1.9.9" ] && exit 1 || exit 0
        fi
      changed_when: False
    # Promote the replica to a standalone primary and wait (up to 15
    # minutes) for the operation to finish.
    - name: Promote edxapp history RDS to primary instance
      #Use local module for promoting only because of this issue:
      #<https://github.com/ansible/ansible-modules-core/issues/2150>
      rds_local:
        command: promote
        instance_name: "{{ rds_name }}"
        region: "{{ region }}"
        wait: yes
        wait_timeout: 900
    #Can't use the module if you want to be able to set storage types until this PR lands:
    #<https://github.com/ansible/ansible-modules-core/issues/633>
    #The StorageType option isn't in boto, but it is in boto3
    #Requires awscli>=1.9.9
    - name: Modify edxapp history RDS
      shell: >
        aws rds modify-db-instance
        --db-instance-identifier {{ rds_name }}
        --apply-immediately
        --multi-az
        --master-user-password {{ admin_password }}
        --publicly-accessible
        --backup-retention-period {{ backup_retention_days }}
        --preferred-backup-window {{ backup_window }}
        --preferred-maintenance-window {{ maint_window }}
        --storage-type gp2
......@@ -3,4 +3,5 @@
sudo: True
gather_facts: True
roles:
- aws
- sitespeedio
......@@ -6,6 +6,7 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- snort
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -9,6 +9,7 @@
ENABLE_SPLUNKFORWARDER: True
ENABLE_NEWRELIC: True
roles:
- aws
- datadog
- splunkforwarder
- newrelic
# Deploy Tanaguru; the mysql role runs before the tanaguru role.
- name: Deploy Tanaguru
  hosts: all
  gather_facts: true
  sudo: true
  roles:
    - aws
    - mysql
    - tanaguru
......@@ -10,4 +10,5 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- ad_hoc_reporting
......@@ -30,7 +30,7 @@
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- xqueue
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
......
......@@ -10,7 +10,7 @@
- role: nginx
nginx_sites:
- xserver
- role: xserver
- xserver
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
......
This diff is collapsed. Click to expand it.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment