Commit 82a3fb0e by YSC

Merge branch 'master' into open-release-mongol/ginkgo.master

parents 91717d48 65697683
......@@ -8,4 +8,4 @@ Make sure that the following steps are done before merging:
- [ ] Update the appropriate internal repo (be sure to update for all our environments)
- [ ] If you are updating a secure value rather than an internal one, file a DEVOPS ticket with details.
- [ ] Add an entry to the CHANGELOG.
- [ ] Have you performed the proper testing specified on the [Ops Ansible Testing Checklist](https://openedx.atlassian.net/wiki/display/EdxOps/Ops+Ansible+Testing+Checklist)?
- [ ] If you are making a complicated change, have you performed the proper testing specified on the [Ops Ansible Testing Checklist](https://openedx.atlassian.net/wiki/display/EdxOps/Ops+Ansible+Testing+Checklist)? Adding a new variable does not require the full list (although testing on a sandbox is a great idea to ensure it links with your downstream code changes).
......@@ -30,3 +30,6 @@ playbooks/edx-east/travis-test.yml
## Ansible Artifacts
*.retry
### VisualStudioCode ###
.vscode/*
......@@ -57,3 +57,4 @@ Bill DeRusha <bill@edx.org>
Jillian Vogel <jill@opencraft.com>
Zubair Afzal <zubair.afzal@arbisoft.com>
Kyle McCormick <kylemccor@gmail.com>
Muzaffar Yousaf <muzaffar@edx.org>
Do not use GitHub issues for Open edX support. The mailing list and Slack channels are explained here: http://open.edx.org/getting-help. If it turns out there's a bug in the configuration scripts, we can open an issue or PR here.
FROM edxops/precise-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
ADD . /edx/app/edx_ansible/edx_ansible
......
FROM selenium/standalone-chrome-debug:3.4.0-einsteinium
MAINTAINER edxops
USER root
# Install a password generator
RUN apt-get update -qqy \
&& apt-get -qqy install \
pwgen \
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/*
USER seluser
CMD export VNC_PASSWORD=$(pwgen -s -1 $(shuf -i 10-20 -n 1)) \
&& x11vnc -storepasswd $VNC_PASSWORD /home/seluser/.vnc/passwd \
&& echo "Chrome VNC password: $VNC_PASSWORD" \
&& /opt/bin/entry_point.sh
EXPOSE 4444 5900
......@@ -9,26 +9,19 @@
FROM edxops/xenial-common:latest
MAINTAINER edxops
ARG CREDENTIALS_VERSION=master
ARG REPO_OWNER=edx
USER root
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
ADD . /edx/app/edx_ansible/edx_ansible
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN echo '{ "allow_root": true }' > /root/.bowerrc
RUN apt-get update
RUN apt install -y xvfb firefox gettext
COPY docker/build/credentials/ansible_overrides.yml /
COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml
RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook credentials.yml \
-c local -i '127.0.0.1,' \
-t 'install,assets,devstack:install' \
-c local -i "127.0.0.1," \
-t "install,assets,devstack" \
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="CREDENTIALS_VERSION=$CREDENTIALS_VERSION" \
--extra-vars="COMMON_GIT_PATH=$REPO_OWNER"
--extra-vars="@/devstack/ansible_overrides.yml"
USER root
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
EXPOSE 18150
---
credentials_gunicorn_host: 0.0.0.0
CREDENTIALS_MYSQL: 'db'
CREDENTIALS_DJANGO_SETTINGS_MODULE: 'credentials.settings.devstack'
CREDENTIALS_GUNICORN_EXTRA: '--reload'
CREDENTIALS_MYSQL_MATCHER: '%'
CREDENTIALS_MYSQL_HOST: 'db'
CREDENTIALS_MYSQL_PASSWORD: 'password'
COMMON_GIT_PATH: 'edx'
CREDENTIALS_VERSION: 'master'
COMMON_MYSQL_MIGRATE_USER: '{{ CREDENTIALS_MYSQL_USER }}'
COMMON_MYSQL_MIGRATE_PASS: '{{ CREDENTIALS_MYSQL_PASSWORD }}'
CREDENTIALS_MYSQL_HOST: 'edx.devstack.mysql'
CREDENTIALS_DJANGO_SETTINGS_MODULE: 'credentials.settings.devstack'
CREDENTIALS_GUNICORN_EXTRA: '--reload'
CREDENTIALS_MEMCACHE: ['edx.devstack.memcached:11211']
CREDENTIALS_EXTRA_APPS: ['credentials.apps.edx_credentials_extensions']
CREDENTIALS_URL_ROOT: 'http://localhost:18150'
edx_django_service_is_devstack: true
# NOTE: The creation of demo data requires database access,
# which we don't have when making new images.
credentials_create_demo_data: false
# docker build -f docker/build/ecommerce/Dockerfile . -t edxops/ecommerce
# To build this Dockerfile:
#
# From the root of configuration:
#
# docker build -f docker/build/ecommerce/Dockerfile .
#
# This allows the dockerfile to update /edx/app/edx_ansible/edx_ansible
# with the currently checked-out configuration repo.
FROM edxops/xenial-common:latest
MAINTAINER edxops
USER root
CMD ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
ADD . /edx/app/edx_ansible/edx_ansible
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN echo '{ "allow_root": true }' > /root/.bowerrc
RUN apt-get update
RUN apt install -y xvfb firefox gettext
COPY docker/build/ecommerce/ansible_overrides.yml /
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook ecommerce.yml -i '127.0.0.1,' -c local -t "install:base,install:system-requirements,install:configuration,install:app-requirements,install:code" -e@/ansible_overrides.yml
COPY docker/build/ecommerce/docker-run.sh /
COPY docker/build/devstack/ansible_overrides.yml /devstack/ansible_overrides.yml
RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook ecommerce.yml \
-c local -i '127.0.0.1,' \
-t 'install,assets,devstack' \
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="@/devstack/ansible_overrides.yml"
CMD ["/docker-run.sh"]
EXPOSE 8130
EXPOSE 18130
---
COMMON_GIT_PATH: 'edx'
ECOMMERCE_VERSION: 'master'
DOCKER_TLD: "edx"
ECOMMERCE_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ECOMMERCE_DEFAULT_DB_NAME }}'
USER: 'ecomm001'
PASSWORD: 'password'
HOST: 'db.{{ DOCKER_TLD }}'
PORT: '3306'
ATOMIC_REQUESTS: true
CONN_MAX_AGE: 60
\ No newline at end of file
COMMON_MYSQL_MIGRATE_USER: '{{ ECOMMERCE_MYSQL_USER }}'
COMMON_MYSQL_MIGRATE_PASS: '{{ ECOMMERCE_MYSQL_PASSWORD }}'
# NOTE: Theming requires downloading a theme from a separate Git repo. This repo (edx/edx-themes) is private for
# edX.org. In order to build an image with these themes, you must update COMMON_GIT_IDENTITY to an SSH key with access
# to the private repo. Otherwise, the sample-themes repository, which has no ecommerce themes, will be downloaded if
# comprehensive theming is enabled.
ECOMMERCE_ENABLE_COMPREHENSIVE_THEMING: false
#THEMES_GIT_IDENTITY: "{{ COMMON_GIT_IDENTITY }}"
#THEMES_GIT_PROTOCOL: "ssh"
#THEMES_GIT_MIRROR: "github.com"
#THEMES_GIT_PATH: "edx"
#THEMES_REPO: "edx-themes.git"
ECOMMERCE_DATABASE_HOST: 'edx.devstack.mysql'
ECOMMERCE_DJANGO_SETTINGS_MODULE: 'ecommerce.settings.devstack'
ECOMMERCE_GUNICORN_EXTRA: '--reload'
ECOMMERCE_MEMCACHE: ['edx.devstack.memcached:11211']
ECOMMERCE_ECOMMERCE_URL_ROOT: 'http://localhost:18130'
ECOMMERCE_LMS_URL_ROOT: 'http://edx.devstack.lms:18000'
ECOMMERCE_DISCOVERY_SERVICE_URL: 'http://edx.devstack.discovery:18381'
edx_django_service_is_devstack: true
# NOTE: The creation of demo data requires database access,
# which we don't have when making new images.
ecommerce_create_demo_data: false
#!/bin/bash
set -e
/usr/sbin/rsyslogd
/edx/app/supervisor/venvs/supervisor/bin/supervisord --nodaemon --configuration /edx/app/supervisor/supervisord.conf
......@@ -29,6 +29,7 @@ RUN sudo /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook edxapp.yml
--extra-vars=edx_platform_version=${OPENEDX_RELEASE} \
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="@/devstack.yml" \
--extra-vars="@/devstack/ansible_overrides.yml"
--extra-vars="@/devstack/ansible_overrides.yml" \
&& rm -rf /edx/app/edxapp/edx-platform
EXPOSE 18000 18010
FROM selenium/standalone-firefox-debug:3.4.0-einsteinium
MAINTAINER edxops
USER root
# Install a password generator and the codecs needed to support mp4 video in Firefox
RUN apt-get update -qqy \
&& apt-get -qqy install \
gstreamer1.0-libav \
pwgen \
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/*
USER seluser
CMD export VNC_PASSWORD=$(pwgen -s -1 $(shuf -i 10-20 -n 1)) \
&& x11vnc -storepasswd $VNC_PASSWORD /home/seluser/.vnc/passwd \
&& echo "Firefox VNC password: $VNC_PASSWORD" \
&& /opt/bin/entry_point.sh
EXPOSE 4444 5900
......@@ -9,3 +9,5 @@ FORUM_ELASTICSEARCH_HOST: "es.{{ FLOCK_TLD }}"
FORUM_USE_TCP: "true"
FORUM_RACK_ENV: "staging"
FORUM_SINATRA_ENV: "staging"
devstack: "true"
# Build using: docker build -f Dockerfile.gocd-agent -t gocd-agent .
# https://hub.docker.com/r/gocd/gocd-agent-deprecated/
FROM gocd/gocd-agent-deprecated:17.1.0
FROM gocd/gocd-agent-deprecated:17.7.0
LABEL version="0.02" \
description="This custom go-agent docker file installs additional requirements for the edx pipeline"
......
FROM edxops/precise-common
MAINTAINER edxops
USER root
# Fix selinux issue with useradd on 12.04
RUN curl http://salilab.org/~ben/libselinux1_2.1.0-5.1ubuntu1_amd64.deb -o /tmp/libselinux1_2.1.0-5.1ubuntu1_amd64.deb
RUN dpkg -i /tmp/libselinux1_2.1.0-5.1ubuntu1_amd64.deb
RUN apt-get update
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/xqwatcher/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook harstorage.yml \
-i '127.0.0.1,' -c local \
-t "install:base,install:configuration,install:app-requirements,install:code" \
-e@/ansible_overrides.yml
WORKDIR /edx/app/harstorage/harstorage
CMD ["/edx/app/harstorage/venvs/harstorage/bin/paster", "serve", "--daemon", "/edx/app/harstorage/venvs/harstorage/edx/etc/harstorage/production.ini"]
FROM edxops/precise-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
USER root
......
FROM edxops/xenial-common:latest
MAINTAINER edxops
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/mongo/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook mongo.yml \
-i '127.0.0.1,' -c local \
-t 'install' \
-e@/ansible_overrides.yml
WORKDIR /edx/app
EXPOSE 27017
FROM edxops/precise-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
USER root
......
FROM ubuntu:precise
MAINTAINER edxops
# Set locale to UTF-8 which is not the default for docker.
# See the links for details:
# http://jaredmarkell.com/docker-and-locales/
# https://github.com/docker-library/python/issues/13
# https://github.com/docker-library/python/pull/14/files
RUN apt-get update &&\
apt-get install -y locales &&\
locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV ANSIBLE_REPO="https://github.com/edx/ansible"
ENV CONFIGURATION_REPO="https://github.com/edx/configuration.git"
ENV CONFIGURATION_VERSION="master"
ADD util/install/ansible-bootstrap.sh /tmp/ansible-bootstrap.sh
RUN chmod +x /tmp/ansible-bootstrap.sh
RUN /tmp/ansible-bootstrap.sh
FROM edxops/precise-common:latest
FROM edxops/xenial-common:latest
MAINTAINER edxops
USER root
......
#
# Single Docker Compose cluster that will eventually start
# all edX services in a single flock of coordinated containers
#
# This work is currently experimental and a number of services
# are missing entirely. Containers that are present will not
# currently work without manual steps. We are working on
# addressing that.
#
# When running compose you must pass in two environment variables
#
# DOCKER_EDX_ROOT which points to the directory into which you checkout
# your edX source code. For example, assuming the following directory
# structure under /home/me
#
# |-- edx-src
# | |-- discovery
# | |-- cs_comments_service
# | |-- edx_discovery
# | |-- edx-platform
# | |-- xqueue
# you would define DOCKER_EDX_ROOT="/home/me/edx-src"
#
# DOCKER_DATA_ROOT is the location on your host machine where Docker
# guests can access your local filesystem for storing persistent data
# files, say MongoDB or MySQL data files.
#
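# For example (hypothetical host paths; adjust to your checkout and data locations),
# you might export both variables and then bring the flock up:
#
#   export DOCKER_EDX_ROOT="/home/me/edx-src"
#   export DOCKER_DATA_ROOT="/home/me/edx-data"
#   docker-compose up -d
#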
db:
container_name: db
image: mysql:5.6
environment:
- MYSQL_ROOT_PASSWORD='password'
#- MYSQL_DATABASE=''
- MYSQL_USER='migrate'
- MYSQL_PASSWORD='password'
volumes:
- ${DOCKER_DATA_ROOT}/mysql/data:/data
ports:
- 3306:3306
mongo:
container_name: mongo
image: mongo:3.0
volumes:
- ${DOCKER_DATA_ROOT}/mongo/data:/data
ports:
- 27017:27017
# Need to build our own for ES 0.9
es:
container_name: es
image: edxops/elasticsearch:v1
volumes:
- ${DOCKER_DATA_ROOT}/elasticsearch/data:/data
ports:
- 9100:9100
- 9200:9200
- 9300:9300
memcache:
container_name: memcache
image: memcached:1.4.24
volumes:
- ${DOCKER_DATA_ROOT}/memcache/data:/data
ports:
- 11211:11211
nginx:
container_name: nginx
image: edxops/nginx:v1
ports:
- 80:80
- 443:443
rabbitmq:
container_name: rabbitmq
image: rabbitmq:3.5.3
volumes:
- ${DOCKER_DATA_ROOT}/rabbitmq/data:/data
ports:
- 5672:5672
forum:
container_name: forum
# Image built from the opencraft fork as it fixes
# an auth bug. Update when the change merges
# upstream
image: edxops/forum:latest
volumes:
- ${DOCKER_EDX_ROOT}/cs_comments_service:/edx/app/forum/cs_comments_service
ports:
- 4567:4567
xqueue:
container_name: xqueue
image: edxops/xqueue:v1
ports:
- 8040:8040
- 18040:18040
volumes:
- ${DOCKER_EDX_ROOT}/xqueue:/edx/app/edxapp/xqueue
lms:
container_name: lms
image: edxops/edxapp:v2
ports:
- 8000:8000
- 18000:18000
volumes:
- ${DOCKER_EDX_ROOT}/edx-platform:/edx/app/edxapp/edx-platform
cms:
container_name: cms
image: edxops/edxapp:v2
ports:
- 8010:8010
- 18010:18010
volumes:
- ${DOCKER_EDX_ROOT}/edx-platform:/edx/app/edxapp/edx-platform
- name: Deploy Credentials
- name: Deploy credentials
hosts: all
become: True
gather_facts: True
......@@ -6,8 +6,7 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- nginx
- docker
- role: credentials
- role: nginx
nginx_default_sites:
- credentials
- credentials
......@@ -6,7 +6,7 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- nginx
- role: discovery
- role: nginx
nginx_default_sites:
- discovery
\ No newline at end of file
- discovery
- discovery
......@@ -6,6 +6,7 @@
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common_vars
- docker
- role: nginx
nginx_default_sites:
- ecommerce
- ecommerce
- name: Deploy Harstorage
- name: Deploy MongoDB 3.2
hosts: all
become: True
gather_facts: True
roles:
- common_vars
- docker
- mongo
- harstorage
- mongo_3_2
......@@ -6,10 +6,10 @@
jinja2_extensions=jinja2.ext.do
host_key_checking=False
roles_path=../../../ansible-roles/roles:../../../ansible-private/roles:../../../ansible-roles/
roles_path=../../../ansible-roles/roles:../../../ansible-private/roles:../../../ansible-roles/:../../playbooks/roles
library=../library/
ansible_managed=This file is created and updated by ansible, edit at your peril
[ssh_connection]
ssh_args=-o ControlMaster=auto -o ControlPersist=60s -o ControlPath="~/.ansible/tmp/ansible-ssh-%h-%p-%r" -o ServerAliveInterval=30
retries=5
\ No newline at end of file
retries=5
......@@ -47,13 +47,3 @@
file:
path: "{{ artifact_path }}"
state: absent
- name: Send Hipchat notification cleanup has finished
hipchat:
api: "{{ hipchat_url }}"
token: "{{ hipchat_token }}"
room: "{{ hipchat_room }}"
msg: "Cleanup for run id: {{ keypair_id }} complete."
ignore_errors: yes
when: hipchat_token is defined
......@@ -57,7 +57,7 @@
register: instance_tags
- name: Create AMI
ec2_ami_2_0_0_1:
ec2_ami:
instance_id: "{{ instance_id }}"
name: "{{ edx_environment }} -- {{ deployment }} -- {{ play }} -- {{ extra_name_identifier }} -- {{ app_version[:7] }}"
region: "{{ ec2_region }}"
......@@ -116,7 +116,7 @@
api: "{{ hipchat_url }}"
token: "{{ hipchat_token }}"
room: "{{ hipchat_room }}"
msg: "Finished baking AMI for: {{ play }} \n
msg: "Finished baking AMI for: {{ edx_environment }}-{{ deployment }}-{{ play }} \n
AMI-ID: {{ ami_register.image_id }} \n
"
ignore_errors: yes
......
......@@ -70,13 +70,12 @@
key_name: "{{ automation_prefix }} {{ unique_key_name.stdout }}"
instance_type: "{{ ec2_instance_type }}"
image: "{{ launch_ami_id }}"
wait: yes
group_id: "{{ ec2_security_group_id }}"
count: 1
vpc_subnet_id: "{{ ec2_vpc_subnet_id }}"
assign_public_ip: "{{ ec2_assign_public_ip }}"
volumes:
- device_name: /dev/sdf
- device_name: /dev/sda1
volume_type: 'gp2'
volume_size: "{{ ebs_volume_size }}"
wait: yes
......
......@@ -118,6 +118,7 @@ from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
import traceback
try:
import json
......@@ -612,5 +613,11 @@ class Ec2Inventory(object):
# Run the script
Ec2Inventory()
RETRIES = 3
for _ in xrange(RETRIES):
try:
Ec2Inventory()
break
except Exception:
traceback.print_exc()
---
- name: Bootstrap instance(s)
hosts: all
gather_facts: no
become: True
roles:
- role: python
tags:
- install
- install:system-requirements
- name: Configure instance(s)
hosts: all
become: True
gather_facts: True
roles:
- oauth2_proxy
......@@ -6,7 +6,6 @@
migrate_db: "yes"
disable_edx_services: false
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
......
......@@ -4,7 +4,6 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'analytics-api'
roles:
......
- name: Deploy common
hosts: all
become: True
gather_facts: True
vars:
SECURITY_UNATTENDED_UPGRADES: true
COMMON_SECURITY_UPDATES: true
roles:
- common
......@@ -4,6 +4,10 @@
#
# ansible-playbook -c local -i 'localhost,' create_dbs_and_users.yml -e@./db.yml
#
# If running ansible from a python virtualenv you will need a command like the following
#
# ansible-playbook -c local -i 'localhost,' create_dbs_and_users.yml -e@./db.yml -e "ansible_python_interpreter=$(which python)"
#
# where the content of db.yml contains the following dictionaries
#
# database_connection: &default_connection
......@@ -67,6 +71,7 @@
- name: create mysql users and assign privileges
mysql_user:
name: "{{ item.name }}"
state: "{{ item.state | default('present') }}"
priv: "{{ '/'.join(item.privileges) }}"
password: "{{ item.password }}"
host: "{{ item.host }}"
......
......@@ -4,16 +4,13 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'credentials'
roles:
- aws
- role: nginx
nginx_sites:
- credentials
nginx_default_sites:
- credentials
- aws
- credentials
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -4,7 +4,6 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'discovery'
roles:
......
......@@ -4,14 +4,11 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
CLUSTER_NAME: 'ecommerce'
roles:
- aws
- role: nginx
nginx_sites:
- ecommerce
nginx_default_sites:
- ecommerce
- ecommerce
......
......@@ -4,7 +4,6 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles:
- aws
......
......@@ -14,19 +14,18 @@
- xqueue
- xserver
- analytics_api
- ecommerce
- credentials
nginx_default_sites:
- lms
- mysql
- role: edxlocal
tags: edxlocal
- memcache
- mongo
- mongo_3_2
- { role: 'edxapp', celery_worker: True }
- edxapp
- testcourses
- { role: 'rabbitmq', rabbitmq_ip: '127.0.0.1' }
- role: redis
- oraclejdk
- elasticsearch
- forum
......@@ -38,6 +37,7 @@
- analytics_api
- ecommerce
- credentials
- veda_web_frontend
- oauth_client_setup
- role: datadog
when: COMMON_ENABLE_DATADOG
......
......@@ -6,8 +6,8 @@
keypair: continuous-integration
instance_type: t2.medium
security_group: sandbox-vpc
# ubuntu 12.04
ami: ami-f478849c
# ubuntu 16.04 - 20170721
ami: ami-cd0f5cb6
region: us-east-1
zone: us-east-1c
instance_tags:
......@@ -18,6 +18,7 @@
owner: temp
root_ebs_size: 50
dns_name: temp
instance_initiated_shutdown_behavior: stop
dns_zone: sandbox.edx.org
name_tag: sandbox-temp
elb: false
......@@ -33,6 +34,7 @@
- role: launch_ec2
keypair: "{{ keypair }}"
instance_type: "{{ instance_type }}"
instance_initiated_shutdown_behavior: "{{ instance_initiated_shutdown_behavior }}"
security_group: "{{ security_group }}"
ami: "{{ ami }}"
region: "{{ region }}"
......@@ -58,7 +60,7 @@
- name: Wait for cloud-init to finish
wait_for:
path: /var/log/cloud-init.log
timeout: 15
timeout: 15
search_regex: "final-message"
- name: gather_facts
setup: ""
......
......@@ -21,6 +21,8 @@
nginx_extra_configs: "{{ NGINX_EDXAPP_EXTRA_CONFIGS }}"
nginx_redirects: "{{ NGINX_EDXAPP_CUSTOM_REDIRECTS }}"
- edxapp
- role: devstack_sqlite_fix
when: devstack is defined and devstack
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
......
- name: Run edxapp migrations
hosts: all
become: False
gather_facts: False
vars:
db_dry_run: "--list"
roles:
- edxapp
tasks:
- name: migrate lms
shell: "python manage.py lms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
# Migrate any database in the config, but not the read_replica
when: item != 'read_replica'
with_items:
- "{{ lms_auth_config.DATABASES.keys() }}"
tags:
- always
- name: migrate cms
shell: "python manage.py cms migrate --database {{ item }} --noinput {{ db_dry_run }} --settings=aws"
args:
chdir: "{{ edxapp_code_dir }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
# Migrate any database in the config, but not the read_replica
when: item != 'read_replica'
with_items:
- "{{ cms_auth_config.DATABASES.keys() }}"
tags:
- always
- name: Deploy Harstorage
hosts: all
become: True
gather_facts: True
vars:
nginx_default_sites:
- harstorage
roles:
- aws
- mongo
- nginx
- harstorage
......@@ -4,7 +4,6 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: True
CLUSTER_NAME: 'insights'
roles:
......
# Configure an instance with the admin jenkins.
- name: install python2
hosts: all
become: True
gather_facts: False
roles:
- python
- name: Configure instance(s)
hosts: all
become: True
gather_facts: True
vars:
serial_count: 1
COMMON_SECURITY_UPDATES: yes
SECURITY_UPGRADE_ON_ANSIBLE: true
serial: "{{ serial_count }}"
roles:
- aws
- jenkins_admin
......@@ -20,3 +24,5 @@
# crcSalt: <SOURCE>
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
---
- name: Bootstrap instance(s)
hosts: all
gather_facts: no
become: True
roles:
- python
- name: Configure instance(s)
hosts: all
become: True
gather_facts: True
vars:
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: True
COMMON_ENABLE_NEWRELIC: True
COMMON_SECURITY_UPDATES: yes
SECURITY_UPGRADE_ON_ANSIBLE: true
SPLUNKFORWARDER_LOG_ITEMS:
- source: '/var/lib/jenkins/jobs/edx-platform-*/builds/*/junitResult.xml'
recursive: true
index: 'testeng'
sourcetype: junit
followSymlink: false
blacklist: coverage|private|subset|specific|custom|special|\.gz$
crcSalt: '<SOURCE>'
- source: '/var/lib/jenkins/jobs/*/builds/*/build.xml'
index: 'testeng'
recursive: true
sourcetype: build_result
followSymlink: false
crcSalt: '<SOURCE>'
blacklist: '\.gz$'
- source: '/var/lib/jenkins/jobs/edx-platform-*/builds/*/archive/.../test_root/log/timing.*.log'
index: 'testeng'
recursive: true
sourcetype: 'json_timing_log'
followSymlink: false
crcSalt: '<SOURCE>'
blacklist: coverage|private|subset|specific|custom|special|\.gz$
- source: '/var/log/jenkins/jenkins.log'
index: 'testeng'
recursive: false
followSymlink: false
blacklist: '\.gz$'
roles:
- aws
- role: datadog
when: COMMON_ENABLE_DATADOG
- jenkins_build
# run just the splunkforwarder role by using '--tags "splunkonly"'
# e.g. ansible-playbook jenkins_testeng_master.yml -i inventory.ini --tags "splunkonly" -vvvv
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
tags:
- splunkonly
- jenkins:promote-to-production
become: True
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
tags:
- newreliconly
---
- name: Bootstrap instance(s)
hosts: all
gather_facts: no
become: True
roles:
- python
- name: Configure instance(s)
hosts: all
become: True
gather_facts: True
vars:
COMMON_ENABLE_DATADOG: True
COMMON_ENABLE_SPLUNKFORWARDER: False
COMMON_SECURITY_UPDATES: yes
SECURITY_UPGRADE_ON_ANSIBLE: true
roles:
- aws
- role: datadog
when: COMMON_ENABLE_DATADOG
- jenkins_de
......@@ -28,6 +28,7 @@
sourcetype: build_result
followSymlink: false
crcSalt: '<SOURCE>'
blacklist: '(((\.(gz))|\d)$)|(.*seed.*)'
- source: '/var/lib/jenkins/jobs/*/builds/*/log'
index: 'testeng'
......@@ -35,18 +36,13 @@
sourcetype: build_log
followSymlink: false
crcSalt: '<SOURCE>'
blacklist: '(((\.(gz))|\d)$)|(.*seed.*)'
- source: '/var/lib/jenkins/jobs/*/builds/*/archive/test_root/log/timing.*.log'
index: 'testeng'
sourcetype: 'json_timing_log'
followSymlink: false
- source: '/var/lib/jenkins/jobs/*/builds/*/archive/sitespeed-result/*/data/result.json'
index: 'testeng'
recursive: true
sourcetype: sitespeed_result
followSymlink: false
- source: '/var/log/jenkins/jenkins.log'
index: 'testeng'
recursive: false
......
......@@ -24,7 +24,6 @@
- mysql
- edxlocal
- memcache
- mongo
- mongo_3_2
- browsers
- browsermob-proxy
- jenkins_worker
......@@ -29,71 +29,68 @@ group and state.
}
"""
import argparse
import boto
import boto.ec2.autoscale
import boto3
import json
from collections import defaultdict
from os import environ
class LifecycleInventory():
profile = None
def __init__(self, profile):
def __init__(self, region):
parser = argparse.ArgumentParser()
self.profile = profile
self.region = region
def get_e_d_from_tags(self, group):
environment = "default_environment"
deployment = "default_deployment"
for r in group.tags:
if r.key == "environment":
environment = r.value
elif r.key == "deployment":
deployment = r.value
for r in group['Tags']:
if r['Key'] == "environment":
environment = r['Value']
elif r['Key'] == "deployment":
deployment = r['Value']
return environment,deployment
def get_instance_dict(self):
ec2 = boto.ec2.connect_to_region(region,profile_name=self.profile)
reservations = ec2.get_all_instances()
ec2 = boto3.client('ec2', region_name=self.region)
reservations = ec2.describe_instances()['Reservations']
dict = {}
for instance in [i for r in reservations for i in r.instances]:
dict[instance.id] = instance
for instance in [i for r in reservations for i in r['Instances']]:
dict[instance['InstanceId']] = instance
return dict
def run(self):
asg = boto.ec2.autoscale.connect_to_region(region,profile_name=self.profile)
groups = asg.get_all_groups()
asg = boto3.client('autoscaling', region_name=self.region)
groups = asg.describe_auto_scaling_groups()['AutoScalingGroups']
instances = self.get_instance_dict()
inventory = defaultdict(list)
for group in groups:
for instance in group.instances:
for instance in group['Instances']:
private_ip_address = instances[instance.instance_id].private_ip_address
private_ip_address = instances[instance['InstanceId']]['PrivateIpAddress']
if private_ip_address:
environment,deployment = self.get_e_d_from_tags(group)
inventory[environment + "_" + deployment + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[environment + "_" + deployment + "_" + instance['LifecycleState'].replace(":","_")].append(private_ip_address)
inventory[group['AutoScalingGroupName']].append(private_ip_address)
inventory[group['AutoScalingGroupName'] + "_" + instance['LifecycleState'].replace(":","_")].append(private_ip_address)
inventory[instance['LifecycleState'].replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-r', '--region', help='The aws region to use when connecting.', default='us-east-1')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
region = environ.get('AWS_REGION','us-east-1')
LifecycleInventory(args.profile).run()
LifecycleInventory(args.region).run()
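# Example invocation as an Ansible dynamic inventory (the script path below is hypothetical):
#
#   ./lifecycle_inventory.py --region us-east-1 --list
#
# This prints a JSON mapping from group names (the ASG name, "<environment>_<deployment>_<lifecycle_state>",
# and the bare lifecycle state, with ":" replaced by "_") to the private IPs of the instances in each group.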
......@@ -10,7 +10,7 @@
#
# Overview:
# This playbook ensures that the specified users and groups exist in the targeted
# edxapp cluster.
# edxapp cluster.
#
# Users have the following properties:
# - username (required, str)
......@@ -72,7 +72,6 @@
# for perm in Permission.objects.all():
# print '{}:{}:{}'.format(perm.content_type.app_label, perm.content_type.model, perm.codename)
#
- hosts: all
vars:
python_path: /edx/bin/python.edxapp
......
# Manages a mongo cluster.
# To set up a new mongo cluster, make sure you've configured MONGO_RS_CONFIG
# as used by mongo_replica_set in the mongo_3_2 role.
#
# If you are initializing a cluster, your command might look like:
# ansible-playbook mongo_3_2.yml -i 203.0.113.11,203.0.113.12,203.0.113.13 -e@/path/to/edx.yml -e@/path/to/ed.yml
# If you just want to deploy an updated replica set config, you can run
# ansible-playbook mongo_3_2.yml -i any-cluster-ip -e@/path/to/edx.yml -e@/path/to/ed.yml --tags configure_replica_set
#
# ADDING A NEW CLUSTER MEMBER
# If you are adding a member to a cluster, you must be sure that the new machine is not first in your inventory
# ansible-playbook mongo_3_2.yml -i 203.0.113.11,203.0.113.12,new-machine-ip -e@/path/to/edx.yml -e@/path/to/ed.yml
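#
# A minimal sketch of what MONGO_RS_CONFIG might look like, inferred from the host/priority/votes
# checks in the mongo_replica_set module; the authoritative keys and defaults live in the
# mongo_3_2 role, so treat the exact structure below as illustrative only:
#
# MONGO_RS_CONFIG:
#   members:
#     - host: 203.0.113.11
#     - host: 203.0.113.12
#     - host: 203.0.113.13
#       priority: 0
#       votes: 0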
- name: Bootstrap instance(s)
hosts: all
gather_facts: no
become: True
roles:
- python
- name: Deploy MongoDB
hosts: all
become: True
gather_facts: True
roles:
- aws
- mongo_3_2
- munin_node
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: newrelic_infrastructure
when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE
- name: Deploy mongo_mms instance
hosts: all
become: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- mongo_mms
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: datadog
when: COMMON_ENABLE_DATADOG
......@@ -4,7 +4,6 @@
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: True
roles:
- aws
......
......@@ -9,5 +9,6 @@
- "roles/ecommerce/defaults/main.yml"
- "roles/credentials/defaults/main.yml"
- "roles/discovery/defaults/main.yml"
- "roles/veda_web_frontend/defaults/main.yml"
roles:
- oauth_client_setup
- name: Deploy redis
hosts: all
become: True
gather_facts: True
roles:
- aws
- redis
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
- role: newrelic_infrastructure
when: COMMON_ENABLE_NEWRELIC_INFRASTRUCTURE
......@@ -10,3 +10,6 @@
service:
name: "{{ supervisor_service }}"
state: restarted
register: rc
until: rc|success
retries: 5
......@@ -12,10 +12,6 @@
hosts: "{{TARGET}}"
become: True
gather_facts: True
pre_tasks:
- set_fact:
STOP_ALL_EDX_SERVICES_EXTRA_ARGS: "--no-wait"
when: ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty'
roles:
- stop_all_edx_services
......
- name: Deploy edX VEDA delivery Worker
hosts: all
gather_facts: True
roles:
- aws
- veda_delivery_worker
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- name: Deploy edX VEDA Encode Worker
hosts: all
become: True
gather_facts: True
roles:
- veda_encode_worker
- name: Deploy edX VEDA Intake Worker
hosts: all
gather_facts: True
roles:
- veda_intake_worker
- name: Deploy edX VEDA pipeline Worker
hosts: all
become: True
gather_facts: True
roles:
- aws
- veda_pipeline_worker
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- name: Deploy edX Video Pipeline Web Frontend
hosts: all
become: True
gather_facts: True
roles:
- aws
- role: nginx
nginx_default_sites:
- veda_web_frontend
- role: veda_web_frontend
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
# TODO! Add new relic instrumentation once all the other pieces of video pipeline are in place.
......@@ -28,6 +28,8 @@
when: elb_pre_post
roles:
- aws
- role: automated
AUTOMATED_USERS: "{{ XQUEUE_AUTOMATED_USERS | default({}) }}"
- role: nginx
nginx_sites:
- xqueue
......
......@@ -6,7 +6,6 @@
COMMON_APP_DIR: "/edx/app"
common_web_group: "www-data"
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
serial_count: 1
serial: "{{ serial_count }}"
......
......@@ -22,6 +22,8 @@
# Set to false if deployed behind another proxy/load balancer.
NGINX_SET_X_FORWARDED_HEADERS: True
DISCOVERY_URL_ROOT: 'http://localhost:{{ DISCOVERY_NGINX_PORT }}'
ecommerce_create_demo_data: true
credentials_create_demo_data: true
roles:
- role: swapfile
SWAPFILE_SIZE: 4GB
......@@ -39,7 +41,7 @@
when: EDXAPP_MYSQL_HOST == 'localhost'
- role: memcache
when: "'localhost' in ' '.join(EDXAPP_MEMCACHE)"
- role: mongo
- role: mongo_3_2
when: "'localhost' in EDXAPP_MONGO_HOSTS"
- role: rabbitmq
rabbitmq_ip: 127.0.0.1
......
......@@ -351,6 +351,12 @@ def validate_args():
if (username and not password) or (password and not username):
module.fail_json(msg="Must provide both username and password or neither.")
# Check that if votes is 0 priority is also 0
for member in module.params.get('rs_config').get('members'):
if member.get('votes') == 0 and member.get('priority') != 0:
module.fail_json(msg="Non-voting member {} must have priority 0".
format(member['host']))
return module
......
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vpc_lookup
short_description: returns a list of subnet Ids using tags as criteria
description:
- Returns a list of subnet Ids for a given set of tags that identify one or more VPCs
version_added: "1.5"
options:
region:
description:
- The AWS region to use. Must be specified if ec2_url
is not used. If not specified then the value of the
EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of
the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the
AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
tags:
description:
- tags to lookup
required: false
default: null
type: dict
aliases: []
requirements: [ "boto" ]
author: John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Return all instances that match the tag "Name: foo"
- local_action:
module: vpc_lookup
tags:
Name: foo
'''
import sys
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
from boto.vpc import VPCConnection
from boto.vpc import connect_to_region
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
module=AnsibleModule(
argument_spec=dict(
region=dict(choices=AWS_REGIONS),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
tags=dict(default=None, type='dict'),
)
)
tags = module.params.get('tags')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
region = module.params.get('region')
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc = connect_to_region(region, aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
vpc_conn = VPCConnection()
subnet_ids = []
for subnet in vpc_conn.get_all_subnets(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
subnet_ids.append(subnet.id)
vpc_ids = []
for vpc in vpc.get_all_vpcs(filters={'tag:' + tag: value
for tag, value in tags.iteritems()}):
vpc_ids.append(vpc.id)
module.exit_json(changed=False, subnet_ids=subnet_ids, vpc_ids=vpc_ids)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
......@@ -20,30 +20,41 @@ ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
ANALYTICS_API_DEFAULT_DB_NAME: 'analytics-api'
ANALYTICS_API_DEFAULT_USER: 'api001'
ANALYTICS_API_DEFAULT_PASSWORD: 'password'
ANALYTICS_API_DEFAULT_HOST: 'localhost'
ANALYTICS_API_DEFAULT_PORT: '3306'
ANALYTICS_API_DEFAULT_MYSQL_OPTIONS:
connect_timeout: 10
init_command: "SET sql_mode='STRICT_TRANS_TABLES'"
ANALYTICS_API_REPORTS_DB_NAME: 'reports'
ANALYTICS_API_REPORTS_USER: 'reports001'
ANALYTICS_API_REPORTS_PASSWORD: 'password'
ANALYTICS_API_REPORTS_HOST: 'localhost'
ANALYTICS_API_REPORTS_PORT: '3306'
ANALYTICS_API_REPORTS_MYSQL_OPTIONS:
connect_timeout: 10
init_command: "SET sql_mode='STRICT_TRANS_TABLES'"
ANALYTICS_API_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ANALYTICS_API_DEFAULT_DB_NAME }}'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
USER: '{{ ANALYTICS_API_DEFAULT_USER }}'
PASSWORD: '{{ ANALYTICS_API_DEFAULT_PASSWORD }}'
HOST: '{{ ANALYTICS_API_DEFAULT_HOST }}'
PORT: '{{ ANALYTICS_API_DEFAULT_PORT }}'
OPTIONS: "{{ ANALYTICS_API_DEFAULT_MYSQL_OPTIONS }}"
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: '{{ ANALYTICS_API_REPORTS_DB_NAME }}'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
USER: '{{ ANALYTICS_API_REPORTS_USER }}'
PASSWORD: '{{ ANALYTICS_API_REPORTS_PASSWORD }}'
HOST: '{{ ANALYTICS_API_REPORTS_HOST }}'
PORT: '{{ ANALYTICS_API_REPORTS_PORT }}'
OPTIONS: "{{ ANALYTICS_API_REPORTS_MYSQL_OPTIONS }}"
ANALYTICS_API_VERSION: "master"
......@@ -54,10 +65,6 @@ ANALYTICS_API_USERS:
ANALYTICS_API_SECRET_KEY: 'Your secret key here'
ANALYTICS_API_TIME_ZONE: 'UTC'
ANALYTICS_API_LANGUAGE_CODE: 'en-us'
ANALYTICS_API_EMAIL_HOST: 'localhost'
ANALYTICS_API_EMAIL_HOST_USER: 'mail_user'
ANALYTICS_API_EMAIL_HOST_PASSWORD: 'mail_password'
ANALYTICS_API_EMAIL_PORT: 587
ANALYTICS_API_AUTH_TOKEN: 'put-your-api-token-here'
......@@ -107,11 +114,6 @@ ANALYTICS_API_SERVICE_CONFIG:
SECRET_KEY: '{{ ANALYTICS_API_SECRET_KEY }}'
TIME_ZONE: '{{ ANALYTICS_API_TIME_ZONE }}'
LANGUAGE_CODE: '{{ANALYTICS_API_LANGUAGE_CODE }}'
# email config
EMAIL_HOST: '{{ ANALYTICS_API_EMAIL_HOST }}'
EMAIL_HOST_PASSWORD: '{{ ANALYTICS_API_EMAIL_HOST_PASSWORD }}'
EMAIL_HOST_USER: '{{ ANALYTICS_API_EMAIL_HOST_USER }}'
EMAIL_PORT: '{{ ANALYTICS_API_EMAIL_PORT }}'
API_AUTH_TOKEN: '{{ ANALYTICS_API_AUTH_TOKEN }}'
STATICFILES_DIRS: ['static']
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://openedx.atlassian.net/wiki/display/OpenOPS
# code style: https://openedx.atlassian.net/wiki/display/OpenOPS/Ansible+Code+Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Allow this role to be duplicated in dependencies.
allow_duplicates: yes
......@@ -26,9 +26,9 @@
# EDXAPP_AUTOMATED_USERS:
# ecom:
# sudo_commands:
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms migrate --list --settings=aws"
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms showmigrations --settings=aws"
# sudo_user: "edxapp"
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms migrate --list --settings=aws"
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms showmigrations --settings=aws"
# sudo_user: "edxapp"
# authorized_keys:
# - 'ssh-rsa <REDACTED> ecom+admin@example.com'
......@@ -62,7 +62,7 @@
mode: "0440"
validate: 'visudo -cf %s'
with_dict: "{{ AUTOMATED_USERS }}"
- name: Create .ssh directory
file:
path: "/home/{{ item.key }}/.ssh"
......@@ -71,7 +71,7 @@
owner: "{{ item.key }}"
group: "{{ item.key }}"
with_dict: "{{ AUTOMATED_USERS }}"
- name: Build authorized_keys file
template:
src: "home/automator/.ssh/authorized_keys.j2"
......@@ -80,7 +80,7 @@
owner: "{{ item.key }}"
group: "{{ item.key }}"
with_dict: "{{ AUTOMATED_USERS }}"
- name: Build known_hosts file
file:
path: "/home/{{ item.key }}/.ssh/known_hosts"
......
# browsermob-proxy
browsermob_proxy_version: '2.0.0'
browsermob_proxy_url: 'https://github.com/lightbody/browsermob-proxy/releases/download/browsermob-proxy-{{ browsermob_proxy_version }}/browsermob-proxy-{{ browsermob_proxy_version }}-bin.zip'
#!/bin/sh
/etc/browsermob-proxy/bin/browsermob-proxy $*
# Install browsermob-proxy, which is used for page performance testing with bok-choy
---
- name: get zip file
get_url:
url: "{{ browsermob_proxy_url }}"
dest: "/var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
register: download_browsermob_proxy
- name: unzip into /var/tmp/
shell: "unzip /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }}.zip"
args:
chdir: "/var/tmp"
when: download_browsermob_proxy.changed
- name: move to /etc/browsermob-proxy/
shell: "mv /var/tmp/browsermob-proxy-{{ browsermob_proxy_version }} /etc/browsermob-proxy"
when: download_browsermob_proxy.changed
- name: change permissions of main script
file:
path: "/etc/browsermob-proxy/bin/browsermob-proxy"
mode: 0755
when: download_browsermob_proxy.changed
- name: add wrapper script /usr/local/bin/browsermob-proxy
copy:
src: browsermob-proxy
dest: /usr/local/bin/browsermob-proxy
when: download_browsermob_proxy.changed
- name: change permissions of wrapper script
file:
path: /usr/local/bin/browsermob-proxy
mode: 0755
when: download_browsermob_proxy.changed
......@@ -32,10 +32,10 @@ browser_s3_deb_pkgs:
url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_55.0.2883.87-1_amd64.deb
trusty_browser_s3_deb_pkgs:
- name: google-chrome-stable_30.0.1599.114-1_amd64.deb
url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_30.0.1599.114-1_amd64.deb
- name: firefox-mozilla-build_42.0-0ubuntu1_amd64.deb
url: https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox-mozilla-build_42.0-0ubuntu1_amd64.deb
- name: google-chrome-stable_59.0.3071.115-1_amd64.deb
url: https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_59.0.3071.115-1_amd64.deb
# ChromeDriver
chromedriver_version: 2.27
......
......@@ -44,7 +44,7 @@
get_url:
dest: /tmp/{{ item.name }}
url: "{{ item.url }}"
register: download_deb
register: download_trusty_deb
with_items: "{{ trusty_browser_s3_deb_pkgs }}"
when: ansible_distribution_release == 'trusty'
tags:
......@@ -55,7 +55,7 @@
get_url:
dest: /tmp/{{ item.name }}
url: "{{ item.url }}"
register: download_deb
register: download_xenial_deb
with_items: "{{ browser_s3_deb_pkgs }}"
when: ansible_distribution_release == 'xenial'
tags:
......@@ -65,7 +65,7 @@
- name: install trusty browser packages
shell: gdebi -nq /tmp/{{ item.name }}
with_items: "{{ trusty_browser_s3_deb_pkgs }}"
when: download_deb.changed and
when: download_trusty_deb.changed and
ansible_distribution_release == 'trusty'
tags:
- install
......@@ -74,7 +74,7 @@
- name: install xenial browser packages
shell: gdebi -nq /tmp/{{ item.name }}
with_items: "{{ browser_s3_deb_pkgs }}"
when: download_deb.changed and
when: download_xenial_deb.changed and
ansible_distribution_release == 'xenial'
tags:
- install
......
......@@ -10,6 +10,14 @@
- { src: 'certs.env.json.j2', dest: 'env.json' }
- { src: 'certs.auth.json.j2', dest: 'auth.json' }
- name: Copy the boto file
template:
src: "boto.j2"
dest: "{{ certs_app_dir }}/.boto"
owner: "{{ certs_user }}"
group: "{{ common_web_user }}"
mode: 0644
- name: Writing supervisor script for certificates
template:
src: certs.conf.j2
......
[program:certs]
command={{ certs_venv_bin }}/python {{ certs_code_dir }}/certificate_agent.py
priority=999
environment=SERVICE_VARIANT="certs",HOME="/"
environment=SERVICE_VARIANT="certs",HOME="/",BOTO_CONFIG="{{ certs_app_dir }}/.boto"
user={{ common_web_user }}
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
......
......@@ -157,7 +157,6 @@ common_debian_variants:
# We only have to install old Python for these releases:
old_python_ppa_releases:
- precise
- trusty
common_redhat_variants:
......
......@@ -9,24 +9,34 @@
#
##
# Role includes for role credentials
#
# Example:
#
# dependencies:
# - {
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- common
- supervisor
- role: edx_service
edx_service_name: "{{ credentials_service_name }}"
edx_service_config: "{{ CREDENTIALS_SERVICE_CONFIG }}"
edx_service_repos: "{{ CREDENTIALS_REPOS }}"
edx_service_user: "{{ credentials_user }}"
edx_service_home: "{{ credentials_home }}"
edx_service_packages:
debian: "{{ credentials_debian_pkgs }}"
redhat: "{{ credentials_redhat_pkgs }}"
- role: edx_django_service
edx_django_service_version: '{{ CREDENTIALS_VERSION }}'
edx_django_service_name: '{{ credentials_service_name }}'
edx_django_service_config_overrides: '{{ credentials_service_config_overrides }}'
edx_django_service_debian_pkgs_extra: '{{ credentials_debian_pkgs }}'
edx_django_service_gunicorn_port: '{{ credentials_gunicorn_port }}'
edx_django_service_django_settings_module: '{{ CREDENTIALS_DJANGO_SETTINGS_MODULE }}'
edx_django_service_environment_extra: '{{ credentials_environment }}'
edx_django_service_gunicorn_extra: '{{ CREDENTIALS_GUNICORN_EXTRA }}'
edx_django_service_nginx_port: '{{ CREDENTIALS_NGINX_PORT }}'
edx_django_service_ssl_nginx_port: '{{ CREDENTIALS_SSL_NGINX_PORT }}'
edx_django_service_language_code: '{{ CREDENTIALS_LANGUAGE_CODE }}'
edx_django_service_secret_key: '{{ CREDENTIALS_SECRET_KEY }}'
edx_django_service_staticfiles_storage: '{{ CREDENTIALS_STATICFILES_STORAGE }}'
edx_django_service_media_storage_backend: '{{ CREDENTIALS_MEDIA_STORAGE_BACKEND }}'
edx_django_service_memcache: '{{ CREDENTIALS_MEMCACHE }}'
edx_django_service_default_db_host: '{{ CREDENTIALS_MYSQL_HOST }}'
edx_django_service_default_db_name: '{{ CREDENTIALS_DEFAULT_DB_NAME }}'
edx_django_service_default_db_atomic_requests: false
edx_django_service_db_user: '{{ CREDENTIALS_MYSQL_USER }}'
edx_django_service_db_password: '{{ CREDENTIALS_MYSQL_PASSWORD }}'
edx_django_service_social_auth_edx_oidc_key: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_KEY }}'
edx_django_service_social_auth_edx_oidc_secret: '{{ CREDENTIALS_SOCIAL_AUTH_EDX_OIDC_SECRET }}'
edx_django_service_social_auth_redirect_is_https: '{{ CREDENTIALS_SOCIAL_AUTH_REDIRECT_IS_HTTPS }}'
edx_django_service_extra_apps: '{{ CREDENTIALS_EXTRA_APPS }}'
edx_django_service_session_expire_at_browser_close: '{{ CREDENTIALS_SESSION_EXPIRE_AT_BROWSER_CLOSE }}'
edx_django_service_automated_users: '{{ CREDENTIALS_AUTOMATED_USERS }}'
edx_django_service_cors_whitelist: '{{ CREDENTIALS_CORS_ORIGIN_WHITELIST }}'
edx_django_service_post_migrate_commands: '{{ credentials_post_migrate_commands }}'
......@@ -11,7 +11,7 @@
#
# Tasks for role credentials
#
# Overview:
# Overview: This role's tasks come from edx_django_service.
#
#
# Dependencies:
......@@ -20,233 +20,3 @@
# Example play:
#
#
- name: add gunicorn configuration file
template:
src: edx/app/credentials/credentials_gunicorn.py.j2
dest: "{{ credentials_home }}/credentials_gunicorn.py"
become_user: "{{ credentials_user }}"
tags:
- install
- install:configuration
- name: add deadsnakes repository
apt_repository:
repo: "ppa:fkrull/deadsnakes"
tags:
- install
- install:system-requirements
- name: install python3.5
apt:
name: "{{ item }}"
with_items:
- python3.5
- python3.5-dev
tags:
- install
- install:system-requirements
- name: build virtualenv
command: "virtualenv --python=python3.5 {{ credentials_venv_dir }}"
args:
creates: "{{ credentials_venv_dir }}/bin/pip"
become_user: "{{ credentials_user }}"
tags:
- install
- install:system-requirements
- name: install nodenv
pip:
name: "nodeenv"
version: "1.1.2"
# NOTE (CCB): Using the "virtualenv" option here doesn't seem to work.
executable: "{{ credentials_venv_dir }}/bin/pip"
become_user: "{{ credentials_user }}"
tags:
- install
- install:system-requirements
- name: create nodeenv
shell: "{{ credentials_venv_dir }}/bin/nodeenv {{ credentials_nodeenv_dir }} --node={{ credentials_node_version }} --prebuilt --force"
become_user: "{{ credentials_user }}"
tags:
- install
- install:system-requirements
- name: install application requirements
command: make production-requirements
args:
chdir: "{{ credentials_code_dir }}"
become_user: "{{ credentials_user }}"
environment: "{{ credentials_environment }}"
tags:
- install
- install:app-requirements
- name: install development requirements
command: make requirements
args:
chdir: "{{ credentials_code_dir }}"
become_user: "{{ credentials_user }}"
environment: "{{ credentials_environment }}"
tags:
- devstack
- devstack:install
- name: migrate database
command: make migrate
args:
chdir: "{{ credentials_code_dir }}"
become_user: "{{ credentials_user }}"
environment: "{{ credentials_migration_environment }}"
when: migrate_db is defined and migrate_db|lower == "yes"
tags:
- migrate
- migrate:db
# var should have more permissive permissions than the rest
- name: create credentials var dirs
file:
path: "{{ item }}"
state: directory
mode: 0775
owner: "{{ credentials_user }}"
group: "{{ common_web_group }}"
with_items:
- "{{ CREDENTIALS_MEDIA_ROOT }}"
tags:
- install
- install:base
- name: write out the supervisor wrapper
template:
src: "edx/app/credentials/credentials.sh.j2"
dest: "{{ credentials_home }}/{{ credentials_service_name }}.sh"
mode: 0650
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
tags:
- install
- install:configuration
- name: write supervisord config
template:
src: "edx/app/supervisor/conf.d.available/credentials.conf.j2"
dest: "{{ supervisor_available_dir }}/{{ credentials_service_name }}.conf"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: write devstack script
template:
src: "edx/app/credentials/devstack.sh.j2"
dest: "{{ credentials_home }}/devstack.sh"
owner: "{{ supervisor_user }}"
group: "{{ common_web_user }}"
mode: 0744
tags:
- devstack
- devstack:install
- name: setup the credentials env file
template:
src: "./{{ credentials_home }}/{{ credentials_service_name }}_env.j2"
dest: "{{ credentials_home }}/credentials_env"
owner: "{{ credentials_user }}"
group: "{{ credentials_user }}"
mode: 0644
tags:
- install
- install:configuration
- name: enable supervisor script
file:
src: "{{ supervisor_available_dir }}/{{ credentials_service_name }}.conf"
dest: "{{ supervisor_cfg_dir }}/{{ credentials_service_name }}.conf"
state: link
force: yes
when: not disable_edx_services
tags:
- install
- install:configuration
- name: update supervisor configuration
command: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
when: not disable_edx_services
tags:
- manage
- manage:start
- name: create symlinks from the venv bin dir
file:
src: "{{ credentials_venv_dir }}/bin/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.credentials"
state: link
with_items:
- python
- pip
- django-admin.py
tags:
- install
- install:app-requirements
- name: create symlinks from the repo dir
file:
src: "{{ credentials_code_dir }}/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.credentials"
state: link
with_items:
- manage.py
tags:
- install
- install:app-requirements
- name: run collectstatic
command: make static
args:
chdir: "{{ credentials_code_dir }}"
become_user: "{{ credentials_user }}"
environment: "{{ credentials_environment }}"
tags:
- assets
- assets:gather
- name: restart the application
supervisorctl:
state: restarted
supervisorctl_path: "{{ supervisor_ctl }}"
config: "{{ supervisor_cfg }}"
name: "{{ credentials_service_name }}"
when: not disable_edx_services
become_user: "{{ supervisor_service_user }}"
tags:
- manage
- manage:start
- name: Copying nginx configs for credentials
template:
src: edx/app/nginx/sites-available/credentials.j2
dest: "{{ nginx_sites_available_dir }}/credentials"
owner: root
group: "{{ common_web_user }}"
mode: 0640
notify: reload nginx
tags:
- install
- install:vhosts
- name: Creating nginx config links for credentials
file:
src: "{{ nginx_sites_available_dir }}/credentials"
dest: "{{ nginx_sites_enabled_dir }}/credentials"
state: link
owner: root
group: root
notify: reload nginx
tags:
- install
- install:vhosts
#!/usr/bin/env bash
# {{ ansible_managed }}
{% set credentials_venv_bin = credentials_home + "/venvs/" + credentials_service_name + "/bin" %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
{% set executable = credentials_venv_bin + '/newrelic-admin run-program ' + credentials_venv_bin + '/gunicorn' %}
{% else %}
{% set executable = credentials_venv_bin + '/gunicorn' %}
{% endif %}
{% if COMMON_ENABLE_NEWRELIC_APP %}
export NEW_RELIC_APP_NAME="{{ CREDENTIALS_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% endif -%}
source {{ credentials_home }}/credentials_env
{{ executable }} -c {{ credentials_home }}/credentials_gunicorn.py {{ CREDENTIALS_GUNICORN_EXTRA }} credentials.wsgi:application
"""
gunicorn configuration file: http://docs.gunicorn.org/en/develop/configure.html
{{ ansible_managed }}
"""
timeout = {{ credentials_gunicorn_timeout }}
bind = "{{ credentials_gunicorn_host }}:{{ credentials_gunicorn_port }}"
pythonpath = "{{ credentials_code_dir }}"
workers = {{ CREDENTIALS_GUNICORN_WORKERS }}
worker_class = "{{ CREDENTIALS_GUNICORN_WORKER_CLASS }}"
{{ CREDENTIALS_GUNICORN_EXTRA_CONF }}
#!/usr/bin/env bash
# {{ ansible_managed }}
source {{ credentials_home }}/credentials_env
COMMAND=$1
case $COMMAND in
start)
{% set credentials_venv_bin = credentials_home + "/venvs/" + credentials_service_name + "/bin" %}
{{ supervisor_venv_bin }}/supervisord --configuration {{ supervisor_cfg }}
# Needed to run bower as root. See explanation around 'credentials_user=root'
echo '{ "allow_root": true }' > /root/.bowerrc
cd /edx/app/edx_ansible/edx_ansible/docker/plays
/edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook credentials.yml -c local -i '127.0.0.1,' \
-t 'install:app-requirements,assets:gather,devstack,migrate' \
--extra-vars="migrate_db=yes" \
--extra-vars="@/ansible_overrides.yml" \
--extra-vars="credentials_user=root" # Needed when sharing the volume with the host machine because node/bower drops
# everything in the code directory by default. So we get issues with permissions
# on folders owned by the developer.
# Need to start supervisord and nginx manually because systemd is hard to run on docker
# http://developers.redhat.com/blog/2014/05/05/running-systemd-within-docker-container/
# Both daemonize by default
nginx
/edx/app/supervisor/venvs/supervisor/bin/supervisord --configuration /edx/app/supervisor/supervisord.conf
# Docker requires an active foreground task. Tail the logs to appease Docker and
# provide useful output for development.
cd {{ supervisor_log_dir }}
tail -f {{ credentials_service_name }}-stderr.log -f {{ credentials_service_name }}-stdout.log
;;
open)
cd {{ credentials_code_dir }}/
. {{ credentials_venv_bin }}/activate
/bin/bash
;;
esac
#
# {{ ansible_managed }}
#
{% if nginx_default_sites is defined and "credentials" in nginx_default_sites %}
{% set default_site = "default_server" %}
{% else %}
{% set default_site = "" %}
{% endif %}
upstream credentials_app_server {
{% for host in NGINX_CREDENTIALS_GUNICORN_HOSTS %}
server {{ host }}:{{ credentials_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
# The Origin request header indicates where a fetch originates from. It doesn't include any path information,
# but only the server name (e.g. https://www.example.com).
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin for details.
#
# Here we set the value that is included in the Access-Control-Allow-Origin response header. If the origin is one
# of our known hosts--served via HTTP or HTTPS--we allow for CORS. Otherwise, we set the "null" value, disallowing CORS.
map $http_origin $cors_origin {
default "null";
{% for host in CREDENTIALS_CORS_ORIGIN_WHITELIST %}
"~*^https?:\/\/{{ host|replace('.', '\.') }}$" $http_origin;
{% endfor %}
}
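# Worked example (hostnames are illustrative, not defaults of this role): if courses.example.com is in
# CREDENTIALS_CORS_ORIGIN_WHITELIST, a request carrying "Origin: https://courses.example.com" maps
# $cors_origin to that origin and CORS is allowed; an origin not on the whitelist maps to "null".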
server {
server_name {{ CREDENTIALS_HOSTNAME }};
{% if NGINX_ENABLE_SSL %}
listen {{ CREDENTIALS_NGINX_PORT }} {{ default_site }};
listen {{ CREDENTIALS_SSL_NGINX_PORT }} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
# request the browser to use SSL for all connections
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
{% else %}
listen {{ CREDENTIALS_NGINX_PORT }} {{ default_site }};
{% endif %}
location ~ ^{{ CREDENTIALS_MEDIA_URL }}(?P<file>.*) {
root {{ CREDENTIALS_MEDIA_ROOT }};
try_files /$file =404;
}
location ~ ^{{ CREDENTIALS_STATIC_URL }}(?P<file>.*) {
root {{ CREDENTIALS_STATIC_ROOT }};
add_header Cache-Control "max-age=31536000";
add_header 'Access-Control-Allow-Origin' $cors_origin;
# Inform downstream caches to take certain headers into account when reading/writing to cache.
add_header 'Vary' 'Accept-Encoding,Origin';
try_files /$file =404;
}
location / {
try_files $uri @proxy_to_app;
}
{% if NGINX_ROBOT_RULES|length > 0 %}
location /robots.txt {
root {{ nginx_app_dir }};
try_files $uri /robots.txt =404;
}
{% endif %}
location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://credentials_app_server;
}
# Redirect to HTTPS if the request arrived over HTTP...
if ($http_x_forwarded_proto = "http") {
set $do_redirect "true";
}
# Run our actual redirect...
if ($do_redirect = "true") {
rewrite ^ https://$host$request_uri? permanent;
}
}
......@@ -24,19 +24,24 @@ demo_test_users:
username: honor
hashed_password: "{{ demo_hashed_password }}"
is_staff: false
is_superuser: false
- email: 'audit@example.com'
username: audit
hashed_password: "{{ demo_hashed_password }}"
is_staff: false
is_superuser: false
- email: 'verified@example.com'
username: verified
hashed_password: "{{ demo_hashed_password }}"
is_staff: false
is_superuser: false
demo_staff_user:
email: 'staff@example.com'
username: staff
hashed_password: "{{ demo_hashed_password }}"
is_staff: true
is_superuser: false
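# Extra users to create on sandboxes, in addition to the demo users above. Each entry is expected to
# have the same shape as demo_test_users (email, username, hashed_password, is_staff, is_superuser);
# see the "create some test users" task in this role.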
SANDBOX_EDXAPP_USERS: []
demo_edxapp_user: 'edxapp'
demo_edxapp_settings: '{{ COMMON_EDXAPP_SETTINGS }}'
demo_edxapp_venv_bin: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/venvs/{{demo_edxapp_user}}/bin'
......
......@@ -26,12 +26,16 @@
demo_test_and_staff_users: "{{ demo_test_users }}"
when: not DEMO_CREATE_STAFF_USER
- name: build staff, admin, and test user list
set_fact:
demo_test_admin_and_staff_users: "{{ demo_test_and_staff_users + SANDBOX_EDXAPP_USERS }}"
- name: create some test users
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}{% if item.is_staff %} --staff{% endif %}"
shell: "{{ demo_edxapp_venv_bin }}/python ./manage.py lms --settings={{ demo_edxapp_settings }} --service-variant lms manage_user {{ item.username}} {{ item.email }} --initial-password-hash {{ item.hashed_password | quote }}{% if item.is_staff %} --staff{% endif %}{% if item.is_superuser %} --superuser{% endif %}"
args:
chdir: "{{ demo_edxapp_code_dir }}"
become_user: "{{ common_web_user }}"
with_items: "{{ demo_test_and_staff_users }}"
with_items: "{{ demo_test_admin_and_staff_users }}"
when: demo_checkout.changed
- name: enroll test users in the demo course
......
---
SQLITE_FIX_TMP_DIR: "/var/tmp/sqlite_fix"
PYSQLITE_URL: "https://codeload.github.com/ghaering/pysqlite/tar.gz/2.8.3"
PYSQLITE_CREATED_PATH: "pysqlite-2.8.3"
PYSQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ PYSQLITE_CREATED_PATH }}"
SQLITE_AUTOCONF_URL: "https://www.sqlite.org/2016/sqlite-autoconf-3140100.tar.gz"
SQLITE_AUTOCONF_CREATED_PATH: "sqlite-autoconf-3140100"
SQLITE_TMP_PATH: "{{ SQLITE_FIX_TMP_DIR }}/{{ SQLITE_AUTOCONF_CREATED_PATH }}"
---
- name: Create temporary directory for the sqlite fix
file:
path: "{{ SQLITE_FIX_TMP_DIR }}"
state: directory
mode: 0775
when: devstack is defined and devstack
tags:
- devstack
- devstack:install
# Tasks to download and upgrade pysqlite to prevent segfaults when testing in devstack
- name: Download and unzip sqlite autoconf update
unarchive:
src: "{{ SQLITE_AUTOCONF_URL }}"
dest: "{{ SQLITE_FIX_TMP_DIR }}"
remote_src: yes
when: devstack is defined and devstack
tags:
- devstack
- devstack:install
- name: Download and unzip pysqlite update
unarchive:
src: "{{ PYSQLITE_URL }}"
dest: "{{ SQLITE_FIX_TMP_DIR }}"
remote_src: yes
when: devstack is defined and devstack
tags:
- devstack
- devstack:install
# The copy module doesn't support recursive dir copies with remote_src: yes
- name: Copy sqlite autoconf sources into the pysqlite update dir
command: "cp -av . {{ PYSQLITE_TMP_PATH }}/"
args:
chdir: "{{ SQLITE_TMP_PATH }}"
when: devstack is defined and devstack
tags:
- devstack
- devstack:install
- name: Build and install pysqlite update
command: "python setup.py build_static install"
args:
chdir: "{{ PYSQLITE_TMP_PATH }}"
when: devstack is defined and devstack
tags:
- devstack
- devstack:install
- name: Clean up pysqlite install artifacts
file:
state: absent
path: "{{ SQLITE_FIX_TMP_DIR }}/"
when: devstack is defined and devstack
tags:
- devstack
- devstack:install
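# Illustrative verification sketch (an assumption, not part of the upstream role): confirm that the
# rebuilt pysqlite links against the newer sqlite library.
- name: Report the sqlite version linked into pysqlite
  command: python -c "from pysqlite2 import dbapi2; print(dbapi2.sqlite_version)"
  register: pysqlite_version_check
  changed_when: false
  when: devstack is defined and devstack
  tags:
    - devstack
    - devstack:install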
......@@ -11,6 +11,7 @@
# Defaults for role discovery
#
DISCOVERY_GIT_IDENTITY: !!null
#
# vars are namespaced with the module name.
......@@ -23,6 +24,7 @@ discovery_environment:
discovery_user: "{{ discovery_service_name }}"
discovery_home: "{{ COMMON_APP_DIR }}/{{ discovery_service_name }}"
discovery_code_dir: "{{ discovery_home }}/{{ discovery_service_name }}"
#
# OS packages
......@@ -57,7 +59,20 @@ DISCOVERY_URL_ROOT: 'http://discovery:{{ DISCOVERY_NGINX_PORT }}'
DISCOVERY_LOGOUT_URL: '{{ DISCOVERY_URL_ROOT }}/logout/'
DISCOVERY_SECRET_KEY: 'Your secret key here'
DISCOVERY_LANGUAGE_CODE: 'en-us'
DISCOVERY_LANGUAGE_CODE: 'en'
## Configuration for django-parler package. For more information visit
## https://django-parler.readthedocs.io/en/latest/configuration.html#parler-languages
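## With the values below, site 1 serves 'en' and falls back to DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE
## for missing translations.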
DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE: '{{DISCOVERY_LANGUAGE_CODE}}'
DISCOVERY_PARLER_LANGUAGES:
1:
- code: 'en'
default:
fallbacks:
- '{{DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE}}'
hide_untranslated: 'False'
DISCOVERY_DEFAULT_PARTNER_ID: 1
DISCOVERY_SESSION_EXPIRE_AT_BROWSER_CLOSE: false
......@@ -96,10 +111,21 @@ DISCOVERY_EMAIL_HOST_PASSWORD: ''
DISCOVERY_PUBLISHER_FROM_EMAIL: !!null
DISCOVERY_OPENEXCHANGERATES_API_KEY: ''
DISCOVERY_GUNICORN_EXTRA: ''
DISCOVERY_EXTRA_APPS: []
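# Git repositories checked out for this role; passed through to the edx_django_service role as
# edx_django_service_repos (see the edx_django_service dependency further down).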
DISCOVERY_REPOS:
- PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}"
DOMAIN: "{{ COMMON_GIT_MIRROR }}"
PATH: "{{ COMMON_GIT_PATH }}"
REPO: 'course-discovery.git'
VERSION: "{{ DISCOVERY_VERSION }}"
DESTINATION: "{{ discovery_code_dir }}"
SSH_KEY: "{{ DISCOVERY_GIT_IDENTITY }}"
discovery_service_config_overrides:
ELASTICSEARCH_URL: '{{ DISCOVERY_ELASTICSEARCH_URL }}'
ELASTICSEARCH_INDEX_NAME: '{{ DISCOVERY_ELASTICSEARCH_INDEX_NAME }}'
......@@ -123,5 +149,11 @@ discovery_service_config_overrides:
PUBLISHER_FROM_EMAIL: '{{ DISCOVERY_PUBLISHER_FROM_EMAIL }}'
OPENEXCHANGERATES_API_KEY: '{{ DISCOVERY_OPENEXCHANGERATES_API_KEY }}'
LANGUAGE_CODE: '{{DISCOVERY_LANGUAGE_CODE}}'
PARLER_DEFAULT_LANGUAGE_CODE: '{{DISCOVERY_PARLER_DEFAULT_LANGUAGE_CODE}}'
PARLER_LANGUAGES: '{{DISCOVERY_PARLER_LANGUAGES}}'
# See edx_django_service_automated_users for an example of what this should be
DISCOVERY_AUTOMATED_USERS: {}
......@@ -20,8 +20,7 @@
# }
dependencies:
- role: edx_django_service
edx_django_service_repo: 'course-discovery'
edx_django_service_version: '{{ DISCOVERY_VERSION }}'
edx_django_service_repos: '{{ DISCOVERY_REPOS }}'
edx_django_service_name: '{{ discovery_service_name }}'
edx_django_service_user: '{{ discovery_user }}'
edx_django_service_home: '{{ COMMON_APP_DIR }}/{{ discovery_service_name }}'
......