Commit f713b79b by Feanil Patel

Merge pull request #1678 from edx/feanil/rc-lavash-0

Feanil/rc lavash 0
parents 3f695726 82d97ecb
- Role: edxapp
- A new var was added to make it easy to invalidate the default
memcache store to make it easier to invalidate sessions. Updating
the edxapp env.json files will result in all users getting logged
out. This is a one time penalty as long as the value of `EDXAPP_DEFAULT_CACHE_VERSION`
is not explicitly changed.
- Role: nginx
- New html templates for server errors added.
Defaults for a ratelimiting static page and server error static page.
CMS/LMS are set to use them by default, wording can be changed in the
Nginx default vars.
- Role: edxapp
- We now have an all caps variable override for celery workers
- Role: common
- We now remove the default syslog.d conf file (50-default.conf) this will
break people who have hand-edited that file.
......@@ -5,6 +20,10 @@
- Role: edxapp
- Updated the module store settings to match the new settings format.
- Update, possible breaking change: the edxapp role vars edxapp_lms_env and edxapp_cms_env have
been changed to EDXAPP_LMS_ENV and EDXAPP_CMS_ENV to indicate, via our convention,
that overriding them is expected. The default values remain the same.
- Role: analytics-api
- Added a new role for the analytics-api Django app. Currently a private repo
......@@ -29,3 +48,7 @@
- Role: Mongo
- Fixed case of variable used in if block that breaks cluster configuration
by changing mongo_clustered to MONGO_CLUSTERED.
- Role: Edxapp
- Added EDXAPP_LMS_AUTH_EXTRA and EDXAPP_CMS_AUTH_EXTRA for passing unique AUTH_EXTRA configurations to the LMS and CMS.
Both variables default to EDXAPP_AUTH_EXTRA for backward compatibility
import os
import prettytable
import hipchat
import time
import random
from ansible import utils
try:
import prettytable
except ImportError:
prettytable = None
try:
import hipchat
except ImportError:
hipchat = None
class CallbackModule(object):
......@@ -24,30 +29,40 @@ class CallbackModule(object):
"""
def __init__(self):
if 'HIPCHAT_TOKEN' in os.environ:
self.start_time = time.time()
self.task_report = []
self.last_task = None
self.last_task_changed = False
self.last_task_count = 0
self.last_task_delta = 0
self.last_task_start = time.time()
self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', True) == True)
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
try:
self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
except Exception as e:
utils.warning("Unable to connect to hipchat: {}".format(e))
self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
self.printed_playbook = False
self.playbook_name = None
self.enabled = True
else:
self.enabled = False
self.enabled = "HIPCHAT_TOKEN" in os.environ
if not self.enabled:
return
# make sure we got our imports
if not hipchat:
raise ImportError(
"The hipchat plugin requires the hipchat Python module, "
"which is not installed or was not found."
)
if not prettytable:
raise ImportError(
"The hipchat plugin requires the prettytable Python module, "
"which is not installed or was not found."
)
self.start_time = time.time()
self.task_report = []
self.last_task = None
self.last_task_changed = False
self.last_task_count = 0
self.last_task_delta = 0
self.last_task_start = time.time()
self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', True) == True)
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
try:
self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
except Exception as e:
utils.warning("Unable to connect to hipchat: {}".format(e))
self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
self.printed_playbook = False
self.playbook_name = None
def _send_hipchat(self, message, room=None, from_name=None, color=None, message_format='text'):
......@@ -221,7 +236,7 @@ class CallbackModule(object):
summary_output = "<b>{}</b>: <i>{}</i> - ".format(self.hipchat_msg_prefix, host)
for summary_item in ['ok', 'changed', 'unreachable', 'failures']:
if stats[summary_item] != 0:
summary_output += "<b>{}</b> - {} ".format(summary_item, stats[summary_item])
summary_output += "<b>{}</b> - {} ".format(summary_item, stats[summary_item])
summary_all_host_output.append(summary_output)
self._send_hipchat("<br />".join(summary_all_host_output), message_format='html')
msg = "<b>{description}</b>: Finished Ansible run for <b><i>{play}</i> in {min:02} minutes, {sec:02} seconds</b><br /><br />".format(
......
......@@ -22,11 +22,12 @@ import time
import json
import socket
try:
import boto
except ImportError:
boto = None
else:
import boto.sqs
from boto.exception import NoAuthHandlerFound
except ImportError:
print "Boto is required for the sqs_notify callback plugin"
raise
class CallbackModule(object):
......@@ -47,36 +48,42 @@ class CallbackModule(object):
- START events
"""
def __init__(self):
self.enable_sqs = 'ANSIBLE_ENABLE_SQS' in os.environ
if not self.enable_sqs:
return
# make sure we got our imports
if not boto:
raise ImportError(
"The sqs callback module requires the boto Python module, "
"which is not installed or was not found."
)
self.start_time = time.time()
if 'ANSIBLE_ENABLE_SQS' in os.environ:
self.enable_sqs = True
if not 'SQS_REGION' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \
'not defined in environment'
sys.exit(1)
self.region = os.environ['SQS_REGION']
try:
self.sqs = boto.sqs.connect_to_region(self.region)
except NoAuthHandlerFound:
print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \
'to AWS due invalid credentials'
sys.exit(1)
if not 'SQS_NAME' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \
'defined in environment'
sys.exit(1)
self.name = os.environ['SQS_NAME']
self.queue = self.sqs.create_queue(self.name)
if 'SQS_MSG_PREFIX' in os.environ:
self.prefix = os.environ['SQS_MSG_PREFIX']
else:
self.prefix = ''
self.last_seen_ts = {}
if not 'SQS_REGION' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \
'not defined in environment'
sys.exit(1)
self.region = os.environ['SQS_REGION']
try:
self.sqs = boto.sqs.connect_to_region(self.region)
except NoAuthHandlerFound:
print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \
'to AWS due invalid credentials'
sys.exit(1)
if not 'SQS_NAME' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \
'defined in environment'
sys.exit(1)
self.name = os.environ['SQS_NAME']
self.queue = self.sqs.create_queue(self.name)
if 'SQS_MSG_PREFIX' in os.environ:
self.prefix = os.environ['SQS_MSG_PREFIX']
else:
self.enable_sqs = False
self.prefix = ''
self.last_seen_ts = {}
def runner_on_failed(self, host, res, ignore_errors=False):
if self.enable_sqs:
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aide
- role: datadog
......
......@@ -3,5 +3,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- alton
- name: Deploy Antivirus Scanner
hosts: all
sudo: True
gather_facts: True
roles:
- antivirus
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- bastion
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- certs
......
# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
# ansible-playbook -i ec2.py cluster_rabbitmq.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: all
sudo: True
......@@ -28,14 +28,9 @@
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- shell: echo "rabbit@ip-{{ item|replace('.', '-') }}"
when: item != ansible_ec2_local_ipv4
with_items: hostvars.keys()
register: list
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster {{ item.stdout }}
when: item.stdout is defined
with_items: list.results
- command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }}
when: hostvars.keys()[0] != ansible_ec2_local_ipv4
- command: rabbitmqctl start_app
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- role: datadog
......
......@@ -31,7 +31,7 @@
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
- "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}"
- "EDXAPP_MONGO_PASS: {{ EDXAPP_MONGO_PASS }}"
- "EDXAPP_MONGO_PASSWORD: {{ EDXAPP_MONGO_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
......
......@@ -40,6 +40,10 @@
sudo: yes
with_items:
- python-mysqldb
# When this is run on jenkins the package will already
# exist and can't run as the jenkins user because it
# does not have sudo privs.
when: ansible_ssh_user != 'jenkins'
- name: create mysql databases for the edX stack
mysql_db: >
db={{ item[0] }}{{ item[1].db_name }}
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- demo
- role: datadog
......
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- devpi
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......
- name: Deploy the edx_ansible role
hosts: all
sudo: True
gather_facts: False
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- edx_ansible
......@@ -12,7 +12,6 @@
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
- certs
......@@ -31,7 +30,6 @@
- forum
- { role: "xqueue", update_users: True }
- xserver
- ora
- certs
- edx_ansible
- analytics-api
......
- name: Create ec2 instance
hosts: localhost
connection: local
gather_facts: False
gather_facts: True
vars:
keypair: continuous-integration
instance_type: t2.medium
......
......@@ -3,6 +3,8 @@
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......@@ -20,6 +22,10 @@
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
NEWRELIC_LOGWATCH:
- logwatch-503.j2
- logwatch-cms-errors.j2
- logwatch-lms-errors.j2
when: COMMON_ENABLE_NEWRELIC
- role: minos
when: COMMON_ENABLE_MINOS
......@@ -4,15 +4,27 @@
gather_facts: False
vars:
db_dry_run: "--db-dry-run"
syncdb: false
tasks:
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
- name: migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate {{ db_dry_run }}
python manage.py {{ item }} migrate --noinput {{ db_dry_run }} --settings=aws_migrate
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
with_items:
- lms
- cms
- name: syncdb
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} syncdb --noinput --settings=aws_migrate
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
when: syncdb
with_items:
- lms
- cms
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- flower
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......
- name: Deploy Insights
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: True
roles:
- role: nginx
nginx_sites:
- insights
- aws
- insights
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -3,5 +3,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- jenkins_admin
......@@ -8,6 +8,8 @@
gather_facts: True
vars:
mongo_enable_journal: False
serial_count: 1
serial: "{{ serial_count }}"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
......@@ -18,4 +20,5 @@
- edxlocal
- mongo
- browsers
- browsermob-proxy
- jenkins_worker
......@@ -3,7 +3,9 @@
hosts: all
sudo: True
gather_facts: True
serial: 1
vars:
serial_count: 1
serial: "{{ serial_count }}"
vars_files:
- "{{secure_dir}}/vars/{{COMMON_ENVIRONMENT}}/legacy-ora.yml"
roles:
......
......@@ -41,6 +41,18 @@ class LifecycleInventory():
parser = argparse.ArgumentParser()
self.profile = profile
def get_e_d_from_tags(self, group):
    """Return the (environment, deployment) pair for *group*.

    Scans the group's tags for keys named "environment" and
    "deployment".  If a key is absent its placeholder default is
    returned; if a key occurs more than once, the last occurrence
    wins (same as the original scan order).
    """
    values = {
        "environment": "default_environment",
        "deployment": "default_deployment",
    }
    for tag in group.tags:
        if tag.key in values:
            values[tag.key] = tag.value
    return values["environment"], values["deployment"]
def get_instance_dict(self):
ec2 = boto.connect_ec2(profile_name=self.profile)
reservations = ec2.get_all_instances()
......@@ -64,10 +76,12 @@ class LifecycleInventory():
for instance in group.instances:
private_ip_address = instances[instance.instance_id].private_ip_address
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
if private_ip_address:
environment,deployment = self.get_e_d_from_tags(group)
inventory[environment + "_" + deployment + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
......@@ -77,8 +91,8 @@ if __name__=="__main__":
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
LifecycleInventory(args.profile).run()
......@@ -3,6 +3,9 @@
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- minos
- aws
- minos
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- mongo
- mongo_mms
......
- name: Configure notifier instance
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- notifier
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- role: nginx
nginx_sites:
......
# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml
# -e@/vars/env.yml --limit Terminating_Wait
# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml
# -e@/vars/env.yml --limit Terminating_Wait -e TARGET="Terminating_Wait"
#
# Note that the target now must be specified as an argument
#
#
# This is separate because its use of handlers
# leads to various race conditions.
#
- name: Stop all services
hosts: Terminating_Wait
hosts: "{{TARGET}}"
sudo: True
gather_facts: False
vars:
......@@ -15,41 +18,30 @@
- stop_all_edx_services
- name: Server retirement workflow
hosts: Terminating_Wait
hosts: "{{TARGET}}"
sudo: True
gather_facts: False
tasks:
- name: Force a log rotation
command: /usr/sbin/logrotate -f /etc/logrotate.d/{{ item }}
with_items:
- "apport"
- "apt"
- "aptitude"
- "dpkg"
- "hourly"
- "landscape-client"
- "newrelic-sysmond"
- "nginx"
- "nginx-access"
- "nginx-error"
- "ppp"
- "rsyslog"
- "ufw"
- "unattended-upgrades"
- "upstart"
- name: Force a log rotation
- name: Terminate existing s3 log sync
command: /usr/bin/pkill send-logs-to-s3 || true
- name: "Ensure send-logs-to-s3 script is in the logrotate file"
shell: grep send-logs-to-s3 /etc/logrotate.d/hourly/tracking.log
# We only force a rotation of edx logs.
# Forced rotation of system logfiles will only
# work if there hasn't already been a previous rotation
# The logrotate will also call send-logs-to-s3 but hasn't
# been updated for all servers yet.
- name: Force a log rotation which will call the log sync
command: /usr/sbin/logrotate -f /etc/logrotate.d/hourly/{{ item }}
with_items:
- "tracking.log"
- "edx-services"
- name: Terminate existing s3 log sync
command: /usr/bin/pkill send-logs-to-s3 || true
- name: Send logs to s3
command: /edx/bin/send-logs-to-s3
# This catches the case where tracking.log is 0b
- name: Sync again
command: /edx/bin/send-logs-to-s3 -d "{{ COMMON_LOG_DIR }}/tracking/*" -b "{{ COMMON_AWS_SYNC_BUCKET }}/logs/tracking"
- name: Run minos verification
hosts: Terminating_Wait
hosts: "{{TARGET}}"
sudo: True
gather_facts: False
tasks:
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- snort
- role: datadog
......
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: False
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- stop_all_edx_services
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- testcourses
- role: datadog
......
......@@ -35,6 +35,8 @@
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
NEWRELIC_LOGWATCH:
- logwatch-xqueue-errors.j2
when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
......
......@@ -8,6 +8,8 @@
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- xqwatcher
......@@ -16,4 +18,4 @@
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
\ No newline at end of file
when: COMMON_ENABLE_NEWRELIC
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......
......@@ -31,14 +31,14 @@
tasks:
- name: edX configuration
cloudformation: >
stack_name="$name" state=present
region=$region disable_rollback=false
stack_name="{{ name }}" state=present
region="{{ region }}" disable_rollback=false
template=../cloudformation_templates/edx-server-multi-instance.json
args:
template_parameters:
KeyName: $key
KeyName: "{{key}}"
InstanceType: m1.small
GroupTag: $group
GroupTag: "{{group}}"
register: stack
- name: show stack outputs
debug: msg="My stack outputs are ${stack.stack_outputs}"
debug: msg="My stack outputs are {{stack.stack_outputs}}"
......@@ -18,6 +18,7 @@
# These should stay false for the public AMI
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
ENABLE_LEGACY_ORA: !!null
roles:
- role: nginx
nginx_sites:
......@@ -38,7 +39,8 @@
- elasticsearch
- forum
- { role: "xqueue", update_users: True }
- ora
- role: ora
when: ENABLE_LEGACY_ORA
- certs
- edx_ansible
- role: datadog
......
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- name: configure the boto profiles for alton
template: >
src="boto.j2"
......
......@@ -34,11 +34,4 @@
- "{{ alton_app_dir }}"
- "{{ alton_venvs_dir }}"
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- include: deploy.yml tags=deploy
......@@ -6,3 +6,5 @@ export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
export WILL_BOTO_PROFILES="{{ ALTON_AWS_CREDENTIALS|join(';') }}"
{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
......
......@@ -19,46 +19,58 @@ ANALYTICS_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
ANALYTICS_API_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
ANALYTICS_API_VERSION: "master"
# Default dummy user, override this!!
ANALYTICS_API_USERS:
"dummy-api-user": "changeme"
ANALYTICS_API_SECRET_KEY: 'Your secret key here'
ANALYTICS_API_TIME_ZONE: 'UTC'
ANALYTICS_API_LANGUAGE_CODE: 'en-us'
ANALYTICS_API_EMAIL_HOST: 'localhost'
ANALYTICS_API_EMAIL_HOST_USER: 'mail_user'
ANALYTICS_API_EMAIL_HOST_PASSWORD: 'mail_password'
ANALYTICS_API_EMAIL_PORT: 587
ANALYTICS_API_AUTH_TOKEN: 'put-your-api-token-here'
ANALYTICS_API_CONFIG:
ANALYTICS_DATABASE: 'reports'
SECRET_KEY: 'Your secret key here'
TIME_ZONE: 'America/New_York'
LANGUAGE_CODE: 'en-us'
SECRET_KEY: '{{ ANALYTICS_API_SECRET_KEY }}'
TIME_ZONE: '{{ ANALYTICS_API_TIME_ZONE }}'
LANGUAGE_CODE: '{{ANALYTICS_API_LANGUAGE_CODE }}'
# email config
EMAIL_HOST: 'smtp.example.com'
EMAIL_HOST_PASSWORD: ""
EMAIL_HOST_USER: ""
EMAIL_PORT: 587
API_AUTH_TOKEN: 'put-your-api-token-here'
STATICFILES_DIRS: []
EMAIL_HOST: '{{ ANALYTICS_API_EMAIL_HOST }}'
EMAIL_HOST_PASSWORD: '{{ ANALYTICS_API_EMAIL_HOST_PASSWORD }}'
EMAIL_HOST_USER: '{{ ANALYTICS_API_EMAIL_HOST_USER }}'
EMAIL_PORT: $ANALYTICS_API_EMAIL_PORT
API_AUTH_TOKEN: '{{ ANALYTICS_API_AUTH_TOKEN }}'
STATICFILES_DIRS: ['static']
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
# db config
DATABASE_OPTIONS:
connect_timeout: 10
DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
DATABASES: '{{ ANALYTICS_API_DATABASES }}'
ANALYTICS_API_GUNICORN_WORKERS: "2"
ANALYTICS_API_GUNICORN_EXTRA: ""
#
# vars are namespaced with the module name.
#
......
......@@ -32,7 +32,7 @@
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
- fail: msg="You must provide an private key for the analytics repo"
- fail: msg="You must provide a private key for the analytics repo"
when: not ANALYTICS_API_GIT_IDENTITY
- include: deploy.yml tags=deploy
......@@ -15,4 +15,4 @@ export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} {{ ANALYTICS_API_GUNICORN_EXTRA }} analyticsdataserver.wsgi:application
......@@ -19,6 +19,7 @@ AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '2'
AS_GUNICORN_EXTRA: ""
# add public keys to enable the automator user
# for running manage.py commands
......@@ -40,14 +41,14 @@ analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $AS_DB_ANALYTICS_USER
PASSWORD: $AS_DB_ANALYTICS_PASSWORD
HOST: $AS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $AS_API_KEY
USER: "{{ AS_DB_ANALYTICS_USER }}"
PASSWORD: "{{ AS_DB_ANALYTICS_PASSWORD }}"
HOST: "{{ AS_DB_ANALYTICS_HOST }}"
ANALYTICS_API_KEY: "{{ AS_API_KEY }}"
ANALYTICS_RESULTS_DB:
MONGO_URI: $AS_DB_RESULTS_URL
MONGO_DB: $AS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION
MONGO_URI: "{{ AS_DB_RESULTS_URL }}"
MONGO_DB: "{{ AS_DB_RESULTS_DB }}"
MONGO_STORED_QUERIES_COLLECTION: "{{ AS_DB_RESULTS_COLLECTION }}"
as_role_name: "analytics-server"
as_user: "analytics-server"
......
......@@ -28,7 +28,7 @@
accept_hostkey=yes
version={{ as_version }} force=true
environment:
GIT_SSH: $as_git_ssh
GIT_SSH: "{{ as_git_ssh }}"
notify: restart the analytics service
notify: start the analytics service
tags:
......
......@@ -18,4 +18,4 @@ env DJANGO_SETTINGS_MODULE={{ as_django_settings }}
chdir {{ as_code_dir }}
setuid {{ as_web_user }}
exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv anserv.wsgi
exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv {{ AS_GUNICORN_EXTRA }} anserv.wsgi
......@@ -19,6 +19,7 @@ ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '2'
ANALYTICS_GUNICORN_EXTRA: ""
DATABASES:
default: &databases_default
......@@ -33,14 +34,14 @@ analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $ANALYTICS_DB_ANALYTICS_USER
PASSWORD: $ANALYTICS_DB_ANALYTICS_PASSWORD
HOST: $ANALYTICS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $ANALYTICS_API_KEY
USER: "{{ ANALYTICS_DB_ANALYTICS_USER }}"
PASSWORD: "{{ ANALYTICS_DB_ANALYTICS_PASSWORD }}"
HOST: "{{ ANALYTICS_DB_ANALYTICS_HOST }}"
ANALYTICS_API_KEY: "{{ ANALYTICS_API_KEY }}"
ANALYTICS_RESULTS_DB:
MONGO_URI: $ANALYTICS_DB_RESULTS_URL
MONGO_DB: $ANALYTICS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION
MONGO_URI: "{{ ANALYTICS_DB_RESULTS_URL }}"
MONGO_DB: "{{ ANALYTICS_DB_RESULTS_DB }}"
MONGO_STORED_QUERIES_COLLECTION: "{{ ANALYTICS_DB_RESULTS_COLLECTION }}"
analytics_role_name: "analytics"
analytics_user: "analytics"
......
......@@ -28,7 +28,7 @@
accept_hostkey=yes
version={{ analytics_version }} force=true
environment:
GIT_SSH: $analytics_git_ssh
GIT_SSH: "{{ analytics_git_ssh }}"
notify: restart the analytics service
notify: start the analytics service
tags:
......
......@@ -18,4 +18,4 @@ env DJANGO_SETTINGS_MODULE={{ analytics_django_settings }}
chdir {{ analytics_code_dir }}
setuid {{ analytics_web_user }}
exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv anserv.wsgi
exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv {{ ANALYTICS_GUNICORN_EXTRA }} anserv.wsgi
......@@ -10,7 +10,7 @@
when: role_exists | success
- name: create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory
file: path=roles/{{ role_name }}/{{ item }} state=directory
with_items:
- tasks
- meta
......
......@@ -6,7 +6,7 @@
- debug
- name: Dump lms auth|env file
template: src=../../edxapp/templates/lms.{{item}}.json.j2 dest=/tmp/lms.{{item}}.json mode=0600
template: src=../../edxapp/templates/lms.{{ item }}.json.j2 dest=/tmp/lms.{{ item }}.json mode=0600
with_items:
- env
- auth
......@@ -16,7 +16,7 @@
- debug
- name: Dump lms-preview auth|env file
template: src=../../edxapp/templates/lms-preview.{{item}}.json.j2 dest=/tmp/lms-preview.{{item}}.json mode=0600
template: src=../../edxapp/templates/lms-preview.{{ item }}.json.j2 dest=/tmp/lms-preview.{{ item }}.json mode=0600
with_items:
- env
- auth
......@@ -26,7 +26,7 @@
- debug
- name: Dump cms auth|env file
template: src=../../edxapp/templates/cms.{{item}}.json.j2 dest=/tmp/cms.{{item}}.json mode=0600
template: src=../../edxapp/templates/cms.{{ item }}.json.j2 dest=/tmp/cms.{{ item }}.json mode=0600
with_items:
- env
- auth
......@@ -44,7 +44,7 @@
- name: fetch remote files
# fetch is fail-safe for remote files that don't exist
# setting mode is not an option
fetch: src=/tmp/{{item}} dest=/tmp/{{ansible_hostname}}-{{item}} flat=True
fetch: src=/tmp/{{ item }} dest=/tmp/{{ ansible_hostname }}-{{item}} flat=True
with_items:
- ansible.all.json
- ansible.all.yml
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role antivirus
#
#
# vars are namespaced with the module name.
#
antivirus_role_name: antivirus
#
# OS packages
#
antivirus_debian_pkgs: [clamav]
antivirus_redhat_pkgs: []
antivirus_pip_pkgs: []
antivirus_app_dir: /edx/app/antivirus
antivirus_user: "antivirus"
ANTIVIRUS_BUCKETS: !!null
ANTIVIRUS_MAILTO: "{{ EDXAPP_TECH_SUPPORT_EMAIL }}"
ANTIVIRUS_MAILFROM: "{{ EDXAPP_DEFAULT_FROM_EMAIL }}"
ANTIVIRUS_AWS_KEY: ""
ANTIVIRUS_AWS_SECRET: ""
ANTIVIRUS_S3_AWS_KEY: "{{ ANTIVIRUS_AWS_KEY }}"
ANTIVIRUS_SES_AWS_KEY: "{{ ANTIVIRUS_AWS_KEY }}"
ANTIVIRUS_S3_AWS_SECRET: "{{ ANTIVIRUS_AWS_SECRET}}"
ANTIVIRUS_SES_AWS_SECRET: "{{ ANTIVIRUS_AWS_SECRET}}"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role antivirus
#
# Overview:
#
#
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role antivirus
#
dependencies:
- role: user
user_info: "{{ BASTION_USER_INFO }}"
- aws
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role antivirus
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: install antivirus system packages
apt: pkg={{ item }} install_recommends=yes state=present
with_items: antivirus_debian_pkgs
- name: create antivirus scanner user
user: >
name="{{ antivirus_user }}"
home="{{ antivirus_app_dir }}"
createhome=no
shell=/bin/false
- name: create antivirus app and data dirs
file: >
path="{{ item }}"
state=directory
owner="{{ antivirus_user }}"
group="{{ antivirus_user }}"
with_items:
- "{{ antivirus_app_dir }}"
- "{{ antivirus_app_dir }}/data"
- name: install antivirus s3 scanner script
template: >
src=s3_bucket_virus_scan.sh.j2
dest={{ antivirus_app_dir }}/s3_bucket_virus_scan.sh
mode=0555
owner={{ antivirus_user }}
group={{ antivirus_user }}
- name: install antivirus s3 scanner cronjob
cron: >
name="antivirus-{{ item }}"
job="{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup=yes
cron_file=antivirus-{{ item }}
user={{ antivirus_user }}
hour="*"
minute="0"
day="*"
with_items: ANTIVIRUS_BUCKETS
#! /bin/bash
# Scan the contents of an S3 bucket with ClamAV and send a mail report
# when anything suspicious is found.  This file is an Ansible/Jinja2
# template: the {{ ... }} placeholders are filled from the antivirus
# role variables at deploy time.
DEBUG="false"
BUCKETNAME="none"
MAILTO=""
MAILFROM=""
# Two separate credential pairs: one for reading the bucket (S3) and
# one for sending the notification mail (SES).
ANTIVIRUS_S3_AWS_KEY="{{ ANTIVIRUS_S3_AWS_KEY }}"
ANTIVIRUS_SES_AWS_KEY="{{ ANTIVIRUS_SES_AWS_KEY }}"
ANTIVIRUS_S3_AWS_SECRET="{{ ANTIVIRUS_S3_AWS_SECRET}}"
ANTIVIRUS_SES_AWS_SECRET="{{ ANTIVIRUS_SES_AWS_SECRET}}"
AWS_DEFAULT_REGION="{{ aws_region }}"
# Print usage information.
# NOTE(review): $VERSION is referenced below but never assigned in this
# script, so it expands to an empty string -- confirm that is intended.
function usage {
echo "$0 - $VERSION";
echo "Run ClamAV against the contents of an S3 Bucket.";
echo "Usage: $0 [options]";
echo "options:";
echo " -d Debug mode";
echo " -h Usage (this screen)";
echo " -b <bucket name>";
echo " -m <notify mail address>";
echo " -f <notify from address>";
echo " -k <AWS Key ID>";
echo " -s <AWS Secret Key>"
}
# Parse command-line options; -k/-s override BOTH the S3 and the SES
# credential pairs with a single key/secret.
while getopts "dhb:m:f:k:s:" optionName; do
case "$optionName" in
d)
DEBUG="true"
;;
h)
usage;
exit;
;;
[?])
usage;
exit;
;;
b)
BUCKETNAME=$OPTARG;
;;
m)
MAILTO=$OPTARG;
;;
f)
MAILFROM=$OPTARG;
;;
k)
AWS_ACCESS_KEY_ID=$OPTARG;
ANTIVIRUS_S3_AWS_KEY=$OPTARG;
ANTIVIRUS_SES_AWS_KEY=$OPTARG;
;;
s)
AWS_SECRET_ACCESS_KEY=$OPTARG;
ANTIVIRUS_S3_AWS_SECRET=$OPTARG;
ANTIVIRUS_SES_AWS_SECRET=$OPTARG;
;;
esac
done
cd {{ antivirus_app_dir }}
# Use the S3 credentials to mirror the bucket into the local data dir.
export AWS_ACCESS_KEY_ID=$ANTIVIRUS_S3_AWS_KEY
export AWS_SECRET_ACCESS_KEY=$ANTIVIRUS_S3_AWS_SECRET
export AWS_DEFAULT_REGION
mkdir -p data/$BUCKETNAME
aws s3 sync s3://$BUCKETNAME/ data/$BUCKETNAME
# clamscan -r: recurse into directories, -i: only print infected files.
# A non-zero exit status from the substituted command means an infected
# file was found (or the scan itself errored); $? below reflects it.
CLAMOUT=$(clamscan -ri data/$BUCKETNAME);
if [[ $? -ne 0 ]]; then
# Switch to the SES credentials and mail the scan report.
export AWS_ACCESS_KEY_ID=$ANTIVIRUS_SES_AWS_KEY
export AWS_SECRET_ACCESS_KEY=$ANTIVIRUS_SES_AWS_SECRET
aws ses send-email --to $MAILTO --from $MAILFROM --subject "Virus Scanner malicious file on $BUCKETNAME" --text "$CLAMOUT"
fi
WSGIPythonHome {{ edxapp_venv_dir }}
WSGIRestrictEmbedded On
<VirtualHost *:{{ apache_port }}>
<VirtualHost *:*>
ServerName https://{{ lms_env_config.SITE_NAME }}
ServerAlias *.{{ lms_env_config.SITE_NAME }}
UseCanonicalName On
......
......@@ -23,29 +23,34 @@ AWS_S3_LOGS: false
# This relies on your server being able to send mail
AWS_S3_LOGS_NOTIFY_EMAIL: dummy@example.com
AWS_S3_LOGS_FROM_EMAIL: dummy@example.com
# Separate buckets for tracking logs and everything else
# You should be overriding the environment and deployment vars
# Order of precedence is left to right for exclude and include options
AWS_S3_LOG_PATHS:
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/tracking"
path: "{{ COMMON_LOG_DIR }}/tracking/*"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/application"
path: "{{ COMMON_LOG_DIR }}/!(*tracking*)"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/system"
path: "/var/log/*"
#
# Vars are namespaced with the module name.
#
aws_role_name: aws
aws_data_dir: "{{ COMMON_DATA_DIR }}/aws"
aws_app_dir: "{{ COMMON_APP_DIR }}/aws"
aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
aws_dirs:
home:
path: "{{ COMMON_APP_DIR }}/{{ aws_role_name }}"
owner: "root"
group: "root"
mode: "0755"
logs:
path: "{{ COMMON_LOG_DIR }}/{{ aws_role_name }}"
owner: "syslog"
group: "syslog"
mode: "0700"
data:
path: "{{ COMMON_DATA_DIR }}/{{ aws_role_name }}"
owner: "root"
group: "root"
mode: "0700"
aws_s3_sync_script: "{{ aws_dirs.home.path }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_dirs.logs.path }}/s3-log-sync.log"
aws_region: "us-east-1"
# default path to the aws binary
s3cmd_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
aws_s3cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
aws_cmd: "/usr/local/bin/aws"
#
# OS packages
......
......@@ -21,26 +21,14 @@
#
#
- name: create data directories
- name: create all service directories
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0700
with_items:
- "{{ aws_data_dir }}"
- "{{ aws_log_dir }}"
- name: create app directory
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0755
with_items:
- "{{ aws_app_dir }}"
path="{{ item.value.path }}"
state="directory"
owner="{{ item.value.owner }}"
group="{{ item.value.group }}"
mode="{{ item.value.mode }}"
with_dict: aws_dirs
- name: install system packages
apt: >
......@@ -57,18 +45,18 @@
- name: get s3cmd
get_url: >
url={{ aws_s3cmd_url }}
dest={{ aws_data_dir }}/
dest={{ aws_dirs.data.path }}/
- name: untar s3cmd
shell: >
tar xf {{ aws_data_dir }}/{{ aws_s3cmd_version }}.tar.gz
creates={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
chdir={{ aws_app_dir }}
tar xf {{ aws_dirs.data.path }}/{{ aws_s3cmd_version }}.tar.gz
creates={{ aws_dirs.data.path }}/{{ aws_s3cmd_version }}/s3cmd
chdir={{ aws_dirs.home.path }}
- name: create symlink for s3cmd
file: >
src={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
dest={{ COMMON_BIN_DIR }}/s3cmd
src={{ aws_dirs.home.path }}/{{ aws_s3cmd_version }}/s3cmd
dest={{ aws_s3cmd }}
state=link
- name: create s3 log sync script
......@@ -84,7 +72,7 @@
dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }}
when: AWS_S3_LOGS
- name: run s3 log sync script on supervisor shutdown
- name: force logrotate on supervisor stop
template: >
src=etc/init/sync-on-stop.conf.j2
dest=/etc/init/sync-on-stop.conf
......@@ -99,4 +87,5 @@
user: root
minute: 0
job: "{{ aws_s3_sync_script }} > /dev/null 2>&1"
state: absent
when: AWS_S3_LOGS
start on stopped supervisor
description "sync s3 logs on supervisor shutdown"
description "sync tracking logs on supervisor shutdown"
script
/bin/bash {{ aws_s3_sync_script }}
/usr/sbin/logrotate -f /etc/logrotate.d/hourly/tracking.log
/usr/sbin/logrotate -f /etc/logrotate.d/hourly/edx-services
end script
......@@ -4,13 +4,23 @@
#
# This script can be called from logrotate
# to sync logs to s3
#
if (( $EUID != 0 )); then
echo "Please run as the root user"
exit 1
fi
exec > >(tee "{{ aws_s3_logfile }}")
#
# Ensure the log processors can read without
# running as root
if [ ! -f "{{ aws_s3_logfile }}" ]; then
sudo -u syslog touch "{{ aws_s3_logfile }}"
else
chown syslog.syslog "{{ aws_s3_logfile }}"
fi
exec > >(tee -a "{{ aws_s3_logfile }}")
exec 2>&1
# s3cmd sync requires a valid home
......@@ -31,10 +41,12 @@ usage() {
-v add verbosity (set -x)
-n echo what will be done
-h this
-d directory to sync
-b bucket path to sync to
EO
}
while getopts "vhn" opt; do
while getopts "vhnb:d:" opt; do
case $opt in
v)
set -x
......@@ -48,9 +60,21 @@ while getopts "vhn" opt; do
noop="echo Would have run: "
shift
;;
d)
directory=$OPTARG
;;
b)
bucket_path=$OPTARG
;;
esac
done
if [[ -z $bucket_path || -z $directory ]]; then
echo "ERROR: You must provide a directory and a bucket to sync!"
usage
exit 1
fi
# grab the first security group for the instance
# which will be used as a directory name in the s3
# bucket
......@@ -90,9 +114,7 @@ instance_id=$(ec2metadata --instance-id)
ip=$(ec2metadata --local-ipv4)
availability_zone=$(ec2metadata --availability-zone)
# region isn't available via the metadata service
region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
region=${availability_zone:0:${{ lb }}#availability_zone{{ rb }} - 1}
s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%}
$noop {{ s3cmd_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %}
$noop {{ aws_s3cmd }} --multipart-chunk-size-mb 5120 --disable-multipart sync $directory "s3://${bucket_path}/${sec_grp}/${instance_id}-${ip}/"
#!/bin/sh
/etc/browsermob-proxy/bin/browsermob-proxy
/etc/browsermob-proxy/bin/browsermob-proxy $*
......@@ -13,7 +13,7 @@ browser_deb_pkgs:
# which often causes spurious acceptance test failures.
browser_s3_deb_pkgs:
- { name: "google-chrome-stable_30.0.1599.114-1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_30.0.1599.114-1_amd64.deb" }
- { name: "firefox_25.0+build3-0ubuntu0.12.04.1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_25.0%2Bbuild3-0ubuntu0.12.04.1_amd64.deb" }
- { name: "firefox_28.0+build2-0ubuntu0.12.04.1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_28.0%2Bbuild2-0ubuntu0.12.04.1_amd64.deb" }
# Chrome and ChromeDriver
chromedriver_version: 2.6
......
......@@ -71,25 +71,25 @@ certs_env_config:
# CERTS_DATA is legacy, not used
CERT_DATA: {}
QUEUE_NAME: "certificates"
QUEUE_URL: $CERTS_QUEUE_URL
CERT_BUCKET: $CERTS_BUCKET
QUEUE_URL: "{{ CERTS_QUEUE_URL }}"
CERT_BUCKET: "{{ CERTS_BUCKET }}"
# gnupg signing key
CERT_KEY_ID: $CERTS_KEY_ID
CERT_KEY_ID: "{{ CERTS_KEY_ID }}"
LOGGING_ENV: ""
CERT_GPG_DIR: $certs_gpg_dir
CERT_URL: $CERTS_URL
CERT_DOWNLOAD_URL: $CERTS_DOWNLOAD_URL
CERT_WEB_ROOT: $CERTS_WEB_ROOT
COPY_TO_WEB_ROOT: $CERTS_COPY_TO_WEB_ROOT
S3_UPLOAD: $CERTS_S3_UPLOAD
CERT_VERIFY_URL: $CERTS_VERIFY_URL
TEMPLATE_DATA_DIR: $CERTS_TEMPLATE_DATA_DIR
CERT_GPG_DIR: "{{ certs_gpg_dir }}"
CERT_URL: "{{ CERTS_URL }}"
CERT_DOWNLOAD_URL: "{{ CERTS_DOWNLOAD_URL }}"
CERT_WEB_ROOT: "{{ CERTS_WEB_ROOT }}"
COPY_TO_WEB_ROOT: "{{ CERTS_COPY_TO_WEB_ROOT }}"
S3_UPLOAD: "{{ CERTS_S3_UPLOAD }}"
CERT_VERIFY_URL: "{{ CERTS_VERIFY_URL }}"
TEMPLATE_DATA_DIR: "{{ CERTS_TEMPLATE_DATA_DIR }}"
certs_auth_config:
QUEUE_USER: $CERTS_QUEUE_USER
QUEUE_PASS: $CERTS_QUEUE_PASS
QUEUE_AUTH_USER: $CERTS_XQUEUE_AUTH_USER
QUEUE_AUTH_PASS: $CERTS_XQUEUE_AUTH_PASS
CERT_KEY_ID: $CERTS_KEY_ID
CERT_AWS_ID: $CERTS_AWS_ID
CERT_AWS_KEY: $CERTS_AWS_KEY
QUEUE_USER: "{{ CERTS_QUEUE_USER }}"
QUEUE_PASS: "{{ CERTS_QUEUE_PASS }}"
QUEUE_AUTH_USER: "{{ CERTS_XQUEUE_AUTH_USER }}"
QUEUE_AUTH_PASS: "{{ CERTS_XQUEUE_AUTH_PASS }}"
CERT_KEY_ID: "{{ CERTS_KEY_ID }}"
CERT_AWS_ID: "{{ CERTS_AWS_ID }}"
CERT_AWS_KEY: "{{ CERTS_AWS_KEY }}"
......@@ -7,7 +7,12 @@
COMMON_ENABLE_BASIC_AUTH: False
COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx
# Turn on syncing logs on rotation for edx
# application and tracking logs, must also
# have the AWS role installed
COMMON_AWS_SYNC: False
COMMON_AWS_SYNC_BUCKET: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}"
COMMON_AWS_S3_SYNC_SCRIPT: "{{ COMMON_BIN_DIR }}/send-logs-to-s3"
COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
......@@ -24,6 +29,7 @@ COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
COMMON_UBUNTU_APT_KEYSERVER: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search="
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
......@@ -38,6 +44,7 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false
COMMON_MOTD_TEMPLATE: "motd.tail.j2"
COMMON_SSH_PASSWORD_AUTH: "no"
COMMON_SECURITY_UPDATES: no
# These are three maintenance accounts across all databases
# the read only user is granted select privs on all dbs
# the admin user is granted create user privs on all dbs
......@@ -69,6 +76,7 @@ common_debian_pkgs:
- mosh
- rsyslog
- screen
- tmux
- tree
- git
- unzip
......@@ -102,6 +110,10 @@ disable_edx_services: False
# so different start scripts are generated in dev mode.
devstack: False
# Some cluster apps need special settings when in vagrant
# due to eth0 always being the same IP address
vagrant_cluster: False
common_debian_variants:
- Ubuntu
- Debian
......
......@@ -2,5 +2,5 @@
dependencies:
- role: user
user_info: "{{ COMMON_USER_INFO }}"
- role: security
when: COMMON_SECURITY_UPDATES
---
# Rebuild the system CA certificate bundle so any certs placed in
# /usr/local/share/ca-certificates are picked up and trusted.
- name: Update CA Certificates
  shell: >
    /usr/sbin/update-ca-certificates
- name: Add user www-data
# This is the default user for nginx
user: >
......
......@@ -57,6 +57,6 @@ request subnet-mask, broadcast-address, time-offset, routers,
#}
interface "eth0" {
prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{search}}"{%- if not loop.last -%},{%- endif -%}
prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{ search }}"{%- if not loop.last -%},{%- endif -%}
{%- endfor -%};
}
......@@ -11,4 +11,9 @@
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
lastaction
{% if COMMON_AWS_SYNC -%}
{{ COMMON_AWS_S3_SYNC_SCRIPT }} -d "{{ COMMON_LOG_DIR }}/tracking/*" -b "{{ COMMON_AWS_SYNC_BUCKET }}/logs/tracking"
{% endif -%}
endscript
}
---
DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_agent_version: '1:5.0.4-516'
datadog_apt_key: "0x226AE980C7A7DA52"
datadog_debian_pkgs:
- apparmor-utils
- build-essential
......
......@@ -22,17 +22,22 @@
- datadog
- name: add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
apt_key: id=C7A7DA52 url={{ COMMON_UBUNTU_APT_KEYSERVER }}{{ datadog_apt_key }} state=present
tags:
- datadog
- name: remove unstable apt repository
apt_repository_1.8: repo='deb http://apt.datadoghq.com/ unstable main' validate_certs=no state=absent
tags:
- datadog
- name: install apt repository
apt_repository_1.8: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes validate_certs=no
apt_repository_1.8: repo='deb http://apt.datadoghq.com/ stable main' update_cache=yes validate_certs=no
tags:
- datadog
- name: install datadog agent
apt: pkg="datadog-agent"
apt: pkg="datadog-agent={{ datadog_agent_version }}"
tags:
- datadog
......
......@@ -30,6 +30,6 @@ demo_test_users:
password: edx
demo_edxapp_user: 'edxapp'
demo_edxapp_venv_bin: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/venvs/{{demo_edxapp_user}}/bin'
demo_edxapp_course_data_dir: '{{COMMON_DATA_DIR}}/{{demo_edxapp_user}}/data'
demo_edxapp_code_dir: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/edx-platform'
demo_edxapp_venv_bin: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/venvs/{{demo_edxapp_user}}/bin'
demo_edxapp_course_data_dir: '{{ COMMON_DATA_DIR }}/{{ demo_edxapp_user }}/data'
demo_edxapp_code_dir: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/edx-platform'
......@@ -11,7 +11,7 @@ DISCERN_MYSQL_PASSWORD: 'password'
DISCERN_MYSQL_HOST: 'localhost'
DISCERN_MYSQL_PORT: '3306'
DISCERN_LANG: "en_US.UTF-8"
DISCERN_GUNICORN_EXTRA: ""
discern_app_dir: "{{ COMMON_APP_DIR }}/discern"
discern_code_dir: "{{ discern_app_dir }}/discern"
......@@ -53,23 +53,23 @@ discern_env_config:
discern_auth_config:
AWS_ACCESS_KEY_ID: $DISCERN_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $DISCERN_SECRET_ACCESS_KEY
BROKER_URL: $DISCERN_BROKER_URL
AWS_ACCESS_KEY_ID: "{{ DISCERN_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "{{ DISCERN_SECRET_ACCESS_KEY }}"
BROKER_URL: "{{ DISCERN_BROKER_URL }}"
CACHES:
default:
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
LOCATION: $DISCERN_MEMCACHE
CELERY_RESULT_BACKEND: $DISCERN_RESULT_BACKEND
LOCATION: "{{ DISCERN_MEMCACHE }}"
CELERY_RESULT_BACKEND: "{{ DISCERN_RESULT_BACKEND }}"
DATABASES:
default:
ENGINE: django.db.backends.mysql
HOST: $DISCERN_MYSQL_HOST
NAME: $DISCERN_MYSQL_DB_NAME
PASSWORD: $DISCERN_MYSQL_PASSWORD
PORT: $DISCERN_MYSQL_PORT
USER: $DISCERN_MYSQL_USER
GOOGLE_ANALYTICS_PROPERTY_ID: $DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID
HOST: "{{ DISCERN_MYSQL_HOST }}"
NAME: "{{ DISCERN_MYSQL_DB_NAME }}"
PASSWORD: "{{ DISCERN_MYSQL_PASSWORD }}"
PORT: "{{ DISCERN_MYSQL_PORT }}"
USER: "{{ DISCERN_MYSQL_USER }}"
GOOGLE_ANALYTICS_PROPERTY_ID: "{{ DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID }}"
discern_debian_pkgs:
......
......@@ -51,7 +51,7 @@
#Numpy has to be a pre-requirement in order for scipy to build
- name : install python pre-requirements for discern and ease
pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
requirements={{ item }} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}"
notify:
......@@ -62,7 +62,7 @@
- name : install python requirements for discern and ease
pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
requirements={{ item }} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}"
notify:
......@@ -84,8 +84,8 @@
tar zxf {{ discern_nltk_tmp_file }}
rm -f {{ discern_nltk_tmp_file }}
touch {{ discern_nltk_download_url|basename }}-installed
creates={{ discern_data_dir }}/{{ discern_nltk_download_url|basename }}-installed
chdir={{ discern_data_dir }}
creates={{ discern_data_dir }}/{{ discern_nltk_download_url|basename }}-installed
chdir={{ discern_data_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
......@@ -95,8 +95,8 @@
#support virtualenvs as of this comment
- name: django syncdb migrate and collectstatic for discern
shell: >
{{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
{{ discern_venv_dir }}/bin/python {{ discern_code_dir }}/manage.py {{ item }} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
......@@ -107,8 +107,8 @@
#Have this separate from the other three because it doesn't take the noinput flag
- name: django update_index for discern
shell: >
{{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
{{ discern_venv_dir}}/bin/python {{ discern_code_dir }}/manage.py update_index --settings={{ discern_settings }} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
......
......@@ -9,9 +9,9 @@ stop on runlevel [!2345]
respawn
respawn limit 3 30
env DJANGO_SETTINGS_MODULE={{discern_settings}}
env DJANGO_SETTINGS_MODULE={{ discern_settings }}
chdir {{ discern_code_dir }}
setuid {{discern_user}}
setuid {{ discern_user }}
exec {{ discern_venv_dir }}/bin/python {{ discern_code_dir }}/manage.py celeryd --loglevel=info --settings={{ discern_settings }} --pythonpath={{ discern_code_dir }} -B --autoscale={{ ansible_processor_cores * 2 }},1
[program:discern]
{% if ansible_processor|length > 0 %}
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ ansible_processor|length * discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ ansible_processor|length * discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} {{ DISCERN_GUNICORN_EXTRA }} discern.wsgi
{% else %}
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} {{ DISCERN_GUNICORN_EXTRA }} discern.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ discern_code_dir }}
......
......@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code edx-analytics-data-api
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code, edx-analytics-data-api
<version> - can be a commit or tag
EO
......@@ -51,6 +51,7 @@ repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuratio
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'"
repos_to_cmd["insights"]="$edx_ansible_cmd insights.yml -e 'INSIGHTS_VERSION=$2'"
if [[ -z $1 || -z $2 ]]; then
......
......@@ -27,6 +27,9 @@ EDXAPP_LMS_BASE: ""
EDXAPP_PREVIEW_LMS_BASE: ""
EDXAPP_CMS_BASE: ""
EDXAPP_LMS_GUNICORN_EXTRA: ""
EDXAPP_CMS_GUNICORN_EXTRA: ""
# Set this to the maximum number
# of requests for gunicorn for the lms and cms
# gunicorn --max-requests <num>
......@@ -63,6 +66,9 @@ EDXAPP_MYSQL_REPLICA_PORT: "{{ EDXAPP_MYSQL_PORT }}"
EDXAPP_MYSQL_HOST: 'localhost'
EDXAPP_MYSQL_PORT: '3306'
EDXAPP_LMS_ENV: 'lms.envs.aws'
EDXAPP_CMS_ENV: 'cms.envs.aws'
EDXAPP_EMAIL_BACKEND: 'django.core.mail.backends.smtp.EmailBackend'
EDXAPP_EMAIL_HOST: 'localhost'
EDXAPP_EMAIL_PORT: 25
......@@ -105,7 +111,7 @@ EDXAPP_CAS_ATTRIBUTE_PACKAGE: ""
EDXAPP_ENABLE_AUTO_AUTH: false
# Settings for enabling and configuring third party authorization
EDXAPP_ENABLE_THIRD_PARTY_AUTH: false
EDXAPP_THIRD_PARTY_AUTH: "None"
EDXAPP_THIRD_PARTY_AUTH: {}
EDXAPP_MODULESTORE_MAPPINGS:
'preview\.': 'draft-preferred'
......@@ -114,12 +120,13 @@ EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true
CERTIFICATES_ENABLED: true
ENABLE_DISCUSSION_SERVICE: true
ENABLE_INSTRUCTOR_ANALYTICS: true
ENABLE_INSTRUCTOR_ANALYTICS: false
SUBDOMAIN_BRANDING: false
SUBDOMAIN_COURSE_LISTINGS: false
PREVIEW_LMS_BASE: "{{ EDXAPP_PREVIEW_LMS_BASE }}"
ENABLE_S3_GRADE_DOWNLOADS: true
USE_CUSTOM_THEME: $edxapp_use_custom_theme
ENABLE_MKTG_SITE: $EDXAPP_ENABLE_MKTG_SITE
AUTOMATIC_AUTH_FOR_TESTING: $EDXAPP_ENABLE_AUTO_AUTH
ENABLE_THIRD_PARTY_AUTH: $EDXAPP_ENABLE_THIRD_PARTY_AUTH
......@@ -147,6 +154,11 @@ EDXAPP_LMS_PREVIEW_NGINX_PORT: 18020
EDXAPP_CMS_NGINX_PORT: 18010
EDXAPP_CMS_SSL_NGINX_PORT: 48010
# NGINX Rate limiting related vars
EDXAPP_ENABLE_RATE_LIMITING: false
EDXAPP_COURSE_REQUEST_RATE: '5r/s'
EDXAPP_COURSE_REQUEST_BURST_RATE: 10
EDXAPP_LANG: 'en_US.UTF-8'
EDXAPP_LANGUAGE_CODE : 'en'
EDXAPP_TIME_ZONE: 'America/New_York'
......@@ -162,9 +174,16 @@ EDXAPP_UNIVERSITY_EMAIL: 'university@example.com'
EDXAPP_PRESS_EMAIL: 'press@example.com'
EDXAPP_PLATFORM_TWITTER_ACCOUNT: '@YourPlatformTwitterAccount'
EDXAPP_PLATFORM_FACEBOOK_ACCOUNT: 'http://www.facebook.com/YourPlatformFacebookAccount'
EDXAPP_PLATFORM_TWITTER_URL: "https://twitter.com/YourPlatformTwitterAccount"
EDXAPP_PLATFORM_MEETUP_URL: "http://www.meetup.com/YourMeetup"
EDXAPP_PLATFORM_LINKEDIN_URL: "http://www.linkedin.com/company/YourPlatform"
EDXAPP_PLATFORM_GOOGLE_PLUS_URL: "https://plus.google.com/YourGooglePlusAccount/"
EDXAPP_ENV_EXTRA: {}
EDXAPP_AUTH_EXTRA: {}
EDXAPP_LMS_AUTH_EXTRA: "{{ EDXAPP_AUTH_EXTRA }}"
EDXAPP_CMS_AUTH_EXTRA: "{{ EDXAPP_AUTH_EXTRA }}"
EDXAPP_ENABLE_MKTG_SITE: false
EDXAPP_MKTG_URL_LINK_MAP: {}
EDXAPP_MKTG_URLS: {}
# Setting this sets the url for static files
......@@ -241,6 +260,7 @@ EDXAPP_PEARSON_TEST_PASSWORD: ""
EDXAPP_SEGMENT_IO_LMS: false
EDXAPP_SEGMENT_IO_LMS_KEY: ""
EDXAPP_OPTIMIZELY_PROJECT_ID: "None"
EDXAPP_TRACKING_SEGMENTIO_WEBHOOK_SECRET: ""
# For the CMS
EDXAPP_SEGMENT_IO_KEY: ""
EDXAPP_SEGMENT_IO: false
......@@ -266,22 +286,22 @@ EDXAPP_XML_FROM_GIT: false
EDXAPP_XML_S3_BUCKET: !!null
EDXAPP_XML_S3_KEY: !!null
EDXAPP_NEWRELIC_LMS_APPNAME: "{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}-edxapp-lms"
EDXAPP_NEWRELIC_CMS_APPNAME: "{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}-edxapp-cms"
EDXAPP_NEWRELIC_LMS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-lms"
EDXAPP_NEWRELIC_CMS_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-edxapp-cms"
EDXAPP_AWS_STORAGE_BUCKET_NAME: 'edxuploads'
EDXAPP_ORA2_FILE_PREFIX: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}/ora2'
EDXAPP_ORA2_FILE_PREFIX: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/ora2'
EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME: '{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}'
EDXAPP_FILE_UPLOAD_STORAGE_PREFIX: 'submissions_attachments'
EDXAPP_CODE_JAIL_LIMITS:
# Limit the memory of the jailed process to something high but not
# infinite (128MiB in bytes)
VMEM: 134217728
# infinite (512MiB in bytes)
VMEM: 536870912
# Time in seconds that the jailed process has to run.
REALTIME: 1
REALTIME: 3
# Needs to be non-zero so that jailed code can use it as their temp directory.(1MiB in bytes)
FSIZE: 1048576
......@@ -295,6 +315,44 @@ EDXAPP_SUBDOMAIN_BRANDING: {}
EDXAPP_WORKERS: !!null
EDXAPP_ANALYTICS_DATA_TOKEN: ""
EDXAPP_ANALYTICS_DATA_URL: ""
# Dashboard URL, assumes that the insights role is installed locally
EDXAPP_ANALYTICS_DASHBOARD_URL: "http://localhost:18110/courses"
# Per-field visibility for the extra registration-form fields.  Each
# field here carries one of the values "required", "optional" or
# "hidden".
EDXAPP_REGISTRATION_EXTRA_FIELDS:
  level_of_education: "optional"
  gender: "optional"
  year_of_birth: "optional"
  mailing_address: "optional"
  goals: "optional"
  honor_code: "required"
  city: "hidden"
  country: "hidden"
# All-caps (i.e. intended-for-override) definition of the celery worker
# layout: which queues exist, which service variant (lms/cms) serves
# each, and the per-queue worker concurrency.  The internal
# edxapp_workers var defaults to this list.
EDXAPP_CELERY_WORKERS:
  - queue: low
    service_variant: cms
    concurrency: 3
  - queue: default
    service_variant: cms
    concurrency: 4
  - queue: high
    service_variant: cms
    concurrency: 1
  - queue: low
    service_variant: lms
    concurrency: 1
  - queue: default
    service_variant: lms
    concurrency: 3
  - queue: high
    service_variant: lms
    concurrency: 4
  - queue: high_mem
    service_variant: lms
    concurrency: 2
# Version prefix for the default cache.  Per the changelog at the top of
# this change set, bumping this invalidates the default memcache store
# (and therefore sessions), logging all users out -- a one-time penalty
# as long as the value is not changed again.
EDXAPP_DEFAULT_CACHE_VERSION: "1"
#-------- Everything below this line is internal to the role ------------
#Use YAML references (& and *) and hash merge <<: to factor out shared settings
......@@ -326,28 +384,7 @@ edxapp_git_ssh: "/tmp/edxapp_git_ssh.sh"
# TODO: This can be removed once VPC-122 is resolved
edxapp_legacy_course_data_dir: "{{ edxapp_app_dir }}/data"
edxapp_workers:
- queue: low
service_variant: cms
concurrency: 3
- queue: default
service_variant: cms
concurrency: 4
- queue: high
service_variant: cms
concurrency: 1
- queue: low
service_variant: lms
concurrency: 1
- queue: default
service_variant: lms
concurrency: 3
- queue: high
service_variant: lms
concurrency: 4
- queue: high_mem
service_variant: lms
concurrency: 2
edxapp_workers: "{{ EDXAPP_CELERY_WORKERS }}"
# setup for python codejail
edxapp_sandbox_venv_dir: '{{ edxapp_venvs_dir }}/edxapp-sandbox'
......@@ -384,30 +421,30 @@ edxapp_all_req_files:
# for lists and dictionaries
edxapp_environment:
LANG: $EDXAPP_LANG
NO_PREREQ_INSTALL: $EDXAPP_NO_PREREQ_INSTALL
LANG: "{{ EDXAPP_LANG }}"
NO_PREREQ_INSTALL: "{{ EDXAPP_NO_PREREQ_INSTALL }}"
SKIP_WS_MIGRATIONS: 1
RBENV_ROOT: $edxapp_rbenv_root
GEM_HOME: $edxapp_gem_root
GEM_PATH: $edxapp_gem_root
PATH: $edxapp_deploy_path
RBENV_ROOT: "{{ edxapp_rbenv_root }}"
GEM_HOME: "{{ edxapp_gem_root }}"
GEM_PATH: "{{ edxapp_gem_root }}"
PATH: "{{ edxapp_deploy_path }}"
edxapp_generic_auth_config: &edxapp_generic_auth
ANALYTICS_DATA_TOKEN: $EDXAPP_ANALYTICS_DATA_TOKEN
AWS_ACCESS_KEY_ID: $EDXAPP_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $EDXAPP_AWS_SECRET_ACCESS_KEY
SECRET_KEY: $EDXAPP_EDXAPP_SECRET_KEY
ANALYTICS_DATA_TOKEN: "{{ EDXAPP_ANALYTICS_DATA_TOKEN }}"
AWS_ACCESS_KEY_ID: "{{ EDXAPP_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "{{ EDXAPP_AWS_SECRET_ACCESS_KEY }}"
SECRET_KEY: "{{ EDXAPP_EDXAPP_SECRET_KEY }}"
XQUEUE_INTERFACE:
basic_auth: $EDXAPP_XQUEUE_BASIC_AUTH
django_auth: $EDXAPP_XQUEUE_DJANGO_AUTH
url: $EDXAPP_XQUEUE_URL
basic_auth: "{{ EDXAPP_XQUEUE_BASIC_AUTH }}"
django_auth: "{{ EDXAPP_XQUEUE_DJANGO_AUTH }}"
url: "{{ EDXAPP_XQUEUE_URL }}"
DOC_STORE_CONFIG: &edxapp_generic_default_docstore
db: $EDXAPP_MONGO_DB_NAME
host: $EDXAPP_MONGO_HOSTS
password: $EDXAPP_MONGO_PASSWORD
db: "{{ EDXAPP_MONGO_DB_NAME }}"
host: "{{ EDXAPP_MONGO_HOSTS }}"
password: "{{ EDXAPP_MONGO_PASSWORD }}"
port: $EDXAPP_MONGO_PORT
user: $EDXAPP_MONGO_USER
user: "{{ EDXAPP_MONGO_USER }}"
collection: 'modulestore'
CONTENTSTORE:
ENGINE: 'xmodule.contentstore.mongo.MongoContentStore'
......@@ -416,18 +453,18 @@ edxapp_generic_auth_config: &edxapp_generic_auth
# backward compatibility
#
OPTIONS:
db: $EDXAPP_MONGO_DB_NAME
host: $EDXAPP_MONGO_HOSTS
password: $EDXAPP_MONGO_PASSWORD
db: "{{ EDXAPP_MONGO_DB_NAME }}"
host: "{{ EDXAPP_MONGO_HOSTS }}"
password: "{{ EDXAPP_MONGO_PASSWORD }}"
port: $EDXAPP_MONGO_PORT
user: $EDXAPP_MONGO_USER
ADDITIONAL_OPTIONS: $EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS
user: "{{ EDXAPP_MONGO_USER }}"
ADDITIONAL_OPTIONS: "{{ EDXAPP_CONTENTSTORE_ADDITIONAL_OPTS }}"
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
MODULESTORE:
default:
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
OPTIONS:
mappings: $EDXAPP_XML_MAPPINGS
mappings: "{{ EDXAPP_XML_MAPPINGS }}"
stores:
- &edxapp_generic_draft_modulestore
NAME: 'draft'
......@@ -435,13 +472,13 @@ edxapp_generic_auth_config: &edxapp_generic_auth
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
OPTIONS:
default_class: 'xmodule.hidden_module.HiddenDescriptor'
fs_root: $edxapp_course_data_dir
fs_root: "{{ edxapp_course_data_dir }}"
render_template: 'edxmako.shortcuts.render_to_string'
- &edxapp_generic_xml_modulestore
NAME: 'xml'
ENGINE: 'xmodule.modulestore.xml.XMLModuleStore'
OPTIONS:
data_dir: $edxapp_course_data_dir
data_dir: "{{ edxapp_course_data_dir }}"
default_class: 'xmodule.hidden_module.HiddenDescriptor'
- &edxapp_generic_split_modulestore
NAME: 'split'
......@@ -449,86 +486,96 @@ edxapp_generic_auth_config: &edxapp_generic_auth
DOC_STORE_CONFIG: *edxapp_generic_default_docstore
OPTIONS:
default_class: 'xmodule.hidden_module.HiddenDescriptor'
fs_root: $edxapp_course_data_dir
fs_root: "{{ edxapp_course_data_dir }}"
render_template: 'edxmako.shortcuts.render_to_string'
DATABASES:
read_replica:
ENGINE: 'django.db.backends.mysql'
NAME: $EDXAPP_MYSQL_REPLICA_DB_NAME
USER: $EDXAPP_MYSQL_REPLICA_USER
PASSWORD: $EDXAPP_MYSQL_REPLICA_PASSWORD
HOST: $EDXAPP_MYSQL_REPLICA_HOST
PORT: $EDXAPP_MYSQL_REPLICA_PORT
NAME: "{{ EDXAPP_MYSQL_REPLICA_DB_NAME }}"
USER: "{{ EDXAPP_MYSQL_REPLICA_USER }}"
PASSWORD: "{{ EDXAPP_MYSQL_REPLICA_PASSWORD }}"
HOST: "{{ EDXAPP_MYSQL_REPLICA_HOST }}"
PORT: "{{ EDXAPP_MYSQL_REPLICA_PORT }}"
default:
ENGINE: 'django.db.backends.mysql'
NAME: $EDXAPP_MYSQL_DB_NAME
USER: $EDXAPP_MYSQL_USER
PASSWORD: $EDXAPP_MYSQL_PASSWORD
HOST: $EDXAPP_MYSQL_HOST
PORT: $EDXAPP_MYSQL_PORT
NAME: "{{ EDXAPP_MYSQL_DB_NAME }}"
USER: "{{ EDXAPP_MYSQL_USER }}"
PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}"
HOST: "{{ EDXAPP_MYSQL_HOST }}"
PORT: "{{ EDXAPP_MYSQL_PORT }}"
OPEN_ENDED_GRADING_INTERFACE:
url: $EDXAPP_OEE_URL
password: $EDXAPP_OEE_PASSWORD
url: "{{ EDXAPP_OEE_URL }}"
password: "{{ EDXAPP_OEE_PASSWORD }}"
peer_grading: 'peer_grading'
staff_grading: 'staff_grading'
grading_controller: 'grading_controller'
username: $EDXAPP_OEE_USER
ANALYTICS_API_KEY: $EDXAPP_ANALYTICS_API_KEY
EMAIL_HOST_USER: $EDXAPP_EMAIL_HOST_USER
EMAIL_HOST_PASSWORD: $EDXAPP_EMAIL_HOST_PASSWORD
ZENDESK_USER: $EDXAPP_ZENDESK_USER
ZENDESK_API_KEY: $EDXAPP_ZENDESK_API_KEY
CELERY_BROKER_USER: $EDXAPP_CELERY_USER
CELERY_BROKER_PASSWORD: $EDXAPP_CELERY_PASSWORD
GOOGLE_ANALYTICS_ACCOUNT: $EDXAPP_GOOGLE_ANALYTICS_ACCOUNT
THIRD_PARTY_AUTH: $EDXAPP_THIRD_PARTY_AUTH
username: "{{ EDXAPP_OEE_USER }}"
ANALYTICS_API_KEY: "{{ EDXAPP_ANALYTICS_API_KEY }}"
EMAIL_HOST_USER: "{{ EDXAPP_EMAIL_HOST_USER }}"
EMAIL_HOST_PASSWORD: "{{ EDXAPP_EMAIL_HOST_PASSWORD }}"
ZENDESK_USER: "{{ EDXAPP_ZENDESK_USER }}"
ZENDESK_API_KEY: "{{ EDXAPP_ZENDESK_API_KEY }}"
CELERY_BROKER_USER: "{{ EDXAPP_CELERY_USER }}"
CELERY_BROKER_PASSWORD: "{{ EDXAPP_CELERY_PASSWORD }}"
GOOGLE_ANALYTICS_ACCOUNT: "{{ EDXAPP_GOOGLE_ANALYTICS_ACCOUNT }}"
THIRD_PARTY_AUTH: "{{ EDXAPP_THIRD_PARTY_AUTH }}"
AWS_STORAGE_BUCKET_NAME: "{{ EDXAPP_AWS_STORAGE_BUCKET_NAME }}"
DJFS: $EDXAPP_DJFS
generic_cache_config: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key'
KEY_PREFIX: 'default'
LOCATION: "{{ EDXAPP_MEMCACHE }}"
generic_env_config: &edxapp_generic_env
ANALYTICS_DATA_URL: $EDXAPP_ANALYTICS_DATA_URL
CELERY_BROKER_VHOST: $EDXAPP_CELERY_BROKER_VHOST
PAYMENT_SUPPORT_EMAIL: $EDXAPP_PAYMENT_SUPPORT_EMAIL
ZENDESK_URL: $EDXAPP_ZENDESK_URL
COURSES_WITH_UNSAFE_CODE: $EDXAPP_COURSES_WITH_UNSAFE_CODE
OAUTH_OIDC_ISSUER: "https://{{ EDXAPP_LMS_BASE }}/oauth2"
XBLOCK_FS_STORAGE_BUCKET: "{{ EDXAPP_XBLOCK_FS_STORAGE_BUCKET }}"
XBLOCK_FS_STORAGE_PREFIX: "{{ EDXAPP_XBLOCK_FS_STORAGE_PREFIX }}"
ANALYTICS_DATA_URL: "{{ EDXAPP_ANALYTICS_DATA_URL }}"
ANALYTICS_DASHBOARD_URL: '{{ EDXAPP_ANALYTICS_DASHBOARD_URL }}'
CELERY_BROKER_VHOST: "{{ EDXAPP_CELERY_BROKER_VHOST }}"
PAYMENT_SUPPORT_EMAIL: "{{ EDXAPP_PAYMENT_SUPPORT_EMAIL }}"
ZENDESK_URL: "{{ EDXAPP_ZENDESK_URL }}"
COURSES_WITH_UNSAFE_CODE: "{{ EDXAPP_COURSES_WITH_UNSAFE_CODE }}"
BULK_EMAIL_EMAILS_PER_TASK: $EDXAPP_BULK_EMAIL_EMAILS_PER_TASK
MICROSITE_ROOT_DIR: $EDXAPP_MICROSITE_ROOT_DIR
MICROSITE_ROOT_DIR: "{{ EDXAPP_MICROSITE_ROOT_DIR }}"
MICROSITE_CONFIGURATION: $EDXAPP_MICROSITE_CONFIGURATION
GRADES_DOWNLOAD:
STORAGE_TYPE: $EDXAPP_GRADE_STORAGE_TYPE
BUCKET: $EDXAPP_GRADE_BUCKET
ROOT_PATH: $EDXAPP_GRADE_ROOT_PATH
STATIC_URL_BASE: $EDXAPP_STATIC_URL_BASE
STATIC_ROOT_BASE: $edxapp_staticfile_dir
LMS_BASE: $EDXAPP_LMS_BASE
CMS_BASE: $EDXAPP_CMS_BASE
BOOK_URL: $EDXAPP_BOOK_URL
PLATFORM_NAME: $EDXAPP_PLATFORM_NAME
STORAGE_TYPE: "{{ EDXAPP_GRADE_STORAGE_TYPE }}"
BUCKET: "{{ EDXAPP_GRADE_BUCKET }}"
ROOT_PATH: "{{ EDXAPP_GRADE_ROOT_PATH }}"
STATIC_URL_BASE: "{{ EDXAPP_STATIC_URL_BASE }}"
STATIC_ROOT_BASE: "{{ edxapp_staticfile_dir }}"
LMS_BASE: "{{ EDXAPP_LMS_BASE }}"
CMS_BASE: "{{ EDXAPP_CMS_BASE }}"
BOOK_URL: "{{ EDXAPP_BOOK_URL }}"
PLATFORM_NAME: "{{ EDXAPP_PLATFORM_NAME }}"
CERT_QUEUE: 'certificates'
LOCAL_LOGLEVEL: $EDXAPP_LOG_LEVEL
LOCAL_LOGLEVEL: "{{ EDXAPP_LOG_LEVEL }}"
# default email backend set to local SMTP
EMAIL_BACKEND: $EDXAPP_EMAIL_BACKEND
EMAIL_HOST: $EDXAPP_EMAIL_HOST
EMAIL_BACKEND: "{{ EDXAPP_EMAIL_BACKEND }}"
EMAIL_HOST: "{{ EDXAPP_EMAIL_HOST }}"
EMAIL_PORT: $EDXAPP_EMAIL_PORT
EMAIL_USE_TLS: $EDXAPP_EMAIL_USE_TLS
FEATURES: $EDXAPP_FEATURES
FEATURES: "{{ EDXAPP_FEATURES }}"
WIKI_ENABLED: true
SYSLOG_SERVER: $EDXAPP_SYSLOG_SERVER
SYSLOG_SERVER: "{{ EDXAPP_SYSLOG_SERVER }}"
LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/edx"
MEDIA_URL: $EDXAPP_MEDIA_URL
ANALYTICS_SERVER_URL: $EDXAPP_ANALYTICS_SERVER_URL
FEEDBACK_SUBMISSION_EMAIL: $EDXAPP_FEEDBACK_SUBMISSION_EMAIL
TIME_ZONE: $EDXAPP_TIME_ZONE
LANGUAGE_CODE : $EDXAPP_LANGUAGE_CODE
MKTG_URL_LINK_MAP: $EDXAPP_MKTG_URL_LINK_MAP
MKTG_URLS: $EDXAPP_MKTG_URLS
MEDIA_URL: "{{ EDXAPP_MEDIA_URL }}"
ANALYTICS_SERVER_URL: "{{ EDXAPP_ANALYTICS_SERVER_URL }}"
FEEDBACK_SUBMISSION_EMAIL: "{{ EDXAPP_FEEDBACK_SUBMISSION_EMAIL }}"
TIME_ZONE: "{{ EDXAPP_TIME_ZONE }}"
LANGUAGE_CODE: "{{ EDXAPP_LANGUAGE_CODE }}"
MKTG_URL_LINK_MAP: "{{ EDXAPP_MKTG_URL_LINK_MAP }}"
MKTG_URLS: "{{ EDXAPP_MKTG_URLS }}"
# repo root for courses
GITHUB_REPO_ROOT: $edxapp_course_data_dir
GITHUB_REPO_ROOT: "{{ edxapp_course_data_dir }}"
CACHES:
default: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_FUNCTION: 'util.memcache.safe_key'
default:
<<: *default_generic_cache
KEY_PREFIX: 'default'
LOCATION: $EDXAPP_MEMCACHE
VERSION: "{{ EDXAPP_DEFAULT_CACHE_VERSION }}"
general:
<<: *default_generic_cache
KEY_PREFIX: 'general'
......@@ -544,67 +591,72 @@ generic_env_config: &edxapp_generic_env
KEY_PREFIX: 'celery'
TIMEOUT: "7200"
CELERY_BROKER_TRANSPORT: 'amqp'
CELERY_BROKER_HOSTNAME: $EDXAPP_RABBIT_HOSTNAME
COMMENTS_SERVICE_URL: $EDXAPP_COMMENTS_SERVICE_URL
LOGGING_ENV: $EDXAPP_LOGGING_ENV
SESSION_COOKIE_DOMAIN: $EDXAPP_SESSION_COOKIE_DOMAIN
SESSION_COOKIE_NAME: $EDXAPP_SESSION_COOKIE_NAME
COMMENTS_SERVICE_KEY: $EDXAPP_COMMENTS_SERVICE_KEY
CELERY_BROKER_HOSTNAME: "{{ EDXAPP_RABBIT_HOSTNAME }}"
COMMENTS_SERVICE_URL: "{{ EDXAPP_COMMENTS_SERVICE_URL }}"
LOGGING_ENV: "{{ EDXAPP_LOGGING_ENV }}"
SESSION_COOKIE_DOMAIN: "{{ EDXAPP_SESSION_COOKIE_DOMAIN }}"
SESSION_COOKIE_NAME: "{{ EDXAPP_SESSION_COOKIE_NAME }}"
COMMENTS_SERVICE_KEY: "{{ EDXAPP_COMMENTS_SERVICE_KEY }}"
SEGMENT_IO_LMS: $EDXAPP_SEGMENT_IO_LMS
SEGMENT_IO: $EDXAPP_SEGMENT_IO
THEME_NAME: $edxapp_theme_name
TECH_SUPPORT_EMAIL: $EDXAPP_TECH_SUPPORT_EMAIL
CONTACT_EMAIL: $EDXAPP_CONTACT_EMAIL
BUGS_EMAIL: $EDXAPP_BUGS_EMAIL
DEFAULT_FROM_EMAIL: $EDXAPP_DEFAULT_FROM_EMAIL
DEFAULT_FEEDBACK_EMAIL: $EDXAPP_DEFAULT_FEEDBACK_EMAIL
SERVER_EMAIL: $EDXAPP_DEFAULT_SERVER_EMAIL
BULK_EMAIL_DEFAULT_FROM_EMAIL: $EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL
CAS_SERVER_URL: $EDXAPP_CAS_SERVER_URL
CAS_EXTRA_LOGIN_PARAMS: $EDXAPP_CAS_EXTRA_LOGIN_PARAMS
CAS_ATTRIBUTE_CALLBACK: $EDXAPP_CAS_ATTRIBUTE_CALLBACK
THEME_NAME: "{{ edxapp_theme_name }}"
TECH_SUPPORT_EMAIL: "{{ EDXAPP_TECH_SUPPORT_EMAIL }}"
CONTACT_EMAIL: "{{ EDXAPP_CONTACT_EMAIL }}"
BUGS_EMAIL: "{{ EDXAPP_BUGS_EMAIL }}"
DEFAULT_FROM_EMAIL: "{{ EDXAPP_DEFAULT_FROM_EMAIL }}"
DEFAULT_FEEDBACK_EMAIL: "{{ EDXAPP_DEFAULT_FEEDBACK_EMAIL }}"
SERVER_EMAIL: "{{ EDXAPP_DEFAULT_SERVER_EMAIL }}"
BULK_EMAIL_DEFAULT_FROM_EMAIL: "{{ EDXAPP_BULK_EMAIL_DEFAULT_FROM_EMAIL }}"
CAS_SERVER_URL: "{{ EDXAPP_CAS_SERVER_URL }}"
CAS_EXTRA_LOGIN_PARAMS: "{{ EDXAPP_CAS_EXTRA_LOGIN_PARAMS }}"
CAS_ATTRIBUTE_CALLBACK: "{{ EDXAPP_CAS_ATTRIBUTE_CALLBACK }}"
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS: "{{ EDXAPP_MODULESTORE_MAPPINGS }}"
UNIVERSITY_EMAIL: $EDXAPP_UNIVERSITY_EMAIL
PRESS_EMAIL: $EDXAPP_PRESS_EMAIL
PLATFORM_TWITTER_ACCOUNT: $EDXAPP_PLATFORM_TWITTER_ACCOUNT
PLATFORM_FACEBOOK_ACCOUNT: $EDXAPP_PLATFORM_FACEBOOK_ACCOUNT
ORA2_FILE_PREFIX: $EDXAPP_ORA2_FILE_PREFIX
FILE_UPLOAD_STORAGE_BUCKET_NAME: $EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME
FILE_UPLOAD_STORAGE_PREFIX: $EDXAPP_FILE_UPLOAD_STORAGE_PREFIX
VIRTUAL_UNIVERSITIES: $EDXAPP_VIRTUAL_UNIVERSITIES
SUBDOMAIN_BRANDING: $EDXAPP_SUBDOMAIN_BRANDING
UNIVERSITY_EMAIL: "{{ EDXAPP_UNIVERSITY_EMAIL }}"
PRESS_EMAIL: "{{ EDXAPP_PRESS_EMAIL }}"
PLATFORM_TWITTER_ACCOUNT: "{{ EDXAPP_PLATFORM_TWITTER_ACCOUNT }}"
PLATFORM_FACEBOOK_ACCOUNT: "{{ EDXAPP_PLATFORM_FACEBOOK_ACCOUNT }}"
PLATFORM_TWITTER_URL: "{{ EDXAPP_PLATFORM_TWITTER_URL }}"
PLATFORM_MEETUP_URL: "{{ EDXAPP_PLATFORM_MEETUP_URL }}"
PLATFORM_LINKEDIN_URL: "{{ EDXAPP_PLATFORM_LINKEDIN_URL }}"
PLATFORM_GOOGLE_PLUS_URL: "{{ EDXAPP_PLATFORM_GOOGLE_PLUS_URL }}"
ORA2_FILE_PREFIX: "{{ EDXAPP_ORA2_FILE_PREFIX }}"
FILE_UPLOAD_STORAGE_BUCKET_NAME: "{{ EDXAPP_FILE_UPLOAD_STORAGE_BUCKET_NAME }}"
FILE_UPLOAD_STORAGE_PREFIX: "{{ EDXAPP_FILE_UPLOAD_STORAGE_PREFIX }}"
VIRTUAL_UNIVERSITIES: "{{ EDXAPP_VIRTUAL_UNIVERSITIES }}"
SUBDOMAIN_BRANDING: "{{ EDXAPP_SUBDOMAIN_BRANDING }}"
REGISTRATION_EXTRA_FIELDS: "{{ EDXAPP_REGISTRATION_EXTRA_FIELDS }}"
lms_auth_config:
<<: *edxapp_generic_auth
PEARSON_TEST_PASSWORD: $EDXAPP_PEARSON_TEST_PASSWORD
SEGMENT_IO_LMS_KEY: $EDXAPP_SEGMENT_IO_LMS_KEY
OPTIMIZELY_PROJECT_ID: $EDXAPP_OPTIMIZELY_PROJECT_ID
EDX_API_KEY: $EDXAPP_EDX_API_KEY
VERIFY_STUDENT: $EDXAPP_VERIFY_STUDENT
GOOGLE_ANALYTICS_LINKEDIN: $EDXAPP_GOOGLE_ANALYTICS_LINKEDIN
CC_PROCESSOR_NAME: $EDXAPP_CC_PROCESSOR_NAME
CC_PROCESSOR: $EDXAPP_CC_PROCESSOR
PEARSON_TEST_PASSWORD: "{{ EDXAPP_PEARSON_TEST_PASSWORD }}"
SEGMENT_IO_LMS_KEY: "{{ EDXAPP_SEGMENT_IO_LMS_KEY }}"
OPTIMIZELY_PROJECT_ID: "{{ EDXAPP_OPTIMIZELY_PROJECT_ID }}"
EDX_API_KEY: "{{ EDXAPP_EDX_API_KEY }}"
VERIFY_STUDENT: "{{ EDXAPP_VERIFY_STUDENT }}"
GOOGLE_ANALYTICS_LINKEDIN: "{{ EDXAPP_GOOGLE_ANALYTICS_LINKEDIN }}"
CC_PROCESSOR_NAME: "{{ EDXAPP_CC_PROCESSOR_NAME }}"
CC_PROCESSOR: "{{ EDXAPP_CC_PROCESSOR }}"
TRACKING_SEGMENTIO_WEBHOOK_SECRET: "{{ EDXAPP_TRACKING_SEGMENTIO_WEBHOOK_SECRET }}"
lms_env_config:
<<: *edxapp_generic_env
PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY
SITE_NAME: $EDXAPP_LMS_SITE_NAME
VIDEO_CDN_URL: $EDXAPP_VIDEO_CDN_URLS
SITE_NAME: "{{ EDXAPP_LMS_SITE_NAME }}"
VIDEO_CDN_URL: "{{ EDXAPP_VIDEO_CDN_URLS }}"
CODE_JAIL:
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
python_bin: '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
limits: $EDXAPP_CODE_JAIL_LIMITS
limits: "{{ EDXAPP_CODE_JAIL_LIMITS }}"
user: '{{ edxapp_sandbox_user }}'
cms_auth_config:
<<: *edxapp_generic_auth
SEGMENT_IO_KEY: $EDXAPP_SEGMENT_IO_KEY
SEGMENT_IO_KEY: "{{ EDXAPP_SEGMENT_IO_KEY }}"
MODULESTORE:
default:
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
OPTIONS:
# See commented section below. LMS-11258
# mappings: $EDXAPP_XML_MAPPINGS
# mappings: "{{ EDXAPP_XML_MAPPINGS }}"
mappings: {}
stores:
- *edxapp_generic_draft_modulestore
......@@ -613,7 +665,7 @@ cms_auth_config:
- *edxapp_generic_split_modulestore
cms_env_config:
<<: *edxapp_generic_env
SITE_NAME: $EDXAPP_CMS_SITE_NAME
SITE_NAME: "{{ EDXAPP_CMS_SITE_NAME }}"
# install dir for the edx-platform repo
edxapp_code_dir: "{{ edxapp_app_dir }}/edx-platform"
......@@ -635,9 +687,6 @@ service_variants_enabled:
- lms
- cms
edxapp_lms_env: 'lms.envs.aws'
edxapp_cms_env: 'cms.envs.aws'
# Number of gunicorn worker processes to spawn, as a multiplier to the number of virtual cores
worker_core_mult:
lms: 4
......
......@@ -27,11 +27,11 @@
when: EDXAPP_USE_GIT_IDENTITY
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
- name: checkout edx-platform repo into {{ edxapp_code_dir }}
git: >
dest={{edxapp_code_dir}}
repo={{edx_platform_repo}}
version={{edx_platform_version}}
dest={{ edxapp_code_dir }}
repo={{ edx_platform_repo }}
version={{ edx_platform_version }}
accept_hostkey=yes
sudo_user: "{{ edxapp_user }}"
environment:
......@@ -42,7 +42,7 @@
- "restart edxapp_workers"
- name: git clean after checking out edx-platform
shell: cd {{edxapp_code_dir}} && git clean -xdf
shell: cd {{ edxapp_code_dir }} && git clean -xdf
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......@@ -50,9 +50,9 @@
- name: checkout theme
git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}}
repo={{edxapp_theme_source_repo}}
version={{edxapp_theme_version}}
dest={{ edxapp_app_dir }}/themes/{{ edxapp_theme_name }}
repo={{ edxapp_theme_source_repo }}
version={{ edxapp_theme_version }}
accept_hostkey=yes
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
......@@ -91,8 +91,8 @@
- name: gem install bundler
shell: >
gem install bundle
chdir={{ edxapp_code_dir }}
executable=/bin/bash
chdir={{ edxapp_code_dir }}
executable=/bin/bash
environment: "{{ edxapp_environment }}"
sudo_user: "{{ edxapp_user }}"
notify:
......@@ -102,8 +102,8 @@
- name: bundle install
shell: >
bundle install --binstubs
chdir={{ edxapp_code_dir }}
executable=/bin/bash
chdir={{ edxapp_code_dir }}
executable=/bin/bash
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
......@@ -144,8 +144,8 @@
# Install the python pre requirements into {{ edxapp_venv_dir }}
- name : install python pre-requirements
pip: >
requirements="{{pre_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
requirements="{{ pre_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
......@@ -173,8 +173,8 @@
# Install the python post requirements into {{ edxapp_venv_dir }}
- name : install python post-requirements
pip: >
requirements="{{post_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
requirements="{{ post_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
......@@ -187,8 +187,8 @@
# Install the python paver requirements into {{ edxapp_venv_dir }}
- name : install python paver-requirements
pip: >
requirements="{{paver_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
requirements="{{ paver_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
......@@ -257,7 +257,7 @@
- name: install CAS attribute module
pip: >
name="{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}"
virtualenv="{{edxapp_venv_dir}}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_user }}"
......@@ -294,8 +294,8 @@
- name: code sandbox | Install base sandbox requirements and create sandbox virtualenv
pip: >
requirements="{{sandbox_base_requirements}}"
virtualenv="{{edxapp_sandbox_venv_dir}}"
requirements="{{ sandbox_base_requirements }}"
virtualenv="{{ edxapp_sandbox_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_sandbox_user }}"
......
......@@ -26,6 +26,7 @@
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- "{{ edxapp_course_data_dir }}"
# This is a symlink that has to exist because
# we currently can't override the DATA_DIR var
......@@ -38,7 +39,6 @@
state=link
owner="{{ edxapp_user }}"
group="{{ common_web_group }}"
- name: create edxapp log dir
file: >
......@@ -70,6 +70,12 @@
- "restart edxapp"
- "restart edxapp_workers"
- name: set up edxapp .npmrc
template:
src=.npmrc.j2 dest={{ edxapp_app_dir }}/.npmrc
owner={{ edxapp_user }} group={{ common_web_group }}
mode=0600
- name: create log directories for service variants
notify:
- "restart edxapp"
......@@ -81,7 +87,7 @@
with_items: service_variants_enabled
# Set up the python sandbox execution environment
- include: python_sandbox_env.yml
- include: python_sandbox_env.yml tags=deploy
when: EDXAPP_PYTHON_SANDBOX
- include: deploy.yml tags=deploy
# Set the alternatives this way for blas and lapack to work correctly for the
# MITx 6.341x course.
# TODO: Switch to using alternatives module in 1.6
- name: code sandbox | Use libblas for 3gf
command: update-alternatives --set libblas.so.3gf /usr/lib/libblas/libblas.so.3gf
# TODO: Switch to using alternatives module in 1.6
- name: code sandbox | Use liblapac for 3gf
command: update-alternatives --set liblapack.so.3gf /usr/lib/lapack/liblapack.so.3gf
- name: code sandbox | Create edxapp sandbox user
user: name={{ edxapp_sandbox_user }} shell=/bin/false home={{ edxapp_sandbox_venv_dir }}
notify:
......
- name: make the course data updatable by the edxapp user
file:
path="{{ edxapp_course_data_dir }}"
state=directory
recurse=yes
owner="{{ edxapp_user }}"
group="{{ edxapp_user }}"
- name: clone the xml course repo
git: >
repo="{{ item.repo_url }}"
......
registry={{ COMMON_NPM_MIRROR_URL }}
{% if devstack %}
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find /tmp/codejail-*/tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} ;
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% else %}
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find /tmp/codejail-*/tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} ;
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% endif %}
{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% do cms_auth_config.update(EDXAPP_CMS_AUTH_EXTRA) %}
{% for key, value in cms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do cms_auth_config.update({key: None }) %}
......
......@@ -16,15 +16,15 @@ command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host
{% else -%}
{# This is for backwards compatibility, set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi
{% endif -%}
{% endif -%}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{ edxapp_cms_gunicorn_port }},ADDRESS={{ edxapp_cms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_CMS_ENV }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
......
......@@ -9,7 +9,7 @@
/tmp/codejail-*/** wrix,
#
# Whitelist particiclar shared objects from the system
# Whitelist particular shared objects from the system
# python installation
#
/usr/lib/python2.7/lib-dynload/_json.so mr,
......@@ -21,6 +21,22 @@
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
# Matplotlib needs a place for temp caches
{{ edxapp_sandbox_venv_dir }}/.config/ wrix,
{{ edxapp_sandbox_venv_dir }}/.cache/ wrix,
{{ edxapp_sandbox_venv_dir }}/.config/** wrix,
{{ edxapp_sandbox_venv_dir }}/.cache/** wrix,
# Matplotlib related libraries
/usr/lib/python2.7/lib-dynload/termios.so mr,
/usr/lib/python2.7/lib-dynload/parser.so mr,
# Matplotlib needs fonts to make graphs
/usr/share/fonts/ r,
/usr/share/fonts/** r,
/usr/local/share/fonts/ r,
/usr/local/share/fonts/** r,
#
# Allow access to selections from /proc
#
......
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% do lms_auth_config.update(EDXAPP_LMS_AUTH_EXTRA) %}
{% for key, value in lms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do lms_auth_config.update({key: None }) %}
......
......@@ -17,15 +17,15 @@ command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host
{% else -%}
{# This is for backwards compatibility, set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_LMS_GUNICORN_EXTRA }} lms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_LMS_GUNICORN_EXTRA }} lms.wsgi
{% endif %}
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{ edxapp_lms_gunicorn_port }},ADDRESS={{ edxapp_lms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_LMS_ENV }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
......
......@@ -17,7 +17,7 @@
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
password={{ EDXAPP_MYSQL_PASSWORD }}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
priv='{{ EDXAPP_MYSQL_DB_NAME }}.*:ALL'
when: EDXAPP_MYSQL_USER is defined
- name: create a database for edxapp
......@@ -31,7 +31,7 @@
mysql_user: >
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
priv='{{ XQUEUE_MYSQL_DB_NAME }}.*:ALL'
when: XQUEUE_MYSQL_USER is defined
- name: create a database for xqueue
......@@ -45,7 +45,7 @@
mysql_user: >
name={{ ORA_MYSQL_USER }}
password={{ ORA_MYSQL_PASSWORD }}
priv='{{ORA_MYSQL_DB_NAME}}.*:ALL'
priv='{{ ORA_MYSQL_DB_NAME }}.*:ALL'
when: ORA_MYSQL_USER is defined
- name: create a database for ora
......
......@@ -3,11 +3,11 @@
# Path to directory where to store index data allocated for this node.
#
path.data: {{elasticsearch_data_dir}}
path.data: {{ elasticsearch_data_dir }}
# Path to log files:
#
path.logs: {{elasticsearch_log_dir}}
path.logs: {{ elasticsearch_log_dir }}
# ElasticSearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
......@@ -43,3 +43,8 @@ script.disable_dynamic: true
discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}']
{% endif -%}
{% if vagrant_cluster|bool %}
network:
host: {{ ansible_ssh_host }}
{% endif %}
......@@ -24,4 +24,4 @@ flower_deploy_path: "{{ flower_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/b
flower_broker: "amqp://{{ FLOWER_BROKER_USERNAME }}:{{ FLOWER_BROKER_PASSWORD }}@{{ FLOWER_BROKER_HOST }}:{{ FLOWER_BROKER_PORT }}"
flower_environment:
PATH: $flower_deploy_path
PATH: "{{ flower_deploy_path }}"
......@@ -18,7 +18,7 @@ FORUM_MONGO_HOSTS:
FORUM_MONGO_TAGS: !!null
FORUM_MONGO_PORT: "27017"
FORUM_MONGO_DATABASE: "cs_comments_service"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{host}}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?tags={{ FORUM_MONGO_TAGS }}{%- endif -%}"
FORUM_MONGO_URL: "mongodb://{{ FORUM_MONGO_USER }}:{{ FORUM_MONGO_PASSWORD }}@{%- for host in FORUM_MONGO_HOSTS -%}{{ host }}:{{ FORUM_MONGO_PORT }}{%- if not loop.last -%},{%- endif -%}{%- endfor -%}/{{ FORUM_MONGO_DATABASE }}{%- if FORUM_MONGO_TAGS -%}?tags={{ FORUM_MONGO_TAGS }}{%- endif -%}"
FORUM_SINATRA_ENV: "development"
FORUM_RACK_ENV: "development"
FORUM_NGINX_PORT: "18080"
......@@ -28,8 +28,8 @@ FORUM_ELASTICSEARCH_PORT: "9200"
FORUM_ELASTICSEARCH_URL: "http://{{ FORUM_ELASTICSEARCH_HOST }}:{{ FORUM_ELASTICSEARCH_PORT }}"
# This needs to be a string, set to 'false' to disable
FORUM_NEW_RELIC_ENABLE: 'true'
FORUM_NEW_RELIC_LICENSE_KEY: "new-relic-license-key"
FORUM_NEW_RELIC_ENABLE: '{{ COMMON_ENABLE_NEWRELIC }}'
FORUM_NEW_RELIC_LICENSE_KEY: '{{ NEWRELIC_LICENSE_KEY | default("") }}'
FORUM_NEW_RELIC_APP_NAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-forum"
FORUM_WORKER_PROCESSES: "4"
......@@ -72,4 +72,3 @@ forum_version: "master"
#
forum_services:
- {service: "elasticsearch", host: "{{ FORUM_ELASTICSEARCH_HOST }}", port: "{{ FORUM_ELASTICSEARCH_PORT }}"}
......@@ -10,7 +10,7 @@ env PID=/var/tmp/comments_service.pid
chdir {{ forum_code_dir }}
script
. {{forum_app_dir}}/forum_env
{{forum_app_dir}}/.rbenv/shims/ruby app.rb
. {{ forum_app_dir }}/forum_env
{{ forum_app_dir }}/.rbenv/shims/ruby app.rb
end script
......@@ -49,11 +49,30 @@ haproxy_default_config: |
# desired applications
haproxy_applications:
- |
listen rabbitmq 127.0.0.1:5672
listen rabbitmq 127.0.0.1:35672
mode tcp
balance roundrobin
option tcplog
option tcpka
server rabbit01 172.23.128.10:5672 check inter 5000 rise 2 fall 3
server rabbit02 172.23.129.10:5672 backup check inter 5000 rise 2 fall 3
server rabbit03 172.23.130.10:5672 backup check inter 5000 rise 2 fall 3
server rabbit01 192.168.33.100:5672 check inter 5000 rise 2 fall 3
server rabbit02 192.168.33.110:5672 check inter 5000 rise 2 fall 3
server rabbit03 192.168.33.120:5672 check inter 5000 rise 2 fall 3
listen mariadb 127.0.0.1:13306
mode tcp
balance roundrobin
option tcplog
option tcpka
option mysql-check user haproxy
server galera1 192.168.33.100:3306 check weight 1
server galera2 192.168.33.110:3306 check weight 1
server galera3 192.168.33.120:3306 check weight 1
listen elasticsearch 127.0.0.1:19200
mode tcp
balance roundrobin
option tcplog
option tcpka
server galera1 192.168.33.100:9200 check weight 1
server galera2 192.168.33.110:9200 check weight 1
server galera3 192.168.33.120:9200 check weight 1
......@@ -18,3 +18,6 @@
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
dependencies:
- common
# this config needs haproxy-1.1.28 or haproxy-1.2.1
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
log /dev/log local0 info
log /dev/log local0 notice
#log loghost local0 info
maxconn 4096
#chroot /usr/share/haproxy
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Defaults for role insights
#
INSIGHTS_GIT_IDENTITY: !!null
INSIGHTS_MEMCACHE: [ 'localhost:11211' ]
INSIGHTS_FEEDBACK_EMAIL: 'dashboard@example.com'
INSIGHTS_MKTG_BASE: 'http://example.com'
INSIGHTS_PRIVACY_POLICY_URL: '{{ INSIGHTS_MKTG_BASE }}/privacy-policy'
INSIGHTS_TERMS_OF_SERVICE_URL: '{{ INSIGHTS_MKTG_BASE }}/terms-service'
INSIGHTS_SUPPORT_URL: ''
INSIGHTS_OAUTH2_SECRET: 'secret'
INSIGHTS_OATH2_KEY: 'key'
INSIGHTS_OAUTH2_URL_ROOT: 'url_root'
INSIGHTS_SECRET_KEY: 'YOUR_SECRET_KEY_HERE'
INSIGHTS_OAUTH2_KEY: 'YOUR_OAUTH2_KEY'
# This will not work on single instance sandboxes
INSIGHTS_DOC_BASE: 'http://localhost/en/latest'
INSIGHTS_LMS_BASE: 'http://localhost:18000'
ANALYTICS_API_ENDPOINT: 'http://localhost:18010'
INSIGHTS_DATA_API_AUTH_TOKEN: 'YOUR_DATA_API_AUTH_TOKEN'
INSIGHTS_PLATFORM_NAME: 'edX'
INSIGHTS_APPLICATION_NAME: 'Insights'
INSIGHTS_SEGMENT_IO_KEY: 'YOUR_KEY'
# should match the timezone of your map reduce pipeline
INSIGHTS_TIME_ZONE: 'UTC'
INSIGHTS_LANGUAGE_CODE: 'en-us'
# email config
INSIGHTS_EMAIL_HOST: 'smtp.example.com'
INSIGHTS_EMAIL_HOST_PASSWORD: "mail_password"
INSIGHTS_EMAIL_HOST_USER: "mail_user"
INSIGHTS_EMAIL_PORT: 587
INSIGHTS_ENABLE_AUTO_AUTH: false
INSIGHTS_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'dashboard'
USER: 'rosencrantz'
PASSWORD: 'secret'
HOST: 'localhost'
PORT: '3306'
#
# This block of config is dropped into /edx/etc/insights.yml
# and is read in by analytics_dashboard/settings/production.py
INSIGHTS_CONFIG:
SUPPORT_URL: '{{ INSIGHTS_SUPPORT_URL }}'
DOCUMENTATION_LOAD_ERROR_URL: '{{ INSIGHTS_DOC_BASE }}/Reference.html#error-conditions'
SEGMENT_IO_KEY: '{{ INSIGHTS_SEGMENT_IO_KEY }}'
FEEDBACK_EMAIL: '{{ INSIGHTS_FEEDBACK_EMAIL }}'
PRIVACY_POLICY_URL: '{{ INSIGHTS_PRIVACY_POLICY_URL }}'
TERMS_OF_SERVICE_URL: '{{ INSIGHTS_TERMS_OF_SERVICE_URL }}'
HELP_URL: '{{ INSIGHTS_DOC_BASE }}'
SECRET_KEY: '{{ INSIGHTS_SECRET_KEY }}'
DATA_API_URL: '{{ ANALYTICS_API_ENDPOINT }}'
DATA_API_AUTH_TOKEN: '{{ INSIGHTS_DATA_API_AUTH_TOKEN }}'
SOCIAL_AUTH_REDIRECT_IS_HTTPS: true
SOCIAL_AUTH_EDX_OIDC_KEY: '{{ INSIGHTS_OAUTH2_KEY }}'
SOCIAL_AUTH_EDX_OIDC_SECRET: '{{ INSIGHTS_OAUTH2_SECRET }}'
SOCIAL_AUTH_EDX_OIDC_URL_ROOT: '{{ INSIGHTS_OAUTH2_URL_ROOT }}'
# This value should be the same as SOCIAL_AUTH_EDX_OIDC_SECRET
SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY: '{{ INSIGHTS_OAUTH2_SECRET }}'
ENABLE_AUTO_AUTH: $INSIGHTS_ENABLE_AUTO_AUTH
PLATFORM_NAME: '{{ INSIGHTS_PLATFORM_NAME }}'
APPLICATION_NAME: '{{ INSIGHTS_APPLICATION_NAME }}'
CACHES:
default: &default_generic_cache
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
KEY_PREFIX: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-insights'
LOCATION: "{{ INSIGHTS_MEMCACHE }}"
TIME_ZONE: '{{ INSIGHTS_TIME_ZONE }}'
LANGUAGE_CODE: '{{ INSIGHTS_LANGUAGE_CODE }}'
# email config
EMAIL_HOST: '{{ INSIGHTS_EMAIL_HOST }}'
EMAIL_HOST_PASSWORD: '{{ INSIGHTS_EMAIL_HOST_PASSWORD }}'
EMAIL_HOST_USER: '{{ INSIGHTS_EMAIL_HOST_USER }}'
EMAIL_PORT: $INSIGHTS_EMAIL_PORT
# static file config
STATICFILES_DIRS: ["{{ insights_python_path }}/static"]
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ insights_service_name }}/staticfiles"
# db config
DATABASE_OPTIONS:
connect_timeout: 10
DATABASES: "{{ INSIGHTS_DATABASES }}"
INSIGHTS_VERSION: "master"
INSIGHTS_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-analytics-api"
INSIGHTS_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
INSIGHTS_NGINX_PORT: "18110"
INSIGHTS_GUNICORN_WORKERS: "2"
INSIGHTS_GUNICORN_EXTRA: ""
#
# vars are namespaced with the module name.
#
insights_environment:
DJANGO_SETTINGS_MODULE: "analytics_dashboard.settings.production"
ANALYTICS_DASHBOARD_CFG: "{{ COMMON_CFG_DIR }}/{{ insights_service_name }}.yaml"
insights_role_name: "insights"
insights_service_name: "{{ insights_role_name }}"
insights_user: "{{ insights_role_name }}"
insights_app_dir: "{{ COMMON_APP_DIR }}/{{ insights_service_name }}"
insights_home: "{{ COMMON_APP_DIR }}/{{ insights_service_name }}"
insights_venv_base: "{{ insights_home }}/venvs"
insights_venv_dir: "{{ insights_venv_base }}/{{ insights_service_name }}"
insights_venv_bin: "{{ insights_venv_dir }}/bin"
insights_code_dir: "{{ insights_app_dir }}/edx-analytics-dashboard"
insights_python_path: "{{ insights_code_dir }}/analytics_dashboard"
insights_conf_dir: "{{ insights_home }}"
insights_log_dir: "{{ COMMON_LOG_DIR }}/{{ insights_service_name }}"
insights_nodeenv_dir: "{{ insights_home }}/nodeenvs/{{ insights_service_name }}"
insights_nodeenv_bin: "{{ insights_nodeenv_dir }}/bin"
insights_node_modules_dir: "{{ insights_code_dir }}/node_modules"
insights_node_bin: "{{ insights_node_modules_dir }}/.bin"
insights_gunicorn_host: "127.0.0.1"
insights_gunicorn_port: "8110"
insights_gunicorn_timeout: "300"
insights_wsgi: "analytics_dashboard.wsgi:application"
insights_django_settings: "analytics_dashboard.settings.production"
insights_source_repo: "git@{{ COMMON_GIT_MIRROR }}:/edx/edx-analytics-dashboard"
insights_git_ssh_opts: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ insights_git_identity_file }}"
insights_git_identity_file: "{{ insights_home }}/git-identity"
insights_manage: "{{ insights_code_dir }}/analytics_dashboard/manage.py"
insights_requirements_base: "{{ insights_code_dir }}/requirements"
insights_requirements:
- production.txt
- optional.txt
#
# OS packages
#
insights_debian_pkgs:
- 'libmysqlclient-dev'
- 'build-essential'
insights_redhat_pkgs:
- 'community-mysql-devel'
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role insights
#
# Overview:
#
#
- name: "restart insights"
supervisorctl_local: >
name={{ insights_service_name }}
supervisorctl_path={{ supervisor_ctl }}
config={{ supervisor_cfg }}
state=restarted
when: not disable_edx_services
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role insights
#
dependencies:
- role: edx_service
edx_role_name: "{{ insights_role_name }}"
edx_service_name: "{{ insights_service_name }}"
- supervisor
---
# Deploy tasks for the Insights (analytics dashboard) role.
#
# Sequence: install a read-only deploy key, check out the code, install
# python/node/bower dependencies, migrate the database, build static assets,
# then register the app with supervisor and expose venv tools in COMMON_BIN_DIR.
# Most tasks notify "restart insights" so the app is restarted once at the end.

# Temporary deploy key; removed again at the end of this file.
- name: install read-only ssh key
  copy: >
    content="{{ INSIGHTS_GIT_IDENTITY }}" dest={{ insights_git_identity_file }}
    owner={{ insights_user }} group={{ insights_user }} mode=0600

- name: setup the insights env file
  template: >
    src="edx/app/insights/insights_env.j2"
    dest="{{ insights_app_dir }}/insights_env"
    owner={{ insights_user }}
    group={{ insights_user }}
    mode=0644

- name: checkout code
  git: >
    dest={{ insights_code_dir }} repo={{ insights_source_repo }} version={{ INSIGHTS_VERSION }}
    accept_hostkey=yes
    ssh_opts="{{ insights_git_ssh_opts }}"
  register: insights_code_checkout
  notify: restart insights
  sudo_user: "{{ insights_user }}"

- name: write out app config file
  template: >
    src=edx/app/insights/insights.yaml.j2
    dest={{ COMMON_CFG_DIR }}/{{ insights_service_name }}.yaml
    mode=0644 owner={{ insights_user }} group={{ insights_user }}
  notify: restart insights

# "--exists-action w" wipes and re-installs a requirement whose source has
# changed in place rather than failing.
- name: install application requirements
  pip: >
    requirements="{{ insights_requirements_base }}/{{ item }}"
    virtualenv="{{ insights_venv_dir }}" state=present
    extra_args="--exists-action w"
  sudo_user: "{{ insights_user }}"
  notify: restart insights
  with_items: insights_requirements

# node environment lives inside the python virtualenv via nodeenv.
- name: create nodeenv
  shell: >
    creates={{ insights_nodeenv_dir }}
    {{ insights_venv_bin }}/nodeenv {{ insights_nodeenv_dir }}
  sudo_user: "{{ insights_user }}"

- name: install node dependencies
  npm: executable={{ insights_nodeenv_bin }}/npm path={{ insights_code_dir }} production=yes
  sudo_user: "{{ insights_user }}"

- name: install bower dependencies
  shell: >
    chdir={{ insights_code_dir }}
    . {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/bower install --production --config.interactive=false
  sudo_user: "{{ insights_user }}"

# Only runs when the play is invoked with migrate_db=yes.
- name: syncdb and migrate
  shell: >
    chdir={{ insights_code_dir }}
    DB_MIGRATION_USER={{ COMMON_MYSQL_MIGRATE_USER }}
    DB_MIGRATION_PASS={{ COMMON_MYSQL_MIGRATE_PASS }}
    {{ insights_venv_bin }}/python {{ insights_manage }} syncdb --migrate --noinput
  sudo_user: "{{ insights_user }}"
  environment: "{{ insights_environment }}"
  when: migrate_db is defined and migrate_db|lower == "yes"

- name: run r.js optimizer
  shell: >
    chdir={{ insights_code_dir }}
    . {{ insights_nodeenv_bin }}/activate && {{ insights_node_bin }}/r.js -o build.js
  sudo_user: "{{ insights_user }}"

- name: run collectstatic
  shell: >
    chdir={{ insights_code_dir }}
    {{ insights_venv_bin }}/python {{ insights_manage }} {{ item }}
  sudo_user: "{{ insights_user }}"
  environment: "{{ insights_environment }}"
  with_items:
    - "collectstatic --noinput"
    - "compress"

- name: write out the supervisior wrapper
  template: >
    src=edx/app/insights/insights.sh.j2
    dest={{ insights_app_dir }}/{{ insights_service_name }}.sh
    mode=0650 owner={{ supervisor_user }} group={{ common_web_user }}
  notify: restart insights

- name: write supervisord config
  template: >
    src=edx/app/supervisor/conf.d.available/insights.conf.j2
    dest="{{ supervisor_available_dir }}/{{ insights_service_name }}.conf"
    owner={{ supervisor_user }} group={{ common_web_user }} mode=0644
  notify: restart insights

- name: enable supervisor script
  file: >
    src={{ supervisor_available_dir }}/{{ insights_service_name }}.conf
    dest={{ supervisor_cfg_dir }}/{{ insights_service_name }}.conf
    state=link
    force=yes
  notify: restart insights
  when: not disable_edx_services

- name: update supervisor configuration
  shell: "{{ supervisor_ctl }} -c {{ supervisor_cfg }} update"
  when: not disable_edx_services

# Expose the venv's python/pip/django-admin with a role-suffixed name, e.g.
# /edx/bin/python.insights, so operators can run them without activating.
- name: create symlinks from the venv bin dir
  file: >
    src="{{ insights_venv_bin }}/{{ item }}"
    dest="{{ COMMON_BIN_DIR }}/{{ item.split('.')[0] }}.{{ insights_role_name }}"
    state=link
  with_items:
    - python
    - pip
    - django-admin.py

- name: create manage.py symlink
  file: >
    src="{{ insights_manage }}"
    dest="{{ COMMON_BIN_DIR }}/manage.{{ insights_role_name }}"
    state=link

# Clean up the deploy key installed by the first task.
- name: remove read-only ssh key for the content repo
  file: path={{ insights_git_identity_file }} state=absent

- include: tag_ec2.yml tags=deploy
  when: COMMON_TAG_EC2_INSTANCE
---
#
# edX Configuration
#
# github:     https://github.com/edx/configuration
# wiki:       https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license:    https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role insights
#
# Overview: validates required secrets, then delegates all real work to
# deploy.yml (tagged "deploy" so it can be re-run on code updates).
#
# Dependencies: edx_service, supervisor (see meta/main.yml)
#
#
# Example play:
#
#
# The git checkout in deploy.yml needs a deploy key; fail fast if the
# operator did not supply one.
- fail: msg="You must provide a private key for the Insights repo"
  when: not INSIGHTS_GIT_IDENTITY

- include: deploy.yml tags=deploy
---
# Tag the current EC2 instance with the repo and short SHA that was just
# deployed, so running instances can be audited for version.
- name: get instance information
  action: ec2_facts

- name: tag instance
  ec2_tag: resource={{ ansible_ec2_instance_id }} region={{ ansible_ec2_placement_region }}
  args:
    tags:
      # e.g. "version:insights" -> "git@github.com:... abc1234"
      "version:insights" : "{{ insights_source_repo }} {{ insights_code_checkout.after |truncate(7,True,'')}}"
  when: insights_code_checkout.after is defined
#!/usr/bin/env bash

# {{ ansible_managed }}

# Supervisor wrapper: launches Insights under gunicorn, optionally wrapped by
# the New Relic agent.  The two previously separate `if COMMON_ENABLE_NEWRELIC_APP`
# blocks (one picking the executable, one exporting the env vars) are merged
# into a single conditional.
{% if COMMON_ENABLE_NEWRELIC_APP %}
{% set executable = insights_venv_bin + '/newrelic-admin run-program ' + insights_venv_bin + '/gunicorn' %}
export NEW_RELIC_APP_NAME="{{ INSIGHTS_NEWRELIC_APPNAME }}"
export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
{% else %}
{% set executable = insights_venv_bin + '/gunicorn' %}
{% endif -%}

source {{ insights_app_dir }}/insights_env

{{ executable }} --pythonpath={{ insights_python_path }} -b {{ insights_gunicorn_host }}:{{ insights_gunicorn_port }} -w {{ INSIGHTS_GUNICORN_WORKERS }} --timeout={{ insights_gunicorn_timeout }} {{ INSIGHTS_GUNICORN_EXTRA }} {{ insights_wsgi }}
---
# {{ ansible_managed }}
# Application config rendered from the INSIGHTS_CONFIG dict.
{{ INSIGHTS_CONFIG | to_nice_yaml }}
# {{ ansible_managed }}
{#- Emit an `export` line for every non-empty value in insights_environment. -#}
{% for name,value in insights_environment.items() -%}
{%- if value -%}
export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
# {{ ansible_managed }}

[program:{{ insights_service_name }}]

command={{ insights_app_dir }}/insights.sh
user={{ common_web_user }}
directory={{ insights_code_dir }}
# Bug fix: the format expression must be %(program_name)s.  The previous
# "%(program_name)-stdout.log" parsed as "%(program_name)-s" + "tdout.log",
# producing a mangled log filename.
stdout_logfile={{ supervisor_log_dir }}/%(program_name)s-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)s-stderr.log
killasgroup=true
stopasgroup=true
......@@ -119,7 +119,7 @@ jenkins_admin_plugins:
- { name: "jquery", version: "1.7.2-1" }
- { name: "dashboard-view", version: "2.9.4" }
- { name: "build-pipeline-plugin", version: "1.4.3" }
- { name: "s3", version: "0.5" }
- { name: "s3", version: "0.6" }
- { name: "tmpcleaner", version: "1.1" }
- { name: "jobConfigHistory", version: "2.8" }
- { name: "build-timeout", version: "1.14" }
......
......@@ -9,12 +9,12 @@
#
##
# Role includes for role jenkins_admin
#
#
# Example:
#
# dependencies:
# - {
# role: my_role
# role: my_role
# my_role_var0: "foo"
# my_role_var1: "bar"
# }
......@@ -22,7 +22,7 @@ dependencies:
- common
- aws
- role: jenkins_master
jenkins_plugins: $jenkins_admin_plugins
jenkins_plugins: "{{ jenkins_admin_plugins }}"
- role: supervisor
supervisor_app_dir: "{{ jenkins_supervisor_app_dir }}"
supervisor_data_dir: "{{ jenkins_supervisor_data_dir }}"
......
......@@ -25,6 +25,7 @@
group="{{ jenkins_group }}"
mode="755"
sudo_user: "{{ jenkins_user }}"
notify: restart nat monitor
- name: create a supervisor config
template:
......
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
......
{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
......
......@@ -46,7 +46,7 @@ rm -rf $BUILD_ID
<profileName>{{ JENKINS_ADMIN_S3_PROFILE.name }}</profileName>
<entries>
<hudson.plugins.s3.Entry>
<bucket>edx-jenkins-backups/{{JENKINS_ADMIN_NAME}}</bucket>
<bucket>edx-jenkins-backups/{{ JENKINS_ADMIN_NAME }}</bucket>
<sourceFile>${BUILD_ID}.tar.gz</sourceFile>
<storageClass>STANDARD</storageClass>
<selectedRegion>US_EAST_1</selectedRegion>
......
......@@ -12,6 +12,7 @@ jenkins_plugins:
- { name: "build-name-setter", version: "1.3" }
- { name: "build-pipeline-plugin", version: "1.4" }
- { name: "build-timeout", version: "1.11" }
- { name: "cloudbees-folder", version: "4.6.1" }
- { name: "cobertura", version: "1.9.2" }
- { name: "copyartifact", version: "1.28" }
- { name: "copy-to-slave", version: "1.4.3" }
......@@ -35,7 +36,7 @@ jenkins_plugins:
- { name: "postbuild-task", version: "1.8" }
- { name: "PrioritySorter", version: "2.8" }
- { name: "sauce-ondemand", version: "1.61" }
- { name: "s3", version: "0.5" }
- { name: "s3", version: "0.6" }
- { name: "ssh-agent", version: "1.3" }
- { name: "ssh-credentials", version: "1.5.1" }
- { name: "ssh-slaves", version: "1.4" }
......
......@@ -58,7 +58,7 @@
# Using this instead of the user module because the user module
# fails if the directory exists.
- name: set home directory for jenkins user
shell: usermod -d {{jenkins_home}} {{jenkins_user}}
shell: usermod -d {{ jenkins_home }} {{ jenkins_user }}
- name: make plugins directory
file:
......
---
- name: Download packer
get_url: url={{ packer_url }} dest=/var/tmp/packer.zip
shell: "curl -L {{ packer_url }} -o /var/tmp/packer.zip"
- name: Unzip packer
unarchive: src=/var/tmp/packer.zip dest=/usr/local/bin copy=no
......@@ -21,11 +21,11 @@ function (Settings) {
//elasticsearch: "http://"+window.location.hostname+":9200",
{% if NGINX_ENABLE_SSL %}
elasticsearch: "https://{{ KIBANA_SERVER_NAME }}/e",
elasticsearch: "https://{{ KIBANA_SERVER_NAME }}:{{ KIBANA_SSL_NGINX_PORT }}/e",
{% else %}
elasticsearch: "http://{{ KIBANA_SERVER_NAME }}/e",
elasticsearch: "http://{{ KIBANA_SERVER_NAME }}:{{ KIBANA_NGINX_PORT }}/e",
{% endif %}
......
......@@ -21,7 +21,7 @@
module: ec2
state: 'absent'
region: "{{ region }}"
instance_ids: ${tag_lookup.instance_ids}
instance_ids: "{{tag_lookup.instance_ids}}"
when: terminate_instance == true and tag_lookup.instance_ids|length == 1
- name: deregister instance from an an elb if it was in one
......@@ -45,7 +45,7 @@
assign_public_ip: yes
wait: true
region: "{{ region }}"
instance_tags: "{{instance_tags}}"
instance_tags: "{{ instance_tags }}"
volumes:
- device_name: /dev/sda1
volume_size: "{{ root_ebs_size }}"
......
......@@ -18,15 +18,15 @@
- name: create ora application config
copy:
src={{secure_dir}}/files/{{COMMON_ENVIRONMENT}}/legacy_ora/ora.env.json
dest={{ora_app_dir}}/env.json
src={{ secure_dir }}/files/{{ COMMON_ENVIRONMENT }}/legacy_ora/ora.env.json
dest={{ ora_app_dir }}/env.json
sudo_user: "{{ ora_user }}"
register: env_state
- name: create ora auth file
copy:
src={{secure_dir}}/files/{{COMMON_ENVIRONMENT}}/legacy_ora/ora.auth.json
dest={{ora_app_dir}}/auth.json
src={{ secure_dir }}/files/{{ COMMON_ENVIRONMENT }}/legacy_ora/ora.auth.json
dest={{ ora_app_dir }}/auth.json
sudo_user: "{{ ora_user }}"
register: auth_state
......
......@@ -32,7 +32,7 @@
get_url: url={{ logstash_url }} dest={{ logstash_app_dir }}/share/{{ logstash_file }}
- name: ensure symlink with no version exists at {{ logstash_app_dir }}/share/logstash.jar
file: src={{ logstash_app_dir }}/share/${logstash_file} dest={{ logstash_app_dir }}/share/logstash.jar state=link
file: src={{ logstash_app_dir }}/share/{{ logstash_file }} dest={{ logstash_app_dir }}/share/logstash.jar state=link
- name: start logstash
action: service name=logstash state=started enabled=yes
......
---
#
# edX Configuration
#
# github:     https://github.com/edx/configuration
# wiki:       https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license:    https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role mariadb
#

MARIADB_APT_KEY_ID: '0xcbcb082a1bb943db'

# Note: version is determined by repo
MARIADB_REPO: "deb http://mirrors.syringanetworks.net/mariadb/repo/10.0/ubuntu precise main"

MARIADB_CREATE_DBS: yes
# When yes, installs galera packages and runs cluster.yml bootstrap tasks.
MARIADB_CLUSTERED: no
MARIADB_CLUSTER_USER_ADMIN: "mariadb_clu_root"
MARIADB_CLUSTER_PASSWORD_ADMIN: "password"
# Host pattern used in GRANTs below ('%' = any host).
MARIADB_HOST_PRIV: '%'

# Passwordless monitor user created on each host listed below so haproxy
# health checks don't trip max_connect_errors.
MARIADB_HAPROXY_USER: 'haproxy'
MARIADB_HAPROXY_HOSTS:
  - '192.168.33.100'
  - '192.168.33.110'
  - '192.168.33.120'

MARIADB_LISTEN_ALL: false

MARIADB_DATABASES:
  - "{{ EDXAPP_MYSQL_DB_NAME|default('edxapp') }}"
  - "{{ XQUEUE_MYSQL_DB_NAME|default('xqueue') }}"
  - "{{ ORA_MYSQL_DB_NAME|default('ora') }}"

# Only created when ANALYTICS_API_CONFIG is defined (see tasks/main.yml).
MARIADB_ANALYTICS_DATABASES:
  - "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME']|default('analytics-api') }}"
  - "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME']|default('reports') }}"

MARIADB_USERS:
  - name: "{{ EDXAPP_MYSQL_USER|default('edxapp001') }}"
    pass: "{{ EDXAPP_MYSQL_PASSWORD|default('password') }}"
    priv: "{{ EDXAPP_MYSQL_DB_NAME|default('edxapp') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ XQUEUE_MYSQL_USER|default('xqueue001') }}"
    pass: "{{ XQUEUE_MYSQL_PASSWORD|default('password') }}"
    priv: "{{ XQUEUE_MYSQL_DB_NAME|default('xqueue') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ ORA_MYSQL_USER|default('ora001') }}"
    pass: "{{ ORA_MYSQL_PASSWORD|default('password') }}"
    priv: "{{ ORA_MYSQL_DB_NAME|default('ora') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  # The migrate user appears once per database because grants are appended
  # per-entry by the mysql_user task.
  - name: "{{ COMMON_MYSQL_MIGRATE_USER|default('migrate') }}"
    pass: "{{ COMMON_MYSQL_MIGRATE_PASSWORD|default('password') }}"
    priv: "{{ EDXAPP_MYSQL_DB_NAME|default('edxapp') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ COMMON_MYSQL_MIGRATE_USER|default('migrate') }}"
    pass: "{{ COMMON_MYSQL_MIGRATE_PASSWORD|default('password') }}"
    priv: "{{ XQUEUE_MYSQL_DB_NAME|default('xqueue') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ COMMON_MYSQL_MIGRATE_USER|default('migrate') }}"
    pass: "{{ COMMON_MYSQL_MIGRATE_PASSWORD|default('password') }}"
    priv: "{{ ORA_MYSQL_DB_NAME|default('ora') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ COMMON_MYSQL_READ_ONLY_USER|default('read_only') }}"
    pass: "{{ COMMON_MYSQL_READ_ONLY_PASS|default('password') }}"
    priv: "*.*:SELECT"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ COMMON_MYSQL_ADMIN_USER|default('admin') }}"
    pass: "{{ COMMON_MYSQL_ADMIN_PASS|default('password') }}"
    priv: "*.*:CREATE USER"
    host: "{{ MARIADB_HOST_PRIV }}"

MARIADB_ANALYTICS_USERS:
  - name: "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['USER']|default('api001') }}"
    pass: "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['PASSWORD']|default('password') }}"
    priv: "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME'] }}.*:ALL/reports.*:SELECT"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['USER']|default('reports001') }}"
    pass: "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['PASSWORD']|default('password') }}"
    priv: "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME'] }}.*:SELECT"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ COMMON_MYSQL_MIGRATE_USER|default('migrate') }}"
    pass: "{{ COMMON_MYSQL_MIGRATE_PASSWORD|default('password') }}"
    priv: "{{ ANALYTICS_API_CONFIG['DATABASES']['default']['NAME']|default('analytics-api') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"
  - name: "{{ COMMON_MYSQL_MIGRATE_USER|default('migrate') }}"
    pass: "{{ COMMON_MYSQL_MIGRATE_PASSWORD|default('password') }}"
    priv: "{{ ANALYTICS_API_CONFIG['DATABASES']['reports']['NAME']|default('reports') }}.*:ALL"
    host: "{{ MARIADB_HOST_PRIV }}"

#
# OS packages
#

mariadb_debian_pkgs:
  - python-software-properties
  - python-mysqldb

mariadb_redhat_pkgs: []

# NOTE(review): intentionally empty here — appears unused; verify against tasks.
mariadb_apt_repository:

mariadb_solo_packages:
  - mariadb-server

mariadb_cluster_packages:
  - mariadb-galera-server
  - galera
---
#
# edX Configuration
#
# github:     https://github.com/edx/configuration
# wiki:       https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license:    https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role mariadb
#
# Example:
#
# dependencies:
#   - {
#     role: my_role
#     my_role_var0: "foo"
#     my_role_var1: "bar"
#   }
dependencies:
  - common
# Galera cluster bootstrap for MariaDB.
#
# A stamp file (/etc/mysql/ansible_cluster_started) marks a host as already
# bootstrapped; the stop/bootstrap/restore sequence below only runs on first
# configuration.  The first host in sorted hostvars order acts as the primary.
- name: copy galera cluster config
  template: >
    src="etc/mysql/conf.d/galera.cnf.j2"
    dest="/etc/mysql/conf.d/galera.cnf"
    owner="root"
    group="root"
    mode=0600

- name: check if we have already bootstrapped the cluster
  stat: path=/etc/mysql/ansible_cluster_started
  register: mariadb_bootstrap

- name: stop mysql for cluster bootstrap
  service: name=mysql state=stopped
  when: not mariadb_bootstrap.stat.exists

# An empty gcomm:// address tells the primary to form a new cluster rather
# than join an existing one.
- name: setup bootstrap on primary
  lineinfile: >
    dest="/etc/mysql/conf.d/galera.cnf"
    regexp="^wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}$"
    line="wsrep_cluster_address=gcomm://"
  when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists

# debian.cnf holds the debian-sys-maint credentials; all cluster members must
# share the primary's copy for init-script start/stop to work.
- name: fetch debian.cnf file so start-stop will work properly
  fetch: >
    src=/etc/mysql/debian.cnf
    dest=/tmp/debian.cnf
    fail_on_missing=yes
    flat=yes
  when: ansible_hostname == hostvars[hostvars.keys()[0]].ansible_hostname and not mariadb_bootstrap.stat.exists
  register: mariadb_new_debian_cnf

- name: copy fetched file to other cluster members
  copy: src=/tmp/debian.cnf dest=/etc/mysql/debian.cnf
  when: mariadb_new_debian_cnf is defined

- name: start everything
  service: name=mysql state=started
  when: not mariadb_bootstrap.stat.exists

# Put the full member list back now that the cluster has formed.
- name: reset galera cluster config since we are bootstrapped
  template: >
    src="etc/mysql/conf.d/galera.cnf.j2"
    dest="/etc/mysql/conf.d/galera.cnf"
    owner="root"
    group="root"
    mode=0600
  when: not mariadb_bootstrap.stat.exists

- name: touch bootstrap file to confirm we are fully up
  file: path="/etc/mysql/ansible_cluster_started" state=touch

# This is needed for mysql-check in haproxy or other mysql monitor
# scripts to prevent haproxy checks exceeding `max_connect_errors`.
- name: create haproxy monitor user
  command: >
    mysql -e "INSERT INTO mysql.user (Host,User) values ('{{ item }}','{{ MARIADB_HAPROXY_USER }}'); FLUSH PRIVILEGES;"
  with_items: MARIADB_HAPROXY_HOSTS
---
#
# edX Configuration
#
# github:     https://github.com/edx/configuration
# wiki:       https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license:    https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role mariadb
#
# Overview: installs MariaDB from the upstream apt repo (solo or galera
# cluster), then creates databases and users.  Analytics databases/users are
# only created when ANALYTICS_API_CONFIG is defined.
#
# Dependencies: common
#
#
# Example play:
#
#
- name: Install pre-req debian packages
  apt: name={{ item }} state=present
  with_items: mariadb_debian_pkgs

# Fix: this task was mis-named "Add mongo key" (copy-paste from the mongo
# role); it installs the MariaDB repository signing key.
- name: add the mariadb apt key
  apt_key: url="{{ COMMON_UBUNTU_APT_KEYSERVER }}{{ MARIADB_APT_KEY_ID }}"

- name: add the mariadb repo to the sources list
  apt_repository: >
    repo='{{ MARIADB_REPO }}'
    state=present

- name: install mariadb solo packages
  apt: name={{ item }} update_cache=yes
  with_items: mariadb_solo_packages
  when: not MARIADB_CLUSTERED|bool

- name: install mariadb cluster packages
  apt: name={{ item }} update_cache=yes
  with_items: mariadb_cluster_packages
  when: MARIADB_CLUSTERED|bool

# Allow remote connections when listening on all interfaces or clustered.
- name: remove bind-address
  lineinfile: >
    dest=/etc/mysql/my.cnf
    regexp="^bind-address\s+=\s+127\.0\.0\.1$"
    state=absent
  when: MARIADB_LISTEN_ALL|bool or MARIADB_CLUSTERED|bool

- include: cluster.yml
  when: MARIADB_CLUSTERED|bool

- name: start everything
  service: name=mysql state=started

- name: create all databases
  mysql_db: >
    db={{ item }}
    state=present
    encoding=utf8
  with_items: MARIADB_DATABASES
  when: MARIADB_CREATE_DBS|bool

- name: create all analytics dbs
  mysql_db: >
    db={{ item }}
    state=present
    encoding=utf8
  with_items: MARIADB_ANALYTICS_DATABASES
  when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined

# append_privs lets the same user appear multiple times in the list with
# different per-database grants (see the migrate user in defaults).
- name: create all users/privs
  mysql_user: >
    name="{{ item.name }}"
    password="{{ item.pass }}"
    priv="{{ item.priv }}"
    host="{{ item.host }}"
    append_privs=yes
  with_items: MARIADB_USERS
  when: MARIADB_CREATE_DBS|bool

- name: create all analytics users/privs
  mysql_user: >
    name="{{ item.name }}"
    password="{{ item.pass }}"
    priv="{{ item.priv }}"
    host="{{ item.host }}"
    append_privs=yes
  with_items: MARIADB_ANALYTICS_USERS
  when: MARIADB_CREATE_DBS|bool and ANALYTICS_API_CONFIG is defined
{#- Galera configuration: the cluster address is simply every host in the
    play, sorted.  The previous version accumulated hostvars.keys() into a
    list via a `{% do %}` loop, which required the jinja2 "do" extension and
    produced exactly the same value as the direct expression used below. -#}
[mysqld]
binlog_format=ROW
innodb_autoinc_lock_mode=2
innodb_doublewrite=1
query_cache_size=0
wsrep_provider=/usr/lib/galera/libgalera_smm.so
wsrep_cluster_address=gcomm://{{ hostvars.keys()|sort|join(',') }}?pc.wait_prim=no
wsrep_sst_auth={{ MARIADB_CLUSTER_USER_ADMIN }}:{{ MARIADB_CLUSTER_PASSWORD_ADMIN }}
{% if vagrant_cluster|bool %}
wsrep_node_address={{ ansible_ssh_host }}
{% endif %}
......@@ -61,6 +61,8 @@
- "ProccessQuienscenceVoterCelery"
- "ProccessQuienscenceVoterGunicorn"
- "TrackingLogVoter"
- "ZippedTrackingLogVoter"
- "RolledTrackingLogVoter"
# Optional auth for git
- name: create ssh script for git (not authenticated)
......
RolledTrackingLogVoter:
config:
tracking_directory: '{{ COMMON_LOG_DIR }}/tracking'
\ No newline at end of file
......@@ -2,5 +2,5 @@ TrackingLogVoter:
config:
aws_profile: !!null
local_directory: '{{ COMMON_LOG_DIR }}/tracking'
s3_bucket: 'edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
s3_bucket: '{{ COMMON_AWS_SYNC_BUCKET }}'
bucket_path_prefix: 'logs/tracking'
ZippedTrackingLogVoter:
config:
tracking_directory: '{{ COMMON_LOG_DIR }}/tracking'
\ No newline at end of file
mongo_logappend: true
mongo_version: 2.4.7
mongo_version: 2.6.4
mongo_port: "27017"
mongo_extra_conf: ''
mongo_key_file: '/etc/mongodb_key'
......@@ -31,8 +31,8 @@ mongo_logpath: "{{ mongo_log_dir }}/mongodb.log"
mongo_dbpath: "{{ mongo_data_dir }}/mongodb"
# Have to use this conditional instead of ignore errors
# because the mongo_user module fails and doesn't ginore errors.
mongo_create_users: !!null
# because the mongo_user module fails and doesn't ignore errors.
mongo_create_users: true
# If the system is running out of an Amazon Web Services
# cloudformation stack, this group name can used to pull out
......@@ -42,3 +42,21 @@ mongo_aws_stack_name: "tag_aws_cloudformation_stack-name_"
# In environments that do not require durability (devstack / Jenkins)
# you can disable the journal to reduce disk usage
mongo_enable_journal: True
# We can do regular backups of MongoDB to S3.
MONGO_S3_BACKUP: false
# backup cron time:
MONGO_S3_BACKUP_HOUR: "*/12"
MONGO_S3_BACKUP_DAY: "*"
# override with a secondary node that will perform backups
MONGO_S3_BACKUP_NODE: "undefined"
# back up data into a specific S3 bucket
MONGO_S3_BACKUP_BUCKET: "undefined"
# temporary directory mongodump will use to store data
MONGO_S3_BACKUP_TEMPDIR: "{{ mongo_data_dir }}"
MONGO_S3_NOTIFY_EMAIL: "dummy@example.com"
mongo_s3_logfile: "{{ COMMON_LOG_DIR }}/mongo/s3-mongo-backup.log"
MONGO_S3_S3CMD_CONFIG: "{{ COMMON_DATA_DIR }}/mongo-s3-backup.s3cfg"
MONGO_S3_BACKUP_AWS_ACCESS_KEY: !!null
MONGO_S3_BACKUP_AWS_SECRET_KEY: !!null
---
- name: restart mongo
service: name=mongodb state=restarted
service: name=mongod state=restarted
---
- name: check to see that MongoDB 2.4 isn't installed
stat: path=/etc/init.d/mongodb
register: mongodb_needs_upgrade
- name: verify 2.4 not installed
fail: msg="MongoDB 2.4 is currently installed. If on a stand alone host (devstack), apt-get remove mongodb-10gen and re-run ansible. if on a cluster, read http://docs.mongodb.org/manual/release-notes/2.6-upgrade/#upgrade-considerations and upgrade to 2.6."
when: mongodb_needs_upgrade.stat.exists
- name: install python pymongo for mongo_user ansible module
pip: >
name=pymongo state=present
......@@ -8,7 +17,7 @@
- name: add the mongodb signing key
apt_key: >
id=7F0CEB10
url={{MONGODB_APT_KEY}}
url={{ MONGODB_APT_KEY }}
state=present
- name: add the mongodb repo to the sources list
......@@ -19,7 +28,7 @@
- name: install mongo server and recommends
apt: >
pkg=mongodb-10gen={{ mongo_version }}
pkg=mongodb-org={{ mongo_version }}
state=present install_recommends=yes
force=yes update_cache=yes
......@@ -33,8 +42,8 @@
- "{{ mongo_dbpath }}"
- "{{ mongo_log_dir }}"
- name: stop mongo service
service: name=mongodb state=stopped
- name: stop mongod service
service: name=mongod state=stopped
- name: move mongodb to {{ mongo_data_dir }}
command: mv /var/lib/mongodb {{ mongo_data_dir}}/. creates={{ mongo_data_dir }}/mongodb
......@@ -50,11 +59,11 @@
when: MONGO_CLUSTERED
- name: copy configuration template
template: src=mongodb.conf.j2 dest=/etc/mongodb.conf backup=yes
template: src=mongodb.conf.j2 dest=/etc/mongod.conf backup=yes
notify: restart mongo
- name: start mongo service
service: name=mongodb state=started
service: name=mongod state=started
- name: wait for mongo server to start
wait_for: port=27017 delay=2
......@@ -77,3 +86,39 @@
state=present
with_items: MONGO_USERS
when: mongo_create_users
- name: install s3cmd
pip: >
name="s3cmd"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
when: MONGO_S3_BACKUP
- name: configure s3cmd
template: >
dest="{{ MONGO_S3_S3CMD_CONFIG }}"
src=mongo-s3-backup-s3cfg.j2
owner=root
group=root
mode=0600
when: MONGO_S3_BACKUP
- name: install backup-mongo-to-s3 script
template: >
src=backup-mongo-to-s3.j2
dest=/edx/bin/backup-mongo-to-s3.sh
owner=root
group=root
mode=0700
when: MONGO_S3_BACKUP
- name: schedule backup-mongo-to-3s crontab
cron:
name="backup-mongo-to-s3"
job="/edx/bin/backup-mongo-to-s3.sh"
backup=yes
cron_file=backup-mongo-to-s3
user=root
hour="{{ MONGO_S3_BACKUP_HOUR }}"
minute="0"
day="{{ MONGO_S3_BACKUP_DAY }}"
{% set lb = '{' %}
{% set rb = '}' %}
{# NOTE(review): lb/rb look like brace-escaping helpers but appear unused in
   this template — confirm before removing. #}
#!/bin/bash
#
# Dump all MongoDB databases, tar/gz the result, and sync it to S3.
# Intended to run from cron on the designated secondary (MONGO_S3_BACKUP_NODE).
exec > >(tee "{{ mongo_s3_logfile }}")
exec 2>&1

shopt -s extglob

usage() {
  cat<<EO

  A script that will run a mongodump of all databases, tar/gz them
  and upload to an s3 bucket, will send mail to
  {{ MONGO_S3_NOTIFY_EMAIL }} on failures.

  Usage: $PROG
          -v    add verbosity (set -x)
          -n    echo what will be done
          -h    this

EO
}

while getopts "vhn" opt; do
  case $opt in
    v)
      set -x
      shift
      ;;
    h)
      usage
      exit 0
      ;;
    n)
      # Dry-run: prefix every mutating command with an echo.
      noop="echo Would have run: "
      shift
      ;;
  esac
done

if [[ "{{ MONGO_S3_BACKUP }}" != "true" ]]; then
  # only run if explicitly enabled
  exit
fi

# Determine this node's name as mongo sees it and bail unless we are the
# configured backup node.
MYNODENAME=$(echo "db.isMaster()" | mongo -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" "{{ EDXAPP_MONGO_DB_NAME }}" | grep \"me\" | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//');

if [[ "$MYNODENAME" != "{{ MONGO_S3_BACKUP_NODE }}" ]]; then
  # only run on specified node
  exit
fi

ISSECONDARY=$(echo "db.isMaster()" | mongo -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" "{{ EDXAPP_MONGO_DB_NAME }}" | grep secondary | cut -f 2 -d ':' | sed -e 's/ //' -e 's/,//' -e 's/"//')

if [[ "$ISSECONDARY" != "true" ]]; then
  # backups should be run on secondary server
  exit;
fi

MONGOOUTDIR=$(mktemp -d -p {{ MONGO_S3_BACKUP_TEMPDIR }})
DATESTAMP=$(date +'%Y-%m-%d-%H%M')
$noop mongodump --host {{ EDXAPP_MONGO_HOSTS[0] }} -u "{{ COMMON_MONGO_READ_ONLY_USER }}" -p"{{ COMMON_MONGO_READ_ONLY_PASS }}" -o $MONGOOUTDIR
cd $MONGOOUTDIR
$noop tar zcf {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz .
cd {{ MONGO_S3_BACKUP_TEMPDIR }}
$noop s3cmd -c {{ MONGO_S3_S3CMD_CONFIG }} sync {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz "s3://{{ MONGO_S3_BACKUP_BUCKET }}/mongo/"
# Clean up the dump directory and the local tarball after upload.
rm -rf $MONGOOUTDIR {{ MONGO_S3_BACKUP_TEMPDIR }}/{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}-$DATESTAMP.tar.gz
# s3cmd credentials used by backup-mongo-to-s3.sh (mode 0600, root only).
[default]
access_key = {{ MONGO_S3_BACKUP_AWS_ACCESS_KEY }}
secret_key = {{ MONGO_S3_BACKUP_AWS_SECRET_KEY }}
bucket_location = US
......@@ -21,6 +21,10 @@ NEWRELIC_DEBIAN_KEY_ID: '548C16BF'
NEWRELIC_DEBIAN_KEY_URL: 'https://download.newrelic.com/{{ NEWRELIC_DEBIAN_KEY_ID }}.gpg'
NEWRELIC_LICENSE_KEY: "SPECIFY_KEY_HERE"
# Set this to newrelic logwatch
# agent template files to enable
# the logwatch agent
NEWRELIC_LOGWATCH: []
#
# OS packages
#
......@@ -30,3 +34,12 @@ newrelic_debian_pkgs:
newrelic_redhat_pkgs:
- newrelic-sysmond
newrelic_debian_plugin_pkgs:
- ruby-bundler
- rubygems
newrelic_logwatch_repo: https://github.com/railsware/newrelic_platform_plugins
newrelic_logwatch_repo_dir: /opt/newrelic_platform_plugins
newrelic_logwatch_dir: "{{ newrelic_logwatch_repo_dir }}/newrelic_logwatcher_agent"
newrelic_logwatch_version: "8edd6d214e462b27fdd07d41712eb7b4fff2f7d8"
---
# Restart the logwatch plugin agent whenever its code or config changes.
- name: restart newrelic-logwatch-agent
  service: name=newrelic-logwatch-agent state=restarted
......@@ -62,3 +62,12 @@
- name: ensure started and enabled
service: name=newrelic-sysmond state=restarted enabled=yes
# tags=deploy here so that if we are deploying
# an application update on a server the config
# for the plugin will be updated with the appropriate
# edp
- include: newrelic-logwatcher.yml tags=deploy
when:
- NEWRELIC_LOGWATCH
- ansible_distribution == 'Ubuntu'
---
# This task file is for the newrelic logwatcher plugin
# which is an agent that watches logfiles on the system
# for string matches

- name: Install newrelic plugin related packages
  apt: pkg={{ item }} install_recommends=yes state=present
  with_items: newrelic_debian_plugin_pkgs

- name: check out the newrelic logwatcher plugin
  git: >
    dest={{ newrelic_logwatch_repo_dir }}
    repo={{ newrelic_logwatch_repo }} version={{ newrelic_logwatch_version }}
    accept_hostkey=yes

- name: bundle install
  shell: >
    chdir={{ newrelic_logwatch_dir }}
    creates=/var/lib/gems/1.8/gems/newrelic_plugin-1.0.2/
    bundle install
  notify: restart newrelic-logwatch-agent

# Rendered to a ".copy"; the upstart pre-start script substitutes HOSTNAME
# before the agent reads the final config.
- name: create agent configuration
  template: >
    src=opt/newrelic_platform_plugins/newrelic_logwatcher_agent/config/newrelic_plugin.yml.j2
    dest={{ newrelic_logwatch_dir }}/config/newrelic_plugin.yml.copy
  notify: restart newrelic-logwatch-agent

# Fix: this task previously had no name, making play output hard to follow.
- name: install logwatch agent upstart job
  template:
    owner: root
    src: etc/init/newrelic-logwatch-agent.conf.j2
    dest: /etc/init/newrelic-logwatch-agent.conf
  notify: restart newrelic-logwatch-agent
description "newrelic logwatch plugin"
start on runlevel [2345]
stop on runlevel [016]
respawn
chdir {{ newrelic_logwatch_dir }}
pre-start script
ami_id=$(ec2metadata --ami-id)
hostname=$(hostname)
sed "s/HOSTNAME/${ami_id}-${hostname}/" {{ newrelic_logwatch_dir }}/config/newrelic_plugin.yml.copy > {{ newrelic_logwatch_dir }}/config/newrelic_plugin.yml
end script
exec ruby newrelic_logwatcher_agent.rb
{% if SANDBOX_USERNAME is defined %}
{% set server_name = SANDBOX_USERNAME + '-nginx-503' %}
{% else %}
{% set server_name = COMMON_ENVIRONMENT|default('unknown-env') + '-' + COMMON_DEPLOYMENT|default('unknown-deployment') + '-nginx-503' + '-HOSTNAME' %}
{% endif %}
{{ server_name }}:
# Full path to the log file
log_path: {{ nginx_log_dir|default('/edx/var/log/nginx') }}/error.log
# Returns the number of matches for this term. Use Linux Regex formatting.
term: "limiting requests"
# Provide any options to pass to grep when running.
# For example, to count non-matching lines, enter 'v'.
# Use the abbreviated format ('v' and not 'invert-match').
grep_options:
{% if SANDBOX_USERNAME is defined %}
{% set server_name = SANDBOX_USERNAME + '-certs-errors' %}
{% else %}
{% set server_name = COMMON_ENVIRONMENT|default('unknown-env') + '-' + COMMON_DEPLOYMENT|default('unknown-deployment') + '-certs-errors' + '-HOSTNAME'%}
{% endif %}
{{ server_name }}:
# Full path to the log file
log_path: {{ COMMON_LOG_DIR|default('/edx/var/log') }}/certs/edx.log
# Returns the number of matches for this term. Use Linux Regex formatting.
term: " ERROR "
# Provide any options to pass to grep when running.
# For example, to count non-matching lines, enter 'v'.
# Use the abbreviated format ('v' and not 'invert-match').
grep_options:
{% if SANDBOX_USERNAME is defined %}
{% set server_name = SANDBOX_USERNAME + '-cms-errors' %}
{% else %}
{% set server_name = COMMON_ENVIRONMENT|default('unknown-env') + '-' + COMMON_DEPLOYMENT|default('unknown-deployment') + '-cms-errors' + '-HOSTNAME' %}
{% endif %}
{{ server_name }}:
# Full path to the log file
log_path: {{ COMMON_LOG_DIR|default('/edx/var/log') }}/cms/edx.log
# Returns the number of matches for this term. Use Linux Regex formatting.
term: " ERROR "
# Provide any options to pass to grep when running.
# For example, to count non-matching lines, enter 'v'.
# Use the abbreviated format ('v' and not 'invert-match').
grep_options:
{% if SANDBOX_USERNAME is defined %}
{% set server_name = SANDBOX_USERNAME + '-lms-errors' %}
{% else %}
{% set server_name = COMMON_ENVIRONMENT|default('unknown-env') + '-' + COMMON_DEPLOYMENT|default('unknown-deployment') + '-lms-errors' + '-HOSTNAME' %}
{% endif %}
{{ server_name }}:
# Full path to the log file
log_path: {{ COMMON_LOG_DIR|default('/edx/var/log') }}/lms/edx.log
# Returns the number of matches for this term. Use Linux Regex formatting.
term: " ERROR "
# Provide any options to pass to grep when running.
# For example, to count non-matching lines, enter 'v'.
# Use the abbreviated format ('v' and not 'invert-match').
grep_options:
{% if SANDBOX_USERNAME is defined %}
{% set server_name = SANDBOX_USERNAME + '-xqueue-errors' %}
{% else %}
{% set server_name = COMMON_ENVIRONMENT|default('unknown-env') + '-' + COMMON_DEPLOYMENT|default('unknown-deployment') + '-xqueue-errors' + '-HOSTNAME' %}
{% endif %}
{{ server_name }}:
# Full path to the log file
log_path: {{ COMMON_LOG_DIR|default('/edx/var/log') }}/xqueue/edx.log
# Returns the number of matches for this term. Use Linux Regex formatting.
term: " ERROR "
# Provide any options to pass to grep when running.
# For example, to count non-matching lines, enter 'v'.
# Use the abbreviated format ('v' and not 'invert-match').
grep_options:
#
#
# This is a sample newrelic_plugin.yml file. Please move this file
# to the following location if it is not already there:
#
# ./config/newrelic_plugin.yml
#
# Where the current directory is the directory where your main program resides and is your current
# directory when you run the main program.
#
# Please make sure to update the license_key information with the license key for your New Relic
# account.
#
#
newrelic:
#
# Update with your New Relic account license key:
#
license_key: '{{ NEWRELIC_LICENSE_KEY }}'
#
# Set to '1' for verbose output, remove for normal output.
# All output goes to stdout/stderr.
#
# verbose: 1
#
# Agent Configuration:
#
agents:
{% for agent in NEWRELIC_LOGWATCH %}
{% include agent %}
{% endfor %}
......@@ -23,6 +23,7 @@ NGINX_ENABLE_SSL: False
NGINX_SSL_CERTIFICATE: 'ssl-cert-snakeoil.pem'
NGINX_SSL_KEY: 'ssl-cert-snakeoil.key'
NGINX_LOG_FORMAT_NAME: 'p_combined'
# When set to False, nginx will pass X-Forwarded-For, X-Forwarded-Port,
# and X-Forwarded-Proto headers through to the backend unmodified.
# This is desired when nginx is deployed behind another load balancer
......@@ -32,8 +33,24 @@ NGINX_SSL_KEY: 'ssl-cert-snakeoil.key'
# headers to reflect the properties of the incoming request.
NGINX_SET_X_FORWARDED_HEADERS: False
NGINX_SERVER_ERROR_IMG: 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/11/Pendleton_Sinking_Ship.jpg/640px-Pendleton_Sinking_Ship.jpg'
NGINX_SERVER_HTML_FILES:
- file: rate-limit.html
title: 'Rate limit exceeded'
msg: 'If you think you have encountered this message in error please let us know at <a href="mailto:{{ EDXAPP_TECH_SUPPORT_EMAIL|default("technical@example.com") }}">{{ EDXAPP_TECH_SUPPORT_EMAIL|default("technical@example.com") }}</a>'
img: "{{ NGINX_SERVER_ERROR_IMG }}"
heading: 'Uh oh, we are having some server issues..'
- file: server-error.html
title: 'Server error'
msg: 'We have been notified of the error, if it persists please let us know at <a href="mailto:{{ EDXAPP_TECH_SUPPORT_EMAIL|default("technical@example.com") }}">{{ EDXAPP_TECH_SUPPORT_EMAIL|default("technical@example.com") }}</a>'
img: "{{ NGINX_SERVER_ERROR_IMG }}"
heading: 'Uh oh, we are having some server issues..'
nginx_app_dir: "{{ COMMON_APP_DIR }}/nginx"
nginx_data_dir: "{{ COMMON_DATA_DIR }}/nginx"
nginx_server_static_dir: "{{ nginx_data_dir }}/server-static"
nginx_conf_dir: "{{ nginx_app_dir }}/conf.d"
nginx_log_dir: "{{ COMMON_LOG_DIR }}/nginx"
nginx_sites_available_dir: "{{ nginx_app_dir }}/sites-available"
......@@ -63,6 +80,8 @@ nginx_cms_gunicorn_hosts:
- 127.0.0.1
nginx_analytics_api_gunicorn_hosts:
- 127.0.0.1
nginx_insights_gunicorn_hosts:
- 127.0.0.1
nginx_cfg:
# - link - turn on
......
......@@ -24,6 +24,7 @@
with_items:
- "{{ nginx_data_dir }}"
- "{{ nginx_log_dir }}"
- "{{ nginx_server_static_dir }}"
notify: restart nginx
- name: Install nginx packages
......@@ -113,6 +114,16 @@
notify: reload nginx
with_dict: nginx_redirects
# These are static pages that can be used
# for nginx rate limiting, 500 errors, etc.
- name: Create NGINX server templates
template: >
src=edx/var/nginx/server-static/server-template.j2
dest={{ nginx_server_static_dir }}/{{ item.file }}
owner=root group={{ common_web_user }} mode=0640
with_items: NGINX_SERVER_HTML_FILES
- name: Write out htpasswd file
htpasswd: >
name={{ COMMON_HTPASSWD_USER }}
......
......@@ -13,12 +13,6 @@ server {
}
location / {
{% include "basic-auth.j2" %}
try_files $uri @proxy_to_app;
}
# No basic auth security on the heartbeat url, so that ELB can use it
location /api/v0/status {
try_files $uri @proxy_to_app;
}
......
......@@ -4,6 +4,12 @@
allow 127.0.0.1;
allow 192.168.0.0/16;
allow 172.16.0.0/12;
allow 10.3.110.0/24;
allow 10.3.120.0/24;
allow 10.8.110.0/24;
allow 10.8.120.0/24;
deny all;
auth_basic "Restricted";
......
......@@ -13,10 +13,13 @@ upstream cms-backend {
server {
# CMS configuration file for nginx, templated by ansible
# 500 error pages
error_page 500 502 504 /server/server-error.html;
{% if NGINX_ENABLE_SSL %}
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
listen {{EDXAPP_CMS_SSL_NGINX_PORT}} ssl;
listen {{ EDXAPP_CMS_NGINX_PORT }} {{ default_site }};
listen {{ EDXAPP_CMS_SSL_NGINX_PORT }} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
......@@ -24,12 +27,12 @@ server {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
{% else %}
listen {{EDXAPP_CMS_NGINX_PORT}} {{default_site}};
listen {{ EDXAPP_CMS_NGINX_PORT }} {{ default_site }};
{% endif %}
server_name {{ CMS_HOSTNAME }};
access_log {{ nginx_log_dir }}/access.log;
access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }};
error_log {{ nginx_log_dir }}/error.log error;
# CS184 requires uploads of up to 4MB for submitting screenshots.
......
......@@ -32,7 +32,7 @@ upstream forum_app_server {
server {
server_name forum.*;
listen {{ FORUM_NGINX_PORT }} {{default_site}};
listen {{ FORUM_NGINX_PORT }} {{ default_site }};
client_max_body_size 1M;
keepalive_timeout 5;
......
upstream insights_app_server {
{% for host in nginx_insights_gunicorn_hosts %}
server {{ host }}:{{ insights_gunicorn_port }} fail_timeout=0;
{% endfor %}
}
server {
listen {{ INSIGHTS_NGINX_PORT }} default_server;
location ~ ^/static/(?P<file>.*) {
root {{ COMMON_DATA_DIR }}/{{ insights_service_name }};
try_files /staticfiles/$file =404;
}
location / {
try_files $uri @proxy_to_app;
}
# No basic auth security on the heartbeat url, so that ELB can use it
location /status {
try_files $uri @proxy_to_app;
}
{% include "robots.j2" %}
location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://insights_app_server;
}
}
......@@ -13,14 +13,14 @@ server {
{% if NGINX_ENABLE_SSL %}
listen {{KIBANA_NGINX_PORT}} {{default_site}};
listen {{KIBANA_SSL_NGINX_PORT}} {{default_site}} ssl;
listen {{ KIBANA_NGINX_PORT }} {{ default_site }};
listen {{ KIBANA_SSL_NGINX_PORT }} {{ default_site }} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% else %}
listen {{KIBANA_NGINX_PORT}} {{default_site}};
listen {{ KIBANA_NGINX_PORT }} {{ default_site }};
{% endif %}
server_name {{ KIBANA_SERVER_NAME }};
......
......@@ -7,7 +7,7 @@ upstream lms-preview-backend {
server {
# LMS-preview configuration file for nginx, templated by ansible
listen {{EDXAPP_LMS_PREVIEW_NGINX_PORT}};
listen {{ EDXAPP_LMS_PREVIEW_NGINX_PORT }};
server_name preview.*;
......
......@@ -10,13 +10,21 @@ upstream lms-backend {
{% endfor %}
}
{%- if EDXAPP_ENABLE_RATE_LIMITING -%}
# Make Zone
limit_req_zone $cookie_{{ EDXAPP_SESSION_COOKIE_NAME }} zone=cookies:10m rate={{ EDXAPP_COURSES_REQUEST_RATE }};
{%- endif -%}
server {
# LMS configuration file for nginx, templated by ansible
# 500 error pages
error_page 500 502 504 /server/server-error.html;
{% if NGINX_ENABLE_SSL %}
listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}};
listen {{EDXAPP_LMS_SSL_NGINX_PORT}} {{default_site}} ssl;
listen {{ EDXAPP_LMS_NGINX_PORT }} {{ default_site }};
listen {{ EDXAPP_LMS_SSL_NGINX_PORT }} {{ default_site }} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
......@@ -24,10 +32,10 @@ server {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
{% else %}
listen {{EDXAPP_LMS_NGINX_PORT}} {{default_site}};
listen {{ EDXAPP_LMS_NGINX_PORT }} {{ default_site }};
{% endif %}
access_log {{ nginx_log_dir }}/access.log;
access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }};
error_log {{ nginx_log_dir }}/error.log error;
# CS184 requires uploads of up to 4MB for submitting screenshots.
......@@ -57,17 +65,41 @@ server {
{% include "basic-auth.j2" %}
try_files $uri @proxy_to_lms_app;
}
# No basic auth for /segmentio/event
location /segmentio/event {
try_files $uri @proxy_to_lms_app;
}
location /notifier_api {
try_files $uri @proxy_to_lms_app;
}
# No basic auth security on the github_service_hook url, so that github can use it for cms
location /github_service_hook {
try_files $uri @proxy_to_lms_app;
}
# No basic auth security on the oauth2 endpoint
location /oauth2 {
try_files $uri @proxy_to_lms_app;
}
# No basic auth security on the heartbeat url, so that ELB can use it
location /heartbeat {
try_files $uri @proxy_to_lms_app;
}
location /courses {
{%- if EDXAPP_ENABLE_RATE_LIMITING -%}
# Set Limit
limit_req zone=cookies burst={{ EDXAPP_COURSE_REQUEST_BURST_RATE }};
error_page 503 = /server/rate-limit.html;
{%- endif -%}
{%- include "basic-auth.j2" %}
try_files $uri @proxy_to_lms_app;
}
{% include "robots.j2" %}
{% include "static-files.j2" %}
......
# static pages for server status
location ~ ^/server/(?P<file>.*) {
root /edx/var/nginx/server-static;
try_files /$file =404;
}
location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404;
......
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>{{ item.title }}</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style media="screen" type="text/css">
h1, h2{
font-family: "Helvetica Neue",Helvetica,Roboto,Arial,sans-serif;
margin-bottom: .3em;
font-size: 2.0em;
line-height: 1.25em;
text-rendering: optimizeLegibility;
font-weight: bold;
color: #000000;
}
h2 {
font-size: 1.8em;
color: #5b5e63;
}
p {
font-family: Georgia,Cambria,"Times New Roman",Times,serif;
margin: auto;
margin-bottom: 1em;
font-weight: 200;
line-height: 1.4em;
font-size: 1.1em;
max-width: 80%;
}
</style>
</head>
<body>
<div style="margin: auto; width: 800px; text-align: center; padding:20px 0px 0px 0px;">
<h1>{{ item.heading }}</h1>
<img src="{{ item.img}}" alt="">
<h2>{{ item.title }}</h2>
<p>{{ item.msg }}
</div>
</body>
</html>
......@@ -33,7 +33,7 @@ http {
# Logging Settings
##
log_format p_combined '$http_x_forwarded_for - $remote_addr - $remote_user [$time_local] '
log_format {{ NGINX_LOG_FORMAT_NAME }} '$http_x_forwarded_for - $remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent $request_time '
'"$http_referer" "$http_user_agent"';
......
......@@ -20,7 +20,7 @@
- name: unarchive nltk data
shell: >
unzip {{NLTK_DATA_DIR}}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
unzip {{ NLTK_DATA_DIR }}/{{ item.url|basename }} chdir="{{ NLTK_DATA_DIR }}/{{ item.path|dirname }}"
with_items: NLTK_DATA
when: nltk_download|changed
tags:
......
......@@ -8,11 +8,19 @@ NOTIFIER_DB_DIR: "{{ NOTIFIER_HOME }}/db"
NOTIFIER_SOURCE_REPO: "https://github.com/edx/notifier.git"
NOTIFIER_CODE_DIR: "{{ NOTIFIER_HOME }}/src"
NOTIFIER_VERSION: "master"
NOTIFIER_GIT_IDENTITY_PATH: "{{ secure_dir }}/files/git-identity"
NOTIFIER_REQUIREMENTS_FILE: "{{ NOTIFIER_CODE_DIR }}/requirements.txt"
NOTIFIER_LOG_LEVEL: "INFO"
NOTIFIER_RSYSLOG_ENABLED: "yes"
NOTIFIER_DIGEST_TASK_INTERVAL: "1440"
NOTIFIER_FORUM_DIGEST_TASK_BATCH_SIZE: "5"
NOTIFIER_FORUM_DIGEST_TASK_RATE_LIMIT: "60/m"
NOTIFIER_THEME_NAME: ""
NOTIFIER_THEME_REPO: ""
NOTIFIER_THEME_VERSION: "master"
notifier_git_ssh: "/tmp/notifier_git_ssh.sh"
NOTIFIER_GIT_IDENTITY: !!null
notifier_git_identity: "{{ NOTIFIER_HOME }}/notifier-git-identity"
NOTIFIER_DIGEST_EMAIL_SENDER: "notifications@example.com"
NOTIFIER_DIGEST_EMAIL_SUBJECT: "Daily Discussion Digest"
......@@ -20,6 +28,8 @@ NOTIFIER_DIGEST_EMAIL_TITLE: "Discussion Digest"
NOTIFIER_DIGEST_EMAIL_DESCRIPTION: "A digest of unread content from course discussions you are following."
NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS: ""
NOTIFIER_ENV_EXTRA: {}
NOTIFIER_LANGUAGE: ""
NOTIFIER_ENV: "Development"
......@@ -89,7 +99,7 @@ notifier_env_vars:
CS_API_KEY: "{{ NOTIFIER_COMMENT_SERVICE_API_KEY }}"
US_URL_BASE: "{{ NOTIFIER_USER_SERVICE_BASE }}"
US_API_KEY: "{{ NOTIFIER_USER_SERVICE_API_KEY }}"
DATADOG_API_KEY: "{{ DATADOG_API_KEY }}"
DATADOG_API_KEY: "{{ DATADOG_API_KEY|default('') }}"
LOG_LEVEL: "{{ NOTIFIER_LOG_LEVEL }}"
RSYSLOG_ENABLED: "{{ NOTIFIER_RSYSLOG_ENABLED }}"
BROKER_URL: "{{ NOTIFIER_CELERY_BROKER_URL }}"
......@@ -98,3 +108,5 @@ notifier_env_vars:
US_HTTP_AUTH_PASS: "{{ NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS }}"
FORUM_DIGEST_TASK_INTERVAL: "{{ NOTIFIER_DIGEST_TASK_INTERVAL }}"
LOGO_IMAGE_URL: "{{ NOTIFIER_LOGO_IMAGE_URL }}"
FORUM_DIGEST_TASK_BATCH_SIZE: "{{ NOTIFIER_FORUM_DIGEST_TASK_BATCH_SIZE }}"
FORUM_DIGEST_TASK_RATE_LIMIT: "{{ NOTIFIER_FORUM_DIGEST_TASK_RATE_LIMIT }}"
......@@ -11,6 +11,45 @@
- restart notifier-scheduler
- restart notifier-celery-workers
# Optional auth for git
- name: create ssh script for git (not authenticated)
template: >
src=git_ssh_noauth.sh.j2 dest={{ notifier_git_ssh }}
owner={{ NOTIFIER_USER }} mode=750
when: not NOTIFIER_USE_GIT_IDENTITY
- name: create ssh script for git (authenticated)
template: >
src=git_ssh_auth.sh.j2 dest={{ notifier_git_ssh }}
owner={{ NOTIFIER_USER }} mode=750
when: NOTIFIER_USE_GIT_IDENTITY
- name: install read-only ssh key
copy: >
content="{{ NOTIFIER_GIT_IDENTITY }}" dest={{ notifier_git_identity }}
force=yes owner={{ NOTIFIER_USER }} mode=0600
when: NOTIFIER_USE_GIT_IDENTITY
- name: checkout theme
git: >
dest={{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }}
repo={{ NOTIFIER_THEME_REPO }}
version={{ NOTIFIER_THEME_VERSION }}
accept_hostkey=yes
when: NOTIFIER_THEME_NAME != ''
sudo_user: "{{ NOTIFIER_USER }}"
environment:
GIT_SSH: "{{ notifier_git_ssh }}"
- name: write notifier local settings
template: >
src=settings_local.py.j2
dest={{ NOTIFIER_CODE_DIR }}/notifier/settings_local.py
mode=0555
when: NOTIFIER_THEME_NAME != ''
notify:
- restart notifier-celery-workers
- name: source repo group perms
file:
path={{ NOTIFIER_SOURCE_REPO }} mode=2775 state=directory
......
......@@ -52,6 +52,7 @@
template:
src=notifier_env.j2 dest={{ NOTIFIER_HOME }}/notifier_env
owner="{{ NOTIFIER_USER }}" group="{{ NOTIFIER_USER }}"
mode=655
- name: drop a bash_profile
copy: >
......@@ -85,6 +86,33 @@
file:
path="{{ NOTIFIER_HOME }}/bin" mode=2775 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: create notifier/.ssh directory
file:
path="{{ NOTIFIER_HOME }}/.ssh" mode=2700 state=directory owner={{ NOTIFIER_USER }} group={{ NOTIFIER_USER }}
- name: create service log dir
file: >
path="{{ item }}"
state=directory
owner="syslog"
group="syslog"
with_items:
- "{{ COMMON_LOG_DIR }}/notifier"
- name: write supervisord wrapper for celery workers
template: >
src=notifier-celery-workers-supervisor.sh.j2
dest="{{ NOTIFIER_HOME }}/notifier-celery-workers-supervisor.sh"
mode=0775
sudo_user: "{{ NOTIFIER_USER }}"
- name: write supervisord wrapper for scheduler
template: >
src=notifier-scheduler-supervisor.sh.j2
dest="{{ NOTIFIER_HOME }}/notifier-scheduler-supervisor.sh"
mode=0775
sudo_user: "{{ NOTIFIER_USER }}"
- name: write supervisord config for celery workers
template: >
src=edx/app/supervisor/conf.d/notifier-celery-workers.conf.j2
......
......@@ -3,10 +3,11 @@
;
[program:notifier-celery-workers]
command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py celery worker -l {{ NOTIFIER_LOG_LEVEL }}
command={{ NOTIFIER_HOME }}/notifier-celery-workers-supervisor.sh
process_name=%(program_name)s
numprocs=1
stopasgroup=true
directory={{ NOTIFIER_CODE_DIR }}
umask=022
autostart=true
......@@ -25,7 +26,3 @@ stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-celery-workers-stderr
stderr_logfile_maxbytes=1MB
stderr_logfile_backups=10
stderr_capture_maxbytes=1MB
environment=PID='/var/tmp/notifier-celery-workers.pid',LANG=en_US.UTF-8,
{%- for name,value in notifier_env_vars.items() -%}
{%- if value -%}{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%}{%- endif -%}
{%- endfor -%}
......@@ -3,10 +3,11 @@
;
[program:notifier-scheduler]
command={{ NOTIFIER_VENV_DIR }}/bin/python manage.py scheduler
command={{ NOTIFIER_HOME }}/notifier-scheduler-supervisor.sh
process_name=%(program_name)s
numprocs=1
stopasgroup=true
directory={{ NOTIFIER_CODE_DIR }}
umask=022
autostart=true
......@@ -25,7 +26,3 @@ stderr_logfile={{ NOTIFIER_SUPERVISOR_LOG_DEST }}/notifier-scheduler-stderr.log
stderr_logfile_maxbytes=1MB
stderr_logfile_backups=10
stderr_capture_maxbytes=1MB
environment=PID='/var/tmp/notifier-scheduler.pid',LANG=en_US.UTF-8,
{%- for name,value in notifier_env_vars.items() -%}
{%- if value -%}{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%}{%- endif -%}
{%- endfor -%}
#!/bin/sh
# GIT_SSH wrapper (authenticated): run ssh with the notifier read-only deploy
# key and skip the interactive host-key prompt so git can clone unattended.
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ notifier_git_identity }} "$@"
#!/bin/sh
# GIT_SSH wrapper (unauthenticated): plain ssh with host-key checking disabled
# so git can clone public repos unattended; no identity file is supplied.
exec /usr/bin/ssh -o StrictHostKeyChecking=no "$@"
#!/bin/bash
# Supervisor wrapper for the notifier celery workers: load the notifier
# environment file, switch to the code directory, and run the worker process
# in the foreground so supervisord can manage it.
source {{ NOTIFIER_HOME }}/notifier_env
cd {{ NOTIFIER_CODE_DIR }}
# Fixed: was '/var/tmp/notifier-scheduler.pid', which belongs to the scheduler
# wrapper and would have collided with it.
export PID='/var/tmp/notifier-celery-workers.pid'
export LANG=en_US.UTF-8
{{ NOTIFIER_VENV_DIR }}/bin/python manage.py celery worker -l {{ NOTIFIER_LOG_LEVEL }}
#!/bin/bash
# Supervisor wrapper for the notifier scheduler: load the notifier environment
# file, switch to the code directory, and run the scheduler process in the
# foreground so supervisord can manage it.
source {{ NOTIFIER_HOME }}/notifier_env
cd {{ NOTIFIER_CODE_DIR }}
# Fixed: was '/var/tmp/notifier-celery-workers.pid', which belongs to the
# celery-workers wrapper and would have collided with it.
export PID='/var/tmp/notifier-scheduler.pid'
export LANG=en_US.UTF-8
{{ NOTIFIER_VENV_DIR }}/bin/python manage.py scheduler
# {{ ansible_managed }}
{% do notifier_env_vars.update(NOTIFIER_ENV_EXTRA) %}
{% for name,value in notifier_env_vars.items() %}
{% if value %}
export {{ name }}="{{ value }}"
{% endif %}
{% endfor %}
{% if NOTIFIER_THEME_NAME != "" %}
export DJANGO_SETTINGS_MODULE=notifier.settings_local
{% endif %}
from .settings import *
FORUM_DIGEST_EMAIL_SUBJECT = '{{ NOTIFIER_DIGEST_EMAIL_SUBJECT }}'
CUSTOM_THEME_DIR = '{{ NOTIFIER_CODE_DIR }}/{{ NOTIFIER_THEME_NAME }}/'
TEMPLATE_DIRS = (CUSTOM_THEME_DIR + 'templates',)
# vars for the ORA role
---
ORA_NGINX_PORT: 18060
ORA_GUNICORN_EXTRA: ""
ora_app_dir: "{{ COMMON_APP_DIR }}/ora"
ora_code_dir: "{{ ora_app_dir }}/ora"
......@@ -86,61 +87,61 @@ ora_gunicorn_host: 127.0.0.1
# appropriate for running all edX
# services on a single server.
ora_env_config:
LOGGING_ENV: $ORA_LOGGING_ENV
LOGGING_ENV: "{{ ORA_LOGGING_ENV }}"
LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/xqueue"
COURSE_DATA_PATH: "{{ ora_data_course_dir }}"
REQUESTS_TIMEOUT: $ORA_REQUESTS_TIMEOUT
QUEUES_TO_PULL_FROM: $ORA_QUEUES_TO_PULL_FROM
TIME_BETWEEN_XQUEUE_PULLS: $ORA_TIME_BETWEEN_XQUEUE_PULLS
TIME_BETWEEN_EXPIRED_CHECKS: $ORA_TIME_BETWEEN_EXPIRED_CHECKS
GRADER_SETTINGS_DIRECTORY: $ORA_GRADER_SETTINGS_DIRECTORY
MAX_NUMBER_OF_TIMES_TO_RETRY_GRADING: $ORA_MAX_NUMBER_OF_TIMES_TO_RETRY_GRADING
MIN_TO_USE_ML: $ORA_MIN_TO_USE_ML
ML_PATH: $ORA_ML_PATH
ML_MODEL_PATH: $ORA_ML_MODEL_PATH
TIME_BETWEEN_ML_CREATOR_CHECKS: $ORA_TIME_BETWEEN_ML_CREATOR_CHECKS
TIME_BETWEEN_ML_GRADER_CHECKS: $ORA_TIME_BETWEEN_ML_GRADER_CHECKS
MIN_TO_USE_PEER: $ORA_MIN_TO_USE_PEER
PEER_GRADER_COUNT: $ORA_PEER_GRADER_COUNT
PEER_GRADER_MINIMUM_TO_CALIBRATE: $ORA_PEER_GRADER_MINIMUM_TO_CALIBRATE
PEER_GRADER_MAXIMUM_TO_CALIBRATE: $ORA_PEER_GRADER_MAXIMUM_TO_CALIBRATE
PEER_GRADER_MIN_NORMALIZED_CALIBRATION_ERROR: $ORA_PEER_GRADER_MIN_NORMALIZED_CALIBRATION_ERROR
EXPIRE_SUBMISSIONS_AFTER: $ORA_EXPIRE_SUBMISSIONS_AFTER
RESET_SUBMISSIONS_AFTER: $ORA_RESET_SUBMISSIONS_AFTER
LOCAL_LOGLEVEL: $ORA_LOCAL_LOGLEVEL
DEBUG: $ORA_DEBUG
REQUESTS_TIMEOUT: "{{ ORA_REQUESTS_TIMEOUT }}"
QUEUES_TO_PULL_FROM: "{{ ORA_QUEUES_TO_PULL_FROM }}"
TIME_BETWEEN_XQUEUE_PULLS: "{{ ORA_TIME_BETWEEN_XQUEUE_PULLS }}"
TIME_BETWEEN_EXPIRED_CHECKS: "{{ ORA_TIME_BETWEEN_EXPIRED_CHECKS }}"
GRADER_SETTINGS_DIRECTORY: "{{ ORA_GRADER_SETTINGS_DIRECTORY }}"
MAX_NUMBER_OF_TIMES_TO_RETRY_GRADING: "{{ ORA_MAX_NUMBER_OF_TIMES_TO_RETRY_GRADING }}"
MIN_TO_USE_ML: "{{ ORA_MIN_TO_USE_ML }}"
ML_PATH: "{{ ORA_ML_PATH }}"
ML_MODEL_PATH: "{{ ORA_ML_MODEL_PATH }}"
TIME_BETWEEN_ML_CREATOR_CHECKS: "{{ ORA_TIME_BETWEEN_ML_CREATOR_CHECKS }}"
TIME_BETWEEN_ML_GRADER_CHECKS: "{{ ORA_TIME_BETWEEN_ML_GRADER_CHECKS }}"
MIN_TO_USE_PEER: "{{ ORA_MIN_TO_USE_PEER }}"
PEER_GRADER_COUNT: "{{ ORA_PEER_GRADER_COUNT }}"
PEER_GRADER_MINIMUM_TO_CALIBRATE: "{{ ORA_PEER_GRADER_MINIMUM_TO_CALIBRATE }}"
PEER_GRADER_MAXIMUM_TO_CALIBRATE: "{{ ORA_PEER_GRADER_MAXIMUM_TO_CALIBRATE }}"
PEER_GRADER_MIN_NORMALIZED_CALIBRATION_ERROR: "{{ ORA_PEER_GRADER_MIN_NORMALIZED_CALIBRATION_ERROR }}"
EXPIRE_SUBMISSIONS_AFTER: "{{ ORA_EXPIRE_SUBMISSIONS_AFTER }}"
RESET_SUBMISSIONS_AFTER: "{{ ORA_RESET_SUBMISSIONS_AFTER }}"
LOCAL_LOGLEVEL: "{{ ORA_LOCAL_LOGLEVEL }}"
DEBUG: "{{ ORA_DEBUG }}"
SYSLOG_SERVER: "{{ ORA_SYSLOG_SERVER }}"
USE_S3_TO_STORE_MODELS: "{{ ORA_USE_S3_TO_STORE_MODELS }}"
S3_BUCKETNAME: $ORA_S3_BUCKETNAME
S3_BUCKETNAME: "{{ ORA_S3_BUCKETNAME }}"
ora_auth_config:
USERS: $ORA_USERS
USERS: "{{ ORA_USERS }}"
XQUEUE_INTERFACE:
django_auth:
username: $ORA_XQUEUE_DJANGO_USER
password: $ORA_XQUEUE_DJANGO_PASSWORD
basic_auth: [ $ORA_XQUEUE_BASIC_AUTH_USER, $ORA_XQUEUE_BASIC_AUTH_PASSWORD ]
url: $ORA_XQUEUE_URL
username: "{{ ORA_XQUEUE_DJANGO_USER }}"
password: "{{ ORA_XQUEUE_DJANGO_PASSWORD }}"
basic_auth: [ "{{ ORA_XQUEUE_BASIC_AUTH_USER }}", "{{ ORA_XQUEUE_BASIC_AUTH_PASSWORD }}" ]
url: "{{ ORA_XQUEUE_URL }}"
GRADING_CONTROLLER_INTERFACE:
django_auth:
password: $ORA_DJANGO_PASSWORD
username: $ORA_DJANGO_USER
url: $ORA_URL
password: "{{ ORA_DJANGO_PASSWORD }}"
username: "{{ ORA_DJANGO_USER }}"
url: "{{ ORA_URL }}"
DATABASES:
default:
ENGINE: 'django.db.backends.mysql'
NAME: $ORA_MYSQL_DB_NAME
USER: $ORA_MYSQL_USER
PASSWORD: $ORA_MYSQL_PASSWORD
HOST: $ORA_MYSQL_HOST
PORT: $ORA_MYSQL_PORT
AWS_ACCESS_KEY_ID: $ORA_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $ORA_AWS_SECRET_ACCESS_KEY
NAME: "{{ ORA_MYSQL_DB_NAME }}"
USER: "{{ ORA_MYSQL_USER }}"
PASSWORD: "{{ ORA_MYSQL_PASSWORD }}"
HOST: "{{ ORA_MYSQL_HOST }}"
PORT: "{{ ORA_MYSQL_PORT }}"
AWS_ACCESS_KEY_ID: "{{ ORA_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "{{ ORA_AWS_SECRET_ACCESS_KEY }}"
ora_environment:
SERVICE_VARIANT: ora
LANG: $ORA_LANG
PATH: $ora_deploy_path
LANG: "{{ ORA_LANG }}"
PATH: "{{ ora_deploy_path }}"
ora_debian_pkgs:
- python-software-properties
......
......@@ -22,11 +22,11 @@
- include: ease.yml
- name: create ora application config
template: src=ora.env.json.j2 dest={{ora_app_dir}}/ora.env.json
template: src=ora.env.json.j2 dest={{ ora_app_dir }}/ora.env.json
sudo_user: "{{ ora_user }}"
- name: create ora auth file
template: src=ora.auth.json.j2 dest={{ora_app_dir}}/ora.auth.json
template: src=ora.auth.json.j2 dest={{ ora_app_dir }}/ora.auth.json
sudo_user: "{{ ora_user }}"
- name: setup the ora env
......@@ -80,7 +80,7 @@
- restart ora_celery
- name: syncdb and migrate
shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
shell: SERVICE_VARIANT=ora {{ ora_venv_dir }}/bin/django-admin.py syncdb --migrate --noinput --settings=edx_ora.aws --pythonpath={{ ora_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ ora_user }}"
notify:
......@@ -88,7 +88,7 @@
- restart ora_celery
- name: create users
shell: SERVICE_VARIANT=ora {{ora_venv_dir}}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ora_code_dir}}
shell: SERVICE_VARIANT=ora {{ ora_venv_dir }}/bin/django-admin.py update_users --settings=edx_ora.aws --pythonpath={{ ora_code_dir }}
sudo_user: "{{ ora_user }}"
notify:
- restart ora
......
# Do A Checkout
- name: git checkout ease repo into its base dir
git: >
dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}}
dest={{ ora_ease_code_dir }} repo={{ ora_ease_source_repo }} version={{ora_ease_version}}
accept_hostkey=yes
sudo_user: "{{ ora_user }}"
notify:
......@@ -9,7 +9,7 @@
- restart ora_celery
- name: install ease system packages
apt: pkg={{item}} state=present
apt: pkg={{ item }} state=present
with_items: ora_ease_debian_pkgs
notify:
- restart ora
......@@ -19,7 +19,7 @@
# Install the python pre requirements into {{ ora_ease_venv_dir }}
- name: install ease python pre-requirements
pip: >
requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
requirements="{{ ora_ease_pre_requirements_file }}" virtualenv="{{ ora_ease_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ ora_user }}"
notify:
......@@ -29,7 +29,7 @@
# Install the python post requirements into {{ ora_ease_venv_dir }}
- name: install ease python post-requirements
pip: >
requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
requirements="{{ ora_ease_post_requirements_file }}" virtualenv="{{ ora_ease_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ ora_user }}"
notify:
......
......@@ -35,14 +35,14 @@
- "{{ ora_app_dir }}/ml_models"
- name: install debian packages that ora needs
apt: pkg={{item}} state=present
apt: pkg={{ item }} state=present
notify:
- restart ora
- restart ora_celery
with_items: ora_debian_pkgs
- name: install debian packages for ease that ora needs
apt: pkg={{item}} state=present
apt: pkg={{ item }} state=present
notify:
- restart ora
- restart ora_celery
......
[program:ora]
command={{ ora_venv_bin }}/gunicorn --preload -b {{ ora_gunicorn_host }}:{{ ora_gunicorn_port }} -w {{ ora_gunicorn_workers }} --timeout=90 --pythonpath={{ ora_code_dir}} edx_ora.wsgi
command={{ ora_venv_bin }}/gunicorn --preload -b {{ ora_gunicorn_host }}:{{ ora_gunicorn_port }} -w {{ ora_gunicorn_workers }} --timeout=90 --pythonpath={{ ora_code_dir }} {{ ORA_GUNICORN_EXTRA }} edx_ora.wsgi
user={{ common_web_user }}
directory={{ ora_code_dir }}
......
......@@ -15,9 +15,9 @@
- name: download Oracle Java
shell: >
curl -b gpw_e24=http%3A%2F%2Fwww.oracle.com -b oraclelicense=accept-securebackup-cookie -O -L {{ oraclejdk_url }}
executable=/bin/bash
chdir=/var/tmp
creates=/var/tmp/{{ oraclejdk_file }}
executable=/bin/bash
chdir=/var/tmp
creates=/var/tmp/{{ oraclejdk_file }}
- name: create jvm dir
file: >
......
export JAVA_HOME="{{oraclejdk_link}}"
export JAVA_HOME="{{ oraclejdk_link }}"
export PATH=$JAVA_HOME/bin:$PATH
......@@ -43,9 +43,9 @@ rabbitmq_debian_pkgs:
rabbitmq_config_dir: "/etc/rabbitmq"
rabbitmq_cookie_dir: "/var/lib/rabbitmq"
rabbitmq_cookie_location: "{{rabbitmq_cookie_dir}}/.erlang.cookie"
rabbitmq_cookie_location: "{{ rabbitmq_cookie_dir }}/.erlang.cookie"
rabbitmq_mnesia_folder: "{{rabbitmq_cookie_dir}}/mnesia"
rabbitmq_mnesia_folder: "{{ rabbitmq_cookie_dir }}/mnesia"
rabbitmq_port: 5672
rabbitmq_management_port: 15672
......@@ -53,8 +53,8 @@ rabbitmq_ip: "{{ ansible_default_ipv4.address }}"
# Structure for auth config file.
rabbitmq_auth_config:
erlang_cookie: $RABBIT_ERLANG_COOKIE
admins: $RABBIT_USERS
erlang_cookie: "{{ RABBIT_ERLANG_COOKIE }}"
admins: "{{ RABBIT_USERS }}"
rabbitmq_clustered_hosts: []
......
......@@ -4,13 +4,13 @@
# http://rabbitmq.1065348.n5.nabble.com/Rabbitmq-boot-failure-with-quot-tables-not-present-quot-td24494.html
- name: trust rabbit repository
apt_key: url={{rabbitmq_apt_key}} state=present
apt_key: url={{ rabbitmq_apt_key }} state=present
- name: install python-software-properties if debian
apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present
apt: pkg={{ ",".join(rabbitmq_debian_pkgs) }} state=present
- name: add rabbit repository
apt_repository_1.8: repo="{{rabbitmq_repository}}" state=present update_cache=yes validate_certs=no
apt_repository_1.8: repo="{{ rabbitmq_repository }}" state=present update_cache=yes validate_certs=no
- name: fetch the rabbitmq server deb
get_url: >
......@@ -63,30 +63,30 @@
# Defaulting to /var/lib/rabbitmq
- name: create cookie directory
file: >
path={{rabbitmq_cookie_dir}}
path={{ rabbitmq_cookie_dir }}
owner=rabbitmq group=rabbitmq mode=0755 state=directory
- name: add rabbitmq erlang cookie
template: >
src=erlang.cookie.j2 dest={{rabbitmq_cookie_location}}
src=erlang.cookie.j2 dest={{ rabbitmq_cookie_location }}
owner=rabbitmq group=rabbitmq mode=0400
register: erlang_cookie
# Defaulting to /etc/rabbitmq
- name: create rabbitmq config directory
file: >
path={{rabbitmq_config_dir}}
path={{ rabbitmq_config_dir }}
owner=root group=root mode=0755 state=directory
- name: add rabbitmq environment configuration
template: >
src=rabbitmq-env.conf.j2 dest={{rabbitmq_config_dir}}/rabbitmq-env.conf
src=rabbitmq-env.conf.j2 dest={{ rabbitmq_config_dir }}/rabbitmq-env.conf
owner=root group=root mode=0644
- name: add rabbitmq cluster configuration
template: >
src=etc/rabbitmq/rabbitmq.config.j2
dest={{rabbitmq_config_dir}}/rabbitmq.config
dest={{ rabbitmq_config_dir }}/rabbitmq.config
owner=root group=root mode=0644
register: cluster_configuration
......@@ -98,7 +98,7 @@
# This folder should be deleted before clustering is setup because it retains data
# that can conflict with the clustering information.
- name: remove mnesia configuration
file: path={{rabbitmq_mnesia_folder}} state=absent
file: path={{ rabbitmq_mnesia_folder }} state=absent
when: erlang_cookie.changed or cluster_configuration.changed or rabbitmq_refresh
- name: start rabbit nodes
......@@ -124,7 +124,7 @@
configure_priv='.*' tags="administrator" state=present
vhost={{ item[1] }}
with_nested:
- ${rabbitmq_auth_config.admins}
- "{{rabbitmq_auth_config.admins}}"
- RABBITMQ_VHOSTS
when: "'admins' in rabbitmq_auth_config"
tags:
......
% {{ ansible_managed }}
[{rabbit, [
{log_levels, [{connection, info}]},
{% if RABBITMQ_CLUSTERED -%}
{%- set hosts= [] -%}
......@@ -7,14 +9,12 @@
{% do hosts.append("rabbit@ip-" + host.replace('.','-')) %}
{%- endfor %}
[{rabbit,
[{cluster_nodes, {['{{ hosts|join("\',\'") }}'], disc}}]}].
{cluster_nodes, {['{{ hosts|join("\',\'") }}'], disc}}
{%- else -%}
{# If rabbitmq_clustered_hosts is set, use that instead assuming an aws stack.
Note: That these names should include the node name prefix. eg. 'rabbit@hostname'
#}
[{rabbit,
[{cluster_nodes, {['{{ rabbitmq_clustered_hosts|join("\',\'") }}'], disc}}]}].
{%- endif -%}
{cluster_nodes, {['{{ rabbitmq_clustered_hosts|join("\',\'") }}'], disc}}
{%- endif %}
]}].
---
#
# edX Configuration
#
# github:     https://github.com/edx/configuration
# wiki:       https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license:    https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role security
#
#
# vars are namespaced with the module name.
#
security_role_name: security
# set to true to enable unattended upgrades nightly
SECURITY_UNATTENDED_UPGRADES: false
# set to true to upgrade all packages nightly. false will only upgrade from security repo.
SECURITY_UPDATE_ALL_PACKAGES: false
# set to true to run aptitude safe-upgrade whenever ansible is run
SECURITY_UPGRADE_ON_ANSIBLE: false
#
# OS packages
#
# aptitude backs the safe-upgrade task; unattended-upgrades provides the
# nightly upgrade machinery configured by this role's apt.conf.d templates.
security_debian_pkgs:
  - aptitude
  - unattended-upgrades
# NOTE(review): no Red Hat support yet -- intentionally empty.
security_redhat_pkgs: []
---
#
# edX Configuration
#
# github:     https://github.com/edx/configuration
# wiki:       https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license:    https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role security
#
# Overview:
# Entry point for the security role.  Dispatches to the OS-specific task
# file; only Ubuntu is handled (security-ubuntu.yml), other distributions
# are silently skipped.
#
# Dependencies:
# Role defaults (security_debian_pkgs and the SECURITY_* toggles).
#
# Example play:
#
#   - name: Apply security role
#     hosts: all
#     sudo: yes
#     roles:
#       - security
#
- include: security-ubuntu.yml
  when:
    - ansible_distribution == 'Ubuntu'
#### Enable periodic security updates

# Install aptitude and unattended-upgrades (see security_debian_pkgs in defaults).
- name: install security packages
  apt: name={{ item }} state=latest
  with_items: security_debian_pkgs

# Safe-upgrade every package on each ansible run when the toggle is on.
- name: update all system packages
  apt: upgrade=safe
  when: SECURITY_UPGRADE_ON_ANSIBLE

# 10periodic enables the nightly unattended-upgrade apt cron run.
- name: configure periodic unattended-upgrades
  template: >
    src=etc/apt/apt.conf.d/10periodic
    dest=/etc/apt/apt.conf.d/10periodic
    owner=root group=root mode=0644
  when: SECURITY_UNATTENDED_UPGRADES

# Removing 10periodic disables the nightly run entirely.
- name: disable unattended-upgrades
  file: path=/etc/apt/apt.conf.d/10periodic state=absent
  when: not SECURITY_UNATTENDED_UPGRADES

# 20unattended-upgrade restricts unattended upgrades to the security repo.
# Only laid down when nightly upgrades are on but "upgrade everything" is off.
- name: only unattended-upgrade from security repo
  template: >
    src=etc/apt/apt.conf.d/20unattended-upgrade
    dest=/etc/apt/apt.conf.d/20unattended-upgrade
    owner=root group=root mode=0644
  when: SECURITY_UNATTENDED_UPGRADES and not SECURITY_UPDATE_ALL_PACKAGES

# Drop the restriction when all packages should be upgraded, or when
# unattended upgrades are off altogether.
- name: disable security only updates on unattended-upgrades
  file: path=/etc/apt/apt.conf.d/20unattended-upgrade state=absent
  when: SECURITY_UPDATE_ALL_PACKAGES or not SECURITY_UNATTENDED_UPGRADES

#### Bash security vulnerability
# Shellshock-style probe: export a function-valued env var; a vulnerable
# bash executes the trailing body and echoes "vulnerable".
- name: Check if we are vulnerable
  shell: executable=/bin/bash chdir=/tmp foo='() { echo vulnerable; }' bash -c foo
  register: test_vuln
  ignore_errors: yes

- name: Apply bash security update if we are vulnerable
  apt: name=bash state=latest update_cache=true
  when: "'vulnerable' in test_vuln.stdout"

# Re-run the probe after the upgrade and hard-fail if bash is still exploitable.
- name: Check again and fail if we are still vulnerable
  shell: executable=/bin/bash foo='() { echo vulnerable; }' bash -c foo
  when: "'vulnerable' in test_vuln.stdout"
  register: test_vuln
  failed_when: "'vulnerable' in test_vuln.stdout"
// Nightly APT maintenance: refresh package lists, pre-download upgradable
// packages, and run unattended-upgrade every day ("1" = daily interval).
APT::Periodic::Enable "1";
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
// Purge obsolete files from the package cache every 7 days.
APT::Periodic::AutocleanInterval "7";
APT::Periodic::Unattended-Upgrade "1";
// Restrict unattended upgrades to the distribution's security repository
// (this file is only installed when SECURITY_UPDATE_ALL_PACKAGES is off).
Unattended-Upgrade::Allowed-Origins {
	"${distro_id} ${distro_codename}-security";
};
......@@ -2,7 +2,7 @@
---
- name: Installs shib and dependencies from apt
apt: pkg={{item}} install_recommends=no state=present update_cache=yes
apt: pkg={{ item }} install_recommends=no state=present update_cache=yes
with_items:
- shibboleth-sp2-schemas
- libshibsp-dev
......@@ -24,14 +24,14 @@
when: shib_download_metadata
- name: writes out key and pem file
template: src=sp.{{item}}.j2 dest=/etc/shibboleth/sp.{{item}} group=_shibd owner=_shibd mode=0600
template: src=sp.{{ item }}.j2 dest=/etc/shibboleth/sp.{{ item }} group=_shibd owner=_shibd mode=0600
with_items:
- key
- pem
notify: restart shibd
- name: writes out configuration files
template: src={{ shib_template_dir }}/{{item}}.j2 dest=/etc/shibboleth/{{item}} group=_shibd owner=_shibd mode=0644
template: src={{ shib_template_dir }}/{{ item }}.j2 dest=/etc/shibboleth/{{ item }} group=_shibd owner=_shibd mode=0644
with_items:
- attribute-map.xml
- shibboleth2.xml
......
......@@ -28,23 +28,23 @@ SPLUNKFORWARDER_SERVERS:
SPLUNKFORWARDER_LOG_ITEMS:
- source: '{{ COMMON_LOG_DIR }}/lms'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
sourcetype: 'edx'
- source: '{{ COMMON_LOG_DIR }}/cms'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
sourcetype: 'edx'
- source: '{{ COMMON_LOG_DIR }}'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
sourcetype: 'syslog'
- source: '/var/log'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
sourcetype: 'syslog'
- source: '{{ COMMON_LOG_DIR }}/nginx'
recursive: true
index: '{{COMMON_ENVIRONMENT}}-{{COMMON_DEPLOYMENT}}'
index: '{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}'
sourcetype: 'nginx'
#
......
......@@ -31,12 +31,12 @@
- name: download the splunk deb
get_url: >
dest="/tmp/{{SPLUNKFORWARDER_DEB}}"
url="{{SPLUNKFORWARDER_PACKAGE_URL}}"
dest="/tmp/{{ SPLUNKFORWARDER_DEB }}"
url="{{ SPLUNKFORWARDER_PACKAGE_URL }}"
register: download_deb
- name: install splunk forwarder
shell: gdebi -nq /tmp/{{SPLUNKFORWARDER_DEB}}
shell: gdebi -nq /tmp/{{ SPLUNKFORWARDER_DEB }}
when: download_deb.changed
# Create splunk user
......@@ -49,27 +49,27 @@
# to run some of the below commands.
- name: start splunk manually
shell: >
{{splunkforwarder_output_dir}}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{splunkforwarder_output_dir}}/var/lib/splunk
{{ splunkforwarder_output_dir }}/bin/splunk start --accept-license --answer-yes --no-prompt
creates={{ splunkforwarder_output_dir }}/var/lib/splunk
when: download_deb.changed
register: started_manually
- name: stop splunk manually
shell: >
{{splunkforwarder_output_dir}}/bin/splunk stop --accept-license --answer-yes --no-prompt
{{ splunkforwarder_output_dir }}/bin/splunk stop --accept-license --answer-yes --no-prompt
when: download_deb.changed and started_manually.changed
- name: create boot script
shell: >
{{splunkforwarder_output_dir}}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk
{{ splunkforwarder_output_dir }}/bin/splunk enable boot-start -user splunk --accept-license --answer-yes --no-prompt
creates=/etc/init.d/splunk
register: create_boot_script
when: download_deb.changed
notify: restart splunkforwarder
# Update credentials
- name: update admin pasword
shell: "{{splunkforwarder_output_dir}}/bin/splunk edit user admin -password {{SPLUNKFORWARDER_PASSWORD}} -auth admin:changeme --accept-license --answer-yes --no-prompt"
shell: "{{ splunkforwarder_output_dir }}/bin/splunk edit user admin -password {{ SPLUNKFORWARDER_PASSWORD }} -auth admin:changeme --accept-license --answer-yes --no-prompt"
when: download_deb.changed
notify: restart splunkforwarder
......@@ -80,7 +80,7 @@
# Ensure permissions on splunk content
- name: ensure splunk forder permissions
file: path={{splunkforwarder_output_dir}} state=directory recurse=yes owner=splunk group=splunk
file: path={{ splunkforwarder_output_dir }} state=directory recurse=yes owner=splunk group=splunk
when: download_deb.changed
notify: restart splunkforwarder
......
......@@ -96,6 +96,16 @@ if __name__ == '__main__':
instance_id = get_instance_metadata()['instance-id']
prefix = instance_id
ec2 = boto.connect_ec2()
reservations = ec2.get_all_instances(instance_ids=[instance_id])
instance = reservations[0].instances[0]
if instance.instance_profile['arn'].endswith('/abbey'):
print("Running an abbey build. Not starting any services.")
# Needs to exit with 1 instead of 0 to prevent
# services from starting.
exit(1)
try:
environment, deployment, play = edp_for_instance(instance_id)
prefix = "{environment}-{deployment}-{play}-{instance_id}".format(
......@@ -103,6 +113,10 @@ if __name__ == '__main__':
deployment=deployment,
play=play,
instance_id=instance_id)
except:
print("Failed to get EDP for {}".format(instance_id))
try:
for service in services_for_instance(instance_id):
if service in MIGRATION_COMMANDS:
# Do extra migration related stuff.
......@@ -144,6 +158,7 @@ if __name__ == '__main__':
print(msg)
if notify:
notify(msg)
traceback.print_exc()
else:
msg = "{}: {}".format(prefix, " | ".join(report))
print(msg)
......
......@@ -97,13 +97,13 @@
- name: install supervisor in its venv
pip: >
name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
name=supervisor virtualenv="{{ supervisor_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ supervisor_user }}"
- name: install supervisor in its venv
pip: >
name={{ item }} virtualenv="{{supervisor_venv_dir}}" state=present
name={{ item }} virtualenv="{{ supervisor_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ supervisor_user }}"
with_items: supervisor_pip_pkgs
......@@ -156,7 +156,7 @@
- name: start supervisor
service: >
name={{supervisor_service}}
name={{ supervisor_service }}
state=started
register: start_supervisor
......
......@@ -5,4 +5,4 @@ task
setuid {{ supervisor_user }}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{supervisor_available_dir}} --enabled={{supervisor_cfg_dir}} {% if SUPERVISOR_HIPCHAT_API_KEY is defined %}--hipchat-api-key {{ SUPERVISOR_HIPCHAT_API_KEY }} --hipchat-room {{ SUPERVISOR_HIPCHAT_ROOM }} {% endif %} {% if edxapp_code_dir is defined %}--edxapp-python {{ COMMON_BIN_DIR }}/python.edxapp --edxapp-code-dir {{ edxapp_code_dir }}{% endif %} {% if xqueue_code_dir is defined %}--xqueue-code-dir {{ xqueue_code_dir }} --xqueue-python {{ COMMON_BIN_DIR }}/python.xqueue {% endif %}
exec {{ supervisor_venv_dir }}/bin/python {{ supervisor_app_dir }}/pre_supervisor_checks.py --available={{ supervisor_available_dir }} --enabled={{ supervisor_cfg_dir }} {% if SUPERVISOR_HIPCHAT_API_KEY is defined %}--hipchat-api-key {{ SUPERVISOR_HIPCHAT_API_KEY }} --hipchat-room {{ SUPERVISOR_HIPCHAT_ROOM }} {% endif %} {% if edxapp_code_dir is defined %}--edxapp-python {{ COMMON_BIN_DIR }}/python.edxapp --edxapp-code-dir {{ edxapp_code_dir }}{% endif %} {% if xqueue_code_dir is defined %}--xqueue-code-dir {{ xqueue_code_dir }} --xqueue-python {{ COMMON_BIN_DIR }}/python.xqueue {% endif %}
......@@ -35,11 +35,14 @@ paver test_system -t cms/djangoapps/course_creators/tests/test_views.py
paver test_js_run -s xmodule
# Run some of the bok-choy tests
paver test_bokchoy -t test_lms.py:RegistrationTest
paver test_bokchoy -t lms/test_lms.py:RegistrationTest
paver test_bokchoy -t discussion/test_discussion.py:DiscussionTabSingleThreadTest --fasttest
paver test_bokchoy -t studio/test_studio_with_ora_component.py:ORAComponentTest --fasttest
paver test_bokchoy -t lms/test_lms_matlab_problem.py:MatlabProblemTest --fasttest
# Run some of the lettuce acceptance tests
# paver test_acceptance -s lms --extra_args="lms/djangoapps/courseware/features/problems.feature"
# paver test_acceptance -s cms --extra_args="cms/djangoapps/contentstore/features/html-editor.feature"
paver test_acceptance -s lms --extra_args="lms/djangoapps/courseware/features/problems.feature -s 1"
paver test_acceptance -s cms --extra_args="cms/djangoapps/contentstore/features/html-editor.feature -s 1"
# Generate quality reports
paver run_quality
......@@ -15,7 +15,7 @@
- name: Untar the test courses
command: >
tar zxf {{ item.path|basename }}
chdir=/var/tmp/{{ item.path|basename }}
chdir=/var/tmp/{{ item.path|basename }}
with_items: TESTCOURSES_EXPORTS
sudo_user: "{{ common_web_user }}"
......
......@@ -111,9 +111,9 @@
# authorized_keys2 used here so that personal
# keys can be copied to authorized_keys
# force is set to yes here, otherwise the keys
# won't update if they haven't changed on the github
# side
# 2014/10/14 - using curl instead of get_url because
# get_url was failing due to certificate verification errors
- name: copy github key[s] to .ssh/authorized_keys2
shell: >
curl https://github.com/{{ item.name }}.keys -o /home/{{ item.name }}/.ssh/authorized_keys2
......
......@@ -2,6 +2,8 @@
# when the role is included
---
XQUEUE_NGINX_PORT: 18040
XQUEUE_GUNICORN_WORKERS_EXTRA: ""
XQUEUE_GUNICORN_EXTRA: ""
XQUEUE_QUEUES:
# push queue
'edX-Open_DemoX': 'http://localhost:18050'
......@@ -9,6 +11,7 @@ XQUEUE_QUEUES:
'test-pull': !!null
'certificates': !!null
'open-ended': !!null
'open-ended-message': !!null
XQUEUE_LOGGING_ENV: sandbox
XQUEUE_SYSLOG_SERVER: 'localhost'
XQUEUE_S3_BUCKET : 'sandbox-bucket'
......@@ -53,33 +56,33 @@ xqueue_gunicorn_port: 8040
xqueue_gunicorn_host: 127.0.0.1
xqueue_env_config:
XQUEUES: $XQUEUE_QUEUES
XQUEUES: "{{ XQUEUE_QUEUES }}"
XQUEUE_WORKERS_PER_QUEUE: $XQUEUE_WORKERS_PER_QUEUE
LOGGING_ENV : $XQUEUE_LOGGING_ENV
SYSLOG_SERVER: $XQUEUE_SYSLOG_SERVER
LOG_DIR : "{{ COMMON_DATA_DIR }}/logs/xqueue"
RABBIT_HOST : $XQUEUE_RABBITMQ_HOSTNAME
S3_BUCKET : $XQUEUE_S3_BUCKET
S3_PATH_PREFIX: $XQUEUE_S3_PATH_PREFIX
LOCAL_LOGLEVEL: $XQUEUE_LOCAL_LOGLEVEL
LOGGING_ENV: "{{ XQUEUE_LOGGING_ENV }}"
SYSLOG_SERVER: "{{ XQUEUE_SYSLOG_SERVER }}"
LOG_DIR: "{{ COMMON_DATA_DIR }}/logs/xqueue"
RABBIT_HOST: "{{ XQUEUE_RABBITMQ_HOSTNAME }}"
S3_BUCKET: "{{ XQUEUE_S3_BUCKET }}"
S3_PATH_PREFIX: "{{ XQUEUE_S3_PATH_PREFIX }}"
LOCAL_LOGLEVEL: "{{ XQUEUE_LOCAL_LOGLEVEL }}"
xqueue_auth_config:
AWS_ACCESS_KEY_ID: $XQUEUE_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $XQUEUE_AWS_SECRET_ACCESS_KEY
REQUESTS_BASIC_AUTH: [$XQUEUE_BASIC_AUTH_USER, $XQUEUE_BASIC_AUTH_PASSWORD]
USERS: $XQUEUE_DJANGO_USERS
AWS_ACCESS_KEY_ID: "{{ XQUEUE_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "{{ XQUEUE_AWS_SECRET_ACCESS_KEY }}"
REQUESTS_BASIC_AUTH: ["{{ XQUEUE_BASIC_AUTH_USER }}", "{{XQUEUE_BASIC_AUTH_PASSWORD}}"]
USERS: "{{ XQUEUE_DJANGO_USERS }}"
DATABASES:
default:
ENGINE: "django.db.backends.mysql"
NAME: $XQUEUE_MYSQL_DB_NAME
USER: $XQUEUE_MYSQL_USER
PASSWORD: $XQUEUE_MYSQL_PASSWORD
HOST: $XQUEUE_MYSQL_HOST
PORT: $XQUEUE_MYSQL_PORT
RABBITMQ_USER: $XQUEUE_RABBITMQ_USER
RABBITMQ_PASS: $XQUEUE_RABBITMQ_PASS
NAME: "{{ XQUEUE_MYSQL_DB_NAME }}"
USER: "{{ XQUEUE_MYSQL_USER }}"
PASSWORD: "{{ XQUEUE_MYSQL_PASSWORD }}"
HOST: "{{ XQUEUE_MYSQL_HOST }}"
PORT: "{{ XQUEUE_MYSQL_PORT }}"
RABBITMQ_USER: "{{ XQUEUE_RABBITMQ_USER }}"
RABBITMQ_PASS: "{{ XQUEUE_RABBITMQ_PASS }}"
xqueue_source_repo: https://github.com/edx/xqueue.git
xqueue_source_repo: "https://github.com/edx/xqueue.git"
xqueue_version: 'HEAD'
xqueue_pre_requirements_file: "{{ xqueue_code_dir }}/pre-requirements.txt"
xqueue_post_requirements_file: "{{ xqueue_code_dir }}/requirements.txt"
......
......@@ -41,7 +41,7 @@
- name : install python pre-requirements
pip: >
requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w"
sudo_user: "{{ xqueue_user }}"
notify:
- restart xqueue
......@@ -50,7 +50,7 @@
- name : install python post-requirements
pip: >
requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w"
sudo_user: "{{ xqueue_user }}"
notify:
- restart xqueue
......
......@@ -2,7 +2,7 @@
# - group_vars/all
# - common/tasks/main.yml
---
# Check out xqueue repo to {{xqueue_code_dir}}
# Check out xqueue repo to {{ xqueue_code_dir }}
#
#
......
......@@ -7,12 +7,12 @@
{% endif %}
{% if XQUEUE_WORKERS -%}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ XQUEUE_WORKERS }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ XQUEUE_WORKERS }} --timeout=300 --pythonpath={{ xqueue_code_dir }} {{ XQUEUE_GUNICORN_WORKERS_EXTRA }} xqueue.wsgi
{% else -%}
{% if ansible_processor|length > 0 %}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w {{ ansible_processor|length * 2 }} --timeout=300 --pythonpath={{ xqueue_code_dir }} {{ XQUEUE_GUNICORN_EXTRA }} xqueue.wsgi
{% else -%}
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} xqueue.wsgi
command={{ executable }} --preload -b {{ xqueue_gunicorn_host }}:{{ xqueue_gunicorn_port }} -w 2 --timeout=300 --pythonpath={{ xqueue_code_dir }} {{ XQUEUE_GUNICORN_EXTRA }} xqueue.wsgi
{% endif -%}
{% endif -%}
......
[program:xqueue_consumer]
command={{xqueue_venv_bin}}/django-admin.py run_consumer --pythonpath={{xqueue_code_dir}} --settings=xqueue.aws_settings $WORKERS_PER_QUEUE
command={{ xqueue_venv_bin }}/django-admin.py run_consumer --pythonpath={{ xqueue_code_dir }} --settings=xqueue.aws_settings $WORKERS_PER_QUEUE
user={{ common_web_user }}
directory={{ xqueue_code_dir }}
......
......@@ -3,6 +3,7 @@
XSERVER_NGINX_PORT: 18050
XSERVER_GUNICORN_EXTRA: ""
XSERVER_RUN_URL: ''
XSERVER_LOGGING_ENV: 'sandbox'
XSERVER_SYSLOG_SERVER: ''
......@@ -31,11 +32,11 @@ xserver_grader_root: "{{ XSERVER_GRADER_DIR }}/graders"
xserver_git_identity: "{{ xserver_app_dir }}/xserver-identity"
xserver_env_config:
RUN_URL: $XSERVER_RUN_URL
GRADER_ROOT: $xserver_grader_root
LOGGING_ENV: $XSERVER_LOGGING_ENV
RUN_URL: "{{ XSERVER_RUN_URL }}"
GRADER_ROOT: "{{ xserver_grader_root }}"
LOGGING_ENV: "{{ XSERVER_LOGGING_ENV }}"
LOG_DIR: "{{ xserver_log_dir }}"
SYSLOG_SERVER: $XSERVER_SYSLOG_SERVER
SYSLOG_SERVER: "{{ XSERVER_SYSLOG_SERVER }}"
SANDBOX_PYTHON: '{{ xserver_venv_sandbox_dir }}/bin/python'
xserver_source_repo: "git://github.com/edx/xserver.git"
......
......@@ -13,7 +13,7 @@
- name: checkout code
git: >
dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
dest={{ xserver_code_dir }} repo={{ xserver_source_repo }} version={{xserver_version}}
accept_hostkey=yes
sudo_user: "{{ xserver_user }}"
register: xserver_checkout
......@@ -21,14 +21,14 @@
- name: install requirements
pip: >
requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
requirements="{{ xserver_requirements_file }}" virtualenv="{{ xserver_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ xserver_user }}"
notify: restart xserver
- name: install sandbox requirements
pip: >
requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present
requirements="{{ xserver_requirements_file }}" virtualenv="{{ xserver_venv_sandbox_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ xserver_user }}"
notify: restart xserver
......
www-data ALL=({{ xserver_sandbox_user }}) NOPASSWD:{{xserver_venv_sandbox_dir}}/bin/python
www-data ALL=({{ xserver_sandbox_user }}) NOPASSWD:{{ xserver_venv_sandbox_dir }}/bin/python
[program:xserver]
command={{ xserver_venv_bin }}/gunicorn --preload -b {{ xserver_gunicorn_host }}:{{ xserver_gunicorn_port }} -w {{ xserver_gunicorn_workers }} --timeout=30 --pythonpath={{ xserver_code_dir }} pyxserver_wsgi:application
command={{ xserver_venv_bin }}/gunicorn --preload -b {{ xserver_gunicorn_host }}:{{ xserver_gunicorn_port }} -w {{ xserver_gunicorn_workers }} --timeout=30 --pythonpath={{ xserver_code_dir }} {{ XSERVER_GUNICORN_EXTRA }} pyxserver_wsgi:application
user={{ common_web_user }}
directory={{ xserver_code_dir }}
......
- name: Apply security role
  hosts: all
  sudo: yes
  roles:
    - security

# Bring up the clustered stack on every host.  Cluster membership is
# hard-coded to the three vagrant hosts cluster1-3.
- name: Configure group cluster
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    vagrant_cluster: yes
    mongo_cluster_members:
      - "cluster1"
      - "cluster2"
      - "cluster3"
    MONGO_CLUSTERED: yes
    MONGO_CLUSTER_KEY: 'password'
    # user/db creation is deferred to the final play (see note below)
    mongo_create_users: no
    ELASTICSEARCH_CLUSTERED: yes
    MARIADB_CLUSTERED: yes
    MARIADB_CREATE_DBS: no
  vars_files:
    - "group_vars/all"
  roles:
    - user
    - mongo
    - oraclejdk
    - elasticsearch
    - mariadb
    - edx_ansible

# Rabbit needs to be built serially (serial: 1 -- one host at a time)
- name: Configure group cluster serial roles
  hosts: all
  sudo: True
  serial: 1
  gather_facts: True
  vars:
    rabbitmq_clustered_hosts:
      - "rabbit@cluster1"
      - "rabbit@cluster2"
      - "rabbit@cluster3"
    rabbitmq_ip: ""
  vars_files:
    - "group_vars/all"
  roles:
    - rabbitmq

# Mongo user doesn't handle slaves gracefully when
# creating users and there are race conditions
# in MariaDB occasionally so this play will work
# but will also show as failed
- name: Configure group with tasks that will always fail
  hosts: all
  sudo: True
  gather_facts: True
  vars:
    mongo_cluster_members:
      - "cluster1"
      - "cluster2"
      - "cluster3"
    MONGO_CLUSTERED: yes
    MONGO_CLUSTER_KEY: 'password'
    mongo_create_users: yes
    RABBITMQ_CLUSTERED: yes
    MARIADB_CLUSTERED: yes
    MARIADB_CREATE_DBS: yes
  vars_files:
    - "group_vars/all"
    - "roles/analytics-api/defaults/main.yml"
  roles:
    - mongo
    - mariadb
......@@ -7,7 +7,6 @@
openid_workaround: true
devstack: true
disable_edx_services: true
edx_platform_version: 'master'
mongo_enable_journal: false
EDXAPP_NO_PREREQ_INSTALL: 0
COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
......
......@@ -5,11 +5,16 @@
vars:
migrate_db: 'yes'
openid_workaround: true
edx_platform_version: 'master'
EDXAPP_LMS_NGINX_PORT: '80'
EDX_ANSIBLE_DUMP_VARS: true
CERTS_DOWNLOAD_URL: 'http://192.168.33.10:18090'
CERTS_VERIFY_URL: 'http://192.168.33.10:18090'
# used for releases
edx_platform_version: '{{ OPENEDX_RELEASE | default("master") }}'
ora2_version: '{{ OPENEDX_RELEASE | default("master") }}'
certs_version: '{{ OPENEDX_RELEASE | default("master") }}'
forum_version: '{{ OPENEDX_RELEASE | default("master") }}'
xqueue_version: '{{ OPENEDX_RELEASE | default("master") }}'
vars_files:
- "group_vars/all"
roles:
......@@ -19,7 +24,6 @@
nginx_sites:
- cms
- lms
- ora
- forum
- xqueue
- certs
......@@ -36,7 +40,6 @@
- elasticsearch
- forum
- { role: "xqueue", update_users: True }
- ora
- certs
- role: analytics-api
when: ANALYTICS_API_GIT_IDENTITY
......
ansible==1.5.5
PyYAML==3.11
Jinja2==2.7.2
Jinja2==2.7.3
MarkupSafe==0.23
argparse==1.2.1
boto==2.29.1
ecdsa==0.11
paramiko==1.14.0
paramiko==1.15.1
pycrypto==2.6.1
wsgiref==0.1.2
docopt==0.6.1
python-simple-hipchat==0.2
prettytable==0.7.2
awscli==1.4.2
......@@ -174,6 +174,7 @@ EDXAPP_NEWRELIC_LMS_APPNAME: sandbox-${dns_name}-edxapp-lms
EDXAPP_NEWRELIC_CMS_APPNAME: sandbox-${dns_name}-edxapp-cms
XQUEUE_NEWRELIC_APPNAME: sandbox-${dns_name}-xqueue
FORUM_NEW_RELIC_APP_NAME: sandbox-${dns_name}-forums
SANDBOX_USERNAME: $github_username
EOF
fi
......
#!/bin/bash
# Jenkins job: add or remove users from a course's certificate whitelist.
# Expects job parameters in the environment:
#   $environment, $deployment -- select the worker host via the ec2 inventory
#   $addremove                -- "add" or "remove"
#   $username                 -- one or more whitespace-separated usernames
#   $course_id                -- course to whitelist for
cd configuration
pip install -r requirements.txt
env
ansible="ansible first_in_tag_Name_${environment}-${deployment}-worker -i playbooks/ec2.py -u ubuntu -s -U www-data -a"
manage="/edx/bin/python.edxapp /edx/bin/manage.edxapp lms --settings aws cert_whitelist"
# Iterate the usernames directly.  The original wrote $username to a
# world-readable /tmp/username.txt and read it back with $(cat ...);
# plain word-splitting of $username yields the same tokens without the
# temp-file race and leftover-file cleanup.
if [ "$addremove" = "add" ]; then
    for x in $username; do
        echo "Adding $x"
        $ansible "$manage --add $x -c $course_id"
    done
elif [ "$addremove" = "remove" ]; then
    for x in $username; do
        echo "Removing $x"
        $ansible "$manage --del $x -c $course_id"
    done
fi
# Jenkins job: bulk-move enrollments for a whole course from one mode to
# another via the change_enrollment management command on a worker host.
cd configuration
pip install -r requirements.txt
env
# Honor the dry-run checkbox by prepending --noop to the command line.
noop_flag=""
if [ "$noop" = true ]; then
    noop_flag="--noop "
fi
ansible="ansible first_in_tag_Name_${environment}-${deployment}-worker -i playbooks/ec2.py -u ubuntu -s -U www-data -a"
manage="/edx/bin/python.edxapp /edx/bin/manage.edxapp lms change_enrollment --settings aws"
$ansible "$manage ${noop_flag}--course $course --to $to --from $from"
# Jenkins job: move a single user's enrollment in a course from one mode
# to another via the change_enrollment management command on a worker host.
cd configuration
pip install -r requirements.txt
env
# Honor the dry-run checkbox by prepending --noop to the command line.
noop_flag=""
if [ "$noop" = true ]; then
    noop_flag="--noop "
fi
ansible="ansible first_in_tag_Name_${environment}-${deployment}-worker -i playbooks/ec2.py -u ubuntu -s -U www-data -a"
manage="/edx/bin/python.edxapp /edx/bin/manage.edxapp lms change_enrollment --settings aws"
$ansible "$manage ${noop_flag}--course $course --user $name --to $to --from $from"
#!/usr/bin/env bash
set -x
if [[
-z $WORKSPACE ||
-z $environment ||
-z $deployment
]]; then
if [[ -z $WORKSPACE ]]; then
echo "Environment incorrect for this wrapper script"
env
exit 1
......@@ -13,7 +9,7 @@ fi
env
cd $WORKSPACE/edx-platform
cd "$WORKSPACE/edx-platform"
# install requirements
# These requirements will be installed into the shinginpanda
......@@ -27,23 +23,43 @@ pip install --exists-action w -r requirements/edx/repo.txt
pip install --exists-action w -r requirements/edx/github.txt
pip install --exists-action w -r requirements/edx/local.txt
cd $WORKSPACE/configuration/playbooks/edx-east
if [[ $openid_workaround == "true" ]]; then
sed -i -e 's/claimed_id = models.TextField(max_length=2047, unique=True/claimed_id = models.TextField(max_length=2047/' "$VIRTUAL_ENV/lib/python2.7/site-packages/django_openid_auth/models.py"
fi
cd "$WORKSPACE/configuration/playbooks/edx-east"
if [[ -f ${WORKSPACE}/configuration-secure/ansible/vars/${deployment}.yml ]]; then
extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${deployment}.yml"
fi
if [[ $db_dry_run=="false" ]]; then
# Set this to an empty string if db_dry_run is
if [[ -z $syncdb ]]; then
syncdb="false"
fi
if [[ $db_dry_run == "false" ]]; then
# Set this to an empty string if db_dry_run is
# not set. By default the db_dry_run var is
# set to --db-dry-run
extra_var_args+=" -e db_dry_run=''"
else
# always skip syncdb unless dry run is unchecked
syncdb="false"
fi
if [[ -f ${WORKSPACE}/configuration-secure/ansible/vars/${environment}-${deployment}.yml ]]; then
extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${environment}-${deployment}.yml"
fi
extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/${environment}-${deployment}.yml"
for extra_var in $extra_vars; do
extra_var_args+=" -e@${WORKSPACE}/configuration-secure/ansible/vars/$extra_var"
done
extra_var_args+=" -e edxapp_app_dir=${WORKSPACE}"
extra_var_args+=" -e edxapp_code_dir=${WORKSPACE}/edx-platform"
extra_var_args+=" -e edxapp_user=jenkins"
extra_var_args+=" -e syncdb=$syncdb"
# Generate the json configuration files
ansible-playbook -c local $extra_var_args --tags edxapp_cfg -i localhost, -s -U jenkins edxapp.yml
......
#!/bin/bash
# Jenkins job: run an arbitrary Django management command on a worker host.
# Expects job parameters in the environment:
#   $environment, $deployment -- select the worker host via the ec2 inventory
#   $service_variant          -- lms/cms, or "UNSET" for none
#   $help                     -- "true" to show the command's help instead
#   $command, $options        -- the management command and its arguments
cd configuration
pip install -r requirements.txt
env
ansible="ansible first_in_tag_Name_${environment}-${deployment}-worker -i playbooks/ec2.py -u ubuntu -s -U www-data -a"
manage="/edx/bin/python.edxapp ./manage.py chdir=/edx/app/edxapp/edx-platform"
# Append only the service variant here.  --settings aws is already added
# once on the final command line below; the original appended it in both
# places, passing a redundant duplicate flag for variant runs.
if [ "$service_variant" != "UNSET" ]; then
    manage="$manage $service_variant"
fi
if [ "$help" = "true" ]; then
    manage="$manage help"
fi
$ansible "$manage $command $options --settings aws"
#!/bin/bash
# Mirror-clone (or update) a git repo under /var/tmp and print the branches
# and tags matching a filter, newest-sorted, for the Jenkins dynamic-choice
# plugin to consume.

usage() {
    prog=$(basename "$0")
    cat<<EOF
    This will clone a repo and look for release
    candidate branches that will be returned as
    a sorted list in json to be
    parsed by the dynamic choice jenkins plugin

    Usage: $prog
                -v        add verbosity (set -x)
                -n        echo what will be done
                -h        this
                -r        repo to look in
                -f        filter string for branch list

    Example: $prog -r https://github.com/edx/edx-platform -f "rc/"
EOF
}

# NOTE: never 'shift' inside a getopts loop -- getopts tracks its position
# with OPTIND, and shifting the positional parameters underneath it makes
# later options (e.g. "-v -r repo") parse against the wrong words.  The
# original shifted in the v) and n) cases, corrupting option parsing.
while getopts "vnhr:f:" opt; do
    case $opt in
        v)
            set -x
            ;;
        h)
            usage
            exit 0
            ;;
        n)
            noop="echo Would have run: "
            ;;
        r)
            repo=$OPTARG
            ;;
        f)
            filter=$OPTARG
            ;;
    esac
done

if [[ -z $repo || -z $filter ]]; then
    echo 'Need to specify a filter and a repo'
    usage
    exit 1
fi

repo_basename=$(basename "$repo")
# Bail out rather than cloning into the wrong directory if the cd fails.
cd /var/tmp || exit 1
if [[ ! -d $repo_basename ]]; then
    $noop git clone "$repo" "$repo_basename" --mirror > /dev/null 2>&1
else
    $noop cd "/var/tmp/$repo_basename"
    # original had a redundant double "> /dev/null" redirect here
    $noop git fetch > /dev/null 2>&1
fi

cd "/var/tmp/$repo_basename" || exit 1
if [[ -z $noop ]]; then
    for branch in $(git branch -a | sort -r | tr -d ' ' | grep -E "$filter" ); do
        echo "origin/${branch}"
    done
    for tag in $(git tag -l | sort -r | tr -d ' ' | grep -E "$filter"); do
        echo "$tag"
    done
else
    echo "Would have checked for branches or tags using filter $filter"
fi
......@@ -4,13 +4,17 @@ cd configuration
pip install -r requirements.txt
env
ip=`python playbooks/ec2.py | jq -r '."tag_Name_prod-edx-worker"[0] | strings'`
ansible="ansible first_in_tag_Name_${environment}-${deployment}-worker -i playbooks/ec2.py -u ubuntu -s -U www-data -m shell -a"
manage="/edx/bin/python.edxapp /edx/bin/manage.edxapp lms --settings aws"
if [ "$report" = "true" ]; then
ssh ubuntu@$ip "cd /edx/app/edxapp/edx-platform && sudo -u www-data /edx/bin/python.edxapp ./manage.py lms gen_cert_report -c $course_id --settings aws"
$ansible "$manage gen_cert_report -c $course_id" | grep -A2 "Looking up certificate states for" | sed 's/rm:.*//'
elif [ "$regenerate" = "true" ] ; then
$ansible "$manage regenerate_user -c $course_id -u $username"
else
ssh ubuntu@$ip "cd /edx/app/edxapp/edx-platform && sudo -u www-data /edx/bin/python.edxapp ./manage.py lms ungenerated_certs -c $course_id --settings aws"
if [ ! -z "$force_certificate_state" ]; then
ssh ubuntu@$ip "cd /edx/app/edxapp/edx-platform && sudo -u www-data /edx/bin/python.edxapp ./manage.py lms ungenerated_certs -c $course_id -f $force_certificate_state --settings aws"
fi
if [ -n "$force_certificate_state" ]; then
$ansible "$manage ungenerated_certs -c $course_id -f $force_certificate_state && $manage gen_cert_report -c $course_id" | grep -A2 "Looking up certificate states for" | sed 's/rm:.*//'
else
$ansible "$manage ungenerated_certs -c $course_id && $manage gen_cert_report -c $course_id" | grep -A2 "Looking up certificate states for" | sed 's/rm:.*//'
fi
fi
#!/bin/bash
# Jenkins job: restart the xqueue supervisor program on every commoncluster
# host tagged for this environment/deployment.
cd configuration
pip install -r requirements.txt
env
target="tag_Name_${environment}-${deployment}-commoncluster"
ansible $target -i playbooks/ec2.py -u ubuntu -s -a "/edx/bin/supervisorctl restart xqueue"
#!/bin/bash
# Jenkins job: restart the xqueue_consumer supervisor program on every
# commoncluster host tagged for this environment/deployment.
cd configuration
pip install -r requirements.txt
env
target="tag_Name_${environment}-${deployment}-commoncluster"
ansible $target -i playbooks/ec2.py -u ubuntu -s -a "/edx/bin/supervisorctl restart xqueue_consumer"
#!/bin/bash
# Jenkins job: restart the xqwatcher supervisor program (via its dedicated
# supervisord instance) on every xqwatcher host for this environment/deployment.
cd configuration
pip install -r requirements.txt
env
target="tag_Name_${environment}-${deployment}-xqwatcher"
ansible $target -i playbooks/ec2.py -u ubuntu -s -a "/edx/app/xqwatcher/venvs/supervisor/bin/supervisorctl -c /edx/app/xqwatcher/supervisor/supervisord.conf restart xqwatcher"
......@@ -21,11 +21,11 @@
"inline": ["rm -rf {{user `playbook_remote_dir`}}"]
}, {
"type": "file",
"source": "../../../configuration/playbooks",
"source": "../../playbooks",
"destination": "{{user `playbook_remote_dir`}}"
}, {
"type": "file",
"source": "../../../configuration/requirements.txt",
"source": "../../requirements.txt",
"destination": "{{user `playbook_remote_dir`}}/requirements.txt"
}, {
"type": "shell",
......@@ -40,6 +40,7 @@
"type": "shell",
"inline": ["cd {{user `playbook_remote_dir`}}",
". packer-venv/bin/activate",
"pip install -q -U ansible==1.7.1",
"ansible-playbook run_role.yml -i inventory.ini -c local -e role=test_build_server -vvvv"]
}]
}
......@@ -18,7 +18,7 @@ except ImportError:
from pprint import pprint
AMI_TIMEOUT = 1800 # time to wait for AMIs to complete(30 minutes)
AMI_TIMEOUT = 2700 # time to wait for AMIs to complete(45 minutes)
EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition
EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks
NUM_TASKS = 5 # number of tasks for time summary report
......@@ -187,6 +187,19 @@ def create_instance_args():
'tag:aws:cloudformation:stack-name': stack_name,
'tag:play': args.play}
)
if len(subnet) < 1:
#
# try scheme for non-cloudformation builds
#
subnet = vpc.get_all_subnets(
filters={
'tag:cluster': args.play,
'tag:environment': args.environment,
'tag:deployment': args.deployment}
)
if len(subnet) < 1:
sys.stderr.write("ERROR: Expected at least one subnet, got {}\n".format(
len(subnet)))
......
......@@ -22,6 +22,11 @@ import json
import subprocess
from boto.sqs.message import RawMessage
import logging
import os
from distutils import spawn
class MissingHostError(Exception):
pass
class LifecycleHandler:
......@@ -30,18 +35,27 @@ class LifecycleHandler:
NUM_MESSAGES = 10
WAIT_TIME_SECONDS = 10
def __init__(self, profile, queue, hook, bin_directory, dry_run):
def __init__(self, profile, queue, hook, dry_run, bin_directory=None):
logging.basicConfig(level=logging.INFO)
self.profile = profile
self.queue = queue
self.hook = hook
self.bin_directory = bin_directory
self.profile = profile
if bin_directory:
os.environ["PATH"] = bin_directory + os.pathsep + os.environ["PATH"]
self.aws_bin = spawn.find_executable('aws')
self.python_bin = spawn.find_executable('python')
self.base_cli_command ="{python_bin} {aws_bin} --profile {profile} ".format(
python_bin=self.python_bin,
aws_bin=self.aws_bin,
profile=self.profile)
self.dry_run = dry_run
self.ec2 = boto.connect_ec2(profile_name=self.profile)
self.ec2_con = boto.connect_ec2()
self.sqs_con = boto.connect_sqs()
def process_lifecycle_messages(self):
sqs_con = boto.connect_sqs()
queue = sqs_con.get_queue(self.queue)
queue = self.sqs_con.get_queue(self.queue)
# Needed to get unencoded message for ease of processing
queue.set_message_class(RawMessage)
......@@ -60,78 +74,84 @@ class LifecycleHandler:
asg = as_message['AutoScalingGroupName']
token = as_message['LifecycleActionToken']
if self.verify_ok_to_retire(as_message['EC2InstanceId']):
try:
if self.verify_ok_to_retire(as_message['EC2InstanceId']):
logging.info("Host is marked as OK to retire, retiring {instance}".format(
instance=instance_id))
logging.info("Host is marked as OK to retire, retiring {instance}".format(
instance=instance_id))
self.continue_lifecycle(asg,token,self.hook)
self.continue_lifecycle(asg, token, self.hook)
self.delete_sqs_message(queue, sqs_message, as_message, self.dry_run)
if not self.dry_run:
logging.info("Deleting message with body {message}".format(message=as_message))
sqs_con.delete_message(queue,sqs_message)
else:
logging.info("Would have deleted message with body {message}".format(message=as_message))
logging.info("Recording lifecycle heartbeat for instance {instance}".format(
instance=instance_id))
else:
logging.info("Recording lifecycle heartbeat for instance {instance}".format(
instance=instance_id))
self.record_lifecycle_action_heartbeat(asg, token, self.hook)
except MissingHostError as mhe:
logging.exception(mhe)
# There is nothing we can do to recover from this, so we
# still delete the message
self.delete_sqs_message(queue, sqs_message, as_message, self.dry_run)
self.record_lifecycle_action_heartbeat(asg, token,self.hook)
# These notifications are send when configuring a new lifecycle hook, they can be
# These notifications are sent when configuring a new lifecycle hook, they can be
# deleted safely
elif as_message['Event'] == LifecycleHandler.TEST_NOTIFICATION:
if not self.dry_run:
logging.info("Deleting message with body {message}".format(message=as_message))
sqs_con.delete_message(queue,sqs_message)
else:
logging.info("Would have deleted message with body {message}".format(message=as_message))
self.delete_sqs_message(queue, sqs_message, as_message, self.dry_run)
else:
raise NotImplemented("Encountered message, {message_id}, of unexpected type.".format(
message_id=as_message['MessageId']))
def delete_sqs_message(self, queue, sqs_message, as_message, dry_run):
if not dry_run:
logging.info("Deleting message with body {message}".format(message=as_message))
self.sqs_con.delete_message(queue, sqs_message)
else:
logging.info("Would have deleted message with body {message}".format(message=as_message))
def record_lifecycle_action_heartbeat(self, asg, token, hook):
command = "{path}/python " \
"{path}/aws " \
"autoscaling record-lifecycle-action-heartbeat " \
command = self.base_cli_command + "autoscaling record-lifecycle-action-heartbeat " \
"--lifecycle-hook-name {hook} " \
"--auto-scaling-group-name {asg} " \
"--lifecycle-action-token {token}".format(
path=self.bin_directory,hook=hook,asg=asg,token=token)
hook=hook,asg=asg,token=token)
self.run_subprocess_command(command, self.dry_run)
def continue_lifecycle(self, asg, token, hook):
command = "{path}/python " \
"{path}/aws autoscaling complete-lifecycle-action --lifecycle-hook-name {hook} " \
command = self.base_cli_command + "autoscaling complete-lifecycle-action --lifecycle-hook-name {hook} " \
"--auto-scaling-group-name {asg} --lifecycle-action-token {token} --lifecycle-action-result " \
"CONTINUE".format(
path=self.bin_directory, hook=hook, asg=asg, token=token)
hook=hook, asg=asg, token=token)
self.run_subprocess_command(command, self.dry_run)
def run_subprocess_command(self, command, dry_run):
logging.info("Running command {command}.".format(command=command))
message = "Running command {command}.".format(command=command)
if not dry_run:
logging.info(message)
try:
output = subprocess.check_output(command.split(' '))
logging.info("Output was {output}".format(output=output))
except Exception as e:
logging.exception(e)
raise e
else:
logging.info("Dry run: {message}".format(message=message))
def get_ec2_instance_by_id(self, instance_id):
"""
Simple boto call to get the instance based on the instance-id
"""
instances = self.ec2.get_only_instances([instance_id])
instances = self.ec2_con.get_only_instances([instance_id])
if len(instances) == 1:
return self.ec2.get_only_instances([instance_id])[0]
return self.ec2_con.get_only_instances([instance_id])[0]
else:
return None
......@@ -152,28 +172,28 @@ class LifecycleHandler:
else:
# No instance for id in SQS message this can happen if something else
# has terminated the instances outside of this workflow
logging.warn("Instance with id {id} is referenced in an SQS message, but does not exist.")
return True
message = "Instance with id {id} is referenced in an SQS message, but does not exist.".\
format(id=instance_id)
raise MissingHostError(message)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile',
help='The boto profile to use '
'per line.',default=None)
parser.add_argument('-b', '--bin', required=True,
parser.add_argument('-b', '--bin-directory', required=False, default=None,
help='The bin directory of the virtual env '
'from which tor run the AWS cli')
'from which to run the AWS cli (optional)')
parser.add_argument('-q', '--queue', required=True,
help="The SQS queue containing the lifecyle messages")
parser.add_argument('--hook', required=True,
help="The lifecyle hook to act upon.")
parser.add_argument('-d', "--dry-run", dest="dry_run", action="store_true",
help='Print the commands, but do not do anything')
parser.set_defaults(dry_run=False)
args = parser.parse_args()
lh = LifecycleHandler(args.profile, args.queue, args.hook, args.bin, args.dry_run)
lh = LifecycleHandler(args.profile, args.queue, args.hook, args.dry_run, args.bin_directory)
lh.process_lifecycle_messages()
......@@ -131,6 +131,8 @@ if __name__ == '__main__':
print("Waiting 15 seconds before checking to see if db is available")
time.sleep(15)
wait_on_db_status(restore_dbid)
print("Waiting another 15 seconds")
time.sleep(15)
if args.clean_wwc:
# Run the mysql clean sql file
sanitize_cmd = """mysql -u root -p{root_pass} -h{db_host} wwc < {sanitize_wwc_sql_file} """.format(
......@@ -157,7 +159,8 @@ if __name__ == '__main__':
db_cmd = """cd {play_path} && ansible-playbook -c local -i 127.0.0.1, create_dbs.yml """ \
"""{extra_args} -e "edxapp_db_root_user=root xqueue_db_root_user=root" """ \
""" -e "db_root_pass={root_pass}" """ \
"""EDXAPP_MYSQL_HOST={db_host}" """.format(
""" -e "EDXAPP_MYSQL_HOST={db_host}" """ \
""" -e "XQUEUE_MYSQL_HOST={db_host}" """.format(
root_pass=args.password,
extra_args=extra_args,
db_host=db_host,
......
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile that brings up a three-node VM cluster (cluster1..cluster3)
# on fixed private IPs, then provisions all three with the
# vagrant-cluster.yml playbook once the last node is up.

VAGRANTFILE_API_VERSION = "2"

Vagrant.require_version ">= 1.5.0"

# Bootstrap shell run on each node before ansible: set a known root
# password and add static /etc/hosts entries so the cluster members can
# resolve each other by name.
$script = <<SCRIPT
# Silly Ubuntu 12.04 doesn't have the
# --stdin option in the passwd utility
echo root:vagrant | chpasswd
cat << EOF >> /etc/hosts
192.168.33.100 cluster1
192.168.33.110 cluster2
192.168.33.120 cluster3
EOF
SCRIPT

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # All nodes use the same Ubuntu 12.04 base box.
  config.vm.box = "precise64"
  config.vm.box_url = "http://files.vagrantup.com/precise64.box"

  # Turn off shared folders
  #config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true

  # Begin cluster1
  config.vm.define "cluster1" do |cluster1_config|
    cluster1_config.vm.hostname = "cluster1"
    cluster1_config.vm.provision "shell", inline: $script
    cluster1_config.vm.network :private_network, ip: "192.168.33.100"
    cluster1_config.vm.provider "virtualbox" do |v|
      v.customize ["modifyvm", :id, "--memory", "2048"]
      v.customize ["modifyvm", :id, "--cpus", "2"]
    end
  end
  # End cluster1

  # Begin cluster2
  config.vm.define "cluster2" do |cluster2_config|
    cluster2_config.vm.hostname = "cluster2"
    cluster2_config.vm.provision "shell", inline: $script
    cluster2_config.vm.network :private_network, ip: "192.168.33.110"
    cluster2_config.vm.provider "virtualbox" do |v|
      v.customize ["modifyvm", :id, "--memory", "2048"]
      v.customize ["modifyvm", :id, "--cpus", "2"]
    end
  end
  # End cluster2

  # Begin cluster3
  config.vm.define "cluster3" do |cluster3_config|
    cluster3_config.vm.hostname = "cluster3"
    cluster3_config.vm.provision "shell", inline: $script
    cluster3_config.vm.network :private_network, ip: "192.168.33.120"
    cluster3_config.vm.provider "virtualbox" do |v|
      v.customize ["modifyvm", :id, "--memory", "2048"]
      v.customize ["modifyvm", :id, "--cpus", "2"]
    end
    # Now that all machines are up, provision the group
    # See https://github.com/mitchellh/vagrant/issues/1784 for why
    # we do it here
    cluster3_config.vm.provision :ansible do |ansible|
      # point Vagrant at the location of your playbook you want to run
      ansible.playbook = "../../../playbooks/vagrant-cluster.yml"
      ansible.verbose = "vvv"
      # Use the checked-in static inventory (with the vagrant insecure key)
      # and run against every host, not just cluster3.
      ansible.inventory_path = "inventory.ini"
      ansible.limit = 'all'
    end
  end
  # End cluster3
end
# config file for ansible -- http://ansible.github.com
# nearly all parameters can be overridden in ansible-playbook or with command line flags
# ansible will read ~/.ansible.cfg or /etc/ansible/ansible.cfg, whichever it finds first

[defaults]
# Enable the jinja2 "do" statement tag ({% do ... %}), used by templates
# in this repository.
jinja2_extensions=jinja2.ext.do
# Vagrant VMs are destroyed and re-created frequently; skip the interactive
# SSH host-key confirmation prompt so provisioning is unattended.
host_key_checking = False
# Look up roles in the sibling ansible-roles / ansible-private checkouts.
roles_path=../../ansible-roles/roles:../../ansible-private/roles:../../ansible-roles/
# Static inventory for the three-node vagrant cluster; hosts are reached on
# their fixed private-network IPs using Vagrant's insecure default key.
[cluster]
cluster1 ansible_ssh_host=192.168.33.100 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
cluster2 ansible_ssh_host=192.168.33.110 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
cluster3 ansible_ssh_host=192.168.33.120 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
......@@ -102,8 +102,18 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
ansible.playbook = "../../../playbooks/vagrant-devstack.yml"
ansible.verbose = "vvvv"
ansible.extra_vars = {}
if ENV['ENABLE_LEGACY_ORA']
ansible.extra_vars = { ENABLE_LEGACY_ORA: true }
ansible.extra_vars['ENABLE_LEGACY_ORA'] = true
end
if ENV['OPENEDX_RELEASE']
ansible.extra_vars = {
edx_platform_version: ENV['OPENEDX_RELEASE'],
ora2_version: ENV['OPENEDX_RELEASE'],
certs_version: ENV['OPENEDX_RELEASE'],
forum_version: ENV['OPENEDX_RELEASE'],
xqueue_version: ENV['OPENEDX_RELEASE'],
}
end
end
end
......@@ -39,9 +39,18 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.provision :ansible do |ansible|
# point Vagrant at the location of your playbook you want to run
ansible.playbook = "../../../playbooks/vagrant-fullstack.yml"
ansible.verbose = "vvv"
# set extra-vars here instead of in the vagrant play so that
# they are written out to /edx/etc/server-vars.yml which can
# be used later when running ansible locally
ansible.verbose = "vvvv"
if ENV['OPENEDX_RELEASE']
ansible.extra_vars = {
edx_platform_version: ENV['OPENEDX_RELEASE'],
ora2_version: ENV['OPENEDX_RELEASE'],
certs_version: ENV['OPENEDX_RELEASE'],
forum_version: ENV['OPENEDX_RELEASE'],
xqueue_version: ENV['OPENEDX_RELEASE'],
}
end
end
end
......@@ -13,17 +13,31 @@ if [ ! -d /edx/app/edx_ansible ]; then
echo "Error: Base box is missing provisioning scripts." 1>&2
exit 1
fi
OPENEDX_RELEASE=$1
export PYTHONUNBUFFERED=1
source /edx/app/edx_ansible/venvs/edx_ansible/bin/activate
cd /edx/app/edx_ansible/edx_ansible/playbooks
# Did we specify an openedx release?
if [ -n "$OPENEDX_RELEASE" ]; then
EXTRA_VARS="-e edx_platform_version=$OPENEDX_RELEASE \
-e ora2_version=$OPENEDX_RELEASE \
-e certs_version=$OPENEDX_RELEASE \
-e forum_version=$OPENEDX_RELEASE \
-e xqueue_version=$OPENEDX_RELEASE \
"
CONFIG_VER=$OPENEDX_RELEASE
else
CONFIG_VER="release"
fi
# Need to ensure that the configuration repo is updated
# The vagrant-devstack.yml playbook will also do this, but only
# after loading the playbooks into memory. If these are out of date,
# this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration release
/edx/bin/update configuration $CONFIG_VER
ansible-playbook -i localhost, -c local vagrant-devstack.yml --tags=deploy -e configuration_version=release
ansible-playbook -i localhost, -c local vagrant-devstack.yml --tags=deploy -e configuration_version=$CONFIG_VER $EXTRA_VARS
SCRIPT
edx_platform_mount_dir = "edx-platform"
......@@ -40,11 +54,27 @@ if ENV['VAGRANT_MOUNT_BASE']
end
# map the name of the git branch that we use for a release
# to a name and a file path, which are used for retrieving
# a Vagrant box from the internet.
openedx_releases = {
"openedx/rc/aspen-2014-09-10" => {
:name => "aspen-devstack-rc1", :file => "20141009-aspen-devstack-rc1.box",
},
"aspen.1" => {
:name => "aspen-devstack-1", :file => "20141028-aspen-devstack-1.box",
},
}
openedx_releases.default = {
:name => "kifli-devstack", :file => "20140826-kifli-devstack.box"
}
rel = ENV['OPENEDX_RELEASE']
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "kifli-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140826-kifli-devstack.box"
config.vm.box = openedx_releases[rel][:name]
config.vm.box_url = "http://files.edx.org/vagrant-images/#{openedx_releases[rel][:file]}"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
......@@ -105,5 +135,5 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Assume that the base box has the edx_ansible role installed
# We can then tell the Vagrant instance to update itself.
config.vm.provision "shell", inline: $script
config.vm.provision "shell", inline: $script, args: rel
end
......@@ -5,11 +5,27 @@ VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
CPU_COUNT = 2
# map the name of the git branch that we use for a release
# to a name and a file path, which are used for retrieving
# a Vagrant box from the internet.
openedx_releases = {
"openedx/rc/aspen-2014-09-10" => {
:name => "aspen-fullstack-rc1", :file => "20141010-aspen-fullstack-rc1.box",
},
"aspen.1" => {
:name => "aspen-fullstack-1", :file => "20141028-aspen-fullstack-1.box",
},
}
openedx_releases.default = {
:name => "kifli-fullstack", :file => "20140826-kifli-fullstack.box"
}
rel = ENV['OPENEDX_RELEASE']
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "kifli-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140826-kifli-fullstack.box"
config.vm.box = openedx_releases[rel][:name]
config.vm.box_url = "http://files.edx.org/vagrant-images/#{openedx_releases[rel][:file]}"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = true
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment