Commit f713b79b by Feanil Patel

Merge pull request #1678 from edx/feanil/rc-lavash-0

Feanil/rc lavash 0
parents 3f695726 82d97ecb
- Role: edxapp
- A new var, `EDXAPP_DEFAULT_CACHE_VERSION`, was added to make it easy to invalidate
the default memcache store and, with it, all user sessions. Updating the edxapp
env.json files will log all users out once; this is a one-time penalty as long as
the value of `EDXAPP_DEFAULT_CACHE_VERSION` is not explicitly changed afterwards.
(A short override sketch follows this changelog block.)
- Role: nginx
- New HTML templates for server errors were added, with defaults for a rate-limiting
static page and a server-error static page. CMS/LMS are set to use them by default;
the wording can be changed via the nginx default vars.
- Role: edxapp
- There is now an all-caps variable override for celery workers.
- Role: common
- We now remove the default syslog.d conf file (50-default.conf); this will
break deployments that have hand-edited that file.
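A minimal override sketch for the new cache-version var (the file name and the value
"2" are illustrative; any value different from the current one forces the one-time logout):

  # overrides.yml, passed to the play with -e@overrides.yml
  EDXAPP_DEFAULT_CACHE_VERSION: "2"   # bump to invalidate the default cache and all sessions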
......@@ -5,6 +20,10 @@
- Role: edxapp
- Updated the module store settings to match the new settings format.
- Possible breaking change: the edxapp role vars edxapp_lms_env and edxapp_cms_env have
been renamed to EDXAPP_LMS_ENV and EDXAPP_CMS_ENV to indicate, per our convention,
that overriding them is expected. The default values remain the same. (A rename sketch
follows this changelog block.)
- Role: analytics-api
- Added a new role for the analytics-api Django app. The repo is currently private.
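A rename sketch for existing override files (the settings-module values shown are
illustrative; whatever values were used before are kept, only the keys change):

  # before this release
  # edxapp_lms_env: "lms.envs.aws"
  # edxapp_cms_env: "cms.envs.aws"
  # after this release
  EDXAPP_LMS_ENV: "lms.envs.aws"
  EDXAPP_CMS_ENV: "cms.envs.aws"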
......@@ -29,3 +48,7 @@
- Role: Mongo
- Fixed the case of a variable used in an if block that broke cluster configuration,
by renaming mongo_clustered to MONGO_CLUSTERED.
- Role: Edxapp
- Added EDXAPP_LMS_AUTH_EXTRA and EDXAPP_CMS_AUTH_EXTRA for passing distinct AUTH_EXTRA
configurations to the LMS and CMS. Both variables default to EDXAPP_AUTH_EXTRA for
backward compatibility (see the sketch below).
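A hedged sketch of the new AUTH_EXTRA split (the keys inside the dicts are purely
illustrative; leaving both vars unset keeps the old behaviour, since each falls back
to EDXAPP_AUTH_EXTRA):

  EDXAPP_LMS_AUTH_EXTRA:
    LMS_ONLY_SECRET: "lms-value"
  EDXAPP_CMS_AUTH_EXTRA:
    CMS_ONLY_SECRET: "cms-value"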
import os
import prettytable
import hipchat
import time
import random
from ansible import utils
try:
import prettytable
except ImportError:
prettytable = None
try:
import hipchat
except ImportError:
hipchat = None
class CallbackModule(object):
......@@ -24,30 +29,40 @@ class CallbackModule(object):
"""
def __init__(self):
if 'HIPCHAT_TOKEN' in os.environ:
self.start_time = time.time()
self.task_report = []
self.last_task = None
self.last_task_changed = False
self.last_task_count = 0
self.last_task_delta = 0
self.last_task_start = time.time()
self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', True) == True)
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
try:
self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
except Exception as e:
utils.warning("Unable to connect to hipchat: {}".format(e))
self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
self.printed_playbook = False
self.playbook_name = None
self.enabled = True
else:
self.enabled = False
self.enabled = "HIPCHAT_TOKEN" in os.environ
if not self.enabled:
return
# make sure we got our imports
if not hipchat:
raise ImportError(
"The hipchat plugin requires the hipchat Python module, "
"which is not installed or was not found."
)
if not prettytable:
raise ImportError(
"The hipchat plugin requires the prettytable Python module, "
"which is not installed or was not found."
)
self.start_time = time.time()
self.task_report = []
self.last_task = None
self.last_task_changed = False
self.last_task_count = 0
self.last_task_delta = 0
self.last_task_start = time.time()
self.condensed_task_report = (os.getenv('HIPCHAT_CONDENSED', True) == True)
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
try:
self.hipchat_conn = hipchat.HipChat(token=os.getenv('HIPCHAT_TOKEN'))
except Exception as e:
utils.warning("Unable to connect to hipchat: {}".format(e))
self.hipchat_msg_prefix = os.getenv('HIPCHAT_MSG_PREFIX', '')
self.hipchat_msg_color = os.getenv('HIPCHAT_MSG_COLOR', '')
self.printed_playbook = False
self.playbook_name = None
def _send_hipchat(self, message, room=None, from_name=None, color=None, message_format='text'):
......@@ -221,7 +236,7 @@ class CallbackModule(object):
summary_output = "<b>{}</b>: <i>{}</i> - ".format(self.hipchat_msg_prefix, host)
for summary_item in ['ok', 'changed', 'unreachable', 'failures']:
if stats[summary_item] != 0:
summary_output += "<b>{}</b> - {} ".format(summary_item, stats[summary_item])
summary_output += "<b>{}</b> - {} ".format(summary_item, stats[summary_item])
summary_all_host_output.append(summary_output)
self._send_hipchat("<br />".join(summary_all_host_output), message_format='html')
msg = "<b>{description}</b>: Finished Ansible run for <b><i>{play}</i> in {min:02} minutes, {sec:02} seconds</b><br /><br />".format(
......
......@@ -22,11 +22,12 @@ import time
import json
import socket
try:
import boto
except ImportError:
boto = None
else:
import boto.sqs
from boto.exception import NoAuthHandlerFound
except ImportError:
print "Boto is required for the sqs_notify callback plugin"
raise
class CallbackModule(object):
......@@ -47,36 +48,42 @@ class CallbackModule(object):
- START events
"""
def __init__(self):
self.enable_sqs = 'ANSIBLE_ENABLE_SQS' in os.environ
if not self.enable_sqs:
return
# make sure we got our imports
if not boto:
raise ImportError(
"The sqs callback module requires the boto Python module, "
"which is not installed or was not found."
)
self.start_time = time.time()
if 'ANSIBLE_ENABLE_SQS' in os.environ:
self.enable_sqs = True
if not 'SQS_REGION' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \
'not defined in environment'
sys.exit(1)
self.region = os.environ['SQS_REGION']
try:
self.sqs = boto.sqs.connect_to_region(self.region)
except NoAuthHandlerFound:
print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \
'to AWS due invalid credentials'
sys.exit(1)
if not 'SQS_NAME' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \
'defined in environment'
sys.exit(1)
self.name = os.environ['SQS_NAME']
self.queue = self.sqs.create_queue(self.name)
if 'SQS_MSG_PREFIX' in os.environ:
self.prefix = os.environ['SQS_MSG_PREFIX']
else:
self.prefix = ''
self.last_seen_ts = {}
if not 'SQS_REGION' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_REGION ' \
'not defined in environment'
sys.exit(1)
self.region = os.environ['SQS_REGION']
try:
self.sqs = boto.sqs.connect_to_region(self.region)
except NoAuthHandlerFound:
print 'ANSIBLE_ENABLE_SQS enabled but cannot connect ' \
'to AWS due invalid credentials'
sys.exit(1)
if not 'SQS_NAME' in os.environ:
print 'ANSIBLE_ENABLE_SQS enabled but SQS_NAME not ' \
'defined in environment'
sys.exit(1)
self.name = os.environ['SQS_NAME']
self.queue = self.sqs.create_queue(self.name)
if 'SQS_MSG_PREFIX' in os.environ:
self.prefix = os.environ['SQS_MSG_PREFIX']
else:
self.enable_sqs = False
self.prefix = ''
self.last_seen_ts = {}
def runner_on_failed(self, host, res, ignore_errors=False):
if self.enable_sqs:
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aide
- role: datadog
......
......@@ -3,5 +3,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- alton
- name: Deploy Antivirus Scanner
hosts: all
sudo: True
gather_facts: True
roles:
- antivirus
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- bastion
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- certs
......
# ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
# ansible-playbook -i ec2.py cluster_rabbitmq.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: all
sudo: True
......@@ -28,14 +28,9 @@
tasks:
- debug: msg="{{ ansible_ec2_local_ipv4 }}"
with_items: list.results
- shell: echo "rabbit@ip-{{ item|replace('.', '-') }}"
when: item != ansible_ec2_local_ipv4
with_items: hostvars.keys()
register: list
- command: rabbitmqctl stop_app
- command: rabbitmqctl join_cluster {{ item.stdout }}
when: item.stdout is defined
with_items: list.results
- command: rabbitmqctl join_cluster rabbit@ip-{{ hostvars.keys()[0]|replace('.', '-') }}
when: hostvars.keys()[0] != ansible_ec2_local_ipv4
- command: rabbitmqctl start_app
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- role: datadog
......
......@@ -31,7 +31,7 @@
- "EDXAPP_MONGO_HOSTS: {{ EDXAPP_MONGO_HOSTS }}"
- "EDXAPP_MONGO_DB_NAME: {{ EDXAPP_MONGO_DB_NAME }}"
- "EDXAPP_MONGO_USER: {{ EDXAPP_MONGO_USER }}"
- "EDXAPP_MONGO_PASS: {{ EDXAPP_MONGO_PASS }}"
- "EDXAPP_MONGO_PASSWORD: {{ EDXAPP_MONGO_PASSWORD }}"
tags: update_edxapp_mysql_host
- name: call update on edx-platform
......
......@@ -40,6 +40,10 @@
sudo: yes
with_items:
- python-mysqldb
# When this is run on jenkins the package will already
# exist and can't run as the jenkins user because it
# does not have sudo privs.
when: ansible_ssh_user != 'jenkins'
- name: create mysql databases for the edX stack
mysql_db: >
db={{ item[0] }}{{ item[1].db_name }}
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- demo
- role: datadog
......
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- devpi
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......
- name: Deploy the edx_ansible role
hosts: all
sudo: True
gather_facts: False
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- edx_ansible
......@@ -12,7 +12,6 @@
nginx_sites:
- cms
- lms
- ora
- xqueue
- xserver
- certs
......@@ -31,7 +30,6 @@
- forum
- { role: "xqueue", update_users: True }
- xserver
- ora
- certs
- edx_ansible
- analytics-api
......
- name: Create ec2 instance
hosts: localhost
connection: local
gather_facts: False
gather_facts: True
vars:
keypair: continuous-integration
instance_type: t2.medium
......
......@@ -3,6 +3,8 @@
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......@@ -20,6 +22,10 @@
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
NEWRELIC_LOGWATCH:
- logwatch-503.j2
- logwatch-cms-errors.j2
- logwatch-lms-errors.j2
when: COMMON_ENABLE_NEWRELIC
- role: minos
when: COMMON_ENABLE_MINOS
......@@ -4,15 +4,27 @@
gather_facts: False
vars:
db_dry_run: "--db-dry-run"
syncdb: false
tasks:
# Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
- name: migrate
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} syncdb --migrate --noinput --settings=aws_migrate {{ db_dry_run }}
python manage.py {{ item }} migrate --noinput {{ db_dry_run }} --settings=aws_migrate
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
with_items:
- lms
- cms
- name: syncdb
shell: >
chdir={{ edxapp_code_dir }}
python manage.py {{ item }} syncdb --noinput --settings=aws_migrate
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
when: syncdb
with_items:
- lms
- cms
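For the migration play above, a rough extra-vars sketch (passed with -e or -e@, as in
the other plays in this repo) that runs the real migrations plus the optional syncdb step:

  db_dry_run: ""      # empty string drops --db-dry-run so migrate actually applies changes
  syncdb: true        # also run the separate syncdb task added above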
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- flower
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......
- name: Deploy Insights
hosts: all
sudo: True
gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: True
roles:
- role: nginx
nginx_sites:
- insights
- aws
- insights
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
......@@ -3,5 +3,8 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- jenkins_admin
......@@ -8,6 +8,8 @@
gather_facts: True
vars:
mongo_enable_journal: False
serial_count: 1
serial: "{{ serial_count }}"
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
......@@ -18,4 +20,5 @@
- edxlocal
- mongo
- browsers
- browsermob-proxy
- jenkins_worker
......@@ -3,7 +3,9 @@
hosts: all
sudo: True
gather_facts: True
serial: 1
vars:
serial_count: 1
serial: "{{ serial_count }}"
vars_files:
- "{{secure_dir}}/vars/{{COMMON_ENVIRONMENT}}/legacy-ora.yml"
roles:
......
......@@ -41,6 +41,18 @@ class LifecycleInventory():
parser = argparse.ArgumentParser()
self.profile = profile
def get_e_d_from_tags(self, group):
environment = "default_environment"
deployment = "default_deployment"
for r in group.tags:
if r.key == "environment":
environment = r.value
elif r.key == "deployment":
deployment = r.value
return environment,deployment
def get_instance_dict(self):
ec2 = boto.connect_ec2(profile_name=self.profile)
reservations = ec2.get_all_instances()
......@@ -64,10 +76,12 @@ class LifecycleInventory():
for instance in group.instances:
private_ip_address = instances[instance.instance_id].private_ip_address
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
if private_ip_address:
environment,deployment = self.get_e_d_from_tags(group)
inventory[environment + "_" + deployment + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
......@@ -77,8 +91,8 @@ if __name__=="__main__":
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
LifecycleInventory(args.profile).run()
......@@ -3,6 +3,9 @@
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- common
- minos
- aws
- minos
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- mongo
- mongo_mms
......
- name: Configure notifier instance
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- notifier
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- role: nginx
nginx_sites:
......
# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml
# -e@/vars/env.yml --limit Terminating_Wait
# ansible-playbook -i ./lifecycle_inventory.py ./retire_host.yml
# -e@/vars/env.yml --limit Terminating_Wait -e TARGET="Terminating_Wait"
#
# Note that the target now must be specified as an argument
#
#
# This is separate because its use of handlers
# leads to various race conditions.
#
- name: Stop all services
hosts: Terminating_Wait
hosts: "{{TARGET}}"
sudo: True
gather_facts: False
vars:
......@@ -15,41 +18,30 @@
- stop_all_edx_services
- name: Server retirement workflow
hosts: Terminating_Wait
hosts: "{{TARGET}}"
sudo: True
gather_facts: False
tasks:
- name: Force a log rotation
command: /usr/sbin/logrotate -f /etc/logrotate.d/{{ item }}
with_items:
- "apport"
- "apt"
- "aptitude"
- "dpkg"
- "hourly"
- "landscape-client"
- "newrelic-sysmond"
- "nginx"
- "nginx-access"
- "nginx-error"
- "ppp"
- "rsyslog"
- "ufw"
- "unattended-upgrades"
- "upstart"
- name: Force a log rotation
- name: Terminate existing s3 log sync
command: /usr/bin/pkill send-logs-to-s3 || true
- name: "Ensure send-logs-to-s3 script is in the logrotate file"
shell: grep send-logs-to-s3 /etc/logrotate.d/hourly/tracking.log
# We only force a rotation of edx logs.
# Forced rotation of system logfiles will only
# work if there hasn't already been a previous rotation
# The logrotate will also call send-logs-to-s3 but hasn't
# been updated for all servers yet.
- name: Force a log rotation which will call the log sync
command: /usr/sbin/logrotate -f /etc/logrotate.d/hourly/{{ item }}
with_items:
- "tracking.log"
- "edx-services"
- name: Terminate existing s3 log sync
command: /usr/bin/pkill send-logs-to-s3 || true
- name: Send logs to s3
command: /edx/bin/send-logs-to-s3
# This catches the case where tracking.log is 0b
- name: Sync again
command: /edx/bin/send-logs-to-s3 -d "{{ COMMON_LOG_DIR }}/tracking/*" -b "{{ COMMON_AWS_SYNC_BUCKET }}/logs/tracking"
- name: Run minos verification
hosts: Terminating_Wait
hosts: "{{TARGET}}"
sudo: True
gather_facts: False
tasks:
......
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- snort
- role: datadog
......
......@@ -2,5 +2,8 @@
hosts: all
sudo: True
gather_facts: False
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- stop_all_edx_services
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- testcourses
- role: datadog
......
......@@ -35,6 +35,8 @@
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
NEWRELIC_LOGWATCH:
- logwatch-xqueue-errors.j2
when: COMMON_ENABLE_NEWRELIC
post_tasks:
- debug: var="{{ ansible_ec2_instance_id }}"
......
......@@ -8,6 +8,8 @@
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- xqwatcher
......@@ -16,4 +18,4 @@
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
\ No newline at end of file
when: COMMON_ENABLE_NEWRELIC
......@@ -2,6 +2,9 @@
hosts: all
sudo: True
gather_facts: True
vars:
serial_count: 1
serial: "{{ serial_count }}"
roles:
- aws
- role: nginx
......
......@@ -31,14 +31,14 @@
tasks:
- name: edX configuration
cloudformation: >
stack_name="$name" state=present
region=$region disable_rollback=false
stack_name="{{ name }}" state=present
region="{{ region }}" disable_rollback=false
template=../cloudformation_templates/edx-server-multi-instance.json
args:
template_parameters:
KeyName: $key
KeyName: "{{key}}"
InstanceType: m1.small
GroupTag: $group
GroupTag: "{{group}}"
register: stack
- name: show stack outputs
debug: msg="My stack outputs are ${stack.stack_outputs}"
debug: msg="My stack outputs are {{stack.stack_outputs}}"
......@@ -18,6 +18,7 @@
# These should stay false for the public AMI
COMMON_ENABLE_DATADOG: False
COMMON_ENABLE_SPLUNKFORWARDER: False
ENABLE_LEGACY_ORA: !!null
roles:
- role: nginx
nginx_sites:
......@@ -38,7 +39,8 @@
- elasticsearch
- forum
- { role: "xqueue", update_users: True }
- ora
- role: ora
when: ENABLE_LEGACY_ORA
- certs
- edx_ansible
- role: datadog
......
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- name: configure the boto profiles for alton
template: >
src="boto.j2"
......
......@@ -34,11 +34,4 @@
- "{{ alton_app_dir }}"
- "{{ alton_venvs_dir }}"
- name: setup the alton env
template: >
src="alton_env.j2" dest="{{ alton_app_dir }}/alton_env"
owner="{{ alton_user }}" group="{{ common_web_user }}"
mode=0644
notify: restart alton
- include: deploy.yml tags=deploy
......@@ -6,3 +6,5 @@ export {{ name }}="{{ value }}"
{% endif %}
{%- endfor %}
export WILL_BOTO_PROFILES="{{ ALTON_AWS_CREDENTIALS|join(';') }}"
{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
[profile {{deployment}}]
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
......
......@@ -19,46 +19,58 @@ ANALYTICS_API_NEWRELIC_APPNAME: "{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }
ANALYTICS_API_PIP_EXTRA_ARGS: "-i {{ COMMON_PYPI_MIRROR_URL }}"
ANALYTICS_API_NGINX_PORT: "18100"
ANALYTICS_API_DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
ANALYTICS_API_VERSION: "master"
# Default dummy user, override this!!
ANALYTICS_API_USERS:
"dummy-api-user": "changeme"
ANALYTICS_API_SECRET_KEY: 'Your secret key here'
ANALYTICS_API_TIME_ZONE: 'UTC'
ANALYTICS_API_LANGUAGE_CODE: 'en-us'
ANALYTICS_API_EMAIL_HOST: 'localhost'
ANALYTICS_API_EMAIL_HOST_USER: 'mail_user'
ANALYTICS_API_EMAIL_HOST_PASSWORD: 'mail_password'
ANALYTICS_API_EMAIL_PORT: 587
ANALYTICS_API_AUTH_TOKEN: 'put-your-api-token-here'
ANALYTICS_API_CONFIG:
ANALYTICS_DATABASE: 'reports'
SECRET_KEY: 'Your secret key here'
TIME_ZONE: 'America/New_York'
LANGUAGE_CODE: 'en-us'
SECRET_KEY: '{{ ANALYTICS_API_SECRET_KEY }}'
TIME_ZONE: '{{ ANALYTICS_API_TIME_ZONE }}'
LANGUAGE_CODE: '{{ANALYTICS_API_LANGUAGE_CODE }}'
# email config
EMAIL_HOST: 'smtp.example.com'
EMAIL_HOST_PASSWORD: ""
EMAIL_HOST_USER: ""
EMAIL_PORT: 587
API_AUTH_TOKEN: 'put-your-api-token-here'
STATICFILES_DIRS: []
EMAIL_HOST: '{{ ANALYTICS_API_EMAIL_HOST }}'
EMAIL_HOST_PASSWORD: '{{ ANALYTICS_API_EMAIL_HOST_PASSWORD }}'
EMAIL_HOST_USER: '{{ ANALYTICS_API_EMAIL_HOST_USER }}'
EMAIL_PORT: $ANALYTICS_API_EMAIL_PORT
API_AUTH_TOKEN: '{{ ANALYTICS_API_AUTH_TOKEN }}'
STATICFILES_DIRS: ['static']
STATIC_ROOT: "{{ COMMON_DATA_DIR }}/{{ analytics_api_service_name }}/staticfiles"
# db config
DATABASE_OPTIONS:
connect_timeout: 10
DATABASES:
# rw user
default:
ENGINE: 'django.db.backends.mysql'
NAME: 'analytics-api'
USER: 'api001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
# read-only user
reports:
ENGINE: 'django.db.backends.mysql'
NAME: 'reports'
USER: 'reports001'
PASSWORD: 'password'
HOST: 'localhost'
PORT: '3306'
DATABASES: '{{ ANALYTICS_API_DATABASES }}'
ANALYTICS_API_GUNICORN_WORKERS: "2"
ANALYTICS_API_GUNICORN_EXTRA: ""
#
# vars are namespace with the module name.
#
......
......@@ -32,7 +32,7 @@
# ansible-playbook -i 'api.example.com,' ./analyticsapi.yml -e@/ansible/vars/deployment.yml -e@/ansible/vars/env-deployment.yml
#
- fail: msg="You must provide an private key for the analytics repo"
- fail: msg="You must provide a private key for the analytics repo"
when: not ANALYTICS_API_GIT_IDENTITY
- include: deploy.yml tags=deploy
......@@ -15,4 +15,4 @@ export NEW_RELIC_LICENSE_KEY="{{ NEWRELIC_LICENSE_KEY }}"
source {{ analytics_api_app_dir }}/analytics_api_env
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} analyticsdataserver.wsgi:application
{{ executable }} --pythonpath={{ analytics_api_code_dir }} -b {{ analytics_api_gunicorn_host }}:{{ analytics_api_gunicorn_port }} -w {{ ANALYTICS_API_GUNICORN_WORKERS }} --timeout={{ analytics_api_gunicorn_timeout }} {{ ANALYTICS_API_GUNICORN_EXTRA }} analyticsdataserver.wsgi:application
......@@ -19,6 +19,7 @@ AS_SERVER_PORT: '9000'
AS_ENV_LANG: 'en_US.UTF-8'
AS_LOG_LEVEL: 'INFO'
AS_WORKERS: '2'
AS_GUNICORN_EXTRA: ""
# add public keys to enable the automator user
# for running manage.py commands
......@@ -40,14 +41,14 @@ analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $AS_DB_ANALYTICS_USER
PASSWORD: $AS_DB_ANALYTICS_PASSWORD
HOST: $AS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $AS_API_KEY
USER: "{{ AS_DB_ANALYTICS_USER }}"
PASSWORD: "{{ AS_DB_ANALYTICS_PASSWORD }}"
HOST: "{{ AS_DB_ANALYTICS_HOST }}"
ANALYTICS_API_KEY: "{{ AS_API_KEY }}"
ANALYTICS_RESULTS_DB:
MONGO_URI: $AS_DB_RESULTS_URL
MONGO_DB: $AS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $AS_DB_RESULTS_COLLECTION
MONGO_URI: "{{ AS_DB_RESULTS_URL }}"
MONGO_DB: "{{ AS_DB_RESULTS_DB }}"
MONGO_STORED_QUERIES_COLLECTION: "{{ AS_DB_RESULTS_COLLECTION }}"
as_role_name: "analytics-server"
as_user: "analytics-server"
......
......@@ -28,7 +28,7 @@
accept_hostkey=yes
version={{ as_version }} force=true
environment:
GIT_SSH: $as_git_ssh
GIT_SSH: "{{ as_git_ssh }}"
notify: restart the analytics service
notify: start the analytics service
tags:
......
......@@ -18,4 +18,4 @@ env DJANGO_SETTINGS_MODULE={{ as_django_settings }}
chdir {{ as_code_dir }}
setuid {{ as_web_user }}
exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv anserv.wsgi
exec {{ as_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ as_code_dir }}/anserv {{ AS_GUNICORN_EXTRA }} anserv.wsgi
......@@ -19,6 +19,7 @@ ANALYTICS_SERVER_PORT: '9000'
ANALYTICS_ENV_LANG: 'en_US.UTF-8'
ANALYTICS_LOG_LEVEL: 'INFO'
ANALYTICS_WORKERS: '2'
ANALYTICS_GUNICORN_EXTRA: ""
DATABASES:
default: &databases_default
......@@ -33,14 +34,14 @@ analytics_auth_config:
DATABASES:
analytics:
<<: *databases_default
USER: $ANALYTICS_DB_ANALYTICS_USER
PASSWORD: $ANALYTICS_DB_ANALYTICS_PASSWORD
HOST: $ANALYTICS_DB_ANALYTICS_HOST
ANALYTICS_API_KEY: $ANALYTICS_API_KEY
USER: "{{ ANALYTICS_DB_ANALYTICS_USER }}"
PASSWORD: "{{ ANALYTICS_DB_ANALYTICS_PASSWORD }}"
HOST: "{{ ANALYTICS_DB_ANALYTICS_HOST }}"
ANALYTICS_API_KEY: "{{ ANALYTICS_API_KEY }}"
ANALYTICS_RESULTS_DB:
MONGO_URI: $ANALYTICS_DB_RESULTS_URL
MONGO_DB: $ANALYTICS_DB_RESULTS_DB
MONGO_STORED_QUERIES_COLLECTION: $ANALYTICS_DB_RESULTS_COLLECTION
MONGO_URI: "{{ ANALYTICS_DB_RESULTS_URL }}"
MONGO_DB: "{{ ANALYTICS_DB_RESULTS_DB }}"
MONGO_STORED_QUERIES_COLLECTION: "{{ ANALYTICS_DB_RESULTS_COLLECTION }}"
analytics_role_name: "analytics"
analytics_user: "analytics"
......
......@@ -28,7 +28,7 @@
accept_hostkey=yes
version={{ analytics_version }} force=true
environment:
GIT_SSH: $analytics_git_ssh
GIT_SSH: "{{ analytics_git_ssh }}"
notify: restart the analytics service
notify: start the analytics service
tags:
......
......@@ -18,4 +18,4 @@ env DJANGO_SETTINGS_MODULE={{ analytics_django_settings }}
chdir {{ analytics_code_dir }}
setuid {{ analytics_web_user }}
exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv anserv.wsgi
exec {{ analytics_venv_dir }}/bin/gunicorn -b 0.0.0.0:$PORT -w $WORKERS --pythonpath={{ analytics_code_dir }}/anserv {{ ANALYTICS_GUNICORN_EXTRA }} anserv.wsgi
......@@ -10,7 +10,7 @@
when: role_exists | success
- name: create role directories
file: path=roles/{{role_name}}/{{ item }} state=directory
file: path=roles/{{ role_name }}/{{ item }} state=directory
with_items:
- tasks
- meta
......
......@@ -6,7 +6,7 @@
- debug
- name: Dump lms auth|env file
template: src=../../edxapp/templates/lms.{{item}}.json.j2 dest=/tmp/lms.{{item}}.json mode=0600
template: src=../../edxapp/templates/lms.{{ item }}.json.j2 dest=/tmp/lms.{{ item }}.json mode=0600
with_items:
- env
- auth
......@@ -16,7 +16,7 @@
- debug
- name: Dump lms-preview auth|env file
template: src=../../edxapp/templates/lms-preview.{{item}}.json.j2 dest=/tmp/lms-preview.{{item}}.json mode=0600
template: src=../../edxapp/templates/lms-preview.{{ item }}.json.j2 dest=/tmp/lms-preview.{{ item }}.json mode=0600
with_items:
- env
- auth
......@@ -26,7 +26,7 @@
- debug
- name: Dump cms auth|env file
template: src=../../edxapp/templates/cms.{{item}}.json.j2 dest=/tmp/cms.{{item}}.json mode=0600
template: src=../../edxapp/templates/cms.{{ item }}.json.j2 dest=/tmp/cms.{{ item }}.json mode=0600
with_items:
- env
- auth
......@@ -44,7 +44,7 @@
- name: fetch remote files
# fetch is fail-safe for remote files that don't exist
# setting mode is not an option
fetch: src=/tmp/{{item}} dest=/tmp/{{ansible_hostname}}-{{item}} flat=True
fetch: src=/tmp/{{ item }} dest=/tmp/{{ ansible_hostname }}-{{item}} flat=True
with_items:
- ansible.all.json
- ansible.all.yml
......
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Defaults for role antivirus
#
#
# vars are namespace with the module name.
#
antivirus_role_name: antivirus
#
# OS packages
#
antivirus_debian_pkgs: [clamav]
antivirus_redhat_pkgs: []
antivirus_pip_pkgs: []
antivirus_app_dir: /edx/app/antivirus
antivirus_user: "antivirus"
ANTIVIRUS_BUCKETS: !!null
ANTIVIRUS_MAILTO: "{{ EDXAPP_TECH_SUPPORT_EMAIL }}"
ANTIVIRUS_MAILFROM: "{{ EDXAPP_DEFAULT_FROM_EMAIL }}"
ANTIVIRUS_AWS_KEY: ""
ANTIVIRUS_AWS_SECRET: ""
ANTIVIRUS_S3_AWS_KEY: "{{ ANTIVIRUS_AWS_KEY }}"
ANTIVIRUS_SES_AWS_KEY: "{{ ANTIVIRUS_AWS_KEY }}"
ANTIVIRUS_S3_AWS_SECRET: "{{ ANTIVIRUS_AWS_SECRET}}"
ANTIVIRUS_SES_AWS_SECRET: "{{ ANTIVIRUS_AWS_SECRET}}"
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Handlers for role antivirus
#
# Overview:
#
#
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
##
# Role includes for role antivirus
#
dependencies:
- role: user
user_info: "{{ BASTION_USER_INFO }}"
- aws
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
#
# Tasks for role antivirus
#
# Overview:
#
#
# Dependencies:
#
#
# Example play:
#
#
- name: install antivirus system packages
apt: pkg={{ item }} install_recommends=yes state=present
with_items: antivirus_debian_pkgs
- name: create antivirus scanner user
user: >
name="{{ antivirus_user }}"
home="{{ antivirus_app_dir }}"
createhome=no
shell=/bin/false
- name: create antivirus app and data dirs
file: >
path="{{ item }}"
state=directory
owner="{{ antivirus_user }}"
group="{{ antivirus_user }}"
with_items:
- "{{ antivirus_app_dir }}"
- "{{ antivirus_app_dir }}/data"
- name: install antivirus s3 scanner script
template: >
src=s3_bucket_virus_scan.sh.j2
dest={{ antivirus_app_dir }}/s3_bucket_virus_scan.sh
mode=0555
owner={{ antivirus_user }}
group={{ antivirus_user }}
- name: install antivirus s3 scanner cronjob
cron: >
name="antivirus-{{ item }}"
job="{{ antivirus_app_dir }}/s3_bucket_virus_scan.sh -b '{{ item }}' -m '{{ ANTIVIRUS_MAILTO }}' -f '{{ ANTIVIRUS_MAILFROM }}'"
backup=yes
cron_file=antivirus-{{ item }}
user={{ antivirus_user }}
hour="*"
minute="0"
day="*"
with_items: ANTIVIRUS_BUCKETS
#! /bin/bash
DEBUG="false"
BUCKETNAME="none"
MAILTO=""
MAILFROM=""
ANTIVIRUS_S3_AWS_KEY="{{ ANTIVIRUS_S3_AWS_KEY }}"
ANTIVIRUS_SES_AWS_KEY="{{ ANTIVIRUS_SES_AWS_KEY }}"
ANTIVIRUS_S3_AWS_SECRET="{{ ANTIVIRUS_S3_AWS_SECRET}}"
ANTIVIRUS_SES_AWS_SECRET="{{ ANTIVIRUS_SES_AWS_SECRET}}"
AWS_DEFAULT_REGION="{{ aws_region }}"
function usage {
echo "$0 - $VERSION";
echo "Run ClamAV against the contents of an S3 Bucket.";
echo "Usage: $0 [options]";
echo "options:";
echo " -d Debug mode";
echo " -h Usage (this screen)";
echo " -b <bucket name>";
echo " -m <notify mail address>";
echo " -f <notify from address>";
echo " -k <AWS Key ID>";
echo " -s <AWS Secret Key>"
}
while getopts "dhb:m:f:k:s:" optionName; do
case "$optionName" in
d)
DEBUG="true"
;;
h)
usage;
exit;
;;
[?])
usage;
exit;
;;
b)
BUCKETNAME=$OPTARG;
;;
m)
MAILTO=$OPTARG;
;;
f)
MAILFROM=$OPTARG;
;;
k)
AWS_ACCESS_KEY_ID=$OPTARG;
ANTIVIRUS_S3_AWS_KEY=$OPTARG;
ANTIVIRUS_SES_AWS_KEY=$OPTARG;
;;
s)
AWS_SECRET_ACCESS_KEY=$OPTARG;
ANTIVIRUS_S3_AWS_SECRET=$OPTARG;
ANTIVIRUS_SES_AWS_SECRET=$OPTARG;
;;
esac
done
cd {{ antivirus_app_dir }}
export AWS_ACCESS_KEY_ID=$ANTIVIRUS_S3_AWS_KEY
export AWS_SECRET_ACCESS_KEY=$ANTIVIRUS_S3_AWS_SECRET
export AWS_DEFAULT_REGION
mkdir -p data/$BUCKETNAME
aws s3 sync s3://$BUCKETNAME/ data/$BUCKETNAME
CLAMOUT=$(clamscan -ri data/$BUCKETNAME);
if [[ $? -ne 0 ]]; then
export AWS_ACCESS_KEY_ID=$ANTIVIRUS_SES_AWS_KEY
export AWS_SECRET_ACCESS_KEY=$ANTIVIRUS_SES_AWS_SECRET
aws ses send-email --to $MAILTO --from $MAILFROM --subject "Virus Scanner malicious file on $BUCKETNAME" --text "$CLAMOUT"
fi
WSGIPythonHome {{ edxapp_venv_dir }}
WSGIRestrictEmbedded On
<VirtualHost *:{{ apache_port }}>
<VirtualHost *:*>
ServerName https://{{ lms_env_config.SITE_NAME }}
ServerAlias *.{{ lms_env_config.SITE_NAME }}
UseCanonicalName On
......
......@@ -23,29 +23,34 @@ AWS_S3_LOGS: false
# This relies on your server being able to send mail
AWS_S3_LOGS_NOTIFY_EMAIL: dummy@example.com
AWS_S3_LOGS_FROM_EMAIL: dummy@example.com
# Separate buckets for tracking logs and everything else
# You should be overriding the environment and deployment vars
# Order of precedence is left to right for exclude and include options
AWS_S3_LOG_PATHS:
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/tracking"
path: "{{ COMMON_LOG_DIR }}/tracking/*"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/application"
path: "{{ COMMON_LOG_DIR }}/!(*tracking*)"
- bucket: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}/logs/system"
path: "/var/log/*"
#
# vars are namespace with the module name.
#
aws_role_name: aws
aws_data_dir: "{{ COMMON_DATA_DIR }}/aws"
aws_app_dir: "{{ COMMON_APP_DIR }}/aws"
aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
aws_dirs:
home:
path: "{{ COMMON_APP_DIR }}/{{ aws_role_name }}"
owner: "root"
group: "root"
mode: "0755"
logs:
path: "{{ COMMON_LOG_DIR }}/{{ aws_role_name }}"
owner: "syslog"
group: "syslog"
mode: "0700"
data:
path: "{{ COMMON_DATA_DIR }}/{{ aws_role_name }}"
owner: "root"
group: "root"
mode: "0700"
aws_s3_sync_script: "{{ aws_dirs.home.path }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_dirs.logs.path }}/s3-log-sync.log"
aws_region: "us-east-1"
# default path to the aws binary
s3cmd_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
aws_s3cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
aws_cmd: "/usr/local/bin/aws"
#
# OS packages
......
......@@ -21,26 +21,14 @@
#
#
- name: create data directories
- name: create all service directories
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0700
with_items:
- "{{ aws_data_dir }}"
- "{{ aws_log_dir }}"
- name: create app directory
file: >
path={{ item }}
state=directory
owner=root
group=root
mode=0755
with_items:
- "{{ aws_app_dir }}"
path="{{ item.value.path }}"
state="directory"
owner="{{ item.value.owner }}"
group="{{ item.value.group }}"
mode="{{ item.value.mode }}"
with_dict: aws_dirs
- name: install system packages
apt: >
......@@ -57,18 +45,18 @@
- name: get s3cmd
get_url: >
url={{ aws_s3cmd_url }}
dest={{ aws_data_dir }}/
dest={{ aws_dirs.data.path }}/
- name: untar s3cmd
shell: >
tar xf {{ aws_data_dir }}/{{ aws_s3cmd_version }}.tar.gz
creates={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
chdir={{ aws_app_dir }}
tar xf {{ aws_dirs.data.path }}/{{ aws_s3cmd_version }}.tar.gz
creates={{ aws_dirs.data.path }}/{{ aws_s3cmd_version }}/s3cmd
chdir={{ aws_dirs.home.path }}
- name: create symlink for s3cmd
file: >
src={{ aws_app_dir }}/{{ aws_s3cmd_version }}/s3cmd
dest={{ COMMON_BIN_DIR }}/s3cmd
src={{ aws_dirs.home.path }}/{{ aws_s3cmd_version }}/s3cmd
dest={{ aws_s3cmd }}
state=link
- name: create s3 log sync script
......@@ -84,7 +72,7 @@
dest={{ COMMON_BIN_DIR }}/{{ aws_s3_sync_script|basename }}
when: AWS_S3_LOGS
- name: run s3 log sync script on supervisor shutdown
- name: force logrotate on supervisor stop
template: >
src=etc/init/sync-on-stop.conf.j2
dest=/etc/init/sync-on-stop.conf
......@@ -99,4 +87,5 @@
user: root
minute: 0
job: "{{ aws_s3_sync_script }} > /dev/null 2>&1"
state: absent
when: AWS_S3_LOGS
start on stopped supervisor
description "sync s3 logs on supervisor shutdown"
description "sync tracking logs on supervisor shutdown"
script
/bin/bash {{ aws_s3_sync_script }}
/usr/sbin/logrotate -f /etc/logrotate.d/hourly/tracking.log
/usr/sbin/logrotate -f /etc/logrotate.d/hourly/edx-services
end script
......@@ -4,13 +4,23 @@
#
# This script can be called from logrotate
# to sync logs to s3
#
if (( $EUID != 0 )); then
echo "Please run as the root user"
exit 1
fi
exec > >(tee "{{ aws_s3_logfile }}")
#
# Ensure the log processors can read without
# running as root
if [ ! -f "{{ aws_s3_logfile }}" ]; then
sudo -u syslog touch "{{ aws_s3_logfile }}"
else
chown syslog.syslog "{{ aws_s3_logfile }}"
fi
exec > >(tee -a "{{ aws_s3_logfile }}")
exec 2>&1
# s3cmd sync requires a valid home
......@@ -31,10 +41,12 @@ usage() {
-v add verbosity (set -x)
-n echo what will be done
-h this
-d directory to sync
-b bucket path to sync to
EO
}
while getopts "vhn" opt; do
while getopts "vhnb:d:" opt; do
case $opt in
v)
set -x
......@@ -48,9 +60,21 @@ while getopts "vhn" opt; do
noop="echo Would have run: "
shift
;;
d)
directory=$OPTARG
;;
b)
bucket_path=$OPTARG
;;
esac
done
if [[ -z $bucket_path || -z $directory ]]; then
echo "ERROR: You must provide a directory and a bucket to sync!"
usage
exit 1
fi
# grab the first security group for the instance
# which will be used as a directory name in the s3
# bucket
......@@ -90,9 +114,7 @@ instance_id=$(ec2metadata --instance-id)
ip=$(ec2metadata --local-ipv4)
availability_zone=$(ec2metadata --availability-zone)
# region isn't available via the metadata service
region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
region=${availability_zone:0:${{ lb }}#availability_zone{{ rb }} - 1}
s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%}
$noop {{ s3cmd_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %}
$noop {{ aws_s3cmd }} --multipart-chunk-size-mb 5120 --disable-multipart sync $directory "s3://${bucket_path}/${sec_grp}/${instance_id}-${ip}/"
#!/bin/sh
/etc/browsermob-proxy/bin/browsermob-proxy
/etc/browsermob-proxy/bin/browsermob-proxy $*
......@@ -13,7 +13,7 @@ browser_deb_pkgs:
# which often causes spurious acceptance test failures.
browser_s3_deb_pkgs:
- { name: "google-chrome-stable_30.0.1599.114-1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/google-chrome-stable_30.0.1599.114-1_amd64.deb" }
- { name: "firefox_25.0+build3-0ubuntu0.12.04.1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_25.0%2Bbuild3-0ubuntu0.12.04.1_amd64.deb" }
- { name: "firefox_28.0+build2-0ubuntu0.12.04.1_amd64.deb", url: "https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_28.0%2Bbuild2-0ubuntu0.12.04.1_amd64.deb" }
# Chrome and ChromeDriver
chromedriver_version: 2.6
......
......@@ -71,25 +71,25 @@ certs_env_config:
# CERTS_DATA is legacy, not used
CERT_DATA: {}
QUEUE_NAME: "certificates"
QUEUE_URL: $CERTS_QUEUE_URL
CERT_BUCKET: $CERTS_BUCKET
QUEUE_URL: "{{ CERTS_QUEUE_URL }}"
CERT_BUCKET: "{{ CERTS_BUCKET }}"
# gnupg signing key
CERT_KEY_ID: $CERTS_KEY_ID
CERT_KEY_ID: "{{ CERTS_KEY_ID }}"
LOGGING_ENV: ""
CERT_GPG_DIR: $certs_gpg_dir
CERT_URL: $CERTS_URL
CERT_DOWNLOAD_URL: $CERTS_DOWNLOAD_URL
CERT_WEB_ROOT: $CERTS_WEB_ROOT
COPY_TO_WEB_ROOT: $CERTS_COPY_TO_WEB_ROOT
S3_UPLOAD: $CERTS_S3_UPLOAD
CERT_VERIFY_URL: $CERTS_VERIFY_URL
TEMPLATE_DATA_DIR: $CERTS_TEMPLATE_DATA_DIR
CERT_GPG_DIR: "{{ certs_gpg_dir }}"
CERT_URL: "{{ CERTS_URL }}"
CERT_DOWNLOAD_URL: "{{ CERTS_DOWNLOAD_URL }}"
CERT_WEB_ROOT: "{{ CERTS_WEB_ROOT }}"
COPY_TO_WEB_ROOT: "{{ CERTS_COPY_TO_WEB_ROOT }}"
S3_UPLOAD: "{{ CERTS_S3_UPLOAD }}"
CERT_VERIFY_URL: "{{ CERTS_VERIFY_URL }}"
TEMPLATE_DATA_DIR: "{{ CERTS_TEMPLATE_DATA_DIR }}"
certs_auth_config:
QUEUE_USER: $CERTS_QUEUE_USER
QUEUE_PASS: $CERTS_QUEUE_PASS
QUEUE_AUTH_USER: $CERTS_XQUEUE_AUTH_USER
QUEUE_AUTH_PASS: $CERTS_XQUEUE_AUTH_PASS
CERT_KEY_ID: $CERTS_KEY_ID
CERT_AWS_ID: $CERTS_AWS_ID
CERT_AWS_KEY: $CERTS_AWS_KEY
QUEUE_USER: "{{ CERTS_QUEUE_USER }}"
QUEUE_PASS: "{{ CERTS_QUEUE_PASS }}"
QUEUE_AUTH_USER: "{{ CERTS_XQUEUE_AUTH_USER }}"
QUEUE_AUTH_PASS: "{{ CERTS_XQUEUE_AUTH_PASS }}"
CERT_KEY_ID: "{{ CERTS_KEY_ID }}"
CERT_AWS_ID: "{{ CERTS_AWS_ID }}"
CERT_AWS_KEY: "{{ CERTS_AWS_KEY }}"
......@@ -7,7 +7,12 @@
COMMON_ENABLE_BASIC_AUTH: False
COMMON_HTPASSWD_USER: edx
COMMON_HTPASSWD_PASS: edx
# Turn on syncing logs on rotation for edx
# application and tracking logs, must also
# have the AWS role installed
COMMON_AWS_SYNC: False
COMMON_AWS_SYNC_BUCKET: "edx-{{ COMMON_ENVIRONMENT }}-{{ COMMON_DEPLOYMENT }}"
COMMON_AWS_S3_SYNC_SCRIPT: "{{ COMMON_BIN_DIR }}/send-logs-to-s3"
COMMON_BASE_DIR: /edx
COMMON_DATA_DIR: "{{ COMMON_BASE_DIR}}/var"
COMMON_APP_DIR: "{{ COMMON_BASE_DIR}}/app"
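A small sketch of enabling the new log sync from an overrides file (bucket name is
illustrative; per the comment above, the aws role must also be applied):

  COMMON_AWS_SYNC: True
  COMMON_AWS_SYNC_BUCKET: "edx-prod-mycompany"   # defaults to edx-{environment}-{deployment}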
......@@ -24,6 +29,7 @@ COMMON_ENVIRONMENT: 'default_env'
COMMON_DEPLOYMENT: 'default_deployment'
COMMON_PYPI_MIRROR_URL: 'https://pypi.python.org/simple'
COMMON_NPM_MIRROR_URL: 'http://registry.npmjs.org'
COMMON_UBUNTU_APT_KEYSERVER: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search="
# do not include http/https
COMMON_GIT_MIRROR: 'github.com'
# override this var to set a different hostname
......@@ -38,6 +44,7 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false
COMMON_MOTD_TEMPLATE: "motd.tail.j2"
COMMON_SSH_PASSWORD_AUTH: "no"
COMMON_SECURITY_UPDATES: no
# These are three maintenance accounts across all databases
# the read only user is is granted select privs on all dbs
# the admin user is granted create user privs on all dbs
......@@ -69,6 +76,7 @@ common_debian_pkgs:
- mosh
- rsyslog
- screen
- tmux
- tree
- git
- unzip
......@@ -102,6 +110,10 @@ disable_edx_services: False
# so different start scripts are generated in dev mode.
devstack: False
# Some cluster apps need special settings when in vagrant
# due to eth0 always being the same IP address
vagrant_cluster: False
common_debian_variants:
- Ubuntu
- Debian
......
......@@ -2,5 +2,5 @@
dependencies:
- role: user
user_info: "{{ COMMON_USER_INFO }}"
- role: security
when: COMMON_SECURITY_UPDATES
---
- name: Update CA Certificates
shell: >
/usr/sbin/update-ca-certificates
- name: Add user www-data
# This is the default user for nginx
user: >
......
......@@ -57,6 +57,6 @@ request subnet-mask, broadcast-address, time-offset, routers,
#}
interface "eth0" {
prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{search}}"{%- if not loop.last -%},{%- endif -%}
prepend domain-search {% for search in COMMON_DHCLIENT_DNS_SEARCH -%}"{{ search }}"{%- if not loop.last -%},{%- endif -%}
{%- endfor -%};
}
......@@ -11,4 +11,9 @@
postrotate
/usr/bin/killall -HUP rsyslogd
endscript
lastaction
{% if COMMON_AWS_SYNC -%}
{{ COMMON_AWS_S3_SYNC_SCRIPT }} -d "{{ COMMON_LOG_DIR }}/tracking/*" -b "{{ COMMON_AWS_SYNC_BUCKET }}/logs/tracking"
{% endif -%}
endscript
}
---
DATADOG_API_KEY: "SPECIFY_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
datadog_agent_version: '1:5.0.4-516'
datadog_apt_key: "0x226AE980C7A7DA52"
datadog_debian_pkgs:
- apparmor-utils
- build-essential
......
......@@ -22,17 +22,22 @@
- datadog
- name: add apt key
apt_key: id=C7A7DA52 url={{datadog_apt_key}} state=present
apt_key: id=C7A7DA52 url={{ COMMON_UBUNTU_APT_KEYSERVER }}{{ datadog_apt_key }} state=present
tags:
- datadog
- name: remove unstable apt repository
apt_repository_1.8: repo='deb http://apt.datadoghq.com/ unstable main' validate_certs=no state=absent
tags:
- datadog
- name: install apt repository
apt_repository_1.8: repo='deb http://apt.datadoghq.com/ unstable main' update_cache=yes validate_certs=no
apt_repository_1.8: repo='deb http://apt.datadoghq.com/ stable main' update_cache=yes validate_certs=no
tags:
- datadog
- name: install datadog agent
apt: pkg="datadog-agent"
apt: pkg="datadog-agent={{ datadog_agent_version }}"
tags:
- datadog
......
......@@ -30,6 +30,6 @@ demo_test_users:
password: edx
demo_edxapp_user: 'edxapp'
demo_edxapp_venv_bin: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/venvs/{{demo_edxapp_user}}/bin'
demo_edxapp_course_data_dir: '{{COMMON_DATA_DIR}}/{{demo_edxapp_user}}/data'
demo_edxapp_code_dir: '{{COMMON_APP_DIR}}/{{demo_edxapp_user}}/edx-platform'
demo_edxapp_venv_bin: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/venvs/{{demo_edxapp_user}}/bin'
demo_edxapp_course_data_dir: '{{ COMMON_DATA_DIR }}/{{ demo_edxapp_user }}/data'
demo_edxapp_code_dir: '{{ COMMON_APP_DIR }}/{{ demo_edxapp_user }}/edx-platform'
......@@ -11,7 +11,7 @@ DISCERN_MYSQL_PASSWORD: 'password'
DISCERN_MYSQL_HOST: 'localhost'
DISCERN_MYSQL_PORT: '3306'
DISCERN_LANG: "en_US.UTF-8"
DISCERN_GUNICORN_EXTRA: ""
discern_app_dir: "{{ COMMON_APP_DIR }}/discern"
discern_code_dir: "{{ discern_app_dir }}/discern"
......@@ -53,23 +53,23 @@ discern_env_config:
discern_auth_config:
AWS_ACCESS_KEY_ID: $DISCERN_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY: $DISCERN_SECRET_ACCESS_KEY
BROKER_URL: $DISCERN_BROKER_URL
AWS_ACCESS_KEY_ID: "{{ DISCERN_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "{{ DISCERN_SECRET_ACCESS_KEY }}"
BROKER_URL: "{{ DISCERN_BROKER_URL }}"
CACHES:
default:
BACKEND: 'django.core.cache.backends.memcached.MemcachedCache'
LOCATION: $DISCERN_MEMCACHE
CELERY_RESULT_BACKEND: $DISCERN_RESULT_BACKEND
LOCATION: "{{ DISCERN_MEMCACHE }}"
CELERY_RESULT_BACKEND: "{{ DISCERN_RESULT_BACKEND }}"
DATABASES:
default:
ENGINE: django.db.backends.mysql
HOST: $DISCERN_MYSQL_HOST
NAME: $DISCERN_MYSQL_DB_NAME
PASSWORD: $DISCERN_MYSQL_PASSWORD
PORT: $DISCERN_MYSQL_PORT
USER: $DISCERN_MYSQL_USER
GOOGLE_ANALYTICS_PROPERTY_ID: $DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID
HOST: "{{ DISCERN_MYSQL_HOST }}"
NAME: "{{ DISCERN_MYSQL_DB_NAME }}"
PASSWORD: "{{ DISCERN_MYSQL_PASSWORD }}"
PORT: "{{ DISCERN_MYSQL_PORT }}"
USER: "{{ DISCERN_MYSQL_USER }}"
GOOGLE_ANALYTICS_PROPERTY_ID: "{{ DISCERN_GOOGLE_ANALYTICS_PROPERTY_ID }}"
discern_debian_pkgs:
......
......@@ -51,7 +51,7 @@
#Numpy has to be a pre-requirement in order for scipy to build
- name : install python pre-requirements for discern and ease
pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
requirements={{ item }} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}"
notify:
......@@ -62,7 +62,7 @@
- name : install python requirements for discern and ease
pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
requirements={{ item }} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}"
notify:
......@@ -84,8 +84,8 @@
tar zxf {{ discern_nltk_tmp_file }}
rm -f {{ discern_nltk_tmp_file }}
touch {{ discern_nltk_download_url|basename }}-installed
creates={{ discern_data_dir }}/{{ discern_nltk_download_url|basename }}-installed
chdir={{ discern_data_dir }}
creates={{ discern_data_dir }}/{{ discern_nltk_download_url|basename }}-installed
chdir={{ discern_data_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
......@@ -95,8 +95,8 @@
#support virtualenvs as of this comment
- name: django syncdb migrate and collectstatic for discern
shell: >
{{ discern_venv_dir }}/bin/python {{discern_code_dir}}/manage.py {{item}} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
{{ discern_venv_dir }}/bin/python {{ discern_code_dir }}/manage.py {{ item }} --noinput --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
......@@ -107,8 +107,8 @@
#Have this separate from the other three because it doesn't take the noinput flag
- name: django update_index for discern
shell: >
{{ discern_venv_dir}}/bin/python {{discern_code_dir}}/manage.py update_index --settings={{discern_settings}} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
{{ discern_venv_dir}}/bin/python {{ discern_code_dir }}/manage.py update_index --settings={{ discern_settings }} --pythonpath={{discern_code_dir}}
chdir={{ discern_code_dir }}
sudo_user: "{{ discern_user }}"
notify:
- restart discern
......
......@@ -9,9 +9,9 @@ stop on runlevel [!2345]
respawn
respawn limit 3 30
env DJANGO_SETTINGS_MODULE={{discern_settings}}
env DJANGO_SETTINGS_MODULE={{ discern_settings }}
chdir {{ discern_code_dir }}
setuid {{discern_user}}
setuid {{ discern_user }}
exec {{ discern_venv_dir }}/bin/python {{ discern_code_dir }}/manage.py celeryd --loglevel=info --settings={{ discern_settings }} --pythonpath={{ discern_code_dir }} -B --autoscale={{ ansible_processor_cores * 2 }},1
[program:discern]
{% if ansible_processor|length > 0 %}
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ ansible_processor|length * discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ ansible_processor|length * discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} {{ DISCERN_GUNICORN_EXTRA }} discern.wsgi
{% else %}
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} discern.wsgi
command={{ discern_venv_bin }}/gunicorn --preload -b {{ discern_gunicorn_host }}:{{ discern_gunicorn_port }} -w {{ discern_worker_mult }} --timeout=30 --pythonpath={{ discern_code_dir }} {{ DISCERN_GUNICORN_EXTRA }} discern.wsgi
{% endif %}
user={{ common_web_user }}
directory={{ discern_code_dir }}
......
......@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run
-h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code edx-analytics-data-api
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code, edx-analytics-data-api
<version> - can be a commit or tag
EO
......@@ -51,6 +51,7 @@ repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuratio
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
repos_to_cmd["edx-analytics-data-api"]="$edx_ansible_cmd analyticsapi.yml -e 'ANALYTICS_API_VERSION=$2'"
repos_to_cmd["edx-ora2"]="$edx_ansible_cmd ora2.yml -e 'ora2_version=$2'"
repos_to_cmd["insights"]="$edx_ansible_cmd insights.yml -e 'INSIGHTS_VERSION=$2'"
if [[ -z $1 || -z $2 ]]; then
......
......@@ -27,11 +27,11 @@
when: EDXAPP_USE_GIT_IDENTITY
# Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}}
- name: checkout edx-platform repo into {{ edxapp_code_dir }}
git: >
dest={{edxapp_code_dir}}
repo={{edx_platform_repo}}
version={{edx_platform_version}}
dest={{ edxapp_code_dir }}
repo={{ edx_platform_repo }}
version={{ edx_platform_version }}
accept_hostkey=yes
sudo_user: "{{ edxapp_user }}"
environment:
......@@ -42,7 +42,7 @@
- "restart edxapp_workers"
- name: git clean after checking out edx-platform
shell: cd {{edxapp_code_dir}} && git clean -xdf
shell: cd {{ edxapp_code_dir }} && git clean -xdf
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
......@@ -50,9 +50,9 @@
- name: checkout theme
git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}}
repo={{edxapp_theme_source_repo}}
version={{edxapp_theme_version}}
dest={{ edxapp_app_dir }}/themes/{{ edxapp_theme_name }}
repo={{ edxapp_theme_source_repo }}
version={{ edxapp_theme_version }}
accept_hostkey=yes
when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}"
......@@ -91,8 +91,8 @@
- name: gem install bundler
shell: >
gem install bundle
chdir={{ edxapp_code_dir }}
executable=/bin/bash
chdir={{ edxapp_code_dir }}
executable=/bin/bash
environment: "{{ edxapp_environment }}"
sudo_user: "{{ edxapp_user }}"
notify:
......@@ -102,8 +102,8 @@
- name: bundle install
shell: >
bundle install --binstubs
chdir={{ edxapp_code_dir }}
executable=/bin/bash
chdir={{ edxapp_code_dir }}
executable=/bin/bash
sudo_user: "{{ edxapp_user }}"
environment: "{{ edxapp_environment }}"
notify:
......@@ -144,8 +144,8 @@
# Install the python pre requirements into {{ edxapp_venv_dir }}
- name : install python pre-requirements
pip: >
requirements="{{pre_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
requirements="{{ pre_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
......@@ -173,8 +173,8 @@
# Install the python post requirements into {{ edxapp_venv_dir }}
- name : install python post-requirements
pip: >
requirements="{{post_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
requirements="{{ post_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
......@@ -187,8 +187,8 @@
# Install the python paver requirements into {{ edxapp_venv_dir }}
- name : install python paver-requirements
pip: >
requirements="{{paver_requirements_file}}"
virtualenv="{{edxapp_venv_dir}}"
requirements="{{ paver_requirements_file }}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edxapp_user }}"
......@@ -257,7 +257,7 @@
- name: install CAS attribute module
pip: >
name="{{ EDXAPP_CAS_ATTRIBUTE_PACKAGE }}"
virtualenv="{{edxapp_venv_dir}}"
virtualenv="{{ edxapp_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_user }}"
......@@ -294,8 +294,8 @@
- name: code sandbox | Install base sandbox requirements and create sandbox virtualenv
pip: >
requirements="{{sandbox_base_requirements}}"
virtualenv="{{edxapp_sandbox_venv_dir}}"
requirements="{{ sandbox_base_requirements }}"
virtualenv="{{ edxapp_sandbox_venv_dir }}"
state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }} --exists-action w --use-mirrors"
sudo_user: "{{ edxapp_sandbox_user }}"
......
......@@ -26,6 +26,7 @@
- "{{ edxapp_theme_dir }}"
- "{{ edxapp_staticfile_dir }}"
- "{{ edxapp_course_static_dir }}"
- "{{ edxapp_course_data_dir }}"
# This is a symlink that has to exist because
# we currently can't override the DATA_DIR var
......@@ -38,7 +39,6 @@
state=link
owner="{{ edxapp_user }}"
group="{{ common_web_group }}"
- name: create edxapp log dir
file: >
......@@ -70,6 +70,12 @@
- "restart edxapp"
- "restart edxapp_workers"
- name: set up edxapp .npmrc
template:
src=.npmrc.j2 dest={{ edxapp_app_dir }}/.npmrc
owner={{ edxapp_user }} group={{ common_web_group }}
mode=0600
- name: create log directories for service variants
notify:
- "restart edxapp"
......@@ -81,7 +87,7 @@
with_items: service_variants_enabled
# Set up the python sandbox execution environment
- include: python_sandbox_env.yml
- include: python_sandbox_env.yml tags=deploy
when: EDXAPP_PYTHON_SANDBOX
- include: deploy.yml tags=deploy
# Set the alternatives this way for blas and lapack to work correctly for the
# MITx 6.341x course.
# TODO: Switch to using alternatives module in 1.6
- name: code sandbox | Use libblas for 3gf
command: update-alternatives --set libblas.so.3gf /usr/lib/libblas/libblas.so.3gf
# TODO: Switch to using alternatives module in 1.6
- name: code sandbox | Use liblapac for 3gf
command: update-alternatives --set liblapack.so.3gf /usr/lib/lapack/liblapack.so.3gf
- name: code sandbox | Create edxapp sandbox user
user: name={{ edxapp_sandbox_user }} shell=/bin/false home={{ edxapp_sandbox_venv_dir }}
notify:
......
- name: make the course data updatable by the edxapp user
file:
path="{{ edxapp_course_data_dir }}"
state=directory
recurse=yes
owner="{{ edxapp_user }}"
group="{{ edxapp_user }}"
- name: clone the xml course repo
git: >
repo="{{ item.repo_url }}"
......
registry={{ COMMON_NPM_MIRROR_URL }}
{% if devstack %}
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ edxapp_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find /tmp/codejail-*/tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} ;
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ edxapp_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% else %}
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:{{ edxapp_sandbox_venv_dir }}/bin/python
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/bin/rm /tmp/codejail-*/tmp
{{ common_web_user }} ALL=({{ edxapp_sandbox_user }}) SETENV:NOPASSWD:/usr/bin/find /tmp/codejail-*/tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} ;
{{ common_web_user }} ALL=(ALL) NOPASSWD:/bin/kill
{{ common_web_user }} ALL=(ALL) NOPASSWD:/usr/bin/pkill
{% endif %}
{% do cms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% do cms_auth_config.update(EDXAPP_CMS_AUTH_EXTRA) %}
{% for key, value in cms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do cms_auth_config.update({key: None }) %}
......
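For reference, a minimal sketch of how these hooks might be supplied from an overrides file; both variables are plain dicts that the template merges into the generated auth config, and the keys below are purely illustrative, not real settings. The LMS template applies EDXAPP_AUTH_EXTRA and EDXAPP_LMS_AUTH_EXTRA the same way (see the later hunk).
# illustrative overrides only
EDXAPP_AUTH_EXTRA:
  EXTRA_SHARED_SETTING: "applies to both LMS and CMS"
EDXAPP_CMS_AUTH_EXTRA:
  EXTRA_CMS_ONLY_SETTING: "applies to the CMS only"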
......@@ -16,15 +16,15 @@ command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host
{% else -%}
{# This is for backwards compatibility, set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} cms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_cms_gunicorn_host }}:{{ edxapp_cms_gunicorn_port }} -w {{ worker_core_mult.cms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_CMS_GUNICORN_EXTRA }} cms.wsgi
{% endif -%}
{% endif -%}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{edxapp_cms_gunicorn_port}},ADDRESS={{edxapp_cms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_cms_env }},SERVICE_VARIANT="cms"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_CMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%}PORT={{ edxapp_cms_gunicorn_port }},ADDRESS={{ edxapp_cms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_CMS_ENV }},SERVICE_VARIANT="cms"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
......
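The supervisor template now interpolates an extra-arguments hook directly into the gunicorn command line. A hedged sketch of how it might be set in an overrides file, assuming standard gunicorn flags (the value is only an example); the LMS template below gains the analogous EDXAPP_LMS_GUNICORN_EXTRA hook.
# illustrative override only
EDXAPP_CMS_GUNICORN_EXTRA: "--log-level info"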
......@@ -9,7 +9,7 @@
/tmp/codejail-*/** wrix,
#
# Whitelist particiclar shared objects from the system
# Whitelist particular shared objects from the system
# python installation
#
/usr/lib/python2.7/lib-dynload/_json.so mr,
......@@ -21,6 +21,22 @@
/usr/lib/python2.7/lib-dynload/_elementtree.so mr,
/usr/lib/python2.7/lib-dynload/pyexpat.so mr,
# Matplotlib needs a place for temp caches
{{ edxapp_sandbox_venv_dir }}/.config/ wrix,
{{ edxapp_sandbox_venv_dir }}/.cache/ wrix,
{{ edxapp_sandbox_venv_dir }}/.config/** wrix,
{{ edxapp_sandbox_venv_dir }}/.cache/** wrix,
# Matplotlib related libraries
/usr/lib/python2.7/lib-dynload/termios.so mr,
/usr/lib/python2.7/lib-dynload/parser.so mr,
# Matplotlib needs fonts to make graphs
/usr/share/fonts/ r,
/usr/share/fonts/** r,
/usr/local/share/fonts/ r,
/usr/local/share/fonts/** r,
#
# Allow access to selections from /proc
#
......
{% do lms_auth_config.update(EDXAPP_AUTH_EXTRA) %}
{% do lms_auth_config.update(EDXAPP_LMS_AUTH_EXTRA) %}
{% for key, value in lms_auth_config.iteritems() %}
{% if value == 'None' %}
{% do lms_auth_config.update({key: None }) %}
......
......@@ -17,15 +17,15 @@ command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host
{% else -%}
{# This is for backwards compatibility, set workers explicitly using EDXAPP_WORKERS #}
{% if ansible_processor|length > 0 -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ ansible_processor|length * worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_LMS_GUNICORN_EXTRA }} lms.wsgi
{% else -%}
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} lms.wsgi
command={{ executable }} {{ max_req }} --preload -b {{ edxapp_lms_gunicorn_host }}:{{ edxapp_lms_gunicorn_port }} -w {{ worker_core_mult.lms }} --timeout=300 --pythonpath={{ edxapp_code_dir }} {{ EDXAPP_LMS_GUNICORN_EXTRA }} lms.wsgi
{% endif %}
{% endif %}
user={{ common_web_user }}
directory={{ edxapp_code_dir }}
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{edxapp_lms_gunicorn_port}},ADDRESS={{edxapp_lms_gunicorn_host}},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ edxapp_lms_env }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
environment={% if COMMON_ENABLE_NEWRELIC_APP %}NEW_RELIC_APP_NAME={{ EDXAPP_NEWRELIC_LMS_APPNAME }},NEW_RELIC_LICENSE_KEY={{ NEWRELIC_LICENSE_KEY }},{% endif -%} PORT={{ edxapp_lms_gunicorn_port }},ADDRESS={{ edxapp_lms_gunicorn_host }},LANG={{ EDXAPP_LANG }},DJANGO_SETTINGS_MODULE={{ EDXAPP_LMS_ENV }},SERVICE_VARIANT="lms",PATH="{{ edxapp_deploy_path }}"
stdout_logfile={{ supervisor_log_dir }}/%(program_name)-stdout.log
stderr_logfile={{ supervisor_log_dir }}/%(program_name)-stderr.log
killasgroup=true
......
......@@ -17,7 +17,7 @@
mysql_user: >
name={{ EDXAPP_MYSQL_USER }}
password={{ EDXAPP_MYSQL_PASSWORD }}
priv='{{EDXAPP_MYSQL_DB_NAME}}.*:ALL'
priv='{{ EDXAPP_MYSQL_DB_NAME }}.*:ALL'
when: EDXAPP_MYSQL_USER is defined
- name: create a database for edxapp
......@@ -31,7 +31,7 @@
mysql_user: >
name={{ XQUEUE_MYSQL_USER }}
password={{ XQUEUE_MYSQL_PASSWORD }}
priv='{{XQUEUE_MYSQL_DB_NAME}}.*:ALL'
priv='{{ XQUEUE_MYSQL_DB_NAME }}.*:ALL'
when: XQUEUE_MYSQL_USER is defined
- name: create a database for xqueue
......@@ -45,7 +45,7 @@
mysql_user: >
name={{ ORA_MYSQL_USER }}
password={{ ORA_MYSQL_PASSWORD }}
priv='{{ORA_MYSQL_DB_NAME}}.*:ALL'
priv='{{ ORA_MYSQL_DB_NAME }}.*:ALL'
when: ORA_MYSQL_USER is defined
- name: create a database for ora
......
......@@ -3,11 +3,11 @@
# Path to directory where to store index data allocated for this node.
#
path.data: {{elasticsearch_data_dir}}
path.data: {{ elasticsearch_data_dir }}
# Path to log files:
#
path.logs: {{elasticsearch_log_dir}}
path.logs: {{ elasticsearch_log_dir }}
# ElasticSearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
......@@ -43,3 +43,8 @@ script.disable_dynamic: true
discovery.zen.ping.unicast.hosts: ['{{hosts|join("\',\'") }}']
{% endif -%}
{% if vagrant_cluster|bool %}
network:
host: {{ ansible_ssh_host }}
{% endif %}
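For clarity, a sketch of what the rendered elasticsearch.yml would contain when vagrant_cluster is truthy; the address is illustrative and simply mirrors whatever ansible_ssh_host holds for the target host.
# illustrative rendered output only
network:
    host: 192.168.33.10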
......@@ -24,4 +24,4 @@ flower_deploy_path: "{{ flower_venv_bin }}:/usr/local/sbin:/usr/local/bin:/usr/b
flower_broker: "amqp://{{ FLOWER_BROKER_USERNAME }}:{{ FLOWER_BROKER_PASSWORD }}@{{ FLOWER_BROKER_HOST }}:{{ FLOWER_BROKER_PORT }}"
flower_environment:
PATH: $flower_deploy_path
PATH: "{{ flower_deploy_path }}"