Commit d7b11536 by John Jarvis

Merge pull request #1023 from edx/jarv/rc-injera

Jarv/rc injera
parents ccaaa24c fc0ab7bc
...@@ -17,3 +17,15 @@ Kevin Luo <kevluo@edx.org> ...@@ -17,3 +17,15 @@ Kevin Luo <kevluo@edx.org>
Carson Gee <x@carsongee.com> Carson Gee <x@carsongee.com>
Xavier Antoviaque <xavier@antoviaque.org> Xavier Antoviaque <xavier@antoviaque.org>
James Tauber <jtauber@jtauber.com> James Tauber <jtauber@jtauber.com>
Bertrand Marron <bertrand.marron@ionis-group.com>
Han Su Kim <hkim823@gmail.com>
Ned Batchelder <ned@nedbatchelder.com>
Dave St.Germain <dstgermain@edx.org>
Gabe Mulley <gabe@edx.org>
Greg Price <gprice@edx.org>
William Desloge <william.desloge@ionis-group.com>
Valera Rozuvan <valera.rozuvan@gmail.com>
Ker Ruben Ramos <xdiscent@gmail.com>
Fred Smith <derf@edx.org>
Wang Peifeng <pku9104038@hotmail.com>
Ray Hooker <ray.hooker@gmail.com>
...@@ -99,10 +99,20 @@ ...@@ -99,10 +99,20 @@
"Type":"Number", "Type":"Number",
"Default":"8090" "Default":"8090"
}, },
"MongoServicePort":{ "VPCSubnet":{
"Description":"The TCP port for the deployment mongo server", "Description":"The subnet CIDR for the whole VPC.",
"Type":"Number", "Type":"String",
"Default":"10001" "Default":"10.254.0.0/16"
},
"PrivateSubnet":{
"Description":"The subnet CIDR for the private VPC subnet.",
"Type":"String",
"Default":"10.254.0.0/24"
},
"PublicSubnet":{
"Description":"The subnet CIDR for the public VPC subnet.",
"Type":"String",
"Default":"10.254.1.0/24"
} }
}, },
"Mappings":{ "Mappings":{
...@@ -141,11 +151,6 @@ ...@@ -141,11 +151,6 @@
"ap-northeast-1": { "AMI":"ami-14d86d15" }, "ap-northeast-1": { "AMI":"ami-14d86d15" },
"sa-east-1": { "AMI":"ami-0439e619" } "sa-east-1": { "AMI":"ami-0439e619" }
}, },
"SubnetConfig":{
"VPC": { "CIDR":"10.0.0.0/16" },
"Public01": { "CIDR":"10.0.0.0/24" },
"Admin": { "CIDR":"10.0.185.0/24" }
},
"MapRegionsToAvailZones":{ "MapRegionsToAvailZones":{
"us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" }, "us-east-1": { "AZone2":"us-east-1d", "AZone0":"us-east-1b", "AZone1":"us-east-1c" },
"us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" }, "us-west-1": { "AZone0":"us-west-1a", "AZone2":"us-west-1b", "AZone1":"us-west-1c" },
...@@ -163,7 +168,7 @@ ...@@ -163,7 +168,7 @@
"Properties":{ "Properties":{
"EnableDnsSupport" : "true", "EnableDnsSupport" : "true",
"EnableDnsHostnames" : "true", "EnableDnsHostnames" : "true",
"CidrBlock":"10.0.0.0/16", "CidrBlock": { "Ref": "VPCSubnet" },
"InstanceTenancy":"default" "InstanceTenancy":"default"
} }
}, },
...@@ -173,13 +178,7 @@ ...@@ -173,13 +178,7 @@
"VpcId":{ "VpcId":{
"Ref":"AdminVPC" "Ref":"AdminVPC"
}, },
"CidrBlock":{ "CidrBlock":{ "Ref": "PublicSubnet" },
"Fn::FindInMap":[
"SubnetConfig",
"Public01",
"CIDR"
]
},
"AvailabilityZone":{ "AvailabilityZone":{
"Fn::FindInMap":[ "Fn::FindInMap":[
"MapRegionsToAvailZones", "MapRegionsToAvailZones",
...@@ -201,13 +200,7 @@ ...@@ -201,13 +200,7 @@
"VpcId":{ "VpcId":{
"Ref":"AdminVPC" "Ref":"AdminVPC"
}, },
"CidrBlock":{ "CidrBlock":{ "Ref": "PrivateSubnet" },
"Fn::FindInMap":[
"SubnetConfig",
"Admin",
"CIDR"
]
},
"AvailabilityZone":{ "AvailabilityZone":{
"Fn::FindInMap":[ "Fn::FindInMap":[
"MapRegionsToAvailZones", "MapRegionsToAvailZones",
...@@ -589,12 +582,6 @@ ...@@ -589,12 +582,6 @@
"FromPort":"443", "FromPort":"443",
"ToPort":"443", "ToPort":"443",
"CidrIp":"0.0.0.0/0" "CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "MongoServicePort" },
"ToPort":{ "Ref": "MongoServicePort" },
"CidrIp":"0.0.0.0/0"
} }
], ],
"SecurityGroupEgress":[ "SecurityGroupEgress":[
...@@ -617,12 +604,6 @@ ...@@ -617,12 +604,6 @@
"FromPort":"443", "FromPort":"443",
"ToPort":"443", "ToPort":"443",
"CidrIp":"0.0.0.0/0" "CidrIp":"0.0.0.0/0"
},
{
"IpProtocol":"tcp",
"FromPort":{ "Ref": "MongoServicePort" },
"ToPort":{ "Ref": "MongoServicePort" },
"CidrIp":"0.0.0.0/0"
} }
] ]
} }
......
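The hunk above replaces the hard-coded SubnetConfig mapping with VPCSubnet, PrivateSubnet and PublicSubnet parameters that the VPC and subnet resources now pick up via Ref, so the CIDRs can be chosen per stack. A minimal sketch of overriding them from Ansible's cloudformation module; the stack name, template path and CIDR values are placeholders and not part of this commit:

- hosts: localhost
  connection: local
  gather_facts: False
  tasks:
    # Hypothetical stack creation showing the new parameters being overridden;
    # point the template path at wherever the admin VPC template lives.
    - name: create the admin VPC stack with custom CIDRs
      cloudformation:
        stack_name: "admin-vpc-example"
        state: present
        region: "us-east-1"
        template: "cloudformation_templates/admin-vpc.json"
        template_parameters:
          VPCSubnet: "10.10.0.0/16"
          PrivateSubnet: "10.10.0.0/24"
          PublicSubnet: "10.10.1.0/24"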
...@@ -6,4 +6,4 @@ ...@@ -6,4 +6,4 @@
jinja2_extensions=jinja2.ext.do jinja2_extensions=jinja2.ext.do
host_key_checking = False host_key_checking = False
roles_path=../../../ansible-roles roles_path=../../ansible-roles/roles:../../ansible-private/roles:../../ansible-roles/
...@@ -6,4 +6,4 @@ ...@@ -6,4 +6,4 @@
jinja2_extensions=jinja2.ext.do jinja2_extensions=jinja2.ext.do
host_key_checking=False host_key_checking=False
roles_path=../../../ansible-roles roles_path=../../../ansible-roles/roles:../../../ansible-private/roles:../../../ansible-roles/
# ansible-playbook -i ec2.py commoncluster.yml --extra-vars "deployment=edge env=stage" -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts # ansible-playbook -i ec2.py commoncluster.yml --limit tag_Name_stage-edx-commoncluster -e@/path/to/vars/env-deployment.yml -T 30 --list-hosts
- hosts: tag_play_commoncluster:&tag_environment_{{ env }}:&tag_deployment_{{ deployment }} - hosts: all
sudo: True sudo: True
serial: 1 serial: 1
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws
- role: nginx
nginx_sites:
- xqueue
- role: xqueue
- role: datadog
when: ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
- oraclejdk - oraclejdk
- elasticsearch - elasticsearch
- rabbitmq - rabbitmq
...@@ -13,7 +28,9 @@ ...@@ -13,7 +28,9 @@
# #
# In order to reconfigure the host resolution we are issuing a # In order to reconfigure the host resolution we are issuing a
# reboot. # reboot.
- hosts: tag_play_commoncluster:&tag_environment_{{ env }}:&tag_deployment_{{ deployment }} # TODO: We should probably poll to ensure the host comes back before moving
# to the next host so that we don't reboot all of the servers simultaneously
- hosts: all
sudo: True sudo: True
serial: 1 serial: 1
vars: vars:
...@@ -21,3 +38,5 @@ ...@@ -21,3 +38,5 @@
tasks: tasks:
- name: reboot - name: reboot
command: /sbin/shutdown -r now "Reboot is triggered by Ansible" command: /sbin/shutdown -r now "Reboot is triggered by Ansible"
when: reboot
tags: reboot
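The TODO above calls for polling until each rebooted host returns before the serial run moves on. A hedged sketch of one way that could look; the port, delays and use of the inventory hostname are assumptions, not part of this commit:

    - name: wait for the host to come back from the reboot
      # Poll from the control machine so the play does not advance to the
      # next host (serial: 1) until sshd is reachable again.
      local_action: wait_for host={{ inventory_hostname }} port=22 delay=30 timeout=600 state=started
      sudo: False
      when: reboot
      tags: reboot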
...@@ -83,18 +83,49 @@ ...@@ -83,18 +83,49 @@
login_host: "{{ item.db_host }}" login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}" login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}" login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%' host: '%'
when: item.db_user != 'None'
with_items: with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}" - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}" db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ db_root_pass }}" db_pass: "{{ db_root_pass }}"
- db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}" - db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ xqueue_db_root_user }}" db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ db_root_pass }}" db_pass: "{{ db_root_pass }}"
- db_host: "{{ ORA_MYSQL_HOST|default('None') }}" - db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ ora_db_root_user }}" db_user: "{{ ora_db_root_user }}"
db_pass: "{{ db_root_pass }}" db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for migrate user
mysql_user:
name: "{{ COMMON_MYSQL_MIGRATE_USER }}"
priv: "{{ item.db_name }}.*:SELECT,INSERT,UPDATE,DELETE,ALTER,CREATE,DROP,INDEX"
password: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%'
when: item.db_user != 'None'
with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_name: "{{ EDXAPP_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ edxapp_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ XQUEUE_MYSQL_HOST|default('None') }}"
db_name: "{{ XQUEUE_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ xqueue_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- db_host: "{{ ORA_MYSQL_HOST|default('None') }}"
db_name: "{{ ORA_MYSQL_DB_NAME|default('None') }}"
db_user: "{{ ora_db_root_user }}"
db_pass: "{{ db_root_pass }}"
- name: assign mysql user permissions for admin user - name: assign mysql user permissions for admin user
mysql_user: mysql_user:
name: "{{ COMMON_MYSQL_ADMIN_USER }}" name: "{{ COMMON_MYSQL_ADMIN_USER }}"
...@@ -103,7 +134,9 @@ ...@@ -103,7 +134,9 @@
login_host: "{{ item.db_host }}" login_host: "{{ item.db_host }}"
login_user: "{{ item.db_user }}" login_user: "{{ item.db_user }}"
login_password: "{{ item.db_pass }}" login_password: "{{ item.db_pass }}"
append_privs: yes
host: '%' host: '%'
when: item.db_user != 'None'
with_items: with_items:
- db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}" - db_host: "{{ EDXAPP_MYSQL_HOST|default('None') }}"
db_user: "{{ edxapp_db_root_user }}" db_user: "{{ edxapp_db_root_user }}"
......
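The new migrate-user grants above only fire when the corresponding *_MYSQL_HOST and root credentials are supplied and the COMMON_MYSQL_MIGRATE_* account is defined. A hedged sketch of the extra vars a deployer might pass; every value is a placeholder:

# Placeholder values only; real hosts and passwords belong in a secure vars file.
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: 'changeme'
EDXAPP_MYSQL_HOST: 'edxapp-rds.example.com'
EDXAPP_MYSQL_DB_NAME: 'edxapp'
XQUEUE_MYSQL_HOST: 'xqueue-rds.example.com'
XQUEUE_MYSQL_DB_NAME: 'xqueue'
ORA_MYSQL_HOST: 'ora-rds.example.com'
ORA_MYSQL_DB_NAME: 'ora'
db_root_pass: 'changeme'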
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
- ora - ora
- xqueue - xqueue
- xserver - xserver
- certs
nginx_default_sites: nginx_default_sites:
- lms - lms
- edxlocal - edxlocal
......
...@@ -3,10 +3,19 @@ ...@@ -3,10 +3,19 @@
hosts: all hosts: all
sudo: True sudo: True
gather_facts: True gather_facts: True
vars:
ENABLE_DATADOG: False
ENABLE_SPLUNKFORWARDER: False
ENABLE_NEWRELIC: False
roles: roles:
- aws - aws
- edx_ansible - edx_ansible
- user - user
- jenkins_admin - jenkins_admin
- hotg - hotg
- newrelic - role: datadog
when: ENABLE_DATADOG
- role: splunkforwarder
when: ENABLE_SPLUNKFORWARDER
- role: newrelic
when: ENABLE_NEWRELIC
...@@ -8,6 +8,12 @@ ...@@ -8,6 +8,12 @@
gather_facts: True gather_facts: True
vars: vars:
mongo_enable_journal: False mongo_enable_journal: False
vars_files:
- roles/edxapp/defaults/main.yml
- roles/ora/defaults/main.yml
- roles/xqueue/defaults/main.yml
- roles/xserver/defaults/main.yml
- roles/forum/defaults/main.yml
roles: roles:
- common - common
- edxlocal - edxlocal
......
# Example ansible-playbook -i redirect.example.com -e@/path/to/secure/var/file.yml
#
# the secure var file will need to have the following vars defined:
#
# NGINX_ENABLE_SSL
# NGINX_SSL_CERTIFICATE
# NGINX_SSL_KEY
# for the redirects use $scheme://example.com to match the protocol
#
# secure vars example:
# # Vars for setting up the nginx redirect instance
# NGINX_ENABLE_SSL: True
# NGINX_SSL_CERTIFICATE: '../../../example-secure/ssl/example.com.crt'
# NGINX_SSL_KEY: '../../../example-secure/ssl/example.com.key'
# nginx_redirects:
# - server_name: nginx-redirect.example.edx.org
# redirect: "http://www.example.com"
# - server_name: example.com
# redirect: "http://www.example.com"
# default: true
#
#
#
# - ...
- name: utility play to setup an nginx redirect
hosts: all
sudo: True
gather_facts: True
roles:
- role: nginx
nginx_sites:
- nginx_redirect
# Deploy a specific version of edx-ora2 and re-run migrations
# edx-ora2 is already included in the requirements for edx-platform,
# but we need to override that version when deploying to
# the continuous integration server for testing ora2 changes.
- name: Update edx-ora2
hosts: all
sudo: True
gather_facts: True
vars:
- edxapp_venv_dir: "/edx/app/edxapp/venvs/edxapp"
- edxapp_code_dir: "/edx/app/edxapp/edx-platform"
- edxapp_deploy_path: "{{ edxapp_venv_dir }}/bin:{{ edxapp_code_dir }}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- edxapp_user: "edxapp"
- edxapp_mysql_user: "migrate"
- edxapp_mysql_password: "password"
- supervisorctl_path: "/edx/bin/supervisorctl"
- ora2_version: "master"
- ora2_pip_req: "git+https://github.com/edx/edx-ora2.git@{{ ora2_version }}#egg=edx-ora2"
tasks:
- name: install edx-ora2
shell: >
{{ edxapp_venv_dir }}/bin/pip install -e {{ ora2_pip_req }}
chdir={{ edxapp_code_dir }}
environment:
PATH: "{{ edxapp_deploy_path }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- name: syncdb and migrate
shell: >
{{ edxapp_venv_dir }}/bin/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
chdir={{ edxapp_code_dir }}
environment:
DB_MIGRATION_USER: "{{ edxapp_mysql_user }}"
DB_MIGRATION_PASS: "{{ edxapp_mysql_password }}"
notify:
- "restart edxapp"
handlers:
- name: restart edxapp
shell: "{{ supervisorctl_path }} restart edxapp:{{ item }}"
with_items:
- lms
- cms
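Any of the vars in the edx-ora2 play above can be overridden at run time. A hedged example of the extra vars a CI job might pass; the branch name and password are placeholders:

# Placeholder extra vars for the edx-ora2 play above.
ora2_version: "my-feature-branch"      # branch, tag or commit of edx-ora2 to install
edxapp_mysql_password: "changeme"      # password for the migrate user on the sandbox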
- name: Deploy rabbitmq - name: Deploy rabbitmq
hosts: all hosts: all
sudo: True sudo: True
gather_facts: False # The rabbitmq role depends on
# ansible_default_ipv4 so
# gather_facts must be set to True
gather_facts: True
roles: roles:
- aws - aws
- rabbitmq - rabbitmq
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
- forum - forum
- { role: "xqueue", update_users: True } - { role: "xqueue", update_users: True }
- ora - ora
- discern - certs
- edx_ansible - edx_ansible
- role: datadog - role: datadog
when: ENABLE_DATADOG when: ENABLE_DATADOG
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
- name: checkout code - name: checkout code
git: git:
dest={{ as_code_dir }} repo={{ as_source_repo }} dest={{ as_code_dir }} repo={{ as_source_repo }}
accept_hostkey=yes
version={{ as_version }} force=true version={{ as_version }} force=true
environment: environment:
GIT_SSH: $as_git_ssh GIT_SSH: $as_git_ssh
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
- name: checkout code - name: checkout code
git: git:
dest={{ analytics_code_dir }} repo={{ analytics_source_repo }} dest={{ analytics_code_dir }} repo={{ analytics_source_repo }}
accept_hostkey=yes
version={{ analytics_version }} force=true version={{ analytics_version }} force=true
environment: environment:
GIT_SSH: $analytics_git_ssh GIT_SSH: $analytics_git_ssh
......
...@@ -43,7 +43,7 @@ aws_app_dir: "{{ COMMON_APP_DIR }}/aws" ...@@ -43,7 +43,7 @@ aws_app_dir: "{{ COMMON_APP_DIR }}/aws"
aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3" aws_s3_sync_script: "{{ aws_app_dir }}/send-logs-to-s3"
aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log" aws_s3_logfile: "{{ aws_log_dir }}/s3-log-sync.log"
aws_log_dir: "{{ COMMON_LOG_DIR }}/aws" aws_log_dir: "{{ COMMON_LOG_DIR }}/aws"
aws_region: "us-east-1"
# default path to the aws binary # default path to the aws binary
aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd" aws_cmd: "{{ COMMON_BIN_DIR }}/s3cmd"
# #
......
...@@ -10,12 +10,7 @@ if (( $EUID != 0 )); then ...@@ -10,12 +10,7 @@ if (( $EUID != 0 )); then
exit 1 exit 1
fi fi
S3_LOGFILE="{{ aws_s3_logfile }}" exec > >(tee "{{ aws_s3_logfile }}")
NOTIFY_EMAIL={{ AWS_S3_LOGS_NOTIFY_EMAIL }}
FROM_EMAIL={{ AWS_S3_LOGS_FROM_EMAIL }}
AWS_CMD={{ aws_cmd }}
exec > >(tee $S3_LOGFILE)
exec 2>&1 exec 2>&1
shopt -s extglob shopt -s extglob
...@@ -23,11 +18,11 @@ shopt -s extglob ...@@ -23,11 +18,11 @@ shopt -s extglob
usage() { usage() {
cat<<EO cat<<EO
A wrapper of s3cmd sync that will sync files to A wrapper of s3cmd sync that will sync files to
an s3 bucket, will send mail to {{ AWS_S3_LOGS_NOTIFY_EMAIL }} an s3 bucket, will send mail to {{ AWS_S3_LOGS_NOTIFY_EMAIL }}
on failures. on failures.
Usage: $PROG Usage: $PROG
-v add verbosity (set -x) -v add verbosity (set -x)
-n echo what will be done -n echo what will be done
...@@ -57,7 +52,7 @@ done ...@@ -57,7 +52,7 @@ done
# bucket # bucket
# If there are any errors from this point # If there are any errors from this point
# send mail to $NOTIFY_EMAIL # send mail to {{ AWS_S3_LOGS_NOTIFY_EMAIL }}
set -e set -e
...@@ -68,22 +63,22 @@ s3_path=unset ...@@ -68,22 +63,22 @@ s3_path=unset
onerror() { onerror() {
if [[ -z $noop ]]; then if [[ -z $noop ]]; then
message_file=/var/tmp/message-$$.json message_file=/var/tmp/message-$$.json
message_string="Error syncing $s3_path: inst_id=$instance_id ip=$ip region=$region" message_string="Error syncing $s3_path: inst_id=$instance_id ip=$ip region={{ aws_region }}"
if [[ -r $S3_LOGFILE ]]; then if [[ -r "{{ aws_s3_logfile }}" ]]; then
python -c "import json; d={'Subject':{'Data':'$message_string'},'Body':{'Text':{'Data':open('$S3_LOGFILE').read()}}};print json.dumps(d)" > $message_file python -c "import json; d={'Subject':{'Data':'$message_string'},'Body':{'Text':{'Data':open('"{{ aws_s3_logfile }}"').read()}}};print json.dumps(d)" > $message_file
else else
cat << EOF > $message_file cat << EOF > $message_file
{"Subject": { "Data": "$message_string" }, "Body": { "Text": { "Data": "!! ERROR !! no logfile" } } } {"Subject": { "Data": "$message_string" }, "Body": { "Text": { "Data": "!! ERROR !! no logfile" } } }
EOF EOF
fi fi
echo "ERROR: syncing $s3_path on $instance_id" echo "ERROR: syncing $s3_path on $instance_id"
$AWS_CMD ses send-email --from $FROM_EMAIL --to $NOTIFY_EMAIL --message file://$message_file --region $region {{ aws_cmd }} ses send-email --from {{ AWS_S3_LOGS_FROM_EMAIL }} --to {{ AWS_S3_LOGS_NOTIFY_EMAIL }} --message file://$message_file --region {{ aws_region }}
else else
echo "Error syncing $s3_path on $instance_id" echo "Error syncing $s3_path on $instance_id"
fi fi
} }
trap onerror ERR SIGHUP SIGINT SIGTERM trap onerror ERR SIGHUP SIGINT SIGTERM
# first security group is used as the directory name in the bucket # first security group is used as the directory name in the bucket
sec_grp=$(ec2metadata --security-groups | head -1) sec_grp=$(ec2metadata --security-groups | head -1)
...@@ -95,5 +90,5 @@ region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1} ...@@ -95,5 +90,5 @@ region=${availability_zone:0:${{lb}}#availability_zone{{rb}} - 1}
s3_path="${2}/$sec_grp/" s3_path="${2}/$sec_grp/"
{% for item in AWS_S3_LOG_PATHS -%} {% for item in AWS_S3_LOG_PATHS -%}
$noop $AWS_CMD sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/" $noop {{ aws_cmd }} sync {{ item['path'] }} "s3://{{ item['bucket'] }}/$sec_grp/${instance_id}-${ip}/"
{% endfor %} {% endfor %}
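The template's final loop expects AWS_S3_LOG_PATHS to be a list of dicts with path and bucket keys, alongside the notify/from addresses and the new aws_region default. A hedged sketch of the vars that drive it; bucket names and addresses are placeholders:

# Placeholder values showing the shape the send-logs-to-s3 template expects.
AWS_S3_LOG_PATHS:
  - path: "{{ COMMON_LOG_DIR }}/nginx"
    bucket: "example-edx-logs"
  - path: "{{ COMMON_LOG_DIR }}/tracking"
    bucket: "example-edx-tracking-logs"
AWS_S3_LOGS_NOTIFY_EMAIL: "ops@example.com"
AWS_S3_LOGS_FROM_EMAIL: "no-reply@example.com"
aws_region: "us-east-1"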
...@@ -28,6 +28,9 @@ ...@@ -28,6 +28,9 @@
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}" extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
with_items: bastion_pip_pkgs with_items: bastion_pip_pkgs
# These templates rely on there being a global
# read_only mysql user; you must override the default
# password in order for these templates to be written out
- template: > - template: >
src=mysql.sh.j2 src=mysql.sh.j2
dest=/home/{{ item[0] }}/{{ item[1].script_name }} dest=/home/{{ item[0] }}/{{ item[1].script_name }}
...@@ -44,7 +47,11 @@ ...@@ -44,7 +47,11 @@
- db_host: "{{ ORA_MYSQL_HOST }}" - db_host: "{{ ORA_MYSQL_HOST }}"
db_name: "{{ ORA_MYSQL_DB_NAME }}" db_name: "{{ ORA_MYSQL_DB_NAME }}"
script_name: ora-rds.sh script_name: ora-rds.sh
when: COMMON_MYSQL_READ_ONLY_PASS
# These templates rely on there being a global
# read_only mongo user; you must override the default
# password in order for these templates to be written out
- template: > - template: >
src=mongo.sh.j2 src=mongo.sh.j2
dest=/home/{{ item[0] }}/{{ item[1].script_name }} dest=/home/{{ item[0] }}/{{ item[1].script_name }}
...@@ -60,3 +67,4 @@ ...@@ -60,3 +67,4 @@
db_name: "{{ FORUM_MONGO_DATABASE }}" db_name: "{{ FORUM_MONGO_DATABASE }}"
db_port: "{{ FORUM_MONGO_PORT }}" db_port: "{{ FORUM_MONGO_PORT }}"
script_name: forum-mongo.sh script_name: forum-mongo.sh
when: COMMON_MONGO_READ_ONLY_PASS
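As the comments above note, the mysql.sh and mongo.sh helper scripts are only written out when the read-only credentials are overridden. A hedged sketch of those overrides; the passwords are placeholders, and the common-role defaults are null:

# Placeholders; with the common defaults left at !!null these template tasks are skipped.
COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: 'changeme'
COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: 'changeme'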
...@@ -25,18 +25,39 @@ CERTS_AWS_KEY: "" ...@@ -25,18 +25,39 @@ CERTS_AWS_KEY: ""
CERTS_AWS_ID: "" CERTS_AWS_ID: ""
# GPG key ID, defaults to the dummy key # GPG key ID, defaults to the dummy key
CERTS_KEY_ID: "FEF8D954" CERTS_KEY_ID: "FEF8D954"
# Path to git identity file for pull access to # Contents of the identity for a private
# the edX certificates repo - REQUIRED # repo. Leave set to "none" if using the public
# Example - {{ secure_dir }}/files/git-identity # certificate repo
CERTS_GIT_IDENTITY: !!null CERTS_GIT_IDENTITY: "none"
# Path to public and private gpg key for signing # Path to public and private gpg key for signing
# the edX certificate. Default is a dummy key # the edX certificate. Default is a dummy key
CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt" CERTS_LOCAL_PRIVATE_KEY: "example-private-key.txt"
# This defaults to the public certificates repo which is
# used for open-edx
CERTS_REPO: "https://github.com/edx/read-only-certificate-code"
CERTS_NGINX_PORT: 18090
CERTS_WEB_ROOT: "{{ certs_data_dir }}/www-data"
CERTS_URL: "http://localhost:{{ CERTS_NGINX_PORT }}"
CERTS_DOWNLOAD_URL: "http://localhost:{{ CERTS_NGINX_PORT }}"
CERTS_VERIFY_URL: "http://localhost:{{ CERTS_NGINX_PORT }}"
# Set to false if using s3 or if you don't want certificates
# copied to the web root
CERTS_COPY_TO_WEB_ROOT: true
CERTS_S3_UPLOAD: false
# Can be set to a different repo for private
# templates, fonts, etc.
CERTS_TEMPLATE_DATA_DIR: 'template_data'
# This is the trust export, output of
# gpg --export-ownertrust
CERTS_OWNER_TRUST: "A9F9EAD11A0A6E7E5A037BDC044089B6FEF8D954:6:\n"
########## Internal role vars below ########## Internal role vars below
certs_user: certs certs_user: certs
certs_app_dir: "{{ COMMON_APP_DIR }}/certs" certs_app_dir: "{{ COMMON_APP_DIR }}/certs"
certs_data_dir: "{{ COMMON_DATA_DIR }}/certs"
certs_code_dir: "{{ certs_app_dir }}/certificates" certs_code_dir: "{{ certs_app_dir }}/certificates"
certs_venvs_dir: "{{ certs_app_dir }}/venvs" certs_venvs_dir: "{{ certs_app_dir }}/venvs"
certs_venv_dir: "{{ certs_venvs_dir }}/certs" certs_venv_dir: "{{ certs_venvs_dir }}/certs"
...@@ -44,7 +65,6 @@ certs_venv_bin: "{{ certs_venv_dir }}/bin" ...@@ -44,7 +65,6 @@ certs_venv_bin: "{{ certs_venv_dir }}/bin"
certs_git_ssh: /tmp/git_ssh.sh certs_git_ssh: /tmp/git_ssh.sh
certs_git_identity: "{{ certs_app_dir }}/certs-git-identity" certs_git_identity: "{{ certs_app_dir }}/certs-git-identity"
certs_requirements_file: "{{ certs_code_dir }}/requirements.txt" certs_requirements_file: "{{ certs_code_dir }}/requirements.txt"
certs_repo: "git@github.com:/edx/certificates"
certs_version: 'master' certs_version: 'master'
certs_gpg_dir: "{{ certs_app_dir }}/gnupg" certs_gpg_dir: "{{ certs_app_dir }}/gnupg"
certs_env_config: certs_env_config:
...@@ -57,6 +77,13 @@ certs_env_config: ...@@ -57,6 +77,13 @@ certs_env_config:
CERT_KEY_ID: $CERTS_KEY_ID CERT_KEY_ID: $CERTS_KEY_ID
LOGGING_ENV: "" LOGGING_ENV: ""
CERT_GPG_DIR: $certs_gpg_dir CERT_GPG_DIR: $certs_gpg_dir
CERT_URL: $CERTS_URL
CERT_DOWNLOAD_URL: $CERTS_DOWNLOAD_URL
CERT_WEB_ROOT: $CERTS_WEB_ROOT
COPY_TO_WEB_ROOT: $CERTS_COPY_TO_WEB_ROOT
S3_UPLOAD: $CERTS_S3_UPLOAD
CERT_VERIFY_URL: $CERTS_VERIFY_URL
TEMPLATE_DATA_DIR: $CERTS_TEMPLATE_DATA_DIR
certs_auth_config: certs_auth_config:
QUEUE_USER: $CERTS_QUEUE_USER QUEUE_USER: $CERTS_QUEUE_USER
......
A9F9EAD11A0A6E7E5A037BDC044089B6FEF8D954:6:
...@@ -36,14 +36,19 @@ ...@@ -36,14 +36,19 @@
owner={{ certs_user }} mode=750 owner={{ certs_user }} mode=750
notify: restart certs notify: restart certs
# This key is only needed if you are pulling down a private
# certificates repo
- name: install read-only ssh key for the certs repo - name: install read-only ssh key for the certs repo
copy: > copy: >
content="{{ CERTS_GIT_IDENTITY }}" dest={{ certs_git_identity }} content="{{ CERTS_GIT_IDENTITY }}" dest={{ certs_git_identity }}
force=yes owner={{ certs_user }} mode=0600 force=yes owner={{ certs_user }} mode=0600
when: CERTS_GIT_IDENTITY != "none"
notify: restart certs notify: restart certs
- name: checkout certificates repo into {{ certs_code_dir }} - name: checkout certificates repo into {{ certs_code_dir }}
git: dest={{ certs_code_dir }} repo={{ certs_repo }} version={{ certs_version }} git: >
dest={{ certs_code_dir }} repo={{ CERTS_REPO }} version={{ certs_version }}
accept_hostkey=yes
sudo_user: "{{ certs_user }}" sudo_user: "{{ certs_user }}"
environment: environment:
GIT_SSH: "{{ certs_git_ssh }}" GIT_SSH: "{{ certs_git_ssh }}"
...@@ -51,6 +56,7 @@ ...@@ -51,6 +56,7 @@
- name: remove read-only ssh key for the certs repo - name: remove read-only ssh key for the certs repo
file: path={{ certs_git_identity }} state=absent file: path={{ certs_git_identity }} state=absent
when: CERTS_GIT_IDENTITY != "none"
notify: restart certs notify: restart certs
- name : install python requirements - name : install python requirements
......
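The deploy task above now checks out the public repo by default and only installs the ssh key when CERTS_GIT_IDENTITY is not "none". A hedged sketch of the overrides for a private certificates repo; the repo URL and key body are placeholders:

# Placeholder overrides for a private certificates checkout; not part of this commit.
CERTS_REPO: "git@github.com:example-org/certificates.git"
CERTS_GIT_IDENTITY: |
  -----BEGIN RSA PRIVATE KEY-----
  <contents of the read-only deploy key>
  -----END RSA PRIVATE KEY-----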
...@@ -31,10 +31,6 @@ ...@@ -31,10 +31,6 @@
# - supervisor # - supervisor
# - certs # - certs
# #
- name: Checking to see if git identity is set
fail: msg="You must set CERTS_GIT_IDENTITY var for this role!"
when: not CERTS_GIT_IDENTITY
- name: create application user - name: create application user
user: > user: >
name="{{ certs_user }}" name="{{ certs_user }}"
...@@ -43,7 +39,7 @@ ...@@ -43,7 +39,7 @@
shell=/bin/false shell=/bin/false
notify: restart certs notify: restart certs
- name: create certs app and data dirs - name: create certs app dirs
file: > file: >
path="{{ item }}" path="{{ item }}"
state=directory state=directory
...@@ -52,7 +48,20 @@ ...@@ -52,7 +48,20 @@
notify: restart certs notify: restart certs
with_items: with_items:
- "{{ certs_app_dir }}" - "{{ certs_app_dir }}"
# needed for the ansible 1.5 git module
- "{{ certs_app_dir }}/.ssh"
- "{{ certs_venvs_dir }}" - "{{ certs_venvs_dir }}"
- "{{ certs_data_dir }}"
# The certs web root must be owned
# by the web user so the certs service
# can write files there.
- name: create certs web root
file: >
path="{{ CERTS_WEB_ROOT }}"
state=directory
owner="{{ common_web_group }}"
group="{{ certs_user }}"
- name: create certs gpg dir - name: create certs gpg dir
file: > file: >
...@@ -69,6 +78,12 @@ ...@@ -69,6 +78,12 @@
notify: restart certs notify: restart certs
register: certs_gpg_key register: certs_gpg_key
- name: copy the pgp trust export
copy: >
content="{{ CERTS_OWNER_TRUST }}"
dest={{ certs_app_dir }}/trust.export
owner={{ common_web_user }} mode=0600
notify: restart certs
- name: load the gpg key - name: load the gpg key
shell: > shell: >
...@@ -77,4 +92,11 @@ ...@@ -77,4 +92,11 @@
when: certs_gpg_key.changed when: certs_gpg_key.changed
notify: restart certs notify: restart certs
- name: import the trust export
shell: >
/usr/bin/gpg --homedir {{ certs_gpg_dir }} --import-ownertrust {{ certs_app_dir }}/trust.export
sudo_user: "{{ common_web_user }}"
when: certs_gpg_key.changed
notify: restart certs
- include: deploy.yml tags=deploy - include: deploy.yml tags=deploy
...@@ -30,18 +30,22 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false ...@@ -30,18 +30,22 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false
COMMON_MOTD_TEMPLATE: "motd.tail.j2" COMMON_MOTD_TEMPLATE: "motd.tail.j2"
COMMON_SSH_PASSWORD_AUTH: "no"
# These are two special accounts across all databases # These are three maintenance accounts across all databases
# the read only user is granted select privs on all dbs # the read only user is granted select privs on all dbs
# the admin user is granted create user privs on all dbs # the admin user is granted create user privs on all dbs
# the migrate user is granted table alter privs on all dbs
COMMON_MYSQL_READ_ONLY_USER: 'read_only' COMMON_MYSQL_READ_ONLY_USER: 'read_only'
COMMON_MYSQL_READ_ONLY_PASS: 'password' COMMON_MYSQL_READ_ONLY_PASS: !!null
COMMON_MYSQL_ADMIN_USER: 'admin' COMMON_MYSQL_ADMIN_USER: 'admin'
COMMON_MYSQL_ADMIN_PASS: 'password' COMMON_MYSQL_ADMIN_PASS: !!null
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: !!null
COMMON_MONGO_READ_ONLY_USER: 'read_only' COMMON_MONGO_READ_ONLY_USER: 'read_only'
COMMON_MONGO_READ_ONLY_PASS: 'password' COMMON_MONGO_READ_ONLY_PASS: !!null
common_debian_pkgs: common_debian_pkgs:
- ntp - ntp
......
...@@ -51,7 +51,7 @@ PermitEmptyPasswords no ...@@ -51,7 +51,7 @@ PermitEmptyPasswords no
ChallengeResponseAuthentication no ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords # Change to no to disable tunnelled clear text passwords
PasswordAuthentication no PasswordAuthentication {{ COMMON_SSH_PASSWORD_AUTH }}
# Kerberos options # Kerberos options
#KerberosAuthentication no #KerberosAuthentication no
......
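COMMON_SSH_PASSWORD_AUTH defaults to "no" and is templated straight into sshd_config above. A hedged example of relaxing it for a throwaway sandbox; this is an override only, the shipped default is unchanged:

# Placeholder override, e.g. for a short-lived sandbox; production keeps "no".
COMMON_SSH_PASSWORD_AUTH: "yes"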
--- ---
datadog_api_key: "PUT_YOUR_API_KEY_HERE" DATADOG_API_KEY: "PUT_YOUR_API_KEY_HERE"
datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52" datadog_apt_key: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x226AE980C7A7DA52"
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
lineinfile: > lineinfile: >
dest="/etc/dd-agent/datadog.conf" dest="/etc/dd-agent/datadog.conf"
regexp="^api_key:.*" regexp="^api_key:.*"
line="api_key:{{ datadog_api_key }}" line="api_key:{{ DATADOG_API_KEY }}"
notify: notify:
- restart the datadog service - restart the datadog service
tags: tags:
......
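With the key renamed to DATADOG_API_KEY and the role gated behind ENABLE_DATADOG in the plays earlier in this commit, turning the agent on is a vars-only change. A hedged sketch; the key value is a placeholder:

# Placeholder values; pass these via a secure extra-vars file.
ENABLE_DATADOG: True
DATADOG_API_KEY: "0123456789abcdef0123456789abcdef"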
--- ---
- name: check out the demo course - name: check out the demo course
git: dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }} git: >
dest={{ demo_code_dir }} repo={{ demo_repo }} version={{ demo_version }}
accept_hostkey=yes
sudo_user: "{{ demo_edxapp_user }}" sudo_user: "{{ demo_edxapp_user }}"
register: demo_checkout register: demo_checkout
......
...@@ -33,20 +33,26 @@ ...@@ -33,20 +33,26 @@
- restart discern - restart discern
- name: git checkout discern repo into discern_code_dir - name: git checkout discern repo into discern_code_dir
git: dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }} git: >
dest={{ discern_code_dir }} repo={{ discern_source_repo }} version={{ discern_version }}
accept_hostkey=yes
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
- name: git checkout ease repo into discern_ease_code_dir - name: git checkout ease repo into discern_ease_code_dir
git: dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }} git: >
dest={{ discern_ease_code_dir}} repo={{ discern_ease_source_repo }} version={{ discern_ease_version }}
accept_hostkey=yes
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
#Numpy has to be a pre-requirement in order for scipy to build #Numpy has to be a pre-requirement in order for scipy to build
- name : install python pre-requirements for discern and ease - name : install python pre-requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
...@@ -55,7 +61,9 @@ ...@@ -55,7 +61,9 @@
- "{{ discern_ease_pre_requirements_file }}" - "{{ discern_ease_pre_requirements_file }}"
- name : install python requirements for discern and ease - name : install python requirements for discern and ease
pip: requirements={{item}} virtualenv={{ discern_venv_dir }} state=present pip: >
requirements={{item}} virtualenv={{ discern_venv_dir }} state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ discern_user }}" sudo_user: "{{ discern_user }}"
notify: notify:
- restart discern - restart discern
......
...@@ -29,6 +29,7 @@ edx_ansible_debian_pkgs: ...@@ -29,6 +29,7 @@ edx_ansible_debian_pkgs:
- libxml2-dev - libxml2-dev
- libxslt1-dev - libxslt1-dev
- curl - curl
- python-yaml
edx_ansible_app_dir: "{{ COMMON_APP_DIR }}/edx_ansible" edx_ansible_app_dir: "{{ COMMON_APP_DIR }}/edx_ansible"
edx_ansible_code_dir: "{{ edx_ansible_app_dir }}/edx_ansible" edx_ansible_code_dir: "{{ edx_ansible_app_dir }}/edx_ansible"
edx_ansible_data_dir: "{{ COMMON_DATA_DIR }}/edx_ansible" edx_ansible_data_dir: "{{ COMMON_DATA_DIR }}/edx_ansible"
......
--- ---
- name: git checkout edx_ansible repo into edx_ansible_code_dir - name: git checkout edx_ansible repo into edx_ansible_code_dir
git: dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }} git: >
dest={{ edx_ansible_code_dir }} repo={{ edx_ansible_source_repo }} version={{ configuration_version }}
accept_hostkey=yes
sudo_user: "{{ edx_ansible_user }}" sudo_user: "{{ edx_ansible_user }}"
- name : install edx_ansible venv requirements - name : install edx_ansible venv requirements
pip: requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present pip: >
requirements="{{ edx_ansible_requirements_file }}" virtualenv="{{ edx_ansible_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ edx_ansible_user }}" sudo_user: "{{ edx_ansible_user }}"
- name: create update script - name: create update script
......
...@@ -12,7 +12,7 @@ IFS="," ...@@ -12,7 +12,7 @@ IFS=","
-v add verbosity to edx_ansible run -v add verbosity to edx_ansible run
-h this -h this
<repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, discern, edx-ora, configuration <repo> - must be one of edx-platform, xqueue, cs_comments_service, xserver, ease, edx-ora, configuration, read-only-certificate-code
<version> - can be a commit or tag <version> - can be a commit or tag
EO EO
...@@ -39,16 +39,17 @@ if [[ -f {{ edx_ansible_var_file }} ]]; then ...@@ -39,16 +39,17 @@ if [[ -f {{ edx_ansible_var_file }} ]]; then
fi fi
declare -A repos_to_cmd declare -A repos_to_cmd
edx_ansible_cmd="{{ edx_ansible_venv_bin}}/ansible-playbook -i localhost, -c local --tags deploy $extra_args " edx_ansible_cmd="{{ edx_ansible_venv_bin }}/ansible-playbook -i localhost, -c local --tags deploy $extra_args "
repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'" repos_to_cmd["edx-platform"]="$edx_ansible_cmd edxapp.yml -e 'edx_platform_version=$2'"
repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'" repos_to_cmd["xqueue"]="$edx_ansible_cmd xqueue.yml -e 'xqueue_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd xserver.yml -e 'xserver_version=$2'"
repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'" repos_to_cmd["cs_comments_service"]="$edx_ansible_cmd forum.yml -e 'forum_version=$2'"
repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'" repos_to_cmd["xserver"]="$edx_ansible_cmd forums.yml -e 'xserver_version=$2'"
repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'" repos_to_cmd["ease"]="$edx_ansible_cmd discern.yml -e 'discern_ease_version=$2' && $edx_ansible_cmd ora.yml -e 'ora_ease_version=$2'"
repos_to_cmd["discern"]="$edx_ansible_cmd discern.yml -e 'discern_version=$2'"
repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'" repos_to_cmd["edx-ora"]="$edx_ansible_cmd ora.yml -e 'ora_version=$2'"
repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'" repos_to_cmd["configuration"]="$edx_ansible_cmd edx_ansible.yml -e 'configuration_version=$2'"
repos_to_cmd["read-only-certificate-code"]="$edx_ansible_cmd certs.yml -e 'certs_version=$2'"
if [[ -z $1 || -z $2 ]]; then if [[ -z $1 || -z $2 ]]; then
......
...@@ -67,6 +67,9 @@ EDXAPP_CAS_SERVER_URL: '' ...@@ -67,6 +67,9 @@ EDXAPP_CAS_SERVER_URL: ''
EDXAPP_CAS_EXTRA_LOGIN_PARAMS: '' EDXAPP_CAS_EXTRA_LOGIN_PARAMS: ''
EDXAPP_CAS_ATTRIBUTE_CALLBACK: '' EDXAPP_CAS_ATTRIBUTE_CALLBACK: ''
EDXAPP_CAS_ATTRIBUTE_PACKAGE: '' EDXAPP_CAS_ATTRIBUTE_PACKAGE: ''
# Enable an end-point that creates a user and logs them in
# Used for performance testing
EDXAPP_ENABLE_AUTO_AUTH: false
EDXAPP_FEATURES: EDXAPP_FEATURES:
AUTH_USE_OPENID_PROVIDER: true AUTH_USE_OPENID_PROVIDER: true
...@@ -78,6 +81,7 @@ EDXAPP_FEATURES: ...@@ -78,6 +81,7 @@ EDXAPP_FEATURES:
PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE PREVIEW_LMS_BASE: $EDXAPP_PREVIEW_LMS_BASE
ENABLE_S3_GRADE_DOWNLOADS: true ENABLE_S3_GRADE_DOWNLOADS: true
USE_CUSTOM_THEME: $edxapp_use_custom_theme USE_CUSTOM_THEME: $edxapp_use_custom_theme
AUTOMATIC_AUTH_FOR_TESTING: $EDXAPP_ENABLE_AUTO_AUTH
EDXAPP_BOOK_URL: '' EDXAPP_BOOK_URL: ''
# This needs to be set to localhost # This needs to be set to localhost
...@@ -125,6 +129,18 @@ EDXAPP_STATIC_URL_BASE: "/static/" ...@@ -125,6 +129,18 @@ EDXAPP_STATIC_URL_BASE: "/static/"
EDXAPP_GRADE_STORAGE_TYPE: 'localfs' EDXAPP_GRADE_STORAGE_TYPE: 'localfs'
EDXAPP_GRADE_BUCKET: 'edx-grades' EDXAPP_GRADE_BUCKET: 'edx-grades'
EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades' EDXAPP_GRADE_ROOT_PATH: '/tmp/edx-s3/grades'
# Credit card processor
# These are the same defaults set in common.py
EDXAPP_CC_PROCESSOR:
CyberSource:
SHARED_SECRET: ''
MERCHANT_ID: ''
SERIAL_NUMBER: ''
ORDERPAGE_VERSION: '7'
PURCHASE_ENDPOINT: ''
# does not affect verified students
EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY: ['usd', '$']
# Configure rake tasks in edx-platform to skip Python/Ruby/Node installation # Configure rake tasks in edx-platform to skip Python/Ruby/Node installation
EDXAPP_NO_PREREQ_INSTALL: 1 EDXAPP_NO_PREREQ_INSTALL: 1
...@@ -399,6 +415,7 @@ generic_env_config: &edxapp_generic_env ...@@ -399,6 +415,7 @@ generic_env_config: &edxapp_generic_env
lms_auth_config: lms_auth_config:
<<: *edxapp_generic_auth <<: *edxapp_generic_auth
CC_PROCESSOR: $EDXAPP_CC_PROCESSOR
MODULESTORE: MODULESTORE:
default: default:
ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore' ENGINE: 'xmodule.modulestore.mixed.MixedModuleStore'
...@@ -426,6 +443,7 @@ lms_auth_config: ...@@ -426,6 +443,7 @@ lms_auth_config:
lms_env_config: lms_env_config:
<<: *edxapp_generic_env <<: *edxapp_generic_env
PAID_COURSE_REGISTRATION_CURRENCY: $EDXAPP_PAID_COURSE_REGISTRATION_CURRENCY
'CODE_JAIL': 'CODE_JAIL':
# from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None # from https://github.com/edx/codejail/blob/master/codejail/django_integration.py#L24, '' should be same as None
'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}' 'python_bin': '{% if EDXAPP_PYTHON_SANDBOX %}{{ edxapp_sandbox_venv_dir }}/bin/python{% endif %}'
......
...@@ -28,7 +28,9 @@ ...@@ -28,7 +28,9 @@
# Do A Checkout # Do A Checkout
- name: checkout edx-platform repo into {{edxapp_code_dir}} - name: checkout edx-platform repo into {{edxapp_code_dir}}
git: dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}} git: >
dest={{edxapp_code_dir}} repo={{edx_platform_repo}} version={{edx_platform_version}}
accept_hostkey=yes
register: chkout register: chkout
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: environment:
...@@ -45,7 +47,9 @@ ...@@ -45,7 +47,9 @@
- "restart edxapp_workers" - "restart edxapp_workers"
- name: checkout theme - name: checkout theme
git: dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}} git: >
dest={{ edxapp_app_dir }}/themes/{{edxapp_theme_name}} repo={{edxapp_theme_source_repo}} version={{edxapp_theme_version}}
accept_hostkey=yes
when: edxapp_theme_name != '' when: edxapp_theme_name != ''
sudo_user: "{{ edxapp_user }}" sudo_user: "{{ edxapp_user }}"
environment: environment:
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
- "restart edxapp_workers" - "restart edxapp_workers"
with_items: with_items:
- "{{ edxapp_app_dir }}" - "{{ edxapp_app_dir }}"
# needed for the ansible 1.5 git module
- "{{ edxapp_app_dir }}/.ssh"
- "{{ edxapp_data_dir }}" - "{{ edxapp_data_dir }}"
- "{{ edxapp_venvs_dir }}" - "{{ edxapp_venvs_dir }}"
- "{{ edxapp_theme_dir }}" - "{{ edxapp_theme_dir }}"
......
...@@ -77,7 +77,10 @@ ...@@ -77,7 +77,10 @@
# Fake syncdb with migrate, only when fake_migrations is defined # Fake syncdb with migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which # This overrides the database name to be the test database which
# the default application user has full write access to # the default application user has full write access to.
#
# This is run in cases where you want to check whether migrations
# work without actually running them (when creating AMIs for example).
- name: syncdb and migrate - name: syncdb and migrate
shell: > shell: >
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
...@@ -90,12 +93,27 @@ ...@@ -90,12 +93,27 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# Regular syncdb with migrate # Syncdb with migrate when the migrate user is overridden in extra vars
- name: syncdb and migrate
shell: >
chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
sudo_user: "{{ edxapp_user }}"
notify:
- "restart edxapp"
- "restart edxapp_workers"
# Syncdb with migrate when the default migrate user is not set;
# in this case use the EDXAPP_MYSQL_USER_MIGRATE user to run migrations
- name: syncdb and migrate - name: syncdb and migrate
shell: > shell: >
chdir={{ edxapp_code_dir }} chdir={{ edxapp_code_dir }}
{{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate {{ edxapp_venv_bin}}/python manage.py lms syncdb --migrate --noinput --settings=aws_migrate
when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" when: fake_migrations is not defined and migrate_db is defined and migrate_db|lower == "yes" and not COMMON_MYSQL_MIGRATE_PASS
environment: environment:
DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}" DB_MIGRATION_USER: "{{ EDXAPP_MYSQL_USER_MIGRATE }}"
DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}" DB_MIGRATION_PASS: "{{ EDXAPP_MYSQL_PASSWORD_MIGRATE }}"
...@@ -104,6 +122,7 @@ ...@@ -104,6 +122,7 @@
- "restart edxapp" - "restart edxapp"
- "restart edxapp_workers" - "restart edxapp_workers"
# Fake migrate, only when fake_migrations is defined # Fake migrate, only when fake_migrations is defined
# This overrides the database name to be the test database which # This overrides the database name to be the test database which
# the default application user has full write access to # the default application user has full write access to
......
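The three syncdb tasks above are mutually exclusive: defining fake_migrations selects the fake run, otherwise the COMMON_MYSQL_MIGRATE_* credentials are preferred and EDXAPP_MYSQL_USER_MIGRATE is the fallback. A hedged sketch of the extra vars that select the middle branch; values are placeholders:

# Placeholder extra vars; leaving fake_migrations undefined and setting these
# runs the real syncdb --migrate with the shared migrate account.
migrate_db: "yes"
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: 'changeme'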
...@@ -48,8 +48,8 @@ forum_environment: ...@@ -48,8 +48,8 @@ forum_environment:
NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}" NEW_RELIC_LICENSE_KEY: "{{ FORUM_NEW_RELIC_LICENSE_KEY }}"
WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}" WORKER_PROCESSES: "{{ FORUM_WORKER_PROCESSES }}"
DATA_DIR: "{{ forum_data_dir }}" DATA_DIR: "{{ forum_data_dir }}"
FORUM_LISTEN_HOST: "{{ FORUM_LISTEN_HOST }}" LISTEN_HOST: "{{ FORUM_LISTEN_HOST }}"
FORUM_LISTEN_PORT: "{{ FORUM_LISTEN_PORT }}" LISTEN_PORT: "{{ FORUM_LISTEN_PORT }}"
forum_user: "forum" forum_user: "forum"
forum_ruby_version: "1.9.3-p448" forum_ruby_version: "1.9.3-p448"
......
...@@ -30,7 +30,9 @@ ...@@ -30,7 +30,9 @@
notify: restart the forum service notify: restart the forum service
- name: git checkout forum repo into {{ forum_code_dir }} - name: git checkout forum repo into {{ forum_code_dir }}
git: dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }} git: >
dest={{ forum_code_dir }} repo={{ forum_source_repo }} version={{ forum_version }}
accept_hostkey=yes
sudo_user: "{{ forum_user }}" sudo_user: "{{ forum_user }}"
notify: restart the forum service notify: restart the forum service
......
...@@ -75,6 +75,8 @@ jenkins_admin_debian_pkgs: ...@@ -75,6 +75,8 @@ jenkins_admin_debian_pkgs:
# for status.edx.org # for status.edx.org
- ruby - ruby
- ruby1.9.1 - ruby1.9.1
# for check-migrations
- mysql-client
jenkins_admin_gem_pkgs: jenkins_admin_gem_pkgs:
# for generating status.edx.org # for generating status.edx.org
......
...@@ -66,17 +66,17 @@ ...@@ -66,17 +66,17 @@
- name: configure s3 plugin - name: configure s3 plugin
template: > template: >
src="./{{jenkins_home}}/hudson.plugins.s3.S3BucketPublisher.xml.j2" src="./{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml.j2"
dest="{{jenkins_home}}/hudson.plugins.s3.S3BucketPublisher.xml" dest="{{ jenkins_home }}/hudson.plugins.s3.S3BucketPublisher.xml"
owner={{jenkins_user}} owner={{ jenkins_user }}
group={{jenkins_group}} group={{ jenkins_group }}
mode=0644 mode=0644
- name: create the ssh directory - name: create the ssh directory
file: > file: >
path={{jenkins_home}}/.ssh path={{ jenkins_home }}/.ssh
owner={{jenkins_user}} owner={{ jenkins_user }}
group={{jenkins_group}} group={{ jenkins_group }}
mode=0700 mode=0700
state=directory state=directory
...@@ -89,34 +89,34 @@ ...@@ -89,34 +89,34 @@
- name: drop the secure credentials - name: drop the secure credentials
copy: > copy: >
content="{{ JENKINS_ADMIN_GIT_KEY }}" content="{{ JENKINS_ADMIN_GIT_KEY }}"
dest={{jenkins_home}}/.ssh/id_rsa dest={{ jenkins_home }}/.ssh/id_rsa
owner={{jenkins_user}} owner={{ jenkins_user }}
group={{jenkins_group}} group={{ jenkins_group }}
mode=0600 mode=0600
- name: create job directory - name: create job directory
file: > file: >
path="{{jenkins_home}}/jobs" path="{{ jenkins_home }}/jobs"
owner="{{jenkins_user}}" owner="{{ jenkins_user }}"
group="{{jenkins_group}}" group="{{ jenkins_group }}"
mode=0755 mode=0755
state=directory state=directory
- name: create admin job directories - name: create admin job directories
file: > file: >
path="{{jenkins_home}}/jobs/{{item}}" path="{{ jenkins_home }}/jobs/{{ item }}"
owner={{jenkins_user}} owner={{ jenkins_user }}
group={{jenkins_group}} group={{ jenkins_group }}
mode=0755 mode=0755
state=directory state=directory
with_items: jenkins_admin_jobs with_items: jenkins_admin_jobs
- name: create admin job config files - name: create admin job config files
template: > template: >
src="./{{jenkins_home}}/jobs/{{item}}/config.xml.j2" src="./{{ jenkins_home }}/jobs/{{ item }}/config.xml.j2"
dest="{{jenkins_home}}/jobs/{{item}}/config.xml" dest="{{ jenkins_home }}/jobs/{{ item }}/config.xml"
owner={{jenkins_user}} owner={{ jenkins_user }}
group={{jenkins_group}} group={{ jenkins_group }}
mode=0644 mode=0644
with_items: jenkins_admin_jobs with_items: jenkins_admin_jobs
......
...@@ -2,9 +2,9 @@ ...@@ -2,9 +2,9 @@
<hudson.plugins.s3.S3BucketPublisher_-DescriptorImpl plugin="s3@0.5"> <hudson.plugins.s3.S3BucketPublisher_-DescriptorImpl plugin="s3@0.5">
<profiles> <profiles>
<hudson.plugins.s3.S3Profile> <hudson.plugins.s3.S3Profile>
<name>{{JENKINS_ADMIN_S3_PROFILE.name}}</name> <name>{{ JENKINS_ADMIN_S3_PROFILE.name }}</name>
<accessKey>{{JENKINS_ADMIN_S3_PROFILE.access_key}}</accessKey> <accessKey>{{ JENKINS_ADMIN_S3_PROFILE.access_key }}</accessKey>
<secretKey>{{JENKINS_ADMIN_S3_PROFILE.secret_key}}</secretKey> <secretKey>{{ JENKINS_ADMIN_S3_PROFILE.secret_key }}</secretKey>
</hudson.plugins.s3.S3Profile> </hudson.plugins.s3.S3Profile>
</profiles> </profiles>
</hudson.plugins.s3.S3BucketPublisher_-DescriptorImpl> </hudson.plugins.s3.S3BucketPublisher_-DescriptorImpl>
...@@ -44,7 +44,7 @@ rm -rf $BUILD_ID ...@@ -44,7 +44,7 @@ rm -rf $BUILD_ID
</builders> </builders>
<publishers> <publishers>
<hudson.plugins.s3.S3BucketPublisher plugin="s3@0.5"> <hudson.plugins.s3.S3BucketPublisher plugin="s3@0.5">
<profileName>{{JENKINS_ADMIN_S3_PROFILE.name}}</profileName> <profileName>{{ JENKINS_ADMIN_S3_PROFILE.name }}</profileName>
<entries> <entries>
<hudson.plugins.s3.Entry> <hudson.plugins.s3.Entry>
<bucket>edx-jenkins-backups/{{JENKINS_ADMIN_NAME}}</bucket> <bucket>edx-jenkins-backups/{{JENKINS_ADMIN_NAME}}</bucket>
......
...@@ -33,12 +33,12 @@ ...@@ -33,12 +33,12 @@
</hudson.model.TextParameterDefinition> </hudson.model.TextParameterDefinition>
<hudson.model.StringParameterDefinition> <hudson.model.StringParameterDefinition>
<name>configuration</name> <name>configuration</name>
<description>The GITREF of configuration to use.</description> <description>The GITREF of configuration to use. Leave blank to default to master.</description>
<defaultValue></defaultValue> <defaultValue></defaultValue>
</hudson.model.StringParameterDefinition> </hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition> <hudson.model.StringParameterDefinition>
<name>configuration_secure</name> <name>configuration_secure</name>
<description>The GITREF of configuration-secure repository to use.</description> <description>The GITREF of configuration-secure repository to use. Leave blank to default to master.</description>
<defaultValue></defaultValue> <defaultValue></defaultValue>
</hudson.model.StringParameterDefinition> </hudson.model.StringParameterDefinition>
<hudson.model.StringParameterDefinition> <hudson.model.StringParameterDefinition>
...@@ -46,6 +46,11 @@ ...@@ -46,6 +46,11 @@
<description></description> <description></description>
<defaultValue></defaultValue> <defaultValue></defaultValue>
</hudson.model.StringParameterDefinition> </hudson.model.StringParameterDefinition>
<hudson.model.BooleanParameterDefinition>
<name>use_blessed</name>
<description></description>
<defaultValue>true</defaultValue>
</hudson.model.BooleanParameterDefinition>
</parameterDefinitions> </parameterDefinitions>
</hudson.model.ParametersDefinitionProperty> </hudson.model.ParametersDefinitionProperty>
<com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.20"> <com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.20">
...@@ -60,7 +65,7 @@ ...@@ -60,7 +65,7 @@
<hudson.plugins.git.UserRemoteConfig> <hudson.plugins.git.UserRemoteConfig>
<name></name> <name></name>
<refspec></refspec> <refspec></refspec>
<url>{{JENKINS_ADMIN_CONFIGURATION_REPO}}</url> <url>{{ JENKINS_ADMIN_CONFIGURATION_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig> </hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs> </userRemoteConfigs>
<branches> <branches>
...@@ -99,7 +104,7 @@ ...@@ -99,7 +104,7 @@
<hudson.plugins.git.UserRemoteConfig> <hudson.plugins.git.UserRemoteConfig>
<name></name> <name></name>
<refspec></refspec> <refspec></refspec>
<url>{{JENKINS_ADMIN_CONFIGURATION_SECURE_REPO}}</url> <url>{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}</url>
</hudson.plugins.git.UserRemoteConfig> </hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs> </userRemoteConfigs>
<branches> <branches>
...@@ -151,40 +156,23 @@ ...@@ -151,40 +156,23 @@
<nature>shell</nature> <nature>shell</nature>
<command> <command>
#!/bin/bash -x #!/bin/bash -x
export jenkins_admin_ec2_key="{{ JENKINS_ADMIN_EC2_KEY }}"
export jenkins_admin_configuration_secure_repo="{{ JENKINS_ADMIN_CONFIGURATION_SECURE_REPO }}"
if [[ "\$play" == "" ]]; then configuration/util/jenkins/build-ami.sh
echo "No Play Specified. Nothing to Do."
exit 0
fi
export PYTHONUNBUFFERED=1
export PIP_DOWNLOAD_CACHE=\$WORKSPACE/pip-cache
cd configuration
pip install -r requirements.txt
cd util/vpc-tools/
echo "\$refs" > /var/tmp/$BUILD_ID-refs.yml
cat /var/tmp/$BUILD_ID-refs.yml
echo "\$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p \$play -t c1.medium -d \$deployment -e \$environment -i /edx/var/jenkins/.ssh/id_rsa -b \$base_ami --vars /var/tmp/\$BUILD_ID-extra-vars.yml --refs /var/tmp/\$BUILD_ID-refs.yml -c \$BUILD_NUMBER --configuration-version \$configuration --configuration-secure-version \$configuration_secure -k deployment --configuration-secure-repo "git@github.com:edx-ops/prod-secure.git"
</command> </command>
<ignoreExitCode>false</ignoreExitCode> <ignoreExitCode>false</ignoreExitCode>
</jenkins.plugins.shiningpanda.builders.VirtualenvBuilder> </jenkins.plugins.shiningpanda.builders.VirtualenvBuilder>
<hudson.tasks.Shell> <hudson.tasks.Shell>
<command>#!/bin/bash -x <command>#!/bin/bash -x
if [[(&quot;\$play&quot; == &quot;&quot;)]]; then if [[(&quot;$play&quot; == &quot;&quot;)]]; then
echo &quot;No Play Specified. Nothing to Do.&quot; echo &quot;No Play Specified. Nothing to Do.&quot;
exit 0 exit 0
fi fi
rm /var/tmp/\$BUILD_ID-extra-vars.yml rm /var/tmp/$BUILD_ID-extra-vars.yml
rm /var/tmp/\$BUILD_ID-refs.yml</command> rm /var/tmp/$BUILD_ID-refs.yml</command>
</hudson.tasks.Shell> </hudson.tasks.Shell>
</builders> </builders>
<publishers/> <publishers/>
......
...@@ -84,7 +84,9 @@ ...@@ -84,7 +84,9 @@
# upstream, we may be able to use the regular plugin install process. # upstream, we may be able to use the regular plugin install process.
# Until then, we compile and install the forks ourselves. # Until then, we compile and install the forks ourselves.
- name: checkout custom plugin repo - name: checkout custom plugin repo
git: repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }} git: >
repo={{ item.repo_url }} dest=/tmp/{{ item.repo_name }} version={{ item.version }}
accept_hostkey=yes
with_items: jenkins_custom_plugins with_items: jenkins_custom_plugins
- name: compile custom plugins - name: compile custom plugins
......
...@@ -36,8 +36,8 @@ jenkins_wheels: ...@@ -36,8 +36,8 @@ jenkins_wheels:
- { pkg: "django-celery==3.0.17", wheel: "django_celery-3.0.17-py27-none-any.whl" } - { pkg: "django-celery==3.0.17", wheel: "django_celery-3.0.17-py27-none-any.whl" }
- { pkg: "beautifulsoup4==4.1.3", wheel: "beautifulsoup4-4.1.3-py27-none-any.whl"} - { pkg: "beautifulsoup4==4.1.3", wheel: "beautifulsoup4-4.1.3-py27-none-any.whl"}
- { pkg: "beautifulsoup==3.2.1", wheel: "BeautifulSoup-3.2.1-py27-none-any.whl" } - { pkg: "beautifulsoup==3.2.1", wheel: "BeautifulSoup-3.2.1-py27-none-any.whl" }
- { pkg: "bleach==1.2.2", wheel: "bleach-1.2.2-py27-none-any.whl" } - { pkg: "bleach==1.4", wheel: "bleach-1.4-py27-none-any.whl" }
- { pkg: "html5lib==0.95", wheel: "html5lib-0.95-py27-none-any.whl" } - { pkg: "html5lib==0.999", wheel: "html5lib-0.999-py27-none-any.whl" }
- { pkg: "boto==2.13.3", wheel: "boto-2.13.3-py27-none-any.whl" } - { pkg: "boto==2.13.3", wheel: "boto-2.13.3-py27-none-any.whl" }
- { pkg: "celery==3.0.19", wheel: "celery-3.0.19-py27-none-any.whl" } - { pkg: "celery==3.0.19", wheel: "celery-3.0.19-py27-none-any.whl" }
- { pkg: "dealer==0.2.3", wheel: "dealer-0.2.3-py27-none-any.whl" } - { pkg: "dealer==0.2.3", wheel: "dealer-0.2.3-py27-none-any.whl" }
...@@ -64,8 +64,9 @@ jenkins_wheels: ...@@ -64,8 +64,9 @@ jenkins_wheels:
- { pkg: "gunicorn==0.17.4", wheel: "gunicorn-0.17.4-py27-none-any.whl" } - { pkg: "gunicorn==0.17.4", wheel: "gunicorn-0.17.4-py27-none-any.whl" }
- { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" } - { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "lxml==3.0.1", wheel: "lxml-3.0.1-cp27-none-linux_x86_64.whl" } - { pkg: "lxml==3.0.1", wheel: "lxml-3.0.1-cp27-none-linux_x86_64.whl" }
- { pkg: "mako==0.7.3", wheel: "Mako-0.7.3-py27-none-any.whl" } - { pkg: "mako==0.9.1", wheel: "Mako-0.9.1-py2.py3-none-any.whl" }
- { pkg: "Markdown==2.2.1", wheel: "Markdown-2.2.1-py27-none-any.whl" } - { pkg: "Markdown==2.2.1", wheel: "Markdown-2.2.1-py27-none-any.whl" }
- { pkg: "mongoengine==0.7.10", wheel: "mongoengine-0.7.10-py27-none-any.whl" }
- { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" } - { pkg: "networkx==1.7", wheel: "networkx-1.7-py27-none-any.whl" }
- { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" } - { pkg: "nltk==2.0.4", wheel: "nltk-2.0.4-py27-none-any.whl" }
- { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" } - { pkg: "oauthlib==0.5.1", wheel: "oauthlib-0.5.1-py27-none-any.whl" }
...@@ -79,7 +80,9 @@ jenkins_wheels: ...@@ -79,7 +80,9 @@ jenkins_wheels:
- { pkg: "pymongo==2.4.1", wheel: "pymongo-2.4.1-cp27-none-linux_x86_64.whl" } - { pkg: "pymongo==2.4.1", wheel: "pymongo-2.4.1-cp27-none-linux_x86_64.whl" }
- { pkg: "pyparsing==1.5.6", wheel: "pyparsing-1.5.6-py27-none-any.whl" } - { pkg: "pyparsing==1.5.6", wheel: "pyparsing-1.5.6-py27-none-any.whl" }
- { pkg: "python-memcached==1.48", wheel: "python_memcached-1.48-py27-none-any.whl" } - { pkg: "python-memcached==1.48", wheel: "python_memcached-1.48-py27-none-any.whl" }
- { pkg: "python-openid==2.2.5", wheel: "django_openid_auth-0.4-py27-none-any.whl" } - { pkg: "python-openid==2.2.5", wheel: "python_openid-2.2.5-py27-none-any.whl" }
- { pkg: "python-dateutil==2.1", wheel: "python_dateutil-2.1-py27-none-any.whl" }
- { pkg: "python-social-auth==0.1.21", wheel: "python_social_auth-0.1.21-py27-none-any.whl" }
- { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" } - { pkg: "pytz==2012h", wheel: "pytz-2012h-py27-none-any.whl" }
- { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" } - { pkg: "pysrt==0.4.7", wheel: "pysrt-0.4.7-py27-none-any.whl" }
- { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" } - { pkg: "PyYAML==3.10", wheel: "PyYAML-3.10-cp27-none-linux_x86_64.whl" }
...@@ -92,26 +95,35 @@ jenkins_wheels: ...@@ -92,26 +95,35 @@ jenkins_wheels:
- { pkg: "sympy==0.7.1", wheel: "sympy-0.7.1-py27-none-any.whl" } - { pkg: "sympy==0.7.1", wheel: "sympy-0.7.1-py27-none-any.whl" }
- { pkg: "xmltodict==0.4.1", wheel: "xmltodict-0.4.1-py27-none-any.whl" } - { pkg: "xmltodict==0.4.1", wheel: "xmltodict-0.4.1-py27-none-any.whl" }
- { pkg: "django-ratelimit-backend==0.6", wheel: "django_ratelimit_backend-0.6-py27-none-any.whl" } - { pkg: "django-ratelimit-backend==0.6", wheel: "django_ratelimit_backend-0.6-py27-none-any.whl" }
- { pkg: "unicodecsv==0.9.4", wheel: "unicodecsv-0.9.4-py27-none-any.whl" }
- { pkg: "ipython==0.13.1", wheel: "ipython-0.13.1-py27-none-any.whl" } - { pkg: "ipython==0.13.1", wheel: "ipython-0.13.1-py27-none-any.whl" }
- { pkg: "watchdog==0.6.0", wheel: "watchdog-0.6.0-py27-none-any.whl" } - { pkg: "watchdog==0.6.0", wheel: "watchdog-0.6.0-py27-none-any.whl" }
- { pkg: "dogapi==1.2.1", wheel: "dogapi-1.2.1-py27-none-any.whl" } - { pkg: "dogapi==1.2.1", wheel: "dogapi-1.2.1-py27-none-any.whl" }
- { pkg: "newrelic==2.4.0.4", wheel: "newrelic-2.4.0.4-cp27-none-linux_x86_64.whl" } - { pkg: "newrelic==2.4.0.4", wheel: "newrelic-2.4.0.4-cp27-none-linux_x86_64.whl" }
- { pkg: "sphinx==1.1.3", wheel: "Sphinx-1.1.3-py27-none-any.whl" } - { pkg: "sphinx==1.1.3", wheel: "Sphinx-1.1.3-py27-none-any.whl" }
- { pkg: "sphinx_rtd_theme==0.1.5", wheel: "sphinx_rtd_theme-0.1.5-py27-none-any.whl" }
- { pkg: "Babel==1.3", wheel: "Babel-1.3-py27-none-any.whl" } - { pkg: "Babel==1.3", wheel: "Babel-1.3-py27-none-any.whl" }
- { pkg: "transifex-client==0.9.1", wheel: "transifex_client-0.9.1-py27-none-any.whl" } - { pkg: "transifex-client==0.10", wheel: "transifex_client-0.10-py27-none-any.whl" }
- { pkg: "django_debug_toolbar", wheel: "django_debug_toolbar-0.11.0-py2.py3-none-any.whl" }
- { pkg: "django-debug-toolbar-mongo", wheel: "django_debug_toolbar_mongo-0.1.10-py27-none-any.whl" }
- { pkg: "chrono==1.0.2", wheel: "chrono-1.0.2-py2.py3-none-any.whl" }
- { pkg: "coverage==3.6", wheel: "coverage-3.6-cp27-none-linux_x86_64.whl" } - { pkg: "coverage==3.6", wheel: "coverage-3.6-cp27-none-linux_x86_64.whl" }
- { pkg: "factory_boy==2.0.2", wheel: "factory_boy-2.0.2-py27-none-any.whl" } - { pkg: "ddt==0.7.1", wheel: "ddt-0.7.1-py27-none-any.whl" }
- { pkg: "django-crum==0.5", wheel: "django_crum-0.5-py27-none-any.whl" }
- { pkg: "django_nose==1.1", wheel: "django_nose-1.1-py27-none-any.whl" }
- { pkg: "factory_boy==2.1.2", wheel: "factory_boy-2.1.2-py27-none-any.whl" }
- { pkg: "freezegun==0.1.11", wheel: "freezegun-0.1.11-py27-none-any.whl" }
- { pkg: "mock==1.0.1", wheel: "mock-1.0.1-py27-none-any.whl" } - { pkg: "mock==1.0.1", wheel: "mock-1.0.1-py27-none-any.whl" }
- { pkg: "nosexcover==1.0.7", wheel: "nosexcover-1.0.7-py27-none-any.whl" } - { pkg: "nosexcover==1.0.7", wheel: "nosexcover-1.0.7-py27-none-any.whl" }
- { pkg: "pep8==1.4.5", wheel: "pep8-1.4.5-py27-none-any.whl" } - { pkg: "pep8==1.4.5", wheel: "pep8-1.4.5-py27-none-any.whl" }
- { pkg: "pylint==0.28", wheel: "pylint-0.28.0-py27-none-any.whl" } - { pkg: "pylint==0.28", wheel: "pylint-0.28.0-py27-none-any.whl" }
- { pkg: "python-subunit==0.0.16", wheel: "python_subunit-0.0.16-py27-none-any.whl" }
- { pkg: "rednose==0.3", wheel: "rednose-0.3-py27-none-any.whl" } - { pkg: "rednose==0.3", wheel: "rednose-0.3-py27-none-any.whl" }
- { pkg: "selenium==2.34.0", wheel: "selenium-2.34.0-py27-none-any.whl" } - { pkg: "selenium==2.39.0", wheel: "selenium-2.39.0-py27-none-any.whl" }
- { pkg: "splinter==0.5.4", wheel: "splinter-0.5.4-py27-none-any.whl" } - { pkg: "splinter==0.5.4", wheel: "splinter-0.5.4-py27-none-any.whl" }
- { pkg: "django_nose==1.1", wheel: "django_nose-1.1-py27-none-any.whl" } - { pkg: "testtools==0.9.34", wheel: "testtools-0.9.34-py27-none-any.whl" }
- { pkg: "django_debug_toolbar", wheel: "django_debug_toolbar-0.10.2-py2.py3-none-any.whl" } - { pkg: "Paver==1.2.1", wheel: "Paver-1.2.1-py27-none-any.whl" }
- { pkg: "django-debug-toolbar-mongo", wheel: "django_debug_toolbar_mongo-0.1.10-py27-none-any.whl" } - { pkg: "psutil==1.2.1", wheel: "psutil-1.2.1-cp27-none-linux_x86_64.whl" }
- { pkg: "nose-ignore-docstring", wheel: "nose_ignore_docstring-0.2-py27-none-any.whl" } - { pkg: "lazy==1.1", wheel: "lazy-1.1-py27-none-any.whl" }
- { pkg: "nose-exclude", wheel: "nose_exclude-0.1.10-py27-none-any.whl" } - { pkg: "path.py==3.0.1", wheel: "path.py-3.0.1-py27-none-any.whl" }
- { pkg: "django-crum==0.5", wheel: "django_crum-0.5-py27-none-any.whl" }
- { pkg: "MySQL-python==1.2.4", wheel: "MySQL_python-1.2.4-cp27-none-linux_x86_64.whl" } - { pkg: "MySQL-python==1.2.4", wheel: "MySQL_python-1.2.4-cp27-none-linux_x86_64.whl" }
...@@ -52,3 +52,5 @@ nginx_cfg: ...@@ -52,3 +52,5 @@ nginx_cfg:
# nginx configuration # nginx configuration
version_html: "{{ nginx_app_dir }}/versions.html" version_html: "{{ nginx_app_dir }}/versions.html"
version_json: "{{ nginx_app_dir }}/versions.json" version_json: "{{ nginx_app_dir }}/versions.json"
NGINX_ROBOT_RULES: [ ]
...@@ -31,16 +31,24 @@ ...@@ -31,16 +31,24 @@
- name: Server configuration file - name: Server configuration file
template: > template: >
src=nginx.conf.j2 dest=/etc/nginx/nginx.conf src=etc/nginx/nginx.conf.j2 dest=/etc/nginx/nginx.conf
owner=root group={{ common_web_user }} mode=0644 owner=root group={{ common_web_user }} mode=0644
notify: reload nginx notify: reload nginx
- name: Creating common nginx configuration - name: Creating common nginx configuration
template: > template: >
src=edx-release.j2 dest={{ nginx_sites_available_dir }}/edx-release src=edx/app/nginx/sites-available/edx-release.j2
dest={{ nginx_sites_available_dir }}/edx-release
owner=root group=root mode=0600 owner=root group=root mode=0600
notify: reload nginx notify: reload nginx
- name: Create robot rules
template: >
src=edx/app/nginx/robots.txt.j2 dest={{ nginx_app_dir }}/robots.txt
owner=root group={{ common_web_user }} mode=0644
notify: reload nginx
when: NGINX_ROBOT_RULES|length > 0
- name: Creating link for common nginx configuration - name: Creating link for common nginx configuration
file: > file: >
src={{ nginx_sites_available_dir }}/edx-release src={{ nginx_sites_available_dir }}/edx-release
...@@ -50,7 +58,8 @@ ...@@ -50,7 +58,8 @@
- name: Copying nginx configs for {{ nginx_sites }} - name: Copying nginx configs for {{ nginx_sites }}
template: > template: >
src={{ item }}.j2 dest={{ nginx_sites_available_dir }}/{{ item }} src=edx/app/nginx/sites-available/{{ item }}.j2
dest={{ nginx_sites_available_dir }}/{{ item }}
owner=root group={{ common_web_user }} mode=0640 owner=root group={{ common_web_user }} mode=0640
notify: reload nginx notify: reload nginx
with_items: nginx_sites with_items: nginx_sites
...@@ -113,24 +122,14 @@ ...@@ -113,24 +122,14 @@
- name: Set up nginx access log rotation - name: Set up nginx access log rotation
template: > template: >
dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2 src=etc/logrotate.d/edx_logrotate_nginx_access.j2
owner=root group=root mode=644 dest=/etc/logrotate.d/nginx-access
# removing default link
- name: Removing default nginx config and restart (enabled)
file: path={{ nginx_sites_enabled_dir }}/default state=absent
notify: reload nginx
# Note that nginx logs to /var/log until it reads its configuration, so /etc/logrotate.d/nginx is still good
- name: Set up nginx access log rotation
template: >
dest=/etc/logrotate.d/nginx-access src=edx_logrotate_nginx_access.j2
owner=root group=root mode=644 owner=root group=root mode=644
- name: Set up nginx access log rotation - name: Set up nginx access log rotation
template: > template: >
dest=/etc/logrotate.d/nginx-error src=edx_logrotate_nginx_error.j2 src=etc/logrotate.d/edx_logrotate_nginx_error.j2
dest=/etc/logrotate.d/nginx-error
owner=root group=root mode=644 owner=root group=root mode=644
# If tasks that notify restart nginx don't change the state of the remote system # If tasks that notify restart nginx don't change the state of the remote system
......
{% for item in NGINX_ROBOT_RULES %}
User-agent: {{ item.agent }}
Disallow: {{ item.disallow }}
{% endfor %}
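For illustration, here is a hypothetical NGINX_ROBOT_RULES value (agents and paths invented for this sketch) together with the robots.txt that the template above would roughly render from it:

NGINX_ROBOT_RULES:
  - { agent: '*', disallow: '/courses' }
  - { agent: 'BadBot', disallow: '/' }

# Rendered robots.txt (approximately):
# User-agent: *
# Disallow: /courses
# User-agent: BadBot
# Disallow: /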
server {
listen {{ CERTS_NGINX_PORT }} default_server;
location / {
root {{ CERTS_WEB_ROOT }};
{% include "basic-auth.j2" %}
try_files $uri $uri/valid.html =404;
}
}
...@@ -66,8 +66,10 @@ server { ...@@ -66,8 +66,10 @@ server {
try_files $uri @proxy_to_cms_app; try_files $uri @proxy_to_cms_app;
} }
{% include "robots.j2" %}
# Check security on this # Check security on this
location ~ /static/(?P<file>.*) { location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }}; root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404; try_files /staticfiles/$file /course_static/$file =404;
......
...@@ -12,5 +12,7 @@ server { ...@@ -12,5 +12,7 @@ server {
proxy_set_header X-outside-url $scheme://$host; proxy_set_header X-outside-url $scheme://$host;
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
} }
{% include "robots.j2" %}
} }
...@@ -40,6 +40,8 @@ server { ...@@ -40,6 +40,8 @@ server {
try_files $uri @proxy_to_app; try_files $uri @proxy_to_app;
} }
{% include "robots.j2" %}
location @proxy_to_app { location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port; proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
......
...@@ -46,8 +46,10 @@ server { ...@@ -46,8 +46,10 @@ server {
try_files $uri @proxy_to_lms-preview_app; try_files $uri @proxy_to_lms-preview_app;
} }
{% include "robots.j2" %}
# Check security on this # Check security on this
location ~ /static/(?P<file>.*) { location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir}}; root {{ edxapp_data_dir}};
try_files /staticfiles/$file /course_static/$file =404; try_files /staticfiles/$file /course_static/$file =404;
......
...@@ -62,8 +62,10 @@ server { ...@@ -62,8 +62,10 @@ server {
try_files $uri @proxy_to_lms_app; try_files $uri @proxy_to_lms_app;
} }
{% include "robots.j2" %}
# Check security on this # Check security on this
location ~ /static/(?P<file>.*) { location ~ ^/static/(?P<file>.*) {
root {{ edxapp_data_dir }}; root {{ edxapp_data_dir }};
try_files /staticfiles/$file /course_static/$file =404; try_files /staticfiles/$file /course_static/$file =404;
......
{% for item in nginx_redirects -%}
{%- if "default" in item -%}
{%- set default_site = "default" -%}
{%- else -%}
{%- set default_site = "" -%}
{%- endif -%}
server {
listen 80 {{ default_site }};
listen 443 {{ default_site }} ssl;
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
server_name {{ item['server_name'] }};
return 301 {{ item['redirect'] }}$request_uri;
}
{% endfor %}
...@@ -31,6 +31,8 @@ server { ...@@ -31,6 +31,8 @@ server {
expires epoch; expires epoch;
} }
{% include "robots.j2" %}
location @proxy_to_app { location @proxy_to_app {
client_max_body_size 75K; client_max_body_size 75K;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
......
{% if NGINX_ROBOT_RULES|length > 0 %}
location /robots.txt {
root {{ nginx_app_dir }};
try_files $uri /robots.txt =404;
}
{% endif %}
...@@ -17,6 +17,8 @@ server { ...@@ -17,6 +17,8 @@ server {
try_files $uri @proxy_to_app; try_files $uri @proxy_to_app;
} }
{% include "robots.j2" %}
location @proxy_to_app { location @proxy_to_app {
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port; proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
......
...@@ -41,8 +41,8 @@ NOTIFIER_COMMENT_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE" ...@@ -41,8 +41,8 @@ NOTIFIER_COMMENT_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
NOTIFIER_USER_SERVICE_BASE: "http://localhost:8000" NOTIFIER_USER_SERVICE_BASE: "http://localhost:8000"
NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE" NOTIFIER_USER_SERVICE_API_KEY: "PUT_YOUR_API_KEY_HERE"
NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: !!null NOTIFIER_USER_SERVICE_HTTP_AUTH_USER: ""
NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: !!null NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS: ""
NOTIFIER_CELERY_BROKER_URL: "django://" NOTIFIER_CELERY_BROKER_URL: "django://"
NOTIFIER_LOGO_IMAGE_URL: "{{ NOTIFIER_LMS_URL_BASE }}/static/images/header-logo.png" NOTIFIER_LOGO_IMAGE_URL: "{{ NOTIFIER_LMS_URL_BASE }}/static/images/header-logo.png"
NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor" NOTIFIER_SUPERVISOR_LOG_DEST: "{{ COMMON_DATA_DIR }}/log/supervisor"
...@@ -68,33 +68,33 @@ notifier_debian_pkgs: ...@@ -68,33 +68,33 @@ notifier_debian_pkgs:
# the env variable for the supervisor job definition. # the env variable for the supervisor job definition.
# #
notifier_env_vars: notifier_env_vars:
FORUM_DIGEST_EMAIL_SENDER: $NOTIFIER_DIGEST_EMAIL_SENDER FORUM_DIGEST_EMAIL_SENDER: "{{ NOTIFIER_DIGEST_EMAIL_SENDER }}"
FORUM_DIGEST_EMAIL_SUBJECT: $NOTIFIER_DIGEST_EMAIL_SUBJECT FORUM_DIGEST_EMAIL_SUBJECT: "{{ NOTIFIER_DIGEST_EMAIL_SUBJECT }}"
FORUM_DIGEST_EMAIL_TITLE: $NOTIFIER_DIGEST_EMAIL_TITLE FORUM_DIGEST_EMAIL_TITLE: "{{ NOTIFIER_DIGEST_EMAIL_TITLE }}"
FORUM_DIGEST_EMAIL_DESCRIPTION: $NOTIFIER_DIGEST_EMAIL_DESCRIPTION FORUM_DIGEST_EMAIL_DESCRIPTION: "{{ NOTIFIER_DIGEST_EMAIL_DESCRIPTION }}"
EMAIL_SENDER_POSTAL_ADDRESS: $NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS EMAIL_SENDER_POSTAL_ADDRESS: "{{ NOTIFIER_EMAIL_SENDER_POSTAL_ADDRESS }}"
NOTIFIER_LANGUAGE: $NOTIFIER_LANGUAGE NOTIFIER_LANGUAGE: "{{ NOTIFIER_LANGUAGE }}"
NOTIFIER_ENV: $NOTIFIER_ENV NOTIFIER_ENV: "{{ NOTIFIER_ENV }}"
NOTIFIER_DB_DIR: $NOTIFIER_DB_DIR NOTIFIER_DB_DIR: "{{ NOTIFIER_DB_DIR }}"
EMAIL_BACKEND: $NOTIFIER_EMAIL_BACKEND EMAIL_BACKEND: "{{ NOTIFIER_EMAIL_BACKEND }}"
EMAIL_HOST: $NOTIFIER_EMAIL_HOST EMAIL_HOST: "{{ NOTIFIER_EMAIL_HOST }}"
EMAIL_PORT: $NOTIFIER_EMAIL_PORT EMAIL_PORT: "{{ NOTIFIER_EMAIL_PORT }}"
EMAIL_HOST_USER: $NOTIFIER_EMAIL_USER EMAIL_HOST_USER: "{{ NOTIFIER_EMAIL_USER }}"
EMAIL_HOST_PASSWORD: $NOTIFIER_EMAIL_PASS EMAIL_HOST_PASSWORD: "{{ NOTIFIER_EMAIL_PASS }}"
EMAIL_USE_TLS: $NOTIFIER_EMAIL_USE_TLS EMAIL_USE_TLS: "{{ NOTIFIER_EMAIL_USE_TLS }}"
EMAIL_REWRITE_RECIPIENT: $NOTIFIER_EMAIL_REWRITE_RECIPIENT EMAIL_REWRITE_RECIPIENT: "{{ NOTIFIER_EMAIL_REWRITE_RECIPIENT }}"
LMS_URL_BASE: $NOTIFIER_LMS_URL_BASE LMS_URL_BASE: "{{ NOTIFIER_LMS_URL_BASE }}"
SECRET_KEY: $NOTIFIER_LMS_SECRET_KEY SECRET_KEY: "{{ NOTIFIER_LMS_SECRET_KEY }}"
CS_URL_BASE: $NOTIFIER_COMMENT_SERVICE_BASE CS_URL_BASE: "{{ NOTIFIER_COMMENT_SERVICE_BASE }}"
CS_API_KEY: $NOTIFIER_COMMENT_SERVICE_API_KEY CS_API_KEY: "{{ NOTIFIER_COMMENT_SERVICE_API_KEY }}"
US_URL_BASE: $NOTIFIER_USER_SERVICE_BASE US_URL_BASE: "{{ NOTIFIER_USER_SERVICE_BASE }}"
US_API_KEY: $NOTIFIER_USER_SERVICE_API_KEY US_API_KEY: "{{ NOTIFIER_USER_SERVICE_API_KEY }}"
DATADOG_API_KEY: $NOTIFIER_DD_API_KEY DATADOG_API_KEY: "{{ DATADOG_API_KEY }}"
LOG_LEVEL: $NOTIFIER_LOG_LEVEL LOG_LEVEL: "{{ NOTIFIER_LOG_LEVEL }}"
RSYSLOG_ENABLED: $NOTIFIER_RSYSLOG_ENABLED RSYSLOG_ENABLED: "{{ NOTIFIER_RSYSLOG_ENABLED }}"
BROKER_URL: $NOTIFIER_CELERY_BROKER_URL BROKER_URL: "{{ NOTIFIER_CELERY_BROKER_URL }}"
REQUESTS_CA_BUNDLE: $NOTIFER_REQUESTS_CA_BUNDLE REQUESTS_CA_BUNDLE: "{{ NOTIFER_REQUESTS_CA_BUNDLE }}"
US_HTTP_AUTH_USER: $NOTIFIER_USER_SERVICE_HTTP_AUTH_USER US_HTTP_AUTH_USER: "{{ NOTIFIER_USER_SERVICE_HTTP_AUTH_USER }}"
US_HTTP_AUTH_PASS: $NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS US_HTTP_AUTH_PASS: "{{ NOTIFIER_USER_SERVICE_HTTP_AUTH_PASS }}"
FORUM_DIGEST_TASK_INTERVAL: $NOTIFIER_DIGEST_TASK_INTERVAL FORUM_DIGEST_TASK_INTERVAL: "{{ NOTIFIER_DIGEST_TASK_INTERVAL }}"
LOGO_IMAGE_URL: $NOTIFIER_LOGO_IMAGE_URL LOGO_IMAGE_URL: "{{ NOTIFIER_LOGO_IMAGE_URL }}"
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
git: git:
dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }} dest={{ NOTIFIER_CODE_DIR }} repo={{ NOTIFIER_SOURCE_REPO }}
version={{ NOTIFIER_VERSION }} version={{ NOTIFIER_VERSION }}
accept_hostkey=yes
sudo: true sudo: true
sudo_user: "{{ NOTIFIER_USER }}" sudo_user: "{{ NOTIFIER_USER }}"
notify: notify:
......
...@@ -27,5 +27,5 @@ stderr_logfile_backups=10 ...@@ -27,5 +27,5 @@ stderr_logfile_backups=10
stderr_capture_maxbytes=1MB stderr_capture_maxbytes=1MB
environment=PID='/var/tmp/notifier-celery-workers.pid',LANG=en_US.UTF-8, environment=PID='/var/tmp/notifier-celery-workers.pid',LANG=en_US.UTF-8,
{%- for name,value in notifier_env_vars.items() -%} {%- for name,value in notifier_env_vars.items() -%}
{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%} {%- if value -%}{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%}{%- endif -%}
{%- endfor -%} {%- endfor -%}
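To make the effect of the added {%- if value -%} guard concrete, here is a minimal sketch with a hypothetical two-entry notifier_env_vars (values invented for illustration); an entry whose value is empty is now dropped from the supervisor environment string instead of being rendered as NAME="":

notifier_env_vars:
  EMAIL_HOST: 'smtp.example.com'
  EMAIL_HOST_PASSWORD: ''

# Rendered line (roughly):
# environment=PID='/var/tmp/notifier-celery-workers.pid',LANG=en_US.UTF-8,EMAIL_HOST="smtp.example.com"
# (EMAIL_HOST_PASSWORD is skipped because its value is empty)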
...@@ -27,5 +27,5 @@ stderr_logfile_backups=10 ...@@ -27,5 +27,5 @@ stderr_logfile_backups=10
stderr_capture_maxbytes=1MB stderr_capture_maxbytes=1MB
environment=PID='/var/tmp/notifier-scheduler.pid',LANG=en_US.UTF-8, environment=PID='/var/tmp/notifier-scheduler.pid',LANG=en_US.UTF-8,
{%- for name,value in notifier_env_vars.items() -%} {%- for name,value in notifier_env_vars.items() -%}
{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%} {%- if value -%}{{name}}="{{value}}"{%- if not loop.last -%},{%- endif -%}{%- endif -%}
{%- endfor -%} {%- endfor -%}
...@@ -40,7 +40,9 @@ ...@@ -40,7 +40,9 @@
# Do A Checkout # Do A Checkout
- name: git checkout ora repo into {{ ora_app_dir }} - name: git checkout ora repo into {{ ora_app_dir }}
git: dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }} git: >
dest={{ ora_code_dir }} repo={{ ora_source_repo }} version={{ ora_version }}
accept_hostkey=yes
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- restart ora - restart ora
...@@ -52,7 +54,9 @@ ...@@ -52,7 +54,9 @@
# Install the python pre requirements into {{ ora_venv_dir }} # Install the python pre requirements into {{ ora_venv_dir }}
- name: install python pre-requirements - name: install python pre-requirements
pip: requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present pip: >
requirements="{{ ora_pre_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- restart ora - restart ora
...@@ -60,7 +64,9 @@ ...@@ -60,7 +64,9 @@
# Install the python post requirements into {{ ora_venv_dir }} # Install the python post requirements into {{ ora_venv_dir }}
- name: install python post-requirements - name: install python post-requirements
pip: requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present pip: >
requirements="{{ ora_post_requirements_file }}" virtualenv="{{ ora_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- restart ora - restart ora
......
# Do A Checkout # Do A Checkout
- name: git checkout ease repo into its base dir - name: git checkout ease repo into its base dir
git: dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}} git: >
dest={{ora_ease_code_dir}} repo={{ora_ease_source_repo}} version={{ora_ease_version}}
accept_hostkey=yes
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- restart ora - restart ora
...@@ -16,7 +18,9 @@ ...@@ -16,7 +18,9 @@
# Install the python pre requirements into {{ ora_ease_venv_dir }} # Install the python pre requirements into {{ ora_ease_venv_dir }}
- name: install ease python pre-requirements - name: install ease python pre-requirements
pip: requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present pip: >
requirements="{{ora_ease_pre_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- restart ora - restart ora
...@@ -24,7 +28,9 @@ ...@@ -24,7 +28,9 @@
# Install the python post requirements into {{ ora_ease_venv_dir }} # Install the python post requirements into {{ ora_ease_venv_dir }}
- name: install ease python post-requirements - name: install ease python post-requirements
pip: requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present pip: >
requirements="{{ora_ease_post_requirements_file}}" virtualenv="{{ora_ease_venv_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ ora_user }}" sudo_user: "{{ ora_user }}"
notify: notify:
- restart ora - restart ora
......
...@@ -28,9 +28,15 @@ rabbitmq_refresh: false ...@@ -28,9 +28,15 @@ rabbitmq_refresh: false
rabbitmq_apt_key: "http://www.rabbitmq.com/rabbitmq-signing-key-public.asc" rabbitmq_apt_key: "http://www.rabbitmq.com/rabbitmq-signing-key-public.asc"
rabbitmq_repository: "deb http://www.rabbitmq.com/debian/ testing main" rabbitmq_repository: "deb http://www.rabbitmq.com/debian/ testing main"
# We mirror the deb package for rabbitmq-server because
# nodes need to be running the same version
rabbitmq_pkg_url: "http://files.edx.org/rabbitmq_packages/rabbitmq-server_3.2.3-1_all.deb"
rabbitmq_pkg: "rabbitmq-server" rabbitmq_pkg: "rabbitmq-server"
rabbitmq_debian_pkgs: rabbitmq_debian_pkgs:
- python-software-properties - python-software-properties
# for installing the deb package with
# dependencies
- gdebi
rabbitmq_config_dir: "/etc/rabbitmq" rabbitmq_config_dir: "/etc/rabbitmq"
rabbitmq_cookie_dir: "/var/lib/rabbitmq" rabbitmq_cookie_dir: "/var/lib/rabbitmq"
......
...@@ -10,10 +10,23 @@ ...@@ -10,10 +10,23 @@
apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present apt: pkg={{",".join(rabbitmq_debian_pkgs)}} state=present
- name: add rabbit repository - name: add rabbit repository
apt_repository: repo="{{rabbitmq_repository}}" state=present apt_repository: repo="{{rabbitmq_repository}}" state=present update_cache=yes
- name: install rabbitmq - name: fetch the rabbitmq server deb
apt: pkg={{rabbitmq_pkg}} state=present update_cache=yes get_url: >
url={{ rabbitmq_pkg_url }}
dest=/var/tmp/{{ rabbitmq_pkg_url|basename }}
- name: check if rabbit is installed
shell: >
dpkg -s rabbitmq-server >/dev/null 2>&1 || echo "not installed"
register: is_installed
- name: install rabbit package using gdebi
shell: >
gdebi --n {{ rabbitmq_pkg_url|basename }}
chdir=/var/tmp
when: is_installed.stdout == "not installed"
- name: stop rabbit cluster - name: stop rabbit cluster
service: name=rabbitmq-server state=stopped service: name=rabbitmq-server state=stopped
...@@ -76,6 +89,9 @@ ...@@ -76,6 +89,9 @@
- name: add vhosts - name: add vhosts
rabbitmq_vhost: name={{ item }} state=present rabbitmq_vhost: name={{ item }} state=present
with_items: RABBITMQ_VHOSTS with_items: RABBITMQ_VHOSTS
tags:
- vhosts
- maintenance
- name: add admin users - name: add admin users
rabbitmq_user: > rabbitmq_user: >
...@@ -87,10 +103,16 @@ ...@@ -87,10 +103,16 @@
- ${rabbitmq_auth_config.admins} - ${rabbitmq_auth_config.admins}
- RABBITMQ_VHOSTS - RABBITMQ_VHOSTS
when: "'admins' in rabbitmq_auth_config" when: "'admins' in rabbitmq_auth_config"
tags:
- users
- maintenance
- name: make queues mirrored - name: make queues mirrored
shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'" shell: "/usr/sbin/rabbitmqctl set_policy HA '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'"
when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1 when: RABBITMQ_CLUSTERED or rabbitmq_clustered_hosts|length > 1
tags:
- ha
- maintenance
# #
# Depends upon the management plugin # Depends upon the management plugin
......
...@@ -53,6 +53,7 @@ ...@@ -53,6 +53,7 @@
git: > git: >
repo=https://github.com/sstephenson/rbenv.git repo=https://github.com/sstephenson/rbenv.git
dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }} dest={{ rbenv_dir }}/.rbenv version={{ rbenv_version }}
accept_hostkey=yes
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
- name: ensure ruby_env exists - name: ensure ruby_env exists
...@@ -79,7 +80,9 @@ ...@@ -79,7 +80,9 @@
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
- name: clone ruby-build repo - name: clone ruby-build repo
git: repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build git: >
repo=https://github.com/sstephenson/ruby-build.git dest={{ tempdir.stdout }}/ruby-build
accept_hostkey=yes
when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers) when: rbuild_present|failed or (installable_ruby_vers is defined and rbenv_ruby_version not in installable_ruby_vers)
sudo_user: "{{ rbenv_user }}" sudo_user: "{{ rbenv_user }}"
......
...@@ -87,11 +87,15 @@ ...@@ -87,11 +87,15 @@
- name: install supervisor in its venv - name: install supervisor in its venv
pip: name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present pip: >
name=supervisor virtualenv="{{supervisor_venv_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
- name: install supervisor in its venv - name: install supervisor in its venv
pip: name=boto virtualenv="{{supervisor_venv_dir}}" state=present pip: >
name=boto virtualenv="{{supervisor_venv_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ supervisor_user }}" sudo_user: "{{ supervisor_user }}"
when: supervisor_service == "supervisor" and disable_edx_services and not devstack when: supervisor_service == "supervisor" and disable_edx_services and not devstack
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
user_role_name: user user_role_name: user
# override this var to add a prefix to the prompt # override this var to add a prefix to the prompt
# also need to set commont_update_bashrc for to # also need to set comment_update_bashrc for it to
# update the system bashrc default # update the system bashrc default
USER_CMD_PROMPT: "" USER_CMD_PROMPT: ""
......
...@@ -66,7 +66,7 @@ ...@@ -66,7 +66,7 @@
# #
# By default for restricted users we only allow sudo, if you # By default for restricted users we only allow sudo, if you
# want to provide more binaries add them to user_rbash_links # want to provide more binaries add them to user_rbash_links
# which can be passed in as a paramter to the role. # which can be passed in as a parameter to the role.
# #
- debug: var=user_info - debug: var=user_info
...@@ -74,6 +74,13 @@ ...@@ -74,6 +74,13 @@
- name: create the edxadmin group - name: create the edxadmin group
group: name=edxadmin state=present group: name=edxadmin state=present
# some AMIs (such as EMR master nodes) don't read the config files out of /etc/sudoers.d by default
- name: ensure sudoers.d is read
lineinfile: >
dest=/etc/sudoers state=present
regexp='^#includedir /etc/sudoers.d' line='#includedir /etc/sudoers.d'
validate='visudo -cf %s'
# give full sudo admin access to the edxadmin group # give full sudo admin access to the edxadmin group
- name: grant full sudo access to the edxadmin group - name: grant full sudo access to the edxadmin group
copy: > copy: >
...@@ -82,45 +89,47 @@ ...@@ -82,45 +89,47 @@
mode=0440 validate='visudo -cf %s' mode=0440 validate='visudo -cf %s'
- name: create the users - name: create the users
user: user: >
name={{ item.name }} name={{ item.name }}
shell=/bin/bash shell=/bin/bash
state={{ item.state | default('present') }}
with_items: user_info with_items: user_info
- name: create .ssh directory - name: create .ssh directory
file: file: >
path=/home/{{ item.name }}/.ssh state=directory mode=0750 path=/home/{{ item.name }}/.ssh state=directory mode=0750
owner={{ item.name }} owner={{ item.name }}
when: item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: assign admin role to admin users - name: assign admin role to admin users
user: user: >
name={{ item.name }} name={{ item.name }}
groups=edxadmin groups=edxadmin
when: item.type is defined and item.type == 'admin' when: item.type is defined and item.type == 'admin' and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
# authorized_keys2 used here so that personal # authorized_keys2 used here so that personal
# keys can be copied to authorized_keys # keys can be copied to authorized_keys
# force is set to yes here, otherwise the keys # force is set to yes here, otherwise the keys
# won't update if they haven't changed on teh github # won't update if they haven't changed on the github
# side # side
- name: copy github key[s] to .ssh/authorized_keys2 - name: copy github key[s] to .ssh/authorized_keys2
get_url: get_url: >
url=https://github.com/{{ item.name }}.keys url=https://github.com/{{ item.name }}.keys
force=yes force=yes
dest=/home/{{ item.name }}/.ssh/authorized_keys2 mode=0640 dest=/home/{{ item.name }}/.ssh/authorized_keys2 mode=0640
owner={{ item.name }} owner={{ item.name }}
when: item.github is defined when: item.github is defined and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: copy additional authorized keys - name: copy additional authorized keys
copy: > copy: >
content="{{ "\n".join(item.authorized_keys) }}" content="{{ '\n'.join(item.authorized_keys) }}"
dest=/home/{{ item.name }}/.ssh/authorized_keys mode=0640 dest=/home/{{ item.name }}/.ssh/authorized_keys mode=0640
owner={{ item.name }} owner={{ item.name }}
mode=0440 mode=0440
when: item.authorized_keys is defined when: item.authorized_keys is defined and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: create bashrc file for normal users - name: create bashrc file for normal users
...@@ -128,7 +137,7 @@ ...@@ -128,7 +137,7 @@
src=default.bashrc.j2 src=default.bashrc.j2
dest=/home/{{ item.name }}/.bashrc mode=0640 dest=/home/{{ item.name }}/.bashrc mode=0640
owner={{ item.name }} owner={{ item.name }}
when: not (item.type is defined and item.type == 'restricted') when: not (item.type is defined and item.type == 'restricted') and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: create .profile for all users - name: create .profile for all users
...@@ -136,16 +145,17 @@ ...@@ -136,16 +145,17 @@
src=default.profile.j2 src=default.profile.j2
dest=/home/{{ item.name }}/.profile mode=0640 dest=/home/{{ item.name }}/.profile mode=0640
owner={{ item.name }} owner={{ item.name }}
when: item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
######################################################## ########################################################
# All tasks below this line are for restricted users # All tasks below this line are for restricted users
- name: modify shell for restricted users - name: modify shell for restricted users
user: user: >
name={{ item.name }} name={{ item.name }}
shell=/bin/rbash shell=/bin/rbash
when: item.type is defined and item.type == 'restricted' when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: create bashrc file for restricted users - name: create bashrc file for restricted users
...@@ -153,11 +163,11 @@ ...@@ -153,11 +163,11 @@
src=restricted.bashrc.j2 src=restricted.bashrc.j2
dest=/home/{{ item.name }}/.bashrc mode=0640 dest=/home/{{ item.name }}/.bashrc mode=0640
owner={{ item.name }} owner={{ item.name }}
when: item.type is defined and item.type == 'restricted' when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: create sudoers file from template - name: create sudoers file from template
template: template: >
dest=/etc/sudoers.d/99-restricted dest=/etc/sudoers.d/99-restricted
src=restricted.sudoers.conf.j2 owner="root" src=restricted.sudoers.conf.j2 owner="root"
group="root" mode=0440 validate='visudo -cf %s' group="root" mode=0440 validate='visudo -cf %s'
...@@ -167,14 +177,14 @@ ...@@ -167,14 +177,14 @@
- name: change home directory ownership to root for restricted users - name: change home directory ownership to root for restricted users
shell: "chown -R root:{{ item.name }} /home/{{ item.name }}" shell: "chown -R root:{{ item.name }} /home/{{ item.name }}"
when: item.type is defined and item.type == 'restricted' when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: create ~/bin directory - name: create ~/bin directory
file: file: >
path=/home/{{ item.name }}/bin state=directory mode=0750 path=/home/{{ item.name }}/bin state=directory mode=0750
owner="root" group={{ item.name }} owner="root" group={{ item.name }}
when: item.type is defined and item.type == 'restricted' when: item.type is defined and item.type == 'restricted' and item.get('state', 'present') == 'present'
with_items: user_info with_items: user_info
- name: create allowed command links - name: create allowed command links
...@@ -182,7 +192,7 @@ ...@@ -182,7 +192,7 @@
src: "{{ item[1] }}" src: "{{ item[1] }}"
dest: "/home/{{ item[0].name }}/bin/{{ item[1]|basename }}" dest: "/home/{{ item[0].name }}/bin/{{ item[1]|basename }}"
state: link state: link
when: item[0].type is defined and item[0].type == 'restricted' when: item[0].type is defined and item[0].type == 'restricted' and item[0].get('state', 'present') == 'present'
with_nested: with_nested:
- user_info - user_info
- user_rbash_links - user_rbash_links
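The item.get('state', 'present') guards added throughout the tasks above allow a user to be retired declaratively. A minimal sketch of a user_info list under that convention (names invented for illustration):

user_info:
  - name: activedev
    type: admin
    github: true
  - name: formerdev
    state: absent

# 'formerdev' is removed by the 'create the users' task and skipped by all of the
# follow-up tasks (.ssh directory, authorized keys, .bashrc, .profile, rbash setup).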
...@@ -53,10 +53,12 @@ if [ -n "$force_color_prompt" ]; then ...@@ -53,10 +53,12 @@ if [ -n "$force_color_prompt" ]; then
fi fi
fi fi
command -v ec2metadata >/dev/null 2>&1 && { INSTANCEID=$(ec2metadata --instance-id); }
if [ "$color_prompt" = yes ]; then if [ "$color_prompt" = yes ]; then
PS1='{{ USER_CMD_PROMPT }}${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' PS1='{{ USER_CMD_PROMPT }}${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h $INSTANCEID\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else else
PS1='{{ USER_CMD_PROMPT}}${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' PS1='{{ USER_CMD_PROMPT}}${debian_chroot:+($debian_chroot)}\u@\h $INSTANCEID:\w\$ '
fi fi
unset color_prompt force_color_prompt unset color_prompt force_color_prompt
......
...@@ -2,21 +2,6 @@ ...@@ -2,21 +2,6 @@
# when the role is included # when the role is included
--- ---
XQUEUE_NGINX_PORT: 18040 XQUEUE_NGINX_PORT: 18040
xqueue_app_dir: "{{ COMMON_APP_DIR }}/xqueue"
xqueue_code_dir: "{{ xqueue_app_dir }}/xqueue"
xqueue_data_dir: "{{ COMMON_DATA_DIR }}/xqueue"
xqueue_venvs_dir: "{{ xqueue_app_dir }}/venvs"
xqueue_venv_dir: "{{ xqueue_venvs_dir }}/xqueue"
xqueue_venv_bin: "{{ xqueue_venv_dir }}/bin"
xqueue_user: "xqueue"
# Default nginx listen port
# These should be overrided if you want
# to serve all content on port 80
xqueue_gunicorn_port: 8040
xqueue_gunicorn_host: 127.0.0.1
XQUEUE_QUEUES: XQUEUE_QUEUES:
# push queue # push queue
'edX-Open_DemoX': 'http://localhost:18050' 'edX-Open_DemoX': 'http://localhost:18050'
...@@ -46,6 +31,23 @@ XQUEUE_MYSQL_PASSWORD: 'password' ...@@ -46,6 +31,23 @@ XQUEUE_MYSQL_PASSWORD: 'password'
XQUEUE_MYSQL_HOST: 'localhost' XQUEUE_MYSQL_HOST: 'localhost'
XQUEUE_MYSQL_PORT: '3306' XQUEUE_MYSQL_PORT: '3306'
# Internal vars below this line
#############################################
xqueue_app_dir: "{{ COMMON_APP_DIR }}/xqueue"
xqueue_code_dir: "{{ xqueue_app_dir }}/xqueue"
xqueue_data_dir: "{{ COMMON_DATA_DIR }}/xqueue"
xqueue_venvs_dir: "{{ xqueue_app_dir }}/venvs"
xqueue_venv_dir: "{{ xqueue_venvs_dir }}/xqueue"
xqueue_venv_bin: "{{ xqueue_venv_dir }}/bin"
xqueue_user: "xqueue"
# Default nginx listen port
# These should be overridden if you want
# to serve all content on port 80
xqueue_gunicorn_port: 8040
xqueue_gunicorn_host: 127.0.0.1
xqueue_env_config: xqueue_env_config:
XQUEUES: $XQUEUE_QUEUES XQUEUES: $XQUEUE_QUEUES
XQUEUE_WORKERS_PER_QUEUE: 12 XQUEUE_WORKERS_PER_QUEUE: 12
......
...@@ -28,7 +28,9 @@ ...@@ -28,7 +28,9 @@
# Do A Checkout # Do A Checkout
- name: git checkout xqueue repo into xqueue_code_dir - name: git checkout xqueue repo into xqueue_code_dir
git: dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }} git: >
dest={{ xqueue_code_dir }} repo={{ xqueue_source_repo }} version={{ xqueue_version }}
accept_hostkey=yes
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- restart xqueue - restart xqueue
...@@ -36,26 +38,44 @@ ...@@ -36,26 +38,44 @@
# Install the python pre requirements into {{ xqueue_venv_dir }} # Install the python pre requirements into {{ xqueue_venv_dir }}
- name : install python pre-requirements - name : install python pre-requirements
pip: requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present pip: >
requirements="{{ xqueue_pre_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- restart xqueue - restart xqueue
# Install the python post requirements into {{ xqueue_venv_dir }} # Install the python post requirements into {{ xqueue_venv_dir }}
- name : install python post-requirements - name : install python post-requirements
pip: requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present pip: >
requirements="{{ xqueue_post_requirements_file }}" virtualenv="{{ xqueue_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
notify: notify:
- restart xqueue - restart xqueue
# If there is a common user for migrations, run the migrations using its username
# and credentials. If not, we use the xqueue mysql user.
- name: syncdb and migrate
shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_migrate --pythonpath={{ xqueue_code_dir }}
sudo_user: "{{ xqueue_user }}"
environment:
DB_MIGRATION_USER: "{{ COMMON_MYSQL_MIGRATE_USER }}"
DB_MIGRATION_PASS: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
when: migrate_db is defined and migrate_db|lower == "yes" and COMMON_MYSQL_MIGRATE_PASS
notify:
- restart xqueue
- name: syncdb and migrate - name: syncdb and migrate
shell: > shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py syncdb --migrate --noinput --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
when: migrate_db is defined and migrate_db|lower == "yes"
sudo_user: "{{ xqueue_user }}" sudo_user: "{{ xqueue_user }}"
when: migrate_db is defined and migrate_db|lower == "yes" and not COMMON_MYSQL_MIGRATE_PASS
notify: notify:
- restart xqueue - restart xqueue
- name: create users - name: create users
shell: > shell: >
SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }} SERVICE_VARIANT=xqueue {{ xqueue_venv_bin }}/django-admin.py update_users --settings=xqueue.aws_settings --pythonpath={{ xqueue_code_dir }}
......
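A minimal sketch of the override variables that would select the first migration task above (values invented for illustration); when COMMON_MYSQL_MIGRATE_PASS is unset or empty, the second task runs against the regular xqueue database settings instead:

migrate_db: 'yes'
COMMON_MYSQL_MIGRATE_USER: 'migrate'
COMMON_MYSQL_MIGRATE_PASS: 'example-password'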
...@@ -12,17 +12,23 @@ ...@@ -12,17 +12,23 @@
when: not disable_edx_services when: not disable_edx_services
- name: checkout code - name: checkout code
git: dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}} git: >
dest={{xserver_code_dir}} repo={{xserver_source_repo}} version={{xserver_version}}
accept_hostkey=yes
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: restart xserver notify: restart xserver
- name: install requirements - name: install requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present pip: >
requirements="{{xserver_requirements_file}}" virtualenv="{{ xserver_venv_dir }}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: restart xserver notify: restart xserver
- name: install sandbox requirements - name: install sandbox requirements
pip: requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present pip: >
requirements="{{xserver_requirements_file}}" virtualenv="{{xserver_venv_sandbox_dir}}" state=present
extra_args="-i {{ COMMON_PYPI_MIRROR_URL }}"
sudo_user: "{{ xserver_user }}" sudo_user: "{{ xserver_user }}"
notify: restart xserver notify: restart xserver
...@@ -44,7 +50,9 @@ ...@@ -44,7 +50,9 @@
notify: restart xserver notify: restart xserver
- name: checkout grader code - name: checkout grader code
git: dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }} git: >
dest={{ XSERVER_GRADER_DIR }} repo={{ XSERVER_GRADER_SOURCE }} version={{ xserver_grader_version }}
accept_hostkey=yes
environment: environment:
GIT_SSH: /tmp/git_ssh.sh GIT_SSH: /tmp/git_ssh.sh
notify: restart xserver notify: restart xserver
......
...@@ -32,6 +32,8 @@ ...@@ -32,6 +32,8 @@
group="{{ common_web_group }}" group="{{ common_web_group }}"
with_items: with_items:
- "{{ xserver_app_dir }}" - "{{ xserver_app_dir }}"
# needed for the ansible 1.5 git module
- "{{ xserver_app_dir }}/.ssh"
- "{{ xserver_venvs_dir }}" - "{{ xserver_venvs_dir }}"
- "{{ xserver_data_dir }}" - "{{ xserver_data_dir }}"
- "{{ xserver_data_dir }}/data" - "{{ xserver_data_dir }}/data"
......
#!/bin/sh #!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i {{ xserver_git_identity }} "$@" exec /usr/bin/ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ xserver_git_identity }} "$@"
...@@ -3,14 +3,15 @@ ...@@ -3,14 +3,15 @@
sudo: True sudo: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: "yes" migrate_db: 'yes'
openid_workaround: True openid_workaround: true
devstack: True devstack: true
disable_edx_services: True disable_edx_services: true
edx_platform_version: 'master' edx_platform_version: 'master'
mongo_enable_journal: False mongo_enable_journal: false
EDXAPP_NO_PREREQ_INSTALL: 0 EDXAPP_NO_PREREQ_INSTALL: 0
COMMON_MOTD_TEMPLATE: "devstack_motd.tail.j2" COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
COMMON_SSH_PASSWORD_AUTH: "yes"
vars_files: vars_files:
- "group_vars/all" - "group_vars/all"
roles: roles:
......
...@@ -3,10 +3,13 @@ ...@@ -3,10 +3,13 @@
sudo: True sudo: True
gather_facts: True gather_facts: True
vars: vars:
migrate_db: "yes" migrate_db: 'yes'
openid_workaround: True openid_workaround: true
EDXAPP_LMS_NGINX_PORT: '80'
edx_platform_version: 'master' edx_platform_version: 'master'
EDXAPP_LMS_NGINX_PORT: '80'
EDX_ANSIBLE_DUMP_VARS: true
CERTS_DOWNLOAD_URL: 'http://192.168.33.10:18090'
CERTS_VERIFY_URL: 'http://192.168.33.10:18090'
vars_files: vars_files:
- "group_vars/all" - "group_vars/all"
roles: roles:
...@@ -19,6 +22,7 @@ ...@@ -19,6 +22,7 @@
- ora - ora
- forum - forum
- xqueue - xqueue
- certs
nginx_default_sites: nginx_default_sites:
- lms - lms
- cms - cms
...@@ -33,4 +37,5 @@ ...@@ -33,4 +37,5 @@
- forum - forum
- { role: "xqueue", update_users: True } - { role: "xqueue", update_users: True }
- ora - ora
- certs
- edx_ansible - edx_ansible
ansible==1.4.4 ansible==1.5.4
PyYAML==3.10 PyYAML==3.11
Jinja2==2.7.2 Jinja2==2.7.2
MarkupSafe==0.18 MarkupSafe==0.21
argparse==1.2.1 argparse==1.2.1
boto==2.20.1 boto==2.20.1
ecdsa==0.10 ecdsa==0.11
paramiko==1.12.0 paramiko==1.13.0
pycrypto==2.6.1 pycrypto==2.6.1
wsgiref==0.1.2 wsgiref==0.1.2
docopt==0.6.1 docopt==0.6.1
#!/bin/sh
## ##
## Installs the pre-requisites for running edX on a single Ubuntu 12.04 ## Installs the pre-requisites for running edX on a single Ubuntu 12.04
## instance. This script is provided as a convenience and any of these ## instance. This script is provided as a convenience and any of these
## steps could be executed manually. ## steps could be executed manually.
## ##
## Note that this script requires that you have the ability to run ## Note that this script requires that you have the ability to run
## commands as root via sudo. Caveat Emptor! ## commands as root via sudo. Caveat Emptor!
## ##
...@@ -27,7 +28,7 @@ sudo pip install --upgrade virtualenv ...@@ -27,7 +28,7 @@ sudo pip install --upgrade virtualenv
## Clone the configuration repository and run Ansible ## Clone the configuration repository and run Ansible
## ##
cd /var/tmp cd /var/tmp
git clone https://github.com/edx/configuration git clone -b release https://github.com/edx/configuration
## ##
## Install the ansible requirements ## Install the ansible requirements
...@@ -38,5 +39,4 @@ sudo pip install -r requirements.txt ...@@ -38,5 +39,4 @@ sudo pip install -r requirements.txt
## ##
## Run the edx_sandbox.yml playbook in the configuration/playbooks directory ## Run the edx_sandbox.yml playbook in the configuration/playbooks directory
## ##
cd /var/tmp/configuration/playbooks cd /var/tmp/configuration/playbooks && sudo ansible-playbook -c local ./edx_sandbox.yml -i "localhost,"
sudo ansible-playbook -c local ./edx_sandbox.yml -i "localhost,"
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# Ansible provisioning wrapper script that # Ansible provisioning wrapper script that
# assumes the following parameters set # assumes the following parameters set
# as environment variables # as environment variables
# #
# - github_username # - github_username
# - server_type # - server_type
# - instance_type # - instance_type
...@@ -41,7 +41,7 @@ if [[ ! -f $BOTO_CONFIG ]]; then ...@@ -41,7 +41,7 @@ if [[ ! -f $BOTO_CONFIG ]]; then
exit 1 exit 1
fi fi
extra_vars="/var/tmp/extra-vars-$$.yml" extra_vars_file="/var/tmp/extra-vars-$$.yml"
if [[ -z $region ]]; then if [[ -z $region ]]; then
region="us-east-1" region="us-east-1"
...@@ -65,14 +65,14 @@ fi ...@@ -65,14 +65,14 @@ fi
if [[ -z $ami ]]; then if [[ -z $ami ]]; then
if [[ $server_type == "full_edx_installation" ]]; then if [[ $server_type == "full_edx_installation" ]]; then
ami="ami-f551419c" ami="ami-97dbc3fe"
elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then elif [[ $server_type == "ubuntu_12.04" || $server_type == "full_edx_installation_from_scratch" ]]; then
ami="ami-59a4a230" ami="ami-59a4a230"
fi fi
fi fi
if [[ -z $instance_type ]]; then if [[ -z $instance_type ]]; then
instance_type="m1.medium" instance_type="m3.medium"
fi fi
deploy_host="${dns_name}.${dns_zone}" deploy_host="${dns_name}.${dns_zone}"
...@@ -80,13 +80,15 @@ ssh-keygen -f "/var/lib/jenkins/.ssh/known_hosts" -R "$deploy_host" ...@@ -80,13 +80,15 @@ ssh-keygen -f "/var/lib/jenkins/.ssh/known_hosts" -R "$deploy_host"
cd playbooks/edx-east cd playbooks/edx-east
cat << EOF > $extra_vars cat << EOF > $extra_vars_file
--- ---
ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem ansible_ssh_private_key_file: /var/lib/jenkins/${keypair}.pem
EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host} EDXAPP_PREVIEW_LMS_BASE: preview.${deploy_host}
EDXAPP_LMS_BASE: ${deploy_host} EDXAPP_LMS_BASE: ${deploy_host}
EDXAPP_CMS_BASE: studio.${deploy_host} EDXAPP_CMS_BASE: studio.${deploy_host}
EDXAPP_SITE_NAME: ${deploy_host} EDXAPP_SITE_NAME: ${deploy_host}
CERTS_DOWNLOAD_URL: "http://${deploy_host}:18090"
CERTS_VERIFY_URL: "http://${deploy_host}:18090"
edx_platform_version: $edxapp_version edx_platform_version: $edxapp_version
forum_version: $forum_version forum_version: $forum_version
xqueue_version: $xqueue_version xqueue_version: $xqueue_version
...@@ -96,11 +98,14 @@ ease_version: $ease_version ...@@ -96,11 +98,14 @@ ease_version: $ease_version
certs_version: $certs_version certs_version: $certs_version
discern_version: $discern_version discern_version: $discern_version
EDXAPP_STATIC_URL_BASE: $static_url_base EDXAPP_STATIC_URL_BASE: $static_url_base
# User provided extra vars
$extra_vars
EOF EOF
if [[ $basic_auth == "true" ]]; then if [[ $basic_auth == "true" ]]; then
# vars specific to provisioning added to $extra-vars # vars specific to provisioning added to $extra-vars
cat << EOF_AUTH >> $extra_vars cat << EOF_AUTH >> $extra_vars_file
NGINX_HTPASSWD_USER: $auth_user NGINX_HTPASSWD_USER: $auth_user
NGINX_HTPASSWD_PASS: $auth_pass NGINX_HTPASSWD_PASS: $auth_pass
EOF_AUTH EOF_AUTH
...@@ -109,15 +114,15 @@ fi ...@@ -109,15 +114,15 @@ fi
if [[ $recreate == "true" ]]; then if [[ $recreate == "true" ]]; then
# vars specific to provisioning added to $extra-vars # vars specific to provisioning added to $extra-vars
cat << EOF >> $extra_vars cat << EOF >> $extra_vars_file
dns_name: $dns_name dns_name: $dns_name
keypair: $keypair keypair: $keypair
instance_type: $instance_type instance_type: $instance_type
security_group: $security_group security_group: $security_group
ami: $ami ami: $ami
region: $region region: $region
zone: $zone zone: $zone
instance_tags: instance_tags:
environment: $environment environment: $environment
github_username: $github_username github_username: $github_username
Name: $name_tag Name: $name_tag
...@@ -137,14 +142,14 @@ elb: $elb ...@@ -137,14 +142,14 @@ elb: $elb
EOF EOF
# run the tasks to launch an ec2 instance from AMI # run the tasks to launch an ec2 instance from AMI
cat $extra_vars cat $extra_vars_file
ansible-playbook edx_provision.yml -i inventory.ini -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu -v ansible-playbook edx_provision.yml -i inventory.ini -e@${extra_vars_file} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu -v
if [[ $server_type == "full_edx_installation" ]]; then if [[ $server_type == "full_edx_installation" ]]; then
# additional tasks that need to be run if the # additional tasks that need to be run if the
# entire edx stack is brought up from an AMI # entire edx stack is brought up from an AMI
ansible-playbook rabbitmq.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu ansible-playbook rabbitmq.yml -i "${deploy_host}," -e@${extra_vars_file} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
ansible-playbook restart_supervisor.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu ansible-playbook restart_supervisor.yml -i "${deploy_host}," -e@${extra_vars_file} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
fi fi
fi fi
...@@ -157,21 +162,21 @@ done ...@@ -157,21 +162,21 @@ done
# If reconfigure was selected or if starting from an ubuntu 12.04 AMI # If reconfigure was selected or if starting from an ubuntu 12.04 AMI
# run non-deploy tasks for all roles # run non-deploy tasks for all roles
if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scratch" ]]; then if [[ $reconfigure == "true" || $server_type == "full_edx_installation_from_scratch" ]]; then
cat $extra_vars cat $extra_vars_file
ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu --skip-tags deploy ansible-playbook edx_continuous_integration.yml -i "${deploy_host}," -e@${extra_vars_file} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
fi fi
if [[ $server_type == "full_edx_installation" || $server_type == "full_edx_installation_from_scratch" ]]; then if [[ $server_type == "full_edx_installation" ]]; then
# Run deploy tasks for the roles selected # Run deploy tasks for the roles selected
for i in $roles; do for i in $roles; do
if [[ ${deploy[$i]} == "true" ]]; then if [[ ${deploy[$i]} == "true" ]]; then
cat $extra_vars cat $extra_vars_file
ansible-playbook ${i}.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu --tags deploy ansible-playbook ${i}.yml -i "${deploy_host}," -e@${extra_vars_file} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu --tags deploy -v
fi fi
done done
fi fi
# deploy the edx_ansible role # deploy the edx_ansible role
ansible-playbook edx_ansible.yml -i "${deploy_host}," -e@${extra_vars} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu ansible-playbook edx_ansible.yml -i "${deploy_host}," -e@${extra_vars_file} -e@${WORKSPACE}/configuration-secure/ansible/vars/developer-sandbox.yml --user ubuntu
rm -f "$extra_vars" rm -f "$extra_vars_file"
#!/bin/bash -x
# This script is meant to be run from jenkins and expects the
# following variables to be set:
# - BUILD_ID - set by jenkins, Unique ID of build
# - BUILD_NUMBER - set by jenkins, Build number
# - refs - repo revisions to pass to abbey. This is provided in YAML syntax,
# and we put the contents in a file that abbey reads. Refs are
# different from 'vars' in that each ref is set as a tag on the
# output AMI.
# - vars - other vars to pass to abbey. This is provided in YAML syntax,
# and we put the contents in a file that abbey reads.
# - deployment - edx, edge, etc
# - environment - stage, prod, etc
# - play - forum, edxapp, xqueue, etc
# - base_ami - Optional AMI to use as base AMI for abbey instance
# - configuration - the version of the configuration repo to use
# - configuration_secure - the version of the secure repo to use
# - jenkins_admin_ec2_key - location of the ec2 key to pass to abbey
# - jenkins_admin_configuration_secure_repo - the git repo to use for secure vars
# - use_blessed - whether or not to use blessed AMIs
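For reference, a minimal sketch of what the refs and vars parameters might contain; the keys and values below are invented for illustration, and the script simply writes whatever it is given verbatim into the two YAML files handed to abbey.py:

# refs (written to /var/tmp/$BUILD_ID-refs.yml; each entry becomes a refs:<repo> tag on the output AMI)
edx_platform: release
configuration: master

# vars (written to /var/tmp/$BUILD_ID-extra-vars.yml)
EDXAPP_LMS_NGINX_PORT: 80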
if [[ -z "$BUILD_ID" ]]; then
echo "BUILD_ID not specified."
exit -1
fi
if [[ -z "$BUILD_NUMBER" ]]; then
echo "BUILD_NUMBER not specified."
exit -1
fi
if [[ -z "$refs" ]]; then
echo "refs not specified."
exit -1
fi
if [[ -z "$deployment" ]]; then
echo "deployment not specified."
exit -1
fi
if [[ -z "$environment" ]]; then
echo "environment not specified."
exit -1
fi
if [[ -z "$play" ]]; then
echo "play not specified."
exit -1
fi
if [[ -z "$jenkins_admin_ec2_key" ]]; then
echo "jenkins_admin_ec2_key not specified."
exit -1
fi
if [[ -z "$jenkins_admin_configuration_secure_repo" ]]; then
echo "jenkins_admin_configuration_secure_repo not specified."
exit -1
fi
export PYTHONUNBUFFERED=1
if [[ -z $configuration ]]; then
cd configuration
configuration=`git rev-parse HEAD`
cd ..
fi
if [[ -z $configuration_secure ]]; then
cd configuration-secure
configuration_secure=`git rev-parse HEAD`
cd ..
fi
base_params=""
if [[ -n "$base_ami" ]]; then
base_params="-b $base_ami"
fi
blessed_params=""
if [[ "$use_blessed" == "true" ]]; then
blessed_params="--blessed"
fi
cd configuration
pip install -r requirements.txt
cd util/vpc-tools/
echo "$refs" > /var/tmp/$BUILD_ID-refs.yml
cat /var/tmp/$BUILD_ID-refs.yml
echo "$vars" > /var/tmp/$BUILD_ID-extra-vars.yml
cat /var/tmp/$BUILD_ID-extra-vars.yml
python -u abbey.py -p $play -t c1.medium -d $deployment -e $environment -i /edx/var/jenkins/.ssh/id_rsa $base_params $blessed_params --vars /var/tmp/$BUILD_ID-extra-vars.yml --refs /var/tmp/$BUILD_ID-refs.yml -c $BUILD_NUMBER --configuration-version $configuration --configuration-secure-version $configuration_secure -k $jenkins_admin_ec2_key --configuration-secure-repo $jenkins_admin_configuration_secure_repo
...@@ -71,9 +71,6 @@ def parse_args(): ...@@ -71,9 +71,6 @@ def parse_args():
help="path to extra var file", required=False) help="path to extra var file", required=False)
parser.add_argument('--refs', metavar="GIT_REFS_FILE", parser.add_argument('--refs', metavar="GIT_REFS_FILE",
help="path to a var file with app git refs", required=False) help="path to a var file with app git refs", required=False)
parser.add_argument('-a', '--application', required=False,
help="Application for subnet, defaults to admin",
default="admin")
parser.add_argument('--configuration-version', required=False, parser.add_argument('--configuration-version', required=False,
help="configuration repo branch(no hashes)", help="configuration repo branch(no hashes)",
default="master") default="master")
...@@ -85,9 +82,6 @@ def parse_args(): ...@@ -85,9 +82,6 @@ def parse_args():
help="repo to use for the secure files") help="repo to use for the secure files")
parser.add_argument('-c', '--cache-id', required=True, parser.add_argument('-c', '--cache-id', required=True,
help="unique id to use as part of cache prefix") help="unique id to use as part of cache prefix")
parser.add_argument('-b', '--base-ami', required=False,
help="ami to use as a base ami",
default="ami-0568456c")
parser.add_argument('-i', '--identity', required=False, parser.add_argument('-i', '--identity', required=False,
help="path to identity file for pulling " help="path to identity file for pulling "
"down configuration-secure", "down configuration-secure",
...@@ -108,6 +102,22 @@ def parse_args(): ...@@ -108,6 +102,22 @@ def parse_args():
default=5, default=5,
help="How long to delay message display from sqs " help="How long to delay message display from sqs "
"to ensure ordering") "to ensure ordering")
parser.add_argument("--hipchat-room-id", required=False,
default=None,
help="The API ID of the Hipchat room to post"
"status messages to")
parser.add_argument("--hipchat-api-token", required=False,
default=None,
help="The API token for Hipchat integration")
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--base-ami', required=False,
help="ami to use as a base ami",
default="ami-0568456c")
group.add_argument('--blessed', action='store_true',
help="Look up blessed ami for env-dep-play.",
default=False)
return parser.parse_args()
...@@ -126,6 +136,21 @@ def get_instance_sec_group(vpc_id):
return grp_details[0].id
def get_blessed_ami():
images = ec2.get_all_images(
filters={
'tag:environment': args.environment,
'tag:deployment': args.deployment,
'tag:play': args.play,
'tag:blessed': True
}
)
if len(images) != 1:
raise Exception("ERROR: Expected only one blessed ami, got {}\n".format(
len(images)))
return images[0].id
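get_blessed_ami expects exactly one image carrying these tags per environment/deployment/play; a quick way to see what would match, sketched with the standard AWS CLI rather than boto (the tag values are placeholders, and the stored string for the blessed tag may differ):
aws ec2 describe-images --owners self \
  --filters Name=tag:environment,Values=stage Name=tag:deployment,Values=edx \
            Name=tag:play,Values=edxapp Name=tag:blessed,Values=true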
def create_instance_args():
"""
...@@ -292,7 +317,7 @@ rm -rf $base_dir
'security_group_ids': [security_group_id],
'subnet_id': subnet_id,
'key_name': args.keypair,
'image_id': args.base_ami, 'image_id': base_ami,
'instance_type': args.instance_type,
'instance_profile_name': args.role_name,
'user_data': user_data,
...@@ -448,7 +473,7 @@ def create_ami(instance_id, name, description):
img.add_tag("cache_id", args.cache_id)
time.sleep(AWS_API_WAIT_TIME)
for repo, ref in git_refs.items():
key = "vars:{}".format(repo) key = "refs:{}".format(repo)
img.add_tag(key, ref)
time.sleep(AWS_API_WAIT_TIME)
break
...@@ -466,7 +491,6 @@ def create_ami(instance_id, name, description):
return image_id
def launch_and_configure(ec2_args):
"""
Creates an sqs queue, launches an ec2 instance,
...@@ -556,6 +580,17 @@ def launch_and_configure(ec2_args):
return run_summary, ami
def send_hipchat_message(message):
#If hipchat is configured send the details to the specified room
if args.hipchat_api_token and args.hipchat_room_id:
import hipchat
try:
hipchat = hipchat.HipChat(token=args.hipchat_api_token)
hipchat.message_room(args.hipchat_room_id,'AbbeyNormal',
message)
except Exception as e:
print("Hipchat messaging resulted in an error: %s." % e)
if __name__ == '__main__':
args = parse_args()
...@@ -597,6 +632,11 @@ if __name__ == '__main__':
print 'You must be able to connect to sqs and ec2 to use this script'
sys.exit(1)
if args.blessed:
base_ami = get_blessed_ami()
else:
base_ami = args.base_ami
try:
sqs_queue = None
instance_id = None
...@@ -620,6 +660,23 @@ if __name__ == '__main__':
print "{:<30} {:0>2.0f}:{:0>5.2f}".format(
run[0], run[1] / 60, run[1] % 60)
print "AMI: {}".format(ami)
message = 'Finished baking AMI {image_id} for {environment} ' \
'{deployment} {play}.'.format(
image_id=ami,
environment=args.environment,
deployment=args.deployment,
play=args.play)
send_hipchat_message(message)
except Exception as e:
message = 'An error occurred building AMI for {environment} ' \
'{deployment} {play}. The Exception was {exception}'.format(
environment=args.environment,
deployment=args.deployment,
play=args.play,
exception=repr(e))
send_hipchat_message(message)
finally:
print
if not args.no_cleanup and not args.noop:
...
boto
docopt
\ No newline at end of file
python-simple-hipchat
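The new python-simple-hipchat entry backs the send_hipchat_message helper above; a minimal standalone sketch of the same call pattern the diff uses, with a placeholder token, room id, and AMI id:
import hipchat
hc = hipchat.HipChat(token="HIPCHAT_API_TOKEN")  # placeholder token
hc.message_room("123456", "AbbeyNormal", "Finished baking AMI ami-00000000")  # placeholder room id and message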
Vagrant.require_version ">= 1.5.3"
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
...@@ -26,7 +28,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true config.vm.synced_folder "#{edx_platform_mount_dir}", "/edx/app/edxapp/edx-platform", :create => true, nfs: true
config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true config.vm.synced_folder "#{forum_mount_dir}", "/edx/app/forum/cs_comments_service", :create => true, nfs: true
config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true config.vm.synced_folder "#{ora_mount_dir}", "/edx/app/ora/ora", :create => true, nfs: true
...@@ -45,9 +49,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ...@@ -45,9 +49,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end end
# Make LC_ALL default to en_US.UTF-8 instead of en_US.
# See: https://github.com/mitchellh/vagrant/issues/1188
config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale'
config.vm.provision :ansible do |ansible|
ansible.playbook = "../../../playbooks/vagrant-devstack.yml"
ansible.inventory_path = "../../../playbooks/vagrant/inventory.ini" ansible.verbose = "vvvv"
ansible.verbose = "extra"
end
end
Vagrant.require_version ">= 1.5.3"
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
...@@ -6,7 +8,8 @@ CPU_COUNT = 2
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "precise64"
config.vm.box_url = "http://files.vagrantup.com/precise64.box"
config.ssh.insert_key = true
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.provider :virtualbox do |vb|
...@@ -20,10 +23,16 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
# Make LC_ALL default to en_US.UTF-8 instead of en_US.
# See: https://github.com/mitchellh/vagrant/issues/1188
config.vm.provision "shell", inline: 'echo \'LC_ALL="en_US.UTF-8"\' > /etc/default/locale'
config.vm.provision :ansible do |ansible|
# point Vagrant at the location of your playbook you want to run
ansible.playbook = "../../../playbooks/vagrant-fullstack.yml"
ansible.inventory_path = "../../../playbooks/vagrant/inventory.ini" # set extra-vars here instead of in the vagrant play so that
ansible.verbose = "extra" # they are written out to /edx/etc/server-vars.yml which can
# be used later when running ansible locally
ansible.verbose = "vvvv"
end
end
Vagrant.require_version ">= 1.5.3"
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
...@@ -18,7 +20,7 @@ cd /edx/app/edx_ansible/edx_ansible/playbooks
# this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration release
ansible-playbook -i localhost, -c local vagrant-devstack.yml --tags=deploy -e configuration_version=release ansible-playbook -i localhost, -c local vagrant-devstack.yml -e configuration_version=release
SCRIPT
edx_platform_mount_dir = "edx-platform"
...@@ -36,13 +38,16 @@ end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX devstack VM from an official release
config.vm.box = "himbasha-devstack" config.vm.box = "injera-devstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140325-himbasha-devstack.box" config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-devstack.box"
config.vm.network :private_network, ip: "192.168.33.10"
config.vm.network :forwarded_port, guest: 8000, host: 8000
config.vm.network :forwarded_port, guest: 8001, host: 8001
config.vm.network :forwarded_port, guest: 4567, host: 4567
config.vm.network :forwarded_port, guest: 8765, host: 8765
config.ssh.insert_key = true
# Enable X11 forwarding so we can interact with GUI applications
if ENV['VAGRANT_X11']
...@@ -62,6 +67,10 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
# Use vagrant-vbguest plugin to make sure Guest Additions are in sync
config.vbguest.auto_reboot = true
config.vbguest.auto_update = true
# Assume that the base box has the edx_ansible role installed
# We can then tell the Vagrant instance to update itself.
config.vm.provision "shell", inline: $script
...
Vagrant.require_version ">= 1.5.3"
VAGRANTFILE_API_VERSION = "2"
MEMORY = 2048
...@@ -6,8 +8,10 @@ CPU_COUNT = 2
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Creates an edX fullstack VM from an official release
config.vm.box = "himbasha-fullstack" config.vm.box = "injera-fullstack"
config.vm.box_url = "http://files.edx.org/vagrant-images/20140325-himbasha-fullstack.box" config.vm.box_url = "http://files.edx.org/vagrant-images/20140418-injera-fullstack.box"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = true
config.vm.network :private_network, ip: "192.168.33.10"
config.hostsupdater.aliases = ["preview.localhost"]
...
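These Vagrantfiles lean on two host-side plugins: vagrant-vbguest (behind the new config.vbguest settings) and vagrant-hostsupdater (behind config.hostsupdater.aliases). A minimal sketch of installing them, assuming Vagrant >= 1.5.3 is already on the host:
vagrant plugin install vagrant-vbguest
vagrant plugin install vagrant-hostsupdater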