Commit 0bb8ac06 authored by Toby Lawrence, committed by GitHub

Merge branch 'master' into platform/update-forums-to-ruby23

parents b9cbf93e 2e8f1264
#FROM edxops/trusty-common:v3
FROM hacking/edxapp
MAINTAINER edxops
ADD . /edx/app/edx_ansible/edx_ansible
COPY docker/build/automated/ansible_overrides.yml /
WORKDIR /edx/app/edx_ansible/edx_ansible/docker/plays
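# Provision the image by running the automated play against localhost over a
# local connection; -e@ layers in the override variables copied in above.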
RUN /edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook -vvvv automated.yml \
-i '127.0.0.1,' -c local \
-e@/ansible_overrides.yml
WORKDIR /edx/app
---
FLOCK_TLD: "edx"
AUTOMATED_USER: "ecom3"
AUTOMATED_RBASH_LINKS: ['/usr/bin/python']
AUTOMATED_SUDO_COMMANDS:
- command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms migrate --list --settings=aws"
sudo_user: "edxapp"
- command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms migrate --list --settings=aws"
sudo_user: "edxapp"
AUTOMATED_SUDOERS_TEMPLATE: "roles/automated/templates/99-sample.j2"
AUTOMATED_AUTHORIZED_KEYS: ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCx+OpJ6787GWnSo5FcwNPjiM7yqjXKi0FPkfpx8Dd3Oqts5PnJV/xokMP4vJTfXu6Zezh+/NvofgMlnxhnIwC3YIoGkLRhW5vKTZfohPjhyIRu0TyQOgmdocYk2o7xMQ1/fcrQh1sQMQqz79mv1ENKc47dVv7qfdBz803M5gschi4RjMYNMN97AA5rByz/AHJnrxQMWEndOZU+H2X9KRUn1TsWe8s99alILwFrNF1dZzF20r2zMErx48f4zfaczQnLOm+pJ1VrruPI8tQzS9X/kfy8GpBTbTX7X80SuM1Npuazr5sJAalXSZ2ccBVa1fKRwa6PmET21gdxOd2ZUsFYL5wZsPIF2f2ij5XwQxKco2lHH6QsvBzapY1BI5PZ/+mQzoaDO7w6WaaDvSDVxyuG/Sw0kOpA9uVEp3qTs8WT6CUYFmnBd+E8YnH6OwqbS9gfBkSNY9pwq1EpR+DCXVFuzWfYoSGQjxpTFbe7YsShB2Jyf/rZyA7NaS4lEkF8eABG6siEwckWvMOV5Z0lGGLTia1DCOZ3c6X09Te3xY4weYS1c0/Nx15C0rmYsMUeDYDonJWujBbvlOBNpx2opG2KPkSE9PAKWyS/mc4SrW0urJBxjAommVq9//dPTxo7IBmiCNWEcOuhXsQYp5tpDmj32Dh8nvNrkvOFYxb9SxuZgQ== automated@example.com']
SUDO_USER:
edxapp_venv_dir: "/usr"
\ No newline at end of file
@@ -64,3 +64,17 @@ RUN pip install awscli
ADD docker/build/go-agent/files/go-agent-start.sh /etc/service/go-agent/run
ADD docker/build/go-agent/files/go-agent-env-vars /etc/default/go-agent
RUN update-java-alternatives -s java-7-oracle
# !!!!NOTICE!!!! ---- Runner of this pipeline take heed!! You must replace go_github_key.pem with the REAL key material
# that can checkout private github repositories used as pipeline materials. The key material here is faked and is only
# used to pass CI!
# setup the github identity
ADD docker/build/go-agent/files/go_github_key.pem /var/go/.ssh/id_rsa
RUN chmod 600 /var/go/.ssh/id_rsa && \
chown go:go /var/go/.ssh/id_rsa
# setup the known_hosts
RUN touch /var/go/.ssh/known_hosts && \
chmod 600 /var/go/.ssh/known_hosts && \
chown go:go /var/go/.ssh/known_hosts && \
ssh-keyscan -t rsa,dsa github.com > /var/go/.ssh/known_hosts
-----BEGIN RSA PRIVATE KEY-----
This file is junk, replace with the real key when
building the container.
-----END RSA PRIVATE KEY-----
- name: Deploy automated
hosts: all
sudo: True
gather_facts: True
roles:
- common_vars
- docker
- automated
# This playbook will run the refresh_course_metadata management command. It is intended solely for usage with the
# Catalog/Course Discovery Service.
#
# Required variables for this playbook:
#
# - APPLICATION_PATH - the top-level path of the Django application; the application lives underneath
# this directory in a directory with the same name as APPLICATION_NAME.
# - APPLICATION_NAME - The name of the application that we are migrating.
# - APPLICATION_USER - the user meant to run the application
# - HIPCHAT_TOKEN - API token to send messages to hipchat
# - HIPCHAT_ROOM - ID or name of the room to send the notification
# - HIPCHAT_URL - URL of the hipchat API (defaults to v2 of the API)
#
# Example command line to run this playbook:
# ansible-playbook -vvvv -i "localhost," -c local \
# -e @overrides.yml \
# discovery_refresh_metadata.yml
#
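# A minimal overrides.yml for the invocation above might look like the
# following (all values are illustrative placeholders, not real paths,
# rooms, or tokens):
#
#   APPLICATION_PATH: "/edx/app"
#   APPLICATION_NAME: "discovery"
#   APPLICATION_USER: "discovery"
#   HIPCHAT_TOKEN: "replace-me"
#   HIPCHAT_ROOM: "engineering"
#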
- hosts: all
vars:
HIPCHAT_URL: https://api.hipchat.com/v2/
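# Sources the application's environment file, then invokes its manage.py wrapper.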
COMMAND_PREFIX: " . {{ APPLICATION_PATH }}/{{ APPLICATION_NAME }}_env; /edx/bin/python.{{ APPLICATION_NAME }} /edx/bin/manage.{{ APPLICATION_NAME }}"
gather_facts: False
tasks:
- name: Refresh metadata
shell: '{{ COMMAND_PREFIX }} refresh_course_metadata'
become_user: "{{ APPLICATION_USER }}"
- name: Send Hipchat notification
hipchat_2_0_0_1:
api: "{{ HIPCHAT_URL }}"
token: "{{ HIPCHAT_TOKEN }}"
room: "{{ HIPCHAT_ROOM }}"
msg: "Catalog metadata has been refreshed."
ignore_errors: yes
when: HIPCHAT_TOKEN is defined
@@ -30,7 +30,8 @@
ebs_volume_size: 8
ec2_timeout: 500
ec2_assign_public_ip: no
gather_facts: False
automation_prefix: "gocd automation run -- {{ ansible_date_time.iso8601 }} -- "
gather_facts: True
connection: local
tasks:
@@ -40,7 +41,7 @@
- name: Generate ec2 keypair to use for this instance
ec2_key:
name: "{{ unique_key_name.stdout }}"
name: "{{ automation_prefix }} {{ unique_key_name.stdout }}"
region: "{{ ec2_region }}"
register: ssh_key_register
@@ -52,9 +53,9 @@
- name: Launch EC2 instance
ec2:
instance_tags: {"Name" : "gocd automation run -- {{ unique_key_name.stdout }}"}
instance_tags: {"Name" : "{{ automation_prefix }} {{ unique_key_name.stdout }}"}
region: "{{ ec2_region }}"
key_name: "{{ unique_key_name.stdout }}"
key_name: "{{ automation_prefix }} {{ unique_key_name.stdout }}"
instance_type: "{{ ec2_instance_type }}"
image: "{{ base_ami_id }}"
wait: yes
......
- name: Deploy automated role
hosts: all
sudo: True
gather_facts: True
roles:
# - aws
- automated
- role: datadog
when: COMMON_ENABLE_DATADOG
- role: splunkforwarder
when: COMMON_ENABLE_SPLUNKFORWARDER
- role: newrelic
when: COMMON_ENABLE_NEWRELIC
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
#
# Vars for role automated
#
automated_role_name: automated
AUTOMATED_USER: "changeme"
automated_sudoers_template: "roles/automated/templates/99-automated.j2"
#
# OS packages
#
automated_debian_pkgs: []
automated_redhat_pkgs: []
---
#
# edX Configuration
#
# github: https://github.com/edx/configuration
# wiki: https://github.com/edx/configuration/wiki
# code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions
# license: https://github.com/edx/configuration/blob/master/LICENSE.TXT
#
# Tasks for role automated
#
# Overview:
#
# This role is included as a dependency by other roles which provide
# automated jobs. Automation occurs over ssh. The automator user is
# allowed to run explicitly listed commands via sudo.
#
# Dependencies:
#
# This role depends upon variables provided by an including role
# via the my_role/meta/main.yml file. Includes take the following forms:
#
# For example, the edxapp role might designate that ecom and analytics users
# are allowed to run specific management commands on edxapp instances.
#
# EDXAPP_AUTOMATED_USERS:
# ecom:
# sudo_commands:
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms migrate --list --settings=aws"
# sudo_user: "edxapp"
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms migrate --list --settings=aws"
# sudo_user: "edxapp"
# authorized_keys:
# - 'ssh-rsa <REDACTED> ecom+admin@example.com'
# - 'ssh-rsa <REDACTED> ecom+devel@example.com'
# analytics:
# sudo_commands:
# - command: "/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms help --settings=aws"
# sudo_user: "edxapp"
# authorized_keys: ['ssh-rsa <REDACTED> analytics@example.com']
#
# The play for the role enabling automation should include the role like so
#
# - role: automated
# AUTOMATED_USERS: "{{ EDXAPP_AUTOMATED_USERS | default({}) }}"
#
- name: Create automated user
user:
name: "{{ item.key }}"
state: present
shell: "/bin/bash"
createhome: yes
with_dict: "{{ AUTOMATED_USERS }}"
- name: Create sudoers file from template
template:
dest: "/etc/sudoers.d/99-{{ item.key }}"
src: "{{ automated_sudoers_template }}"
owner: "root"
group: "root"
mode: "0440"
validate: 'visudo -cf %s'
when: automated_sudoers_template
with_dict: "{{ AUTOMATED_USERS }}"
- name: Create .ssh directory
file:
path: "/home/{{ item.key }}/.ssh"
state: "directory"
mode: "0700"
owner: "{{ item.key }}"
group: "{{ item.key }}"
with_dict: "{{ AUTOMATED_USERS }}"
- name: Build authorized_keys file
template:
src: "home/automator/.ssh/authorized_keys.j2"
dest: "/home/{{ item.key }}/.ssh/authorized_keys"
mode: "0600"
owner: "{{ item.key }}"
group: "{{ item.key }}"
with_dict: "{{ AUTOMATED_USERS }}"
- name: Build known_hosts file
file:
path: "/home/{{ item.key }}/.ssh/known_hosts"
state: "touch"
mode: "0755"
owner: "{{ item.key }}"
group: "{{ item.key }}"
with_dict: "{{ AUTOMATED_USERS }}"
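# Grants each automated user the explicitly listed sudo commands. With the
# ecom example from the role documentation, this template renders roughly
# as (illustrative):
#   ecom ALL=(edxapp) SETENV:NOPASSWD:/edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms migrate --list --settings=aws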
{% for command in item.value.sudo_commands %}
{{ item.key }} ALL=({{ command.sudo_user }}) SETENV:NOPASSWD:{{ command.command }}
{% endfor %}
# {{ ansible_managed }}
{% for line in item.value.authorized_keys -%}
{{ line }}
{% endfor %}
\ No newline at end of file
@@ -158,3 +158,25 @@
state: restarted
become: True
when: config_templates.changed
- name: Add ntp alert script
template:
src: "log-ntp-alerts.sh.j2"
dest: "{{ COMMON_BIN_DIR }}/log-ntp-alerts.sh"
owner: root
group: root
mode: "0755"
- name: Set up a cron job to run the script
cron:
name: "log-ntp-alerts"
job: "{{ COMMON_BIN_DIR }}/log-ntp-alerts.sh >/dev/null 2>&1"
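# NB: with no minute/hour specified, Ansible's cron module defaults every
# time field to '*', so this check runs once a minute.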
- name: install logrotate configuration
template:
src: etc/logrotate.d/ntp.j2
dest: /etc/logrotate.d/ntp
tags:
- "install"
- "install:configuration"
- "logrotate"
{{ COMMON_LOG_DIR }}/ntp.log {
compress
dateext
dateformat -%Y%m%d-%s
missingok
daily
rotate 3
}
#!/bin/bash
log_directory={{ COMMON_LOG_DIR }}
# Column 5 of 'ntpq -c associations' is the reach flag (yes/no); collect all
# values so that any unreachable association triggers the alert below.
reach=$(ntpq -c associations | awk 'NR>2 {print $5}')
if [[ ${reach} == *"no"* ]]; then
echo $(date -u) $(hostname) "NTPD not synchronized - Please investigate" >> ${log_directory}/ntp.log
fi
limit=100 # limit in milliseconds
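# 'ntpq -nc peers' reports offsets in milliseconds; grab the offset column
# by character position (layout-dependent) and strip the sign for the
# integer comparison below.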
offsets=$(ntpq -nc peers | tail -n +3 | cut -c 62-66 | tr -d '-')
for offset in ${offsets}; do
if [ ${offset:-0} -ge ${limit:-100} ]; then
echo $(date -u) $(hostname) "An NTPD offset with value $offset is excessive - Please investigate" >> ${log_directory}/ntp.log
exit 1
fi
done
@@ -68,7 +68,6 @@ COMMON_CUSTOM_DHCLIENT_CONFIG: false
# uncomment and specify your domains.
# COMMON_DHCLIENT_DNS_SEARCH: ["ec2.internal","example.com"]
COMMON_MOTD_TEMPLATE: "motd.tail.j2"
COMMON_SSH_PASSWORD_AUTH: "no"
COMMON_SECURITY_UPDATES: no
......
@@ -91,6 +91,17 @@ DISCOVERY_MARKETING_API_URL: 'https://example.org/api/catalog/v2/'
DISCOVERY_MARKETING_URL_ROOT: 'https://example.org/'
DISCOVERY_PROGRAMS_API_URL: 'https://replace-me/api/v1/'
DISCOVERY_DATA_DIR: '{{ COMMON_DATA_DIR }}/{{ discovery_service_name }}'
DISCOVERY_MEDIA_ROOT: '{{ DISCOVERY_DATA_DIR }}/media'
DISCOVERY_MEDIA_URL: '/media/'
DISCOVERY_MEDIA_STORAGE_BACKEND:
DEFAULT_FILE_STORAGE: 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT: '{{ DISCOVERY_MEDIA_ROOT }}'
MEDIA_URL: '{{ DISCOVERY_MEDIA_URL }}'
DISCOVERY_STATICFILES_STORAGE: 'django.contrib.staticfiles.storage.StaticFilesStorage'
DISCOVERY_EXTRA_APPS: []
DISCOVERY_SERVICE_CONFIG:
@@ -132,6 +143,9 @@ DISCOVERY_SERVICE_CONFIG:
EXTRA_APPS: '{{ DISCOVERY_EXTRA_APPS }}'
MEDIA_STORAGE_BACKEND: '{{ DISCOVERY_MEDIA_STORAGE_BACKEND }}'
STATICFILES_STORAGE: '{{ DISCOVERY_STATICFILES_STORAGE }}'
DISCOVERY_REPOS:
- PROTOCOL: "{{ COMMON_GIT_PROTOCOL }}"
DOMAIN: "{{ COMMON_GIT_MIRROR }}"
@@ -198,6 +212,9 @@ discovery_debian_pkgs:
- libmysqlclient-dev
- libssl-dev
- libffi-dev # Needed to install the Python cryptography library for asymmetric JWT signing
- libmemcached-dev # Needed for pylibmc
- libmemcached-dev # Needed for memcache
- libxml2-dev
- libxslt-dev
- libjpeg-dev
discovery_redhat_pkgs: []
@@ -22,7 +22,29 @@
- install
- install:app-requirements
- name: Create update and show-repo-heads script
- name: Create update script
template:
dest: "{{ edx_ansible_app_dir}}/update"
src: "update.j2"
owner: "{{ edx_ansible_user }}"
group: "{{ edx_ansible_user }}"
mode: "0755"
when: devstack is not defined or not devstack
tags:
- install
- install:configuration
- name: Create symlinks for update script
file:
src: "{{ edx_ansible_app_dir }}/update"
dest: "{{ COMMON_BIN_DIR }}/update"
state: link
when: devstack is not defined or not devstack
tags:
- install
- install:configuration
- name: Create utility scripts
template:
dest: "{{ edx_ansible_app_dir}}/{{ item.dest }}"
src: "{{ item.src }}"
@@ -30,19 +52,17 @@
group: "{{ edx_ansible_user }}"
mode: "0755"
with_items:
- { src: 'update.j2', dest: 'update' }
- { src: 'show-repo-heads.j2', dest: 'show-repo-heads' }
tags:
- install
- install:configuration
- name: Create symlinks for update and show-repo-heads scripts
- name: Create symlinks for utility scripts
file:
src: "{{ edx_ansible_app_dir }}/{{ item }}"
dest: "{{ COMMON_BIN_DIR }}/{{ item }}"
state: link
with_items:
- update
- show-repo-heads
tags:
- install
......
*******************************************************************
* _ __ __ *
* _ _| |\ \/ / This system is for the use of authorized *
* / -_) _` | > < users only. Usage of this system may be *
* \___\__,_|/_/\_\ monitored and recorded by system personnel. *
* ___ _ __ __ *
* / _ \ _ __ ___ _ _ ___ __| |\ \/ / (R) *
* | |_| | '_ \ -_) ' \ / -_) _` | > < *
* \___/| .__/___|_|_| \___\__,_|/_/\_\ *
* |_| *
* *
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide the *
* evidence from such monitoring to law enforcement officials. *
* *
* Need help? https://open.edx.org/getting-help *
* *
*******************************************************************
@@ -8,7 +8,6 @@
disable_edx_services: true
mongo_enable_journal: false
EDXAPP_NO_PREREQ_INSTALL: 0
COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
COMMON_SSH_PASSWORD_AUTH: "yes"
EDXAPP_LMS_BASE: 127.0.0.1:8000
EDXAPP_OAUTH_ENFORCE_SECURE: false
......
@@ -8,7 +8,6 @@
disable_edx_services: true
mongo_enable_journal: false
EDXAPP_NO_PREREQ_INSTALL: 0
COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
COMMON_SSH_PASSWORD_AUTH: "yes"
EDXAPP_LMS_BASE: 127.0.0.1:8000
EDXAPP_OAUTH_ENFORCE_SECURE: false
......
@@ -6,7 +6,6 @@
devstack: true
disable_edx_services: true
mongo_enable_journal: false
COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
COMMON_SSH_PASSWORD_AUTH: "yes"
EDXAPP_LMS_BASE: 127.0.0.1:8000
EDXAPP_OAUTH_ENFORCE_SECURE: false
......
@@ -5,7 +5,6 @@
vars:
disable_edx_services: true
mongo_enable_journal: false
COMMON_MOTD_TEMPLATE: 'devstack_motd.tail.j2'
COMMON_SSH_PASSWORD_AUTH: "yes"
EDXAPP_LMS_BASE: 127.0.0.1:8000
EDXAPP_OAUTH_ENFORCE_SECURE: false
......
@@ -2,10 +2,11 @@
yml_files:=$(shell find . -name "*.yml")
json_files:=$(shell find . -name "*.json")
jinja_files:=$(shell find . -name "*.j2")
images = $(shell git diff --name-only $(TRAVIS_COMMIT_RANGE) | python util/parsefiles.py)
test: test.syntax test.edx_east_roles
test.syntax: test.syntax.yml test.syntax.json test.syntax.jinja
test.syntax: test.syntax.yml test.syntax.json test.syntax.jinja test.syntax.dockerfiles
test.syntax.yml: $(patsubst %,test.syntax.yml/%,$(yml_files))
@@ -22,5 +23,8 @@ test.syntax.jinja: $(patsubst %,test.syntax.jinja/%,$(jinja_files))
test.syntax.jinja/%:
cd playbooks && python ../tests/jinja_check.py ../$*
test.syntax.dockerfiles:
python util/check_dockerfile_coverage.py "$(images)"
test.edx_east_roles:
tests/test_edx_east_roles.sh
# How to add Dockerfiles to configuration file
The script that distributes build jobs across Travis CI shards relies on the parsefiles_config YAML file. This file maps each application that has a Dockerfile to a weight/rank, which approximates the running time of a Travis Docker build for that application's Dockerfile. When adding a new Dockerfile to the configuration repository, this configuration file must be updated manually to ensure that the new Dockerfile is also built.
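For reference, each entry in parsefiles_config.yml is a one-line mapping from an application name to its rank, e.g.:

```yaml
weights:
  - ecommerce: 6
  - rabbitmq: 2
```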
To modify the configuration file:
1. Edit the docker.mk file:
1. Modify docker_test to include date commands.
Replace

```
$(docker_test)%: .build/%/Dockerfile.test
	docker build -t $*:test -f $< .
```

with

```
$(docker_test)%: .build/%/Dockerfile.test
	date
	docker build -t $*:test -f $< .
	date
```
2. Replace the command that runs the dependency analyzer with a line to build your Dockerfiles.
For example, if adding Dockerfiles for ecommerce and rabbitmq, replace
`images:=$(shell git diff --name-only $(TRAVIS_COMMIT_RANGE) | python util/parsefiles.py)`
with
`images:= ecommerce rabbitmq`
3. Replace the command that runs the balancing script with a line to build all images.
Replace
`docker.test.shard: $(foreach image,$(shell echo $(images) | python util/balancecontainers.py $(SHARDS) | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))`
with
`docker.test.shard: $(foreach image,$(shell echo $(images) | tr ' ' '\n' | awk 'NR%$(SHARDS)==$(SHARD)'),$(docker_test)$(image))`
2. Commit and push to your branch.
3. Wait for Travis CI to run the builds.
4. Upon completion, examine the Travis CI logs to find where your Dockerfile was built (search for "docker build -t"). Find the amount of time the build took by comparing the output of the date command before the build command starts and the date command after the build command completes.
5. Round the build time to a whole number, and add it to the configuration/util/parsefiles_config.yml file.
6. Undo steps 1.1-1.3 to restore the docker.mk file to its original state.
7. Commit and push to your branch. Your Dockerfile should now be built as a part of the Travis CI tests.
@@ -5,64 +5,62 @@ import itertools
import sys
import argparse
import logging
import docker_images
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def pack_containers(containers, num_shards):
def pack_shards(used_images, num_shards):
"""
Determines an approximation of the optimal way to pack the containers into a given number of shards so as to
Determines an approximation of the optimal way to pack the images into a given number of shards so as to
equalize the execution time amongst the shards.
Input:
containers: A set of Docker containers
num_shards: A number of shards amongst which to distribute the Docker containers
used_images: A set of Docker images and their ranks
num_shards: A number of shards amongst which to distribute the Docker images
"""
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all containers in config file to a list of tuples (<container>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted containers and input containers
used_containers = [x for x in weights_list if x[0] in containers]
# sorts used containers in descending order on the weight
sorted_containers = sorted(used_containers, key = lambda x: x[1], reverse=True)
sorted_images = sorted(used_images, key = lambda x: x[1], reverse=True)
shards = []
# for the number of shards
for i in range(0, num_shards):
# initialize initial dict
shards.append({"containers": [], "sum": 0})
shards.append({"images": [], "sum": 0})
# for each container
for container in sorted_containers:
for image in sorted_images:
# find the shard with the current minimum execution time
shard = min(shards, key = lambda x: x["sum"])
# add the current container to the shard
shard["containers"].append(container)
shard["images"].append(image)
# add the current container's weight to the shard's total expected execution time
shard["sum"] += container[1]
shard["sum"] += image[1]
return shards
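# Example: with used_images [("edxapp", 20), ("ecommerce", 6), ("rabbitmq", 2)]
# and num_shards = 2, "edxapp" lands alone in one shard (sum 20) while
# "ecommerce" and "rabbitmq" share the other (sum 8), since each image is
# assigned to whichever shard is currently lightest.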
def read_input():
"""
Reads input from standard input.
"""
images = []
# get images from standard in
for line in sys.stdin:
line = line.strip()
line = line.strip("[]")
items = line.split()
images.extend(items)
return images
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '
@@ -79,24 +77,20 @@ if __name__ == '__main__':
# configure logging
logging.basicConfig()
containers = []
# get input from standard in
images = read_input()
# get containers from standard in
for line in sys.stdin:
line = line.strip()
line = line.strip("[]")
items = line.split()
containers.extend(items)
# get images that are used and described in configuration file
used_images = docker_images.get_used_images(images)
# find optimal packing of the containers amongst shards
shards = pack_containers(containers, args.num_shards)
# find optimal packing of the images amongst shards
shards = pack_shards(used_images, args.num_shards)
# print space separated list of containers for each shard
for shard in shards:
middle = " "
conts = [x[0] for x in shard["containers"]]
conts = [x[0] for x in shard["images"]]
line = middle.join(conts)
print line
import yaml
import os
import pathlib2
import itertools
import argparse
import logging
import sys
import docker_images
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def check_coverage(images, used_images):
"""
Checks whether all images are described in parsefiles_config.yml and raises an error otherwise, directing the reader to documentation on resolving the error.
Input:
images: the set of images scheduled to be built
used_images: the subset of images with their ranks that are in the parsefiles_config.yml file
"""
# determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfiles
# available to be built is non-empty
uncovered = set(images) - set([x[0] for x in used_images])
# exit with error code if uncovered Dockerfiles exist
if uncovered:
LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md"))
sys.exit(1)
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of images as input, checks that each input image is described correctly in parsefiles_config.yml')
parser.add_argument('images', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
# read input
images = args.images.split()
# get images that are used and described in configuration file
used_images = docker_images.get_used_images(images)
check_coverage(images, used_images)
import yaml
import os
import pathlib2
import itertools
import logging
import sys
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def get_used_images(images):
"""
Returns the images and their ranks that are scheduled to be built and that exist in the configuration file.
Input:
images: A set of Docker images
"""
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all images in config file to a list of tuples (<image>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted images and input images
return [x for x in weights_list if x[0] in images]
#!/usr/bin/env bash
# Stop if any command fails
set -e
function usage
{
cat << EOM
--- install_stack.sh ---
Usage: $ bash install_stack.sh stack release [-b vagrant_mount_base] [-v] [-h]
Installs the Open edX devstack or fullstack. If you encounter any trouble or have
questions regarding installation of devstack/fullstack, head over to
https://open.edx.org/getting-help.
This script captures a log of all output produced during runtime, and saves it in a .log
file within the current directory. If you encounter an error during installation, this is
an invaluable tool for edX developers to help discover what went wrong, so please share it
if you reach out for support!
NOTE: This script assumes you have never installed devstack before. Installing multiple
versions of devstack can often cause conflicts that this script is not prepared to handle.
stack
Either 'fullstack' or 'devstack' (no quotes). Full stack mimics a production
environment, whereas devstack is useful if you plan on modifying the Open edX
code. You must specify this. If you choose fullstack, 'release' should be the
latest open-release. If you choose devstack, 'release' should be the latest
open-release or master.
release
The release of Open edX you wish to run. Install the given git ref 'release'.
You must specify this. Named releases are called "open-release/eucalyptus",
"open-release/eucalyptus.2", and so on. We recommend the latest stable open
release for general members of the open source community. Named releases can
be found at: https://openedx.atlassian.net/wiki/display/DOC/Open+edX+Releases.
If you plan on modifying the code, we recommend the "master" branch.
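For example, to install devstack at a named release:
$ bash install_stack.sh devstack open-release/eucalyptus.2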
-b vagrant_mount_base
Customize the location of the source code that gets cloned during the
devstack provisioning. The default is the current directory. This option is
not valid if installing fullstack.
-v
Verbose output from ansible playbooks.
-h
Show this help and exit.
---------------------------
EOM
}
ERROR='\033[0;31m' # Red
WARN='\033[1;33m' # Yellow
SUCCESS='\033[0;32m' # Green
NC='\033[0m' # No Color
# Output verbosity
verbosity=0
# OPENEDX_RELEASE
release=""
# Vagrant source code provision location
vagrant_mount_location=""
if [[ $# -lt 2 || ${1:0:1} == '-' || ${2:0:1} == '-' ]]; then
usage
exit 1
fi
stack=$1
shift
release=$1
shift
while getopts "b:vh" opt; do
case "$opt" in
b)
if [[ $stack == "devstack" ]]; then
vagrant_mount_location=$OPTARG
else
echo -e "${ERROR}Fullstack has no mount location. The -b option is not valid for fullstack!${NC}"
exit 1
fi
;;
v)
verbosity=1
;;
h)
usage
exit
;;
*)
usage
exit 1
;;
esac
done
logfile=install-$(date +%Y%m%d-%H%M%S).log
exec > >(tee "$logfile") 2>&1
echo "Capturing output to $logfile."
export OPENEDX_RELEASE=$release
# Check if mount location was changed
if [[ $vagrant_mount_location != "" ]]; then
echo "Changing Vagrant provision location to $vagrant_mount_location..."
export VAGRANT_MOUNT_BASE="$vagrant_mount_location"
fi
if [[ -d "$stack" ]]; then
echo -e "${ERROR}A $stack directory already exists here. If you already tried installing $stack, make sure to vagrant destroy the $stack machine and rm -rf the $stack directory before trying to reinstall. If you would like to install a separate $stack, change to a different directory and try running the script again.${NC}"
exit 1
fi
if [[ $stack == "devstack" ]]; then # Install devstack
# Warn if release chosen is not master or open-release (Eucalyptus and up)
if [[ $release != "master" && $release != *"open-release"* ]]; then
echo -e "${WARN}The release you entered is not 'master' or an open-release. Please be aware that a branch other than master or a release other than the latest open-release could cause errors when installing $stack.${NC}"
fi
wiki_link="https://openedx.atlassian.net/wiki/display/OpenOPS/Running+Devstack"
mkdir devstack
cd devstack
curl -L https://raw.githubusercontent.com/edx/configuration/${OPENEDX_RELEASE}/vagrant/release/devstack/Vagrantfile > Vagrantfile
vagrant plugin install vagrant-vbguest
elif [[ $stack == "fullstack" ]]; then # Install fullstack
# Warn if release chosen is not open-release (Eucalyptus and up)
if [[ $release != *"open-release"* ]]; then
echo -e "${WARN}The release you entered is not an open-release. Please be aware that a branch other than the latest open-release could cause errors when installing $stack.${NC}"
fi
wiki_link="https://openedx.atlassian.net/wiki/display/OpenOPS/Running+Fullstack"
mkdir fullstack
cd fullstack
curl -L https://raw.githubusercontent.com/edx/configuration/${OPENEDX_RELEASE}/vagrant/release/fullstack/Vagrantfile > Vagrantfile
vagrant plugin install vagrant-hostsupdater
else # Throw error
echo -e "${ERROR}Unrecognized stack name, must be either devstack or fullstack!${NC}"
exit 1
fi
# Check for verbosity level
if [[ $verbosity == 1 ]]; then
sed -i '' 's/-e xqueue_version=\$OPENEDX_RELEASE/-e xqueue_version=\$OPENEDX_RELEASE \\\'$'\n -vvv/' Vagrantfile
fi
vagrant up --provider virtualbox
# Check if preview mode was chosen
if grep -q '192.168.33.10 preview.localhost' /etc/hosts; then
echo "Studio preview already enabled, skipping..."
else
echo "Enabling use of preview within Studio..."
sudo bash -c "echo '192.168.33.10 preview.localhost' >> /etc/hosts"
fi
echo -e "${SUCCESS}Finished installing! You may now 'cd $stack' and login using 'vagrant ssh'"
echo -e "Refer to the edX wiki ($wiki_link) for more information on using $stack.${NC}"
@@ -81,8 +81,8 @@ def _map_roles_to_roles(graph, dirs, git_dir, key, type_1, type_2):
# add node for type_2, typically dependent role
node_2 = Node(name, type_2)
# add edge, typically role - dependent role
graph.add_edge(node_1, node_2)
# add edge, typically dependent role - role
graph.add_edge(node_2, node_1)
def _map_plays_to_roles(graph, dirs, git_dir, key, type_1, type_2):
"""
@@ -130,8 +130,8 @@ def _map_plays_to_roles(graph, dirs, git_dir, key, type_1, type_2):
# add node for type_2, typically for role
node_2 = Node(name, type_2)
# add edge, typically playbook - role it uses
graph.add_edge(node_1, node_2)
# add edge, typically role - playbook that uses it
graph.add_edge(node_2, node_1)
def _open_yaml_file(file_str):
"""
@@ -178,7 +178,20 @@ def change_set_to_roles(files, git_dir, roles_dirs, playbooks_dirs, graph):
# if the change set file is in the set of role files
if file_path in candidate_files:
# get name of role and add it to set of roles of the change set
items.add(_get_resource_name(file_path, "roles"))
items.add(_get_role_name_from_file(file_path))
return items
def get_plays(files, git_dir, playbooks_dirs):
"""
Determines which files in the change set are aws playbooks.
Input:
files: A list of files modified by a commit range.
git_dir: A path to the top-most directory in the local git repository tool is to be run in.
playbooks_dirs: A list of relative paths to directories in which Ansible playbooks reside.
"""
plays = set()
# for all directories containing playbooks
for play_dir in playbooks_dirs:
@@ -192,33 +205,35 @@
for f in files:
file_path = pathlib2.Path(git_dir, f)
# if the change set file is in teh set of playbook files
# if the change set file is in the set of playbook files
if file_path in candidate_files:
plays.add(_get_playbook_name_from_file(file_path))
# gets first level of children of playbook in graph, which represents
# all roles the playbook uses
descendants = nx.all_neighbors(graph, (file_path.stem, "aws_playbook"))
return plays
def _get_playbook_name_from_file(path):
"""
Gets name of playbook from the filepath, which is the last part of the filepath.
# adds all the roles that a playbook uses to set of roles of the change set
items |= {desc.name for desc in descendants}
return items
Input:
path: A path to the playbook
"""
# get last part of filepath
return path.stem
def _get_resource_name(path, kind):
def _get_role_name_from_file(path):
"""
Gets name of resource from the filepath, which is the directory following occurrence of kind.
Gets name of role from the filepath, which is the directory following occurrence of the word "roles".
Input:
path: A path to the resource (e.g. a role or a playbook)
kind: A description of the type of resource; this keyword precedes the name of a role or a playbook
in a file path and allows for the separation of its name;
e.g. for "configuration/playbooks/roles/discovery/...", kind = "roles" returns
"discovery" as the role name
path: A path to the role
"""
# get individual parts of a file path
dirs = path.parts
# type of resource is the next part of the file path after kind (e.g. after "roles" or "playbooks")
return dirs[dirs.index(kind)+1]
# name of role is the next part of the file path after "roles"
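# e.g. "configuration/playbooks/roles/discovery/tasks/main.yml" -> "discovery"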
return dirs[dirs.index("roles")+1]
def get_dependencies(roles, graph):
"""
@@ -355,6 +370,9 @@ if __name__ == '__main__':
# build graph
graph = build_graph(TRAVIS_BUILD_DIR, config["roles_paths"], config["aws_plays_paths"], config["docker_plays_paths"])
# gets any playbooks in the commit range
plays = get_plays(change_set, TRAVIS_BUILD_DIR, config["aws_plays_paths"])
# transforms list of roles and plays into list of original roles and the roles contained in the plays
roles = change_set_to_roles(change_set, TRAVIS_BUILD_DIR, config["roles_paths"], config["aws_plays_paths"], graph)
@@ -364,6 +382,8 @@ if __name__ == '__main__':
# determine which docker plays cover at least one role
docker_plays = get_docker_plays(dependent_roles, graph)
docker_plays = docker_plays | plays
# filter out docker plays without a Dockerfile
docker_plays = filter_docker_plays(docker_plays, TRAVIS_BUILD_DIR)
......
@@ -17,4 +17,6 @@ weights:
- nginx: 1
- xqueue: 2
- trusty-common: 5
- precise-common: 4
\ No newline at end of file
- precise-common: 4
- ecommerce: 6
- rabbitmq: 2
@@ -26,11 +26,6 @@ if [ -n "$OPENEDX_RELEASE" ]; then
-e xqueue_version=$OPENEDX_RELEASE \
"
CONFIG_VER=$OPENEDX_RELEASE
# Need to ensure that the configuration repo is updated
# The vagrant-devstack.yml playbook will also do this, but only
# after loading the playbooks into memory. If these are out of date,
# this can cause problems (e.g. looking for templates that no longer exist).
/edx/bin/update configuration $CONFIG_VER
else
CONFIG_VER="master"
fi
......