Commit 2209c1f0 authored by Jürgen Haas's avatar Jürgen Haas
Browse files

Merge branch 'release/1.2.0.0'

parents 0d6a17d7 98b1f353
......@@ -2,3 +2,5 @@
archive
inventory
*.pyc
2015-11-23 version 1.2
======================
Replace shell scripts with python scripts
Replace inithost and removehost shell scripts with Python scripts
Ignore pyc files
Dynamic Jiffybox inventory
Move dynamic inventory scripts to a central place
Fine tune inithost and removehost playbooks with the cloud as an option
Change location of plugins during runtime
Update documentation
Optimize initial setup by using submodules
Exclude localhost from initial provisioning
Documentation about boto when using AWS inventory
AWS provisioning
Support drush commands during deploy
2015-11-16 version 1.1
======================
Enhance deploy playbook
#!/usr/bin/env python
'''
Wrapper for the official Ansible Playbook application
=====================================================
'''
import os
import argparse
from subprocess import call
def callAnsible(playbook, inventory, args):
    '''Run ansible-playbook for the given playbook and inventory.

    playbook  -- playbook name without the '.yml' suffix
    inventory -- path to the inventory file to use
    args      -- extra CLI arguments forwarded verbatim to ansible-playbook
    '''
    command = ['ansible-playbook', playbook + '.yml', '-i', inventory]
    command.extend(args)
    # `path` is the module-level script directory set during startup;
    # running from there makes the relative playbook paths resolve.
    call(command, cwd=path)
# Parse the known options; anything unrecognized is forwarded to
# ansible-playbook untouched.
parser = argparse.ArgumentParser(description='Ansible Playbook wrapper by Paragon')
parser.add_argument('playbook',
                    help='Playbook to execute')
parser.add_argument('--local', action='store_true', default=False,
                    help='Set to true to work with the localhost only')
parser.add_argument('--custom', action='store_true', default=False,
                    help='Set to true to only execute playbook in the inventory directory')
args, extras = parser.parse_known_args()

path = os.path.dirname(os.path.realpath(__file__)) + '/'
pathSecrets = os.path.realpath(os.environ['HOME']) + '/.ansible/secrets'
pathCustom = path

# Start building command
cmd = []

# Determine the inventory file: localhost-only, project-level, or the
# company-specific one selected via ANSIBLE_COMPANY.
if args.local:
    inventory = path + 'local.inventory'
    pathCustom = False
elif os.path.exists(path + 'inventory/inventory'):
    inventory = path + 'inventory/inventory'
    pathCustom = path + 'inventory/'
else:
    inventory = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/inventory'
    pathCustom = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/'
if not os.path.exists(inventory):
    raise SystemError('Inventory %s not found' % inventory)

# Use the secrets vault when present, otherwise ask for the sudo password.
if os.path.exists(pathSecrets):
    cmd += ['-e', '@' + pathSecrets]
else:
    cmd.append('--ask-sudo-pass')

# Forward the remaining CLI options.
cmd.extend(extras)

# Run the main playbook if not in custom mode.
if not args.custom:
    callAnsible(args.playbook, inventory, cmd)

# Optionally also run the same-named playbook from the custom directory.
if pathCustom and os.path.exists(pathCustom + args.playbook + '.yml'):
    callAnsible(pathCustom + args.playbook, inventory, cmd)
#!/bin/bash
#
# Wrapper around ansible-playbook: picks the inventory (local, project, or
# company-specific via $ANSIBLE_COMPANY), loads the secrets vault when one
# exists, and optionally runs a same-named custom playbook afterwards.

# Locate the optional secrets vault in the user's ~/.ansible directory.
if [ -d ~/.ansible ]
then
  SECRETS="$( cd ~/.ansible && pwd )/secrets"
fi

# Run from the directory containing this script so relative paths resolve.
cd "$( cd "$(dirname "$(readlink -f "$0")")" ; pwd )"

# "local" as the first argument selects the localhost-only inventory.
# $1 is quoted: the original unquoted test produced a bash error when the
# script was invoked without arguments.
if [ "$1" == "local" ]
then
  CUSTOM_DIR=false
  INVENTORY=./local.inventory
  shift
else
  if [ -f ./inventory/inventory ]
  then
    CUSTOM_DIR=./inventory
  else
    CUSTOM_DIR=./inventory/$ANSIBLE_COMPANY
  fi
  INVENTORY=$CUSTOM_DIR/inventory
fi

if [ "$1" == "" ]
then
  echo "Usage"
  echo "ansible-playbook.sh PLAYBOOK ..."
  exit 101;
fi

# "custom" runs the playbook from the inventory directory only.
if [ "$1" == "custom" ]
then
  shift
  PLAYBOOK_PREFIX=$CUSTOM_DIR/
else
  PLAYBOOK_PREFIX=""
fi
PLAYBOOK=$PLAYBOOK_PREFIX$1.yml
shift

if [ ! -f "$INVENTORY" ]
then
  echo "The inventory $INVENTORY does not exist!"
  exit 102;
fi
if [ ! -f "$PLAYBOOK" ]
then
  echo "The playbook $PLAYBOOK does not exist!"
  exit 103;
fi

# Use the vault when present, otherwise ask for the sudo password.
# -n "$SECRETS" guards against SECRETS being unset (no ~/.ansible):
# a bare `[ $SECRETS ]` relies on word splitting and breaks on paths
# containing spaces.
if [ -n "$SECRETS" ] && [ -f "$SECRETS" ]
then
  ansible-playbook "$PLAYBOOK" -i "$INVENTORY" -e @"$SECRETS" "$@"
else
  ansible-playbook "$PLAYBOOK" -i "$INVENTORY" --ask-sudo-pass "$@"
fi

# Optionally run the same-named playbook from the custom directory as well.
# Compare against "$CUSTOM_DIR/" (with trailing slash) so this block is
# correctly skipped in custom mode; the original compared the slash-less
# CUSTOM_DIR against the slash-suffixed prefix, which never matched.
if [ "$CUSTOM_DIR" != "false" ] && [ "$CUSTOM_DIR/" != "$PLAYBOOK_PREFIX" ] && [ -f "$CUSTOM_DIR/$PLAYBOOK" ]
then
  if [ -n "$SECRETS" ] && [ -f "$SECRETS" ]
  then
    ansible-playbook "$CUSTOM_DIR/$PLAYBOOK" -i "$INVENTORY" -e @"$SECRETS" "$@"
  else
    ansible-playbook "$CUSTOM_DIR/$PLAYBOOK" -i "$INVENTORY" --ask-sudo-pass "$@"
  fi
fi
#!/usr/bin/env python
'''
Wrapper for the official Ansible application
============================================
'''
import os
import argparse
from subprocess import call

# Parse the host pattern; everything else is forwarded to ansible verbatim.
parser = argparse.ArgumentParser(description='Ansible wrapper by Paragon')
parser.add_argument('host',
                    help='Host name or pattern')
args, extras = parser.parse_known_args()

path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
pathSecrets = os.path.realpath(os.environ['HOME']) + '/.ansible/secrets'

# Start building the command line.
cmd = ['ansible', args.host]

# Prefer the project inventory; fall back to the company-specific one
# selected via the ANSIBLE_COMPANY environment variable.
inventory = path + 'inventory/inventory'
if not os.path.exists(inventory):
    inventory = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/inventory'
if not os.path.exists(inventory):
    raise SystemError('Inventory %s not found' % inventory)
cmd += ['-i', inventory]

# Pass the secrets vault along when it is available.
if os.path.exists(pathSecrets):
    cmd += ['-e', '@' + pathSecrets]

# Forward any remaining CLI options untouched.
cmd.extend(extras)

call(cmd, cwd=path)
#!/bin/bash
#
# Wrapper around the ansible CLI: selects the inventory (project or
# company-specific via $ANSIBLE_COMPANY) and passes the secrets vault
# along when one exists.

if [ -d ~/.ansible ]
then
  SECRETS="$( cd ~/.ansible && pwd )/secrets"
fi

# Run from the directory containing this script so relative paths resolve.
cd "$( cd "$(dirname "$(readlink -f "$0")")" ; pwd )"

if [ -f ./inventory/inventory ]
then
  INVENTORY=./inventory/inventory
else
  INVENTORY=./inventory/$ANSIBLE_COMPANY/inventory
fi

if [ "$3" == "" ]
then
  echo "Usage"
  echo "ansible.sh HOSTS COMMAND OPTIONS"
  exit 101;
fi

HOSTS=$1
shift

if [ ! -f "$INVENTORY" ]
then
  echo "The inventory $INVENTORY does not exist!"
  exit 102;
fi

# Guard against SECRETS being unset (no ~/.ansible directory): the
# original bare `[ -f $SECRETS ]` then expanded to `[ -f ]`, which is
# always TRUE in bash, so ansible was invoked with a dangling `-e @`.
if [ -n "$SECRETS" ] && [ -f "$SECRETS" ]
then
  ansible "$HOSTS" -i "$INVENTORY" -e @"$SECRETS" "$@"
else
  ansible "$HOSTS" -i "$INVENTORY" "$@"
fi
##
# Dummy Ansible playbook
#
# Placeholder play used when a provisioning flow expects a cloud step but
# the selected setup requires no cloud action: it targets localhost with an
# empty task list, so it always succeeds immediately.
---
# file: cloud.yml
- name: "No cloud action required"
  hosts: "localhost"
  connection: local        # run directly, no SSH needed for localhost
  gather_facts: false      # no tasks use facts; skip gathering for speed
  sudo: no                 # NOTE(review): 'sudo' is deprecated in newer Ansible in favour of 'become' — confirm target version
  tasks: []                # deliberately empty
# Ansible EC2 external inventory script settings
#
[ec2]
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = us-east-1
regions_exclude = us-gov-west-1,cn-north-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
# may optionally be used; however the boto instance variables hold precedence
# in the event of a collision.
destination_variable = public_dns_name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until You set:
# vpc_destination_variable = 'private_ip_address'
vpc_destination_variable = AnsibleHost
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# To exclude RDS instances from the inventory, uncomment and set to False.
rds = False
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
elasticache = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# By default, only EC2 instances in the 'running' state are returned. Set
# 'all_instances' to True to return all instances regardless of state.
all_instances = False
# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no node will be returned from
# unavailable clusters, regardless of the state and to what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = False
# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = True
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = False
group_by_region = False
group_by_availability_zone = False
group_by_ami_id = False
group_by_instance_type = False
group_by_key_pair = False
group_by_vpc_id = False
group_by_security_group = False
group_by_tag_keys = False
group_by_tag_values = True
group_by_tag_none = False
group_by_route53_names = False
group_by_rds_engine = False
group_by_rds_parameter_group = False
group_by_elasticache_engine = False
group_by_elasticache_cluster = False
group_by_elasticache_parameter_group = False
group_by_elasticache_replication_group = False
# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*
# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*
# Instance filters can be used to control which instances are retrieved for
# inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '=', to list multiple filters use
# a list separated by commas. See examples below.
# Retrieve only instances with (key=value) env=staging tag
# instance_filters = tag:env=staging
# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers
# Retrieve only t1.micro instances OR instances with tag env=staging
# instance_filters = instance-type=t1.micro,tag:env=staging
# You can use wildcards in filter values also. Below will list instances which
# tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
This diff is collapsed.
# Ansible JiffyBox external inventory script settings
#
[jiffybox]
cache_max_age = 300
cache_path = ~/.ansible/tmp
expand_csv_tags = True
replace_dash_in_groups = False
#!/usr/bin/env python
'''
JiffyBox external inventory script
==================================
'''
import sys
import os
import argparse
import re
import requests
from time import time
import six
from six.moves import configparser
from ansible.errors import AnsibleError as ae
try:
import json
except ImportError:
import simplejson as json
class JiffyBoxInventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
    '''
    Main execution path: parse CLI arguments, read jiffybox.ini settings,
    refresh the cache when needed, and print the requested JSON data
    (inventory list, single-host variables, or host-group update result).
    '''
    # Inventory grouped by instance IDs, tags, security groups, regions,
    # and availability zones
    self.inventory = self._empty_inventory()
    # Index of hostname (address) to instance ID
    self.index = {}
    # Read settings and parse CLI arguments
    self.parse_cli_args()
    self.read_settings()
    # Cache: refresh when explicitly requested (--refresh-cache) or when
    # the on-disk cache files are missing/stale.
    if self.args.refresh_cache:
        self.do_api_calls_update_cache()
    elif not self.is_cache_valid():
        self.do_api_calls_update_cache()
    # Data to print
    if self.args.set_host_groups:
        data_to_print = self.set_host_groups()
    elif self.args.host:
        data_to_print = self.get_host_info()
    elif self.args.list:
        # Display list of instances for inventory
        if self.inventory == self._empty_inventory():
            # Nothing was fetched during this run: serve the cached copy.
            data_to_print = self.get_inventory_from_cache()
        else:
            data_to_print = self.json_format_dict(self.inventory, True)
    # NOTE(review): --list defaults to True, so data_to_print is always
    # assigned; if that default ever changes this print raises NameError.
    print(data_to_print)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on JiffyBox')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to JiffyBox (default: False - use cache files)')
parser.add_argument('--set-host-groups', action='store',
help='Set the inventory groups for the host(s)')
self.args = parser.parse_args()
def read_settings(self):
    '''Reads the settings from the jiffybox.ini file into instance attributes.

    Populates: expand_csv_tags, replace_dash_in_groups, all_instances,
    jiffybox_instance_states, cache_path_cache, cache_path_index,
    cache_max_age, pattern_include, pattern_exclude.
    '''
    # SafeConfigParser only exists on Python 2; pick the right class.
    if six.PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()
    # Default to the jiffybox.ini next to this script; overridable via the
    # JIFFYBOX_INI_PATH environment variable (with ~ and $VAR expansion).
    jiffybox_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jiffybox.ini')
    jiffybox_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('JIFFYBOX_INI_PATH', jiffybox_default_ini_path)))
    config.read(jiffybox_ini_path)
    # Expand comma-separated tag values into additional groups?
    if config.has_option('jiffybox', 'expand_csv_tags'):
        self.expand_csv_tags = config.getboolean('jiffybox', 'expand_csv_tags')
    else:
        self.expand_csv_tags = False
    # Replace dash or not in group names
    if config.has_option('jiffybox', 'replace_dash_in_groups'):
        self.replace_dash_in_groups = config.getboolean('jiffybox', 'replace_dash_in_groups')
    else:
        self.replace_dash_in_groups = True
    # Return all JiffyBox instances?
    if config.has_option('jiffybox', 'all_instances'):
        self.all_instances = config.getboolean('jiffybox', 'all_instances')
    else:
        self.all_instances = False
    # Instance states to be gathered in inventory. Default is 'running'.
    # Setting 'all_instances' to 'yes' overrides this option.
    jiffybox_valid_instance_states = [
        'READY',
        'CREATING',
        'UPDATING',
        'CHANGING PLAN',
        'STATUS_READY',
        'STATUS_CREATING',
        'STATUS_UPDATING'
    ]
    self.jiffybox_instance_states = []
    if self.all_instances:
        self.jiffybox_instance_states = jiffybox_valid_instance_states
    elif config.has_option('jiffybox', 'instance_states'):
        # Unknown state names from the ini file are silently dropped.
        for instance_state in config.get('jiffybox', 'instance_states').split(','):
            instance_state = instance_state.strip()
            if instance_state not in jiffybox_valid_instance_states:
                continue
            self.jiffybox_instance_states.append(instance_state)
    else:
        self.jiffybox_instance_states = ['READY']
    # Cache related
    # NOTE(review): cache_path and cache_max_age are required options; a
    # missing entry raises configparser.NoOptionError here — confirm that
    # is the intended failure mode.
    cache_dir = os.path.expanduser(config.get('jiffybox', 'cache_path'))
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    self.cache_path_cache = cache_dir + "/ansible-jiffybox.cache"
    self.cache_path_index = cache_dir + "/ansible-jiffybox.index"
    self.cache_max_age = config.getint('jiffybox', 'cache_max_age')
    # Do we need to just include hosts that match a pattern?
    try:
        pattern_include = config.get('jiffybox', 'pattern_include')
        if pattern_include and len(pattern_include) > 0:
            self.pattern_include = re.compile(pattern_include)
        else:
            self.pattern_include = None
    except configparser.NoOptionError:
        self.pattern_include = None
    # Do we need to exclude hosts that match a pattern?
    try:
        pattern_exclude = config.get('jiffybox', 'pattern_exclude')
        if pattern_exclude and len(pattern_exclude) > 0:
            self.pattern_exclude = re.compile(pattern_exclude)
        else:
            self.pattern_exclude = None
    except configparser.NoOptionError:
        self.pattern_exclude = None
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def do_api_calls_update_cache(self):
    '''Query the JiffyBox API and refresh both on-disk cache files.'''
    # Populate self.inventory / self.index from the API first.
    self.get_instances()
    # Then persist both structures (independent files, order irrelevant).
    self.write_to_cache(self.index, self.cache_path_index)
    self.write_to_cache(self.inventory, self.cache_path_cache)
def get_instances(self):
    '''Fetch all JiffyBox instances via the API and add them to the inventory.

    Any error during the API call aborts the script through
    fail_with_error (which writes to stderr and exits with status 1).
    '''
    try:
        conn = self.connect()
        instances = conn.get_all_instances()
        # get_all_instances() is iterated by key and indexed, i.e. it
        # behaves like a mapping of instance id -> instance data.
        for instance in instances:
            self.add_instance(instances[instance])
    except Exception as e:
        # The original caught StandardError and read e.message — both are
        # Python-2-only and raise NameError/AttributeError on Python 3,
        # which this file otherwise supports via six.
        self.fail_with_error(str(e), 'getting JiffyBox instances')
def connect(self):
    '''Create and return a JiffyBox API connection.

    Aborts via fail_with_error (which exits) when no connection object
    could be created.
    '''
    # Unused local `connect_args` and a comment about boto's
    # connect_to_region (copied from the EC2 inventory script) removed —
    # neither applied to JiffyBoxConnect.
    conn = JiffyBoxConnect()
    # Defensive check kept from the original: treat a None connection
    # object as a fatal error.
    if conn is None:
        self.fail_with_error("Something went wrong during connection")
    return conn
def fail_with_error(self, err_msg, err_operation=None):
    '''Log an error to stderr (for ansible-playbook to consume) and exit with status 1.'''
    message = err_msg
    if err_operation:
        # Prefix with the failed operation for context when one is given.
        message = 'ERROR: "{err_msg}", while: {err_operation}'.format(
            err_msg=err_msg, err_operation=err_operation)
    sys.stderr.write(message)
    sys.exit(1)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''