diff --git a/.gitignore b/.gitignore index 7589096396a39df886f51ef29cef7fc9340ba838..46f68ca1aea5475dc18d0235cdc1a60a89dda11a 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ archive inventory + +*.pyc diff --git a/CHANGELOG b/CHANGELOG index 825071a20dfb10c374628a7bb69f05b0c6772c88..3537893098ed35af2902fa10cc68013228963122 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,19 @@ +2015-11-23 version 1.2 +====================== +Replace shell scripts with python scripts +Replace inithost and removehost shell scripts with Python scripts +Ignore pyc files +Dynamic Jiffybox inventory +Move dynamic inventory scripts to a central place +Fine tune inithost and removehost playbooks with the cloud as an option +Change location of plugins during runtime +Update documentation +Optimize initial setup by using submodules +Exclude localhost from initial provisioning +Documentation about boto when using AWS inventory +AWS provisioning +Support drush commands during deploy + 2015-11-16 version 1.1 ====================== Enhance deploy playbook diff --git a/ansible-playbook.py b/ansible-playbook.py new file mode 100755 index 0000000000000000000000000000000000000000..3be711c6b8b13859ada36e8992a6d3ad53487dfe --- /dev/null +++ b/ansible-playbook.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +''' +Wrapper for the official Ansible Playbook application +===================================================== +''' + +import os +import argparse +from subprocess import call + +def callAnsible(playbook, inventory, args): + cmd = ['ansible-playbook', playbook + '.yml', '-i', inventory] + for arg in args: + cmd.append(arg) + call(cmd, cwd=path) + +parser = argparse.ArgumentParser(description='Ansible Playbook wrapper by Paragon') +parser.add_argument('playbook', + help='Playbook to execute') +parser.add_argument('--local', action='store_true', default=False, + help='Set to true to work with the localhost only') +parser.add_argument('--custom', action='store_true', default=False, + 
help='Set to true to only execute playbook in the inventory directory') + +args, extras = parser.parse_known_args() + +path = os.path.dirname(os.path.realpath(__file__)) + '/' +pathSecrets = os.path.realpath(os.environ['HOME']) + '/.ansible/secrets' +pathCustom = path + +# Start building command +cmd = [] + +# Determine inventory file +if args.local: + inventory = path + 'local.inventory' + pathCustom = False +elif os.path.exists(path + 'inventory/inventory'): + inventory = path + 'inventory/inventory' + pathCustom = path + 'inventory/' +else: + inventory = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/inventory' + pathCustom = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/' +if not os.path.exists(inventory): + raise SystemError('Inventory %s not found' % inventory) + +# Check for the vault +if os.path.exists(pathSecrets): + cmd.append('-e') + cmd.append('@' + pathSecrets) +else: + cmd.append('--ask-sudo-pass') + +# Append more CLI options +for extra in extras: + cmd.append(extra) + +# Run the main playbook if not in custom mode +if not args.custom: + callAnsible(args.playbook, inventory, cmd) + +# Optionally also run the custom playbook +if pathCustom and os.path.exists(pathCustom + args.playbook + '.yml'): + callAnsible(pathCustom + args.playbook, inventory, cmd) diff --git a/ansible-playbook.sh b/ansible-playbook.sh deleted file mode 100755 index 341bb2a6afefa316ff963154018b6f56fc7a6d44..0000000000000000000000000000000000000000 --- a/ansible-playbook.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -if [ -d ~/.ansible ] - then - SECRETS="$( cd ~/.ansible && pwd )/secrets" -fi -cd $( cd $(dirname $(readlink -f $0)) ; pwd ) - -if [ $1 == "local" ] - then - CUSTOM_DIR=false - INVENTORY=./local.inventory - shift - else - if [ -f ./inventory/inventory ] - then - CUSTOM_DIR=./inventory - else - CUSTOM_DIR=./inventory/$ANSIBLE_COMPANY - fi - INVENTORY=$CUSTOM_DIR/inventory -fi - -if [ "$1" == "" ] - then - echo "Usage" - echo "ansible-playbook.sh 
PLAYBOOK ..." - exit 101; -fi - -if [ $1 == "custom" ] - then - shift - PLAYBOOK_PREFIX=$CUSTOM_DIR/ - else - PLAYBOOK_PREFIX="" -fi -PLAYBOOK=$PLAYBOOK_PREFIX$1.yml -shift - -if [ ! -f $INVENTORY ] - then - echo "The inventory $INVENTORY does not exist!" - exit 102; -fi - -if [ ! -f $PLAYBOOK ] - then - echo "The playbook $PLAYBOOK does not exist!" - exit 103; -fi - -if [ $SECRETS ] && [ -f $SECRETS ] - then - ansible-playbook $PLAYBOOK -i $INVENTORY -e @$SECRETS "$@" - else - ansible-playbook $PLAYBOOK -i $INVENTORY --ask-sudo-pass "$@" -fi - -if [ $CUSTOM_DIR != "false" ] && [ "$CUSTOM_DIR" != "$PLAYBOOK_PREFIX" ] && [ -f $CUSTOM_DIR/$PLAYBOOK ] - then - if [ $SECRETS ] && [ -f $SECRETS ] - then - ansible-playbook $CUSTOM_DIR/$PLAYBOOK -i $INVENTORY -e @$SECRETS "$@" - else - ansible-playbook $CUSTOM_DIR/$PLAYBOOK -i $INVENTORY --ask-sudo-pass "$@" - fi -fi diff --git a/ansible.py b/ansible.py new file mode 100755 index 0000000000000000000000000000000000000000..76519b3f59c7c6cdc52cf64156fe1b65feb44b25 --- /dev/null +++ b/ansible.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +''' +Wrapper for the official Ansible application +============================================ +''' + +import os +import argparse +from subprocess import call + +parser = argparse.ArgumentParser(description='Ansible wrapper by Paragon') +parser.add_argument('host', + help='Host name or pattern') + +args, extras = parser.parse_known_args() + +path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep +pathSecrets = os.path.realpath(os.environ['HOME']) + '/.ansible/secrets' + +# Start building command +cmd = ['ansible', args.host] + +# Determine inventory file +if os.path.exists(path + 'inventory/inventory'): + inventory = path + 'inventory/inventory' +else: + inventory = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/inventory' +if not os.path.exists(inventory): + raise SystemError('Inventory %s not found' % inventory) +cmd.append('-i') +cmd.append(inventory) + +# Check for the 
vault +if os.path.exists(pathSecrets): + cmd.append('-e') + cmd.append('@' + pathSecrets) + +# Append more CLI options +for extra in extras: + cmd.append(extra) + +call(cmd, cwd=path) diff --git a/ansible.sh b/ansible.sh deleted file mode 100755 index 76b733048434791c4c47a36296921ffc31c571d4..0000000000000000000000000000000000000000 --- a/ansible.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -if [ -d ~/.ansible ] - then - SECRETS="$( cd ~/.ansible && pwd )/secrets" -fi -cd $( cd $(dirname $(readlink -f $0)) ; pwd ) - -if [ -f ./inventory/inventory ] - then - INVENTORY=./inventory/inventory - else - INVENTORY=./inventory/$ANSIBLE_COMPANY/inventory -fi - -if [ "$3" == "" ] - then - echo "Usage" - echo "ansible.sh HOSTS COMMAND OPTIONS" - exit 101; -fi -HOSTS=$1 -shift - -if [ ! -f $INVENTORY ] - then - echo "The inventory $INVENTORY does not exist!" - exit 102; -fi - -if [ -f $SECRETS ] - then - ansible $HOSTS -i $INVENTORY -e @$SECRETS "$@" -else - ansible $HOSTS -i $INVENTORY "$@" -fi diff --git a/cloud.yml b/cloud.yml deleted file mode 100644 index 7294e5e884aa8c3fad9204af3fa6c28340687261..0000000000000000000000000000000000000000 --- a/cloud.yml +++ /dev/null @@ -1,12 +0,0 @@ -## -# Dummy Ansible playbook - ---- -# file: cloud.yml - -- name: "No cloud action required" - hosts: "localhost" - connection: local - gather_facts: false - sudo: no - tasks: [] diff --git a/cloud/ec2.ini b/cloud/ec2.ini new file mode 100644 index 0000000000000000000000000000000000000000..c048ac6da3cd0264c4fe7ee399b1e02435eca32e --- /dev/null +++ b/cloud/ec2.ini @@ -0,0 +1,153 @@ +# Ansible EC2 external inventory script settings +# + +[ec2] + +# to talk to a private eucalyptus instance uncomment these lines +# and edit edit eucalyptus_host to be the host name of your cloud controller +#eucalyptus = True +#eucalyptus_host = clc.cloud.domain.org + +# AWS regions to make calls to. Set this to 'all' to make request to all regions +# in AWS and merge the results together. 
Alternatively, set this to a comma +# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' +regions = us-east-1 +regions_exclude = us-gov-west-1,cn-north-1 + +# When generating inventory, Ansible needs to know how to address a server. +# Each EC2 instance has a lot of variables associated with it. Here is the list: +# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance +# Below are 2 variables that are used as the address of a server: +# - destination_variable +# - vpc_destination_variable + +# This is the normal destination variable to use. If you are running Ansible +# from outside EC2, then 'public_dns_name' makes the most sense. If you are +# running Ansible from within EC2, then perhaps you want to use the internal +# address, and should set this to 'private_dns_name'. The key of an EC2 tag +# may optionally be used; however the boto instance variables hold precedence +# in the event of a collision. +destination_variable = public_dns_name + +# For server inside a VPC, using DNS names may not make sense. When an instance +# has 'subnet_id' set, this variable is used. If the subnet is public, setting +# this to 'ip_address' will return the public IP address. For instances in a +# private subnet, this should be set to 'private_ip_address', and Ansible must +# be run from within EC2. The key of an EC2 tag may optionally be used; however +# the boto instance variables hold precedence in the event of a collision. +# WARNING: - instances that are in the private vpc, _without_ public ip address +# will not be listed in the inventory until You set: +# vpc_destination_variable = 'private_ip_address' +vpc_destination_variable = AnsibleHost + +# To tag instances on EC2 with the resource records that point to them from +# Route53, uncomment and set 'route53' to True. +route53 = False + +# To exclude RDS instances from the inventory, uncomment and set to False. 
+rds = False + +# To exclude ElastiCache instances from the inventory, uncomment and set to False. +elasticache = False + +# Additionally, you can specify the list of zones to exclude looking up in +# 'route53_excluded_zones' as a comma-separated list. +# route53_excluded_zones = samplezone1.com, samplezone2.com + +# By default, only EC2 instances in the 'running' state are returned. Set +# 'all_instances' to True to return all instances regardless of state. +all_instances = False + +# By default, only EC2 instances in the 'running' state are returned. Specify +# EC2 instance states to return as a comma-separated list. This +# option is overriden when 'all_instances' is True. +# instance_states = pending, running, shutting-down, terminated, stopping, stopped + +# By default, only RDS instances in the 'available' state are returned. Set +# 'all_rds_instances' to True return all RDS instances regardless of state. +all_rds_instances = False + +# By default, only ElastiCache clusters and nodes in the 'available' state +# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' +# to True return all ElastiCache clusters and nodes, regardless of state. +# +# Note that all_elasticache_nodes only applies to listed clusters. That means +# if you set all_elastic_clusters to false, no node will be return from +# unavailable clusters, regardless of the state and to what you set for +# all_elasticache_nodes. +all_elasticache_replication_groups = False +all_elasticache_clusters = False +all_elasticache_nodes = False + +# API calls to EC2 are slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-ec2.cache +# - ansible-ec2.index +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. 
+# To disable the cache, set this value to 0 +cache_max_age = 300 + +# Organize groups into a nested/hierarchy instead of a flat namespace. +nested_groups = False + +# Replace - tags when creating groups to avoid issues with ansible +replace_dash_in_groups = False + +# If set to true, any tag of the form "a,b,c" is expanded into a list +# and the results are used to create additional tag_* inventory groups. +expand_csv_tags = True + +# The EC2 inventory output can become very large. To manage its size, +# configure which groups should be created. +group_by_instance_id = False +group_by_region = False +group_by_availability_zone = False +group_by_ami_id = False +group_by_instance_type = False +group_by_key_pair = False +group_by_vpc_id = False +group_by_security_group = False +group_by_tag_keys = False +group_by_tag_values = True +group_by_tag_none = False +group_by_route53_names = False +group_by_rds_engine = False +group_by_rds_parameter_group = False +group_by_elasticache_engine = False +group_by_elasticache_cluster = False +group_by_elasticache_parameter_group = False +group_by_elasticache_replication_group = False + +# If you only want to include hosts that match a certain regular expression +# pattern_include = staging-* + +# If you want to exclude any hosts that match a certain regular expression +# pattern_exclude = staging-* + +# Instance filters can be used to control which instances are retrieved for +# inventory. For the full list of possible filters, please read the EC2 API +# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters +# Filters are key/value pairs separated by '=', to list multiple filters use +# a list separated by commas. See examples below. 
+ +# Retrieve only instances with (key=value) env=staging tag +# instance_filters = tag:env=staging + +# Retrieve only instances with role=webservers OR role=dbservers tag +# instance_filters = tag:role=webservers,tag:role=dbservers + +# Retrieve only t1.micro instances OR instances with tag env=staging +# instance_filters = instance-type=t1.micro,tag:env=staging + +# You can use wildcards in filter values also. Below will list instances which +# tag Name value matches webservers1* +# (ex. webservers15, webservers1a, webservers123 etc) +# instance_filters = tag:Name=webservers1* + +# A boto configuration profile may be used to separate out credentials +# see http://boto.readthedocs.org/en/latest/boto_config_tut.html +# boto_profile = some-boto-profile-name diff --git a/cloud/ec2.py b/cloud/ec2.py new file mode 100755 index 0000000000000000000000000000000000000000..6d2bae6a64dec617422f2acfc48253e10a865a61 --- /dev/null +++ b/cloud/ec2.py @@ -0,0 +1,1333 @@ +#!/usr/bin/env python + +''' +EC2 external inventory script +================================= + +Generates inventory that Ansible can understand by making API request to +AWS EC2 using the Boto library. + +NOTE: This script assumes Ansible is being executed where the environment +variables needed for Boto have already been set: + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +This script also assumes there is an ec2.ini file alongside it. To specify a +different path to ec2.ini, define the EC2_INI_PATH environment variable: + + export EC2_INI_PATH=/path/to/my_ec2.ini + +If you're using eucalyptus you need to set the above variables and +you need to define: + + export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus + +If you're using boto profiles (requires boto>=2.24.0) you can choose a profile +using the --boto-profile command line argument (e.g. 
ec2.py --boto-profile prod) or using +the AWS_PROFILE variable: + + AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml + +For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html + +When run against a specific host, this script returns the following variables: + - ec2_ami_launch_index + - ec2_architecture + - ec2_association + - ec2_attachTime + - ec2_attachment + - ec2_attachmentId + - ec2_client_token + - ec2_deleteOnTermination + - ec2_description + - ec2_deviceIndex + - ec2_dns_name + - ec2_eventsSet + - ec2_group_name + - ec2_hypervisor + - ec2_id + - ec2_image_id + - ec2_instanceState + - ec2_instance_type + - ec2_ipOwnerId + - ec2_ip_address + - ec2_item + - ec2_kernel + - ec2_key_name + - ec2_launch_time + - ec2_monitored + - ec2_monitoring + - ec2_networkInterfaceId + - ec2_ownerId + - ec2_persistent + - ec2_placement + - ec2_platform + - ec2_previous_state + - ec2_private_dns_name + - ec2_private_ip_address + - ec2_publicIp + - ec2_public_dns_name + - ec2_ramdisk + - ec2_reason + - ec2_region + - ec2_requester_id + - ec2_root_device_name + - ec2_root_device_type + - ec2_security_group_ids + - ec2_security_group_names + - ec2_shutdown_state + - ec2_sourceDestCheck + - ec2_spot_instance_request_id + - ec2_state + - ec2_state_code + - ec2_state_reason + - ec2_status + - ec2_subnet_id + - ec2_tenancy + - ec2_virtualization_type + - ec2_vpc_id + +These variables are pulled out of a boto.ec2.instance object. There is a lack of +consistency with variable spellings (camelCase and underscores) since this +just loops through all variables the object exposes. It is preferred to use the +ones with underscores when multiple exist. + +In addition, if an instance has AWS Tags associated with it, each tag is a new +variable named: + - ec2_tag_[Key] = [Value] + +Security groups are comma-separated in 'ec2_security_group_ids' and +'ec2_security_group_names'. 
+''' + +# (c) 2012, Peter Sankauskas +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +###################################################################### + +import sys +import os +import argparse +import re +from time import time +import boto +from boto import ec2 +from boto import rds +from boto import elasticache +from boto import route53 +import six + +from six.moves import configparser +from collections import defaultdict + +try: + import json +except ImportError: + import simplejson as json + + +class Ec2Inventory(object): + + def _empty_inventory(self): + return {"_meta" : {"hostvars" : {}}} + + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = self._empty_inventory() + + # Index of hostname (address) to instance ID + self.index = {} + + # Boto profile to use (if any) + self.boto_profile = None + + # Read settings and parse CLI arguments + self.parse_cli_args() + self.read_settings() + + # Make sure that profile_name is not passed at all if not set + # as pre 2.24 boto will fall over otherwise + if self.boto_profile: + if not hasattr(boto.ec2.EC2Connection, 'profile_name'): + self.fail_with_error("boto version must be >= 2.24 to use profile") + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not 
self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + if self.inventory == self._empty_inventory(): + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print(data_to_print) + + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + + return False + + + def read_settings(self): + ''' Reads the settings from the ec2.ini file ''' + if six.PY3: + config = configparser.ConfigParser() + else: + config = configparser.SafeConfigParser() + ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') + ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) + config.read(ec2_ini_path) + + # is eucalyptus? 
+ self.eucalyptus_host = None + self.eucalyptus = False + if config.has_option('ec2', 'eucalyptus'): + self.eucalyptus = config.getboolean('ec2', 'eucalyptus') + if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): + self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') + + # Regions + self.regions = [] + configRegions = config.get('ec2', 'regions') + configRegions_exclude = config.get('ec2', 'regions_exclude') + if (configRegions == 'all'): + if self.eucalyptus_host: + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) + else: + for regionInfo in ec2.regions(): + if regionInfo.name not in configRegions_exclude: + self.regions.append(regionInfo.name) + else: + self.regions = configRegions.split(",") + + # Destination addresses + self.destination_variable = config.get('ec2', 'destination_variable') + self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + + # Route53 + self.route53_enabled = config.getboolean('ec2', 'route53') + self.route53_excluded_zones = [] + if config.has_option('ec2', 'route53_excluded_zones'): + self.route53_excluded_zones.extend( + config.get('ec2', 'route53_excluded_zones', '').split(',')) + + # Include RDS instances? + self.rds_enabled = True + if config.has_option('ec2', 'rds'): + self.rds_enabled = config.getboolean('ec2', 'rds') + + # Include ElastiCache instances? + self.elasticache_enabled = True + if config.has_option('ec2', 'elasticache'): + self.elasticache_enabled = config.getboolean('ec2', 'elasticache') + + # Return all EC2 instances? + if config.has_option('ec2', 'all_instances'): + self.all_instances = config.getboolean('ec2', 'all_instances') + else: + self.all_instances = False + + # Instance states to be gathered in inventory. Default is 'running'. + # Setting 'all_instances' to 'yes' overrides this option. 
+ ec2_valid_instance_states = [ + 'pending', + 'running', + 'shutting-down', + 'terminated', + 'stopping', + 'stopped' + ] + self.ec2_instance_states = [] + if self.all_instances: + self.ec2_instance_states = ec2_valid_instance_states + elif config.has_option('ec2', 'instance_states'): + for instance_state in config.get('ec2', 'instance_states').split(','): + instance_state = instance_state.strip() + if instance_state not in ec2_valid_instance_states: + continue + self.ec2_instance_states.append(instance_state) + else: + self.ec2_instance_states = ['running'] + + # Return all RDS instances? (if RDS is enabled) + if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: + self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') + else: + self.all_rds_instances = False + + # Return all ElastiCache replication groups? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: + self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') + else: + self.all_elasticache_replication_groups = False + + # Return all ElastiCache clusters? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: + self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') + else: + self.all_elasticache_clusters = False + + # Return all ElastiCache nodes? 
(if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: + self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') + else: + self.all_elasticache_nodes = False + + # boto configuration profile (prefer CLI argument) + self.boto_profile = self.args.boto_profile + if config.has_option('ec2', 'boto_profile') and not self.boto_profile: + self.boto_profile = config.get('ec2', 'boto_profile') + + # Cache related + cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if self.boto_profile: + cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + self.cache_path_cache = cache_dir + "/ansible-ec2.cache" + self.cache_path_index = cache_dir + "/ansible-ec2.index" + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + if config.has_option('ec2', 'expand_csv_tags'): + self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') + else: + self.expand_csv_tags = False + + # Configure nested groups instead of flat namespace. + if config.has_option('ec2', 'nested_groups'): + self.nested_groups = config.getboolean('ec2', 'nested_groups') + else: + self.nested_groups = False + + # Replace dash or not in group names + if config.has_option('ec2', 'replace_dash_in_groups'): + self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') + else: + self.replace_dash_in_groups = True + + # Configure which groups should be created. 
+ group_by_options = [ + 'group_by_instance_id', + 'group_by_region', + 'group_by_availability_zone', + 'group_by_ami_id', + 'group_by_instance_type', + 'group_by_key_pair', + 'group_by_vpc_id', + 'group_by_security_group', + 'group_by_tag_keys', + 'group_by_tag_values', + 'group_by_tag_none', + 'group_by_route53_names', + 'group_by_rds_engine', + 'group_by_rds_parameter_group', + 'group_by_elasticache_engine', + 'group_by_elasticache_cluster', + 'group_by_elasticache_parameter_group', + 'group_by_elasticache_replication_group', + ] + for option in group_by_options: + if config.has_option('ec2', option): + setattr(self, option, config.getboolean('ec2', option)) + else: + setattr(self, option, True) + + # Do we need to just include hosts that match a pattern? + try: + pattern_include = config.get('ec2', 'pattern_include') + if pattern_include and len(pattern_include) > 0: + self.pattern_include = re.compile(pattern_include) + else: + self.pattern_include = None + except configparser.NoOptionError: + self.pattern_include = None + + # Do we need to exclude hosts that match a pattern? + try: + pattern_exclude = config.get('ec2', 'pattern_exclude'); + if pattern_exclude and len(pattern_exclude) > 0: + self.pattern_exclude = re.compile(pattern_exclude) + else: + self.pattern_exclude = None + except configparser.NoOptionError: + self.pattern_exclude = None + + # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
+ self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + self.ec2_instance_filters[filter_key].append(filter_value) + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + parser.add_argument('--boto-profile', action='store', + help='Use boto profile for connections to EC2') + self.args = parser.parse_args() + + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + if self.rds_enabled: + self.get_rds_instances_by_region(region) + if self.elasticache_enabled: + self.get_elasticache_clusters_by_region(region) + self.get_elasticache_replication_groups_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + def connect(self, region): + ''' create connection to api server''' + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = self.connect_to_aws(ec2, region) + return conn + + def 
boto_fix_security_token_in_profile(self, connect_args): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + self.boto_profile + if boto.config.has_option(profile, 'aws_security_token'): + connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') + return connect_args + + def connect_to_aws(self, module, region): + connect_args = {} + + # only pass the profile name if it's set (as it is not supported by older boto versions) + if self.boto_profile: + connect_args['profile_name'] = self.boto_profile + self.boto_fix_security_token_in_profile(connect_args) + + conn = module.connect_to_region(region, **connect_args) + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + return conn + + def get_instances_by_region(self, region): + ''' Makes an AWS EC2 API call to the list of instances in a particular + region ''' + + try: + conn = self.connect(region) + reservations = [] + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.items(): + reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) + else: + reservations = conn.get_all_instances() + + for reservation in reservations: + for instance in reservation.instances: + self.add_instance(instance, region) + + except boto.exception.BotoServerError as e: + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + else: + backend = 'Eucalyptus' if self.eucalyptus else 'AWS' + error = "Error connecting to %s backend.\n%s" % (backend, e.message) + self.fail_with_error(error, 'getting EC2 instances') + + def get_rds_instances_by_region(self, region): + ''' Makes an AWS API call to the list of RDS instances in a particular + region ''' + + try: + conn = self.connect_to_aws(rds, region) + if conn: + instances = 
conn.get_all_dbinstances() + for instance in instances: + self.add_rds_instance(instance, region) + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error, 'getting RDS instances') + + def get_elasticache_clusters_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache clusters (with + nodes' info) in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = elasticache.connect_to_region(region) + if conn: + # show_cache_node_info = True + # because we also want nodes' information + response = conn.describe_cache_clusters(None, None, None, True) + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to CacheClusters or + # CacheNodes. Because of that wo can't make use of the get_list + # method in the AWSQueryConnection. Let's do the work manually + clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] + + except KeyError as e: + error = "ElastiCache query to AWS failed (unexpected format)." 
+ self.fail_with_error(error, 'getting ElastiCache clusters') + + for cluster in clusters: + self.add_elasticache_cluster(cluster, region) + + def get_elasticache_replication_groups_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache replication groups + in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = elasticache.connect_to_region(region) + if conn: + response = conn.describe_replication_groups() + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to ReplicationGroups + # Because of that wo can't make use of the get_list method in the + # AWSQueryConnection. Let's do the work manually + replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] + + except KeyError as e: + error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." 
+ self.fail_with_error(error, 'getting ElastiCache clusters') + + for replication_group in replication_groups: + self.add_elasticache_replication_group(replication_group, region) + + def get_auth_error_message(self): + ''' create an informative error message if there is an issue authenticating''' + errors = ["Authentication error retrieving ec2 inventory."] + if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: + errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') + else: + errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') + + boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] + boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) + if len(boto_config_found) > 0: + errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) + else: + errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) + + return '\n'.join(errors) + + def fail_with_error(self, err_msg, err_operation=None): + '''log an error to std err for ansible-playbook to consume and exit''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def get_instance(self, region, instance_id): + conn = self.connect(region) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only return instances with desired instance states + if instance.state not in self.ec2_instance_states: + return + + # Select the best destination address + if instance.subnet_id: + dest = 
getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) + else: + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) + + if not dest: + # Skip instances we cannot address (e.g. private VPC subnet) + return + + # if we only want to include hosts that match a pattern, skip those that don't + if self.pattern_include and not self.pattern_include.match(dest): + return + + # if we need to exclude hosts that match a pattern, skip those + if self.pattern_exclude and self.pattern_exclude.match(dest): + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, dest) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by 
key pair + if self.group_by_key_pair and instance.key_name: + key_name = self.to_safe('key_' + instance.key_name) + self.push(self.inventory, key_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'keys', key_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + # Inventory: Group by tag keys + if self.group_by_tag_keys or self.group_by_tag_values: + for k, v in instance.tags.items(): + if self.expand_csv_tags and v and ',' in v: + values = map(lambda x: x.strip(), v.split(',')) + else: + values = [v] + + for v in values: + keys = [] + if v: + if self.group_by_tag_keys: + keys.append(self.to_safe("tag_" + k + "=" + v)) + if self.group_by_tag_values: + keys.append(self.to_safe(v)) + else: + keys.append(self.to_safe("tag_" + k)) + for key in keys: + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled and self.group_by_route53_names: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'route53', name) + + # Global Tag: instances without tags + if 
self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all EC2 instances + self.push(self.inventory, 'ec2', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances unless all_rds_instances is True + if not self.all_rds_instances and instance.status != 'available': + return + + # Select the best destination address + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = 
self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + + # Inventory: Group by engine + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + + # Inventory: Group by parameter group + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + + # Global Tag: all RDS instances + self.push(self.inventory, 'rds', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + def add_elasticache_cluster(self, cluster, region): + ''' Adds an ElastiCache cluster to the inventory and index, as long as + it's nodes are addressable ''' + + # Only want available clusters unless all_elasticache_clusters is True + if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': + return + + # Select the best destination address + if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: + # Memcached cluster + dest = cluster['ConfigurationEndpoint']['Address'] + is_redis = False + else: + # Redis sigle node cluster + # Because all Redis 
clusters are single nodes, we'll merge the + # info from the cluster with info about the node + dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + is_redis = True + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, cluster['CacheClusterId']] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[cluster['CacheClusterId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) + + # Inventory: Group by region + if self.group_by_region and not is_redis: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone and not is_redis: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type and not is_redis: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group and not is_redis: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. 
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine and not is_redis: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is 
True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. 
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group (done at cluster level) + + # Inventory: Group by replication group (done at cluster level) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + + def add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Select the best destination address (PrimaryEndpoint) + dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] + + if not dest: + # Skip clusters we cannot address (e.g. 
private VPC subnet) + return + + # Add to index + self.index[dest] = [region, replication_group['ReplicationGroupId']] + + # Inventory: Group by ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[replication_group['ReplicationGroupId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone (doesn't apply to replication groups) + + # Inventory: Group by node type (doesn't apply to replication groups) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for replication groups + + # Inventory: Group by security group (doesn't apply to replication groups) + # Check this value in cluster level + + # Inventory: Group by engine (replication groups are always Redis) + if self.group_by_elasticache_engine: + self.push(self.inventory, 'elasticache_redis', dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', 'redis') + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) + + host_info = self.get_host_info_dict_from_describe_dict(replication_group) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + def get_route53_records(self): + ''' Get and store the map of resource records to domain names that + point to them. 
''' + + r53_conn = route53.Route53Connection() + all_zones = r53_conn.get_zones() + + route53_zones = [ zone for zone in all_zones if zone.name[:-1] + not in self.route53_excluded_zones ] + + self.route53_records = {} + + for zone in route53_zones: + rrsets = r53_conn.get_all_rrsets(zone.id) + + for record_set in rrsets: + record_name = record_set.name + + if record_name.endswith('.'): + record_name = record_name[:-1] + + for resource in record_set.resource_records: + self.route53_records.setdefault(resource, set()) + self.route53_records[resource].add(record_name) + + + def get_instance_route53_names(self, instance): + ''' Check if an instance is referenced in the records we have from + Route53. If it is, return the list of domain names pointing to said + instance. If nothing points to it, return an empty list. ''' + + instance_attributes = [ 'public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address' ] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + def get_host_info_dict_from_instance(self, instance): + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 + if key == 'ec2__state': + instance_vars['ec2_state'] = instance.state or '' + instance_vars['ec2_state_code'] = instance.state_code + elif key == 'ec2__previous_state': + instance_vars['ec2_previous_state'] = instance.previous_state or '' + instance_vars['ec2_previous_state_code'] = instance.previous_state_code + elif type(value) in [int, bool]: + instance_vars[key] = value + elif isinstance(value, six.string_types): + instance_vars[key] = value.strip() + elif 
type(value) == type(None): + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2__placement': + instance_vars['ec2_placement'] = value.zone + elif key == 'ec2_tags': + for k, v in value.items(): + if self.expand_csv_tags and ',' in v: + v = map(lambda x: x.strip(), v.split(',')) + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + else: + pass + # TODO Product codes if someone finds them useful + #print key + #print type(value) + #print value + + return instance_vars + + def get_host_info_dict_from_describe_dict(self, describe_dict): + ''' Parses the dictionary returned by the API call into a flat list + of parameters. This method should be used only when 'describe' is + used directly because Boto doesn't provide specific classes. ''' + + # I really don't agree with prefixing everything with 'ec2' + # because EC2, RDS and ElastiCache are different services. + # I'm just following the pattern used until now to not break any + # compatibility. 
+ + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + self.uncammelize(key)) + + # Handle complex types + + # Target: Memcached Cache Clusters + if key == 'ec2_configuration_endpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) + if key == 'ec2_endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] + host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] + replica_count += 1 + + # Target: Redis Replication Groups + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # Target: All Cache Clusters + elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything + elif 
key == 'ec2_security_groups': + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). + if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers + elif type(value) in [int, bool]: + host_info[key] = value + + # Target: Everything + # Sanitize string values + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string + elif type(value) == type(None): + host_info[key] = '' + + else: + # Remove non-processed complex types + pass + + return host_info + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if not self.args.host in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if not self.args.host in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. 
''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + regex = "[^A-Za-z0-9\_" + if not self.replace_dash_in_groups: + regex += "\-" + return re.sub(regex + "]", "_", word) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +Ec2Inventory() + diff --git a/cloud/jiffybox.ini b/cloud/jiffybox.ini new file mode 100644 index 0000000000000000000000000000000000000000..842908f5facc751ad8abaf27bf37525f025c8632 --- /dev/null +++ b/cloud/jiffybox.ini @@ -0,0 +1,8 @@ +# Ansible JiffyBox external inventory script settings +# + +[jiffybox] +cache_max_age = 300 +cache_path = ~/.ansible/tmp +expand_csv_tags = True +replace_dash_in_groups = False diff --git 
#!/usr/bin/env python

'''
JiffyBox external inventory script
==================================

Produces a dynamic Ansible inventory from the JiffyBox cloud API.
Instance data is cached on disk (locations come from jiffybox.ini);
pass --refresh-cache to force fresh API calls.
'''

import sys
import os
import argparse
import re
import requests
from time import time
import six

from six.moves import configparser
from ansible.errors import AnsibleError as ae

try:
    import json
except ImportError:
    import simplejson as json


class JiffyBoxInventory(object):
    '''Builds and prints the inventory requested on the command line.'''

    def _empty_inventory(self):
        '''Skeleton inventory structure expected by Ansible.'''
        return {"_meta": {"hostvars": {}}}

    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by the instances' "ansibleGroups" metadata plus
        # the global "jiffybox" group
        self.inventory = self._empty_inventory()

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.read_settings()

        # Refresh the cache when forced to, or when it has expired
        if self.args.refresh_cache or not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.set_host_groups:
            data_to_print = self.set_host_groups()
        elif self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print(data_to_print)

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on JiffyBox')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to JiffyBox (default: False - use cache files)')
        parser.add_argument('--set-host-groups', action='store',
                            help='Set the inventory groups for the host(s)')
        self.args = parser.parse_args()

    def read_settings(self):
        ''' Reads the settings from the jiffybox.ini file '''
        # SafeConfigParser is Python 2 only; ConfigParser replaces it on 3
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()
        jiffybox_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jiffybox.ini')
        jiffybox_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('JIFFYBOX_INI_PATH', jiffybox_default_ini_path)))
        config.read(jiffybox_ini_path)

        # Expand comma separated metadata values into several groups?
        if config.has_option('jiffybox', 'expand_csv_tags'):
            self.expand_csv_tags = config.getboolean('jiffybox', 'expand_csv_tags')
        else:
            self.expand_csv_tags = False

        # Replace dash or not in group names
        if config.has_option('jiffybox', 'replace_dash_in_groups'):
            self.replace_dash_in_groups = config.getboolean('jiffybox', 'replace_dash_in_groups')
        else:
            self.replace_dash_in_groups = True

        # Return all JiffyBox instances?
        if config.has_option('jiffybox', 'all_instances'):
            self.all_instances = config.getboolean('jiffybox', 'all_instances')
        else:
            self.all_instances = False

        # Instance states to be gathered in inventory. Default is 'READY'.
        # Setting 'all_instances' to 'yes' overrides this option.
        jiffybox_valid_instance_states = [
            'READY',
            'CREATING',
            'UPDATING',
            'CHANGING PLAN',
            'STATUS_READY',
            'STATUS_CREATING',
            'STATUS_UPDATING'
        ]
        self.jiffybox_instance_states = []
        if self.all_instances:
            self.jiffybox_instance_states = jiffybox_valid_instance_states
        elif config.has_option('jiffybox', 'instance_states'):
            for instance_state in config.get('jiffybox', 'instance_states').split(','):
                instance_state = instance_state.strip()
                # Silently drop states the API does not know
                if instance_state not in jiffybox_valid_instance_states:
                    continue
                self.jiffybox_instance_states.append(instance_state)
        else:
            self.jiffybox_instance_states = ['READY']

        # Cache related
        cache_dir = os.path.expanduser(config.get('jiffybox', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-jiffybox.cache"
        self.cache_path_index = cache_dir + "/ansible-jiffybox.index"
        self.cache_max_age = config.getint('jiffybox', 'cache_max_age')

        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get('jiffybox', 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except configparser.NoOptionError:
            self.pattern_include = None

        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get('jiffybox', 'pattern_exclude')
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except configparser.NoOptionError:
            self.pattern_exclude = None

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True

        return False

    def do_api_calls_update_cache(self):
        ''' Do API calls and save data in cache files '''

        self.get_instances()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def get_instances(self):
        ''' Makes a JiffyBox API call to get the list of instances '''

        try:
            conn = self.connect()
            instances = conn.get_all_instances()
            for instance_id in instances:
                self.add_instance(instances[instance_id])

        # StandardError and e.message are Python 2 only; catch Exception and
        # render it with str() so this also works on Python 3
        except Exception as e:
            self.fail_with_error(str(e), 'getting JiffyBox instances')

    def connect(self):
        '''Create a JiffyBox API connection object.

        JiffyBoxConnect raises AnsibleError itself when the credentials are
        missing; the None check below is purely defensive.
        '''
        conn = JiffyBoxConnect()
        if conn is None:
            self.fail_with_error("Something went wrong during connection")
        return conn

    def fail_with_error(self, err_msg, err_operation=None):
        '''log an error to std err for ansible-playbook to consume and exit'''
        if err_operation:
            err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
                err_msg=err_msg, err_operation=err_operation)
        sys.stderr.write(err_msg)
        sys.exit(1)

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''

        # with-statement guarantees the file handle is closed
        with open(self.cache_path_cache, 'r') as cache:
            return cache.read()

    def load_index_from_cache(self):
        ''' Reads the index from the cache file and sets self.index '''

        with open(self.cache_path_index, 'r') as cache:
            self.index = json.loads(cache.read())

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''
        # Raw strings avoid invalid-escape-sequence warnings on Python 3
        regex = r"[^A-Za-z0-9_"
        if not self.replace_dash_in_groups:
            regex += r"\-"
        return re.sub(regex + "]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)

    def get_instance(self, region, instance_id):
        '''Fetch a single instance record by ID from the live API.

        The region argument is kept for interface compatibility with the
        index layout; JiffyBox itself has no regions.
        '''
        conn = self.connect()

        instances = conn.get_all_instances()
        for instance in instances:
            # API keys are strings, the index stores the numeric ID
            if int(instance) == instance_id:
                return instances[instance]

        raise ae('Given host does not exist')

    def add_instance(self, instance):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''

        # Only return instances with desired instance states
        if 'status' not in instance or instance['status'] not in self.jiffybox_instance_states:
            return

        # Select the best destination address: prefer the box name, fall
        # back to the first public IP
        dest = None
        if 'name' in instance:
            dest = instance['name']
        elif 'ips' in instance and 'public' in instance['ips']:
            dest = instance['ips']['public'][0]

        if not dest:
            # Skip instances we cannot address
            return

        # if we only want to include hosts that match a pattern, skip those that don't
        if self.pattern_include and not self.pattern_include.match(dest):
            return

        # if we need to exclude hosts that match a pattern, skip those
        if self.pattern_exclude and self.pattern_exclude.match(dest):
            return

        # Add to index
        self.index[dest] = ['all', instance['id']]

        # Inventory: group by the "ansibleGroups" metadata tag
        if 'metadata' in instance and 'ansibleGroups' in instance['metadata']:
            v = instance['metadata']['ansibleGroups']
            if self.expand_csv_tags and v and ',' in v:
                # list comprehension instead of map() so this stays a real
                # list on Python 3
                values = [x.strip() for x in v.split(',')]
            else:
                values = [v]
            for v in values:
                self.push(self.inventory, self.to_safe(v), dest)

        # Global Tag: tag all JiffyBox instances
        self.push(self.inventory, 'jiffybox', dest)

        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)

    def push(self, my_dict, key, element):
        ''' Push an element onto an array that may not have been defined in
        the dict '''
        group_info = my_dict.setdefault(key, [])
        if isinstance(group_info, dict):
            host_list = group_info.setdefault('hosts', [])
            host_list.append(element)
        else:
            group_info.append(element)

    def get_host_info_dict_from_instance(self, instance):
        '''Map raw instance fields to "jiffybox_"-prefixed host variables.'''
        instance_vars = {}
        for key in instance:
            value = instance[key]
            key = self.to_safe('jiffybox_' + key)

            # Handle complex types
            if isinstance(value, (int, bool)):
                instance_vars[key] = value
            elif key == 'jiffybox_ips':
                instance_vars[key] = value
            elif isinstance(value, six.string_types):
                instance_vars[key] = value.strip()
            elif value is None:
                instance_vars[key] = ''
            elif key == 'jiffybox_metadata':
                for k, v in value.items():
                    if self.expand_csv_tags and ',' in v:
                        # list() so the value stays JSON serialisable on
                        # Python 3 (map() returns an iterator there)
                        v = [x.strip() for x in v.split(',')]
                    key = self.to_safe('jiffybox_metadata_' + k)
                    instance_vars[key] = v
            else:
                # Other complex types (nested dicts etc.) are skipped
                pass

        return instance_vars

    def get_host(self, host):
        ''' Resolve a host name to its cached instance record.

        NOTE: like the original, this reads self.args.host rather than the
        host parameter; the two are always the same for CLI invocations.
        '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if self.args.host not in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if self.args.host not in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        (region, instance_id) = self.index[self.args.host]

        return self.get_instance(region, instance_id)

    def set_host_groups(self):
        ''' Add host or hosts to a group '''

        instance = self.get_host(self.args.host)
        connect = self.connect()
        connect.set_metadata(instance, 'ansibleGroups', self.args.set_host_groups)
        return 'ok'

    def get_host_info(self):
        ''' Get variables about a specific host '''

        instance = self.get_host(self.args.host)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)


class JiffyBoxConnect(object):
    '''Minimal REST client for the JiffyBox API.'''

    # System wide location of the API credentials
    configPath = '/etc/jiffybox.cfg'

    def __init__(self):
        self._readConfig()
        self.url = 'https://api.jiffybox.de/' + self.api_token + '/v1.0/'

    def get_all_instances(self):
        '''Return all boxes, keyed by their ID.'''
        return self._request('jiffyBoxes')

    def set_metadata(self, instance, key, value):
        '''Store a single metadata key/value pair on the given box.'''
        self._request('jiffyBoxes/' + str(instance['id']), {'metadata[' + key + ']': value})

    def _readConfig(self):
        '''Load the API token from the credentials file.

        Raises AnsibleError when no token can be found.
        '''
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()
        config.read(self.configPath)

        # Find the best section: an explicit profile wins over the default
        profile = os.environ.get('JIFFYBOX_PROFILE', None)
        if profile:
            section = 'profile ' + profile
        else:
            section = 'Credentials'
        if config.has_option(section, 'api_token'):
            self.api_token = config.get(section, 'api_token')
        else:
            raise ae('Can not find credentials')

    def _request(self, path, data=None, method='GET'):
        '''Perform an API call and return the decoded "result" payload.

        Raises AnsibleError when the transport fails or the API reports an
        error in its response envelope.
        '''
        encoder = json.JSONEncoder()
        postData = {}

        if data:
            method = 'POST'
            for key in data:
                item = data.get(key)
                if isinstance(item, (list, dict)):
                    if len(item) > 0:
                        item = encoder.encode(item)
                # "unicode" only exists on Python 2; six covers both worlds
                if isinstance(item, (int, bool)) or isinstance(item, six.text_type):
                    item = str(item)
                if item and isinstance(item, str) and len(item) > 0:
                    postData[key] = item

        try:
            if method == 'GET':
                request_result = requests.get(self.url + path)
            elif method == 'POST':
                # The JiffyBox API updates resources via HTTP PUT
                request_result = requests.put(self.url + path, data=postData)
            elif method == 'DELETE':
                request_result = requests.delete(self.url + path)
            else:
                raise ae('Unsupported HTTP method %s' % method)
        except requests.exceptions.RequestException:
            # was "except ae, e": Python 2-only syntax, and requests never
            # raises AnsibleError -- catch its own exception class instead
            raise ae('No result from JiffyBox API')

        # Response.json() decodes the body correctly on both Python versions
        # (JSONDecoder().decode() chokes on bytes under Python 3)
        content = request_result.json()
        if not content['result']:
            msg = content['messages']
            raise ae('%s' % msg)

        return content['result']


# Run the script only when executed directly (Ansible invokes inventory
# scripts as programs, so behaviour there is unchanged)
if __name__ == '__main__':
    JiffyBoxInventory()
#!/usr/bin/env python

'''
Script to start the inithost playbook
=====================================

CLI front-end that collects host, cloud and credential options and hands
them to the inithost playbook as --extra-vars.
'''

import os
import argparse
from subprocess import call

parser = argparse.ArgumentParser(description='Launch the Ansible playbook inithost')
parser.add_argument('host',
                    help='Name of the host to be created')

cloudGroup = parser.add_argument_group('Cloud', 'When installing through a cloud provider, use these options')
cloudGroup.add_argument('--cloud', action='store', choices=['ec2', 'jiffybox'],
                        help='The cloud provider')
cloudGroup.add_argument('--groups', action='store',
                        help='The inventory groups for the host as comma separated list')

nonCloudGroup = parser.add_argument_group('Non-Cloud', 'When installing directly, use these options')
nonCloudGroup.add_argument('--ip', action='store',
                           help='The ip address of the host, only required if not installed in the cloud')

parser.add_argument('--user', action='store', default=os.environ['USER'],
                    help='The username of the first created admin user')
parser.add_argument('--root', action='store', default='root',
                    help='The username to use initially')
parser.add_argument('--key', action='store',
                    help='File name with the private key to be used initially')

# Unknown options are passed through to ansible-playbook
args, extras = parser.parse_known_args()

path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep

# Variables handed to the playbook
extraVars = ['host=' + args.host, 'distribute_keys=true', 'inituser=' + args.root, 'firstuser=' + args.user]
if args.cloud:
    extraVars.append('cloud=' + args.cloud)
if args.groups:
    extraVars.append('initgroups=' + args.groups)
if args.ip:
    extraVars.append('inithostip=' + args.ip)

# Delegate to the Python wrapper: ansible-playbook.sh was removed in this
# release, so invoking it here would fail with "No such file or directory"
cmd = [path + 'ansible-playbook.py', 'inithost', '--extra-vars=' + ' '.join(extraVars)]

if args.key:
    cmd.append('--private-key=' + args.key)
else:
    # Without a key file we have to ask for the initial SSH password
    cmd.append('--ask-pass')

for extra in extras:
    cmd.append(extra)

call(cmd)
#!/usr/bin/env python

'''
Script to start the removehost playbook
=======================================

CLI front-end that hands the host (and optionally the cloud provider)
to the removehost playbook as --extra-vars.
'''

import os
import argparse
from subprocess import call

parser = argparse.ArgumentParser(description='Launch the Ansible playbook removehost')
parser.add_argument('host',
                    help='Name of the host to be removed')

parser.add_argument('--cloud', action='store', choices=['ec2', 'jiffybox'],
                    help='The cloud provider')

# Unknown options are passed through to ansible-playbook
args, extras = parser.parse_known_args()

path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep

# Variables handed to the playbook
extraVars = ['host=' + args.host]
if args.cloud:
    extraVars.append('cloud=' + args.cloud)

# Delegate to the Python wrapper: ansible-playbook.sh was removed in this
# release, so the .py wrapper has to be called instead
cmd = [path + 'ansible-playbook.py', 'removehost', '--extra-vars=' + ' '.join(extraVars)]
for extra in extras:
    cmd.append(extra)

call(cmd)
#!/usr/bin/env python

'''
Wrapper for the sanity playbook
===============================

Forwards the chosen maintenance mode to the generic playbook wrapper as
an Ansible tag; any unrecognised options are passed through untouched.
'''

import os
import argparse
from subprocess import call

arg_parser = argparse.ArgumentParser(description='Wrapper for the sanity playbook')
arg_parser.add_argument('mode', choices=['check', 'upgrade', 'reboot'],
                        help='Mode for the sanity play')

parsed, passthrough = arg_parser.parse_known_args()

base_dir = os.path.dirname(os.path.realpath(__file__)) + os.path.sep

# Delegate to the generic wrapper, selecting the requested mode by tag
command = [base_dir + 'ansible-playbook.py', 'sanity', '--tags=' + parsed.mode]
command.extend(passthrough)

call(command, cwd=base_dir)
#!/usr/bin/env python

'''
Setup and update the Paragon wrapper for Ansible
================================================

Runs the setup_local playbook against the local inventory, passing the
company name through as an extra variable.
'''

import os
import argparse
from subprocess import call

# Description fixed: it previously said "Wrapper for the role playbook",
# a copy-paste leftover from role.py
parser = argparse.ArgumentParser(description='Setup and update the Paragon wrapper for Ansible')
parser.add_argument('company',
                    help='Name of the company in lowercase to specify the inventory')

# Unknown options are passed through to ansible-playbook
args, extras = parser.parse_known_args()

path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep

# Start building command: --local restricts the play to localhost
cmd = [path + 'ansible-playbook.py', 'setup_local', '--local', '--extra-vars=company=' + args.company]

# Append more CLI options
for extra in extras:
    cmd.append(extra)

call(cmd, cwd=path)
--git a/setup_local.yml b/setup_local.yml index a7ddd6dbb1794eda76d4c1b58dca9f1f76bc09f9..76946f0d3ba35a5ab07a3d1cc620e43711702cad 100644 --- a/setup_local.yml +++ b/setup_local.yml @@ -63,6 +63,8 @@ value: "{{ lookup('env','HOME') }}/.ansible/facts" - option: "fact_caching_timeout" value: "86400" + - option: "action_plugins" + value: "{{ lookup('env','PWD') }}/plugins/_action" - option: "scp_if_ssh" value: "True" section: "ssh_connection"