Commit 2209c1f0 authored by jurgenhaas

Merge branch 'release/1.2.0.0'

parents 0d6a17d7 98b1f353
Tags 1.2.0.0
@@ -2,3 +2,5 @@
archive
inventory
*.pyc
2015-11-23 version 1.2
======================
Replace shell scripts with Python scripts
Replace inithost and removehost shell scripts with Python scripts
Ignore pyc files
Dynamic Jiffybox inventory
Move dynamic inventory scripts to a central place
Fine tune inithost and removehost playbooks with the cloud as an option
Change location of plugins during runtime
Update documentation
Optimize initial setup by using submodules
Exclude localhost from initial provisioning
Documentation about boto when using AWS inventory
AWS provisioning
Support drush commands during deploy
2015-11-16 version 1.1
======================
Enhance deploy playbook
#!/usr/bin/env python
'''
Wrapper for the official Ansible Playbook application
=====================================================
'''
import os
import argparse
from subprocess import call
def callAnsible(playbook, inventory, args):
cmd = ['ansible-playbook', playbook + '.yml', '-i', inventory]
for arg in args:
cmd.append(arg)
call(cmd, cwd=path)
parser = argparse.ArgumentParser(description='Ansible Playbook wrapper by Paragon')
parser.add_argument('playbook',
help='Playbook to execute')
parser.add_argument('--local', action='store_true', default=False,
help='Set to true to work with the localhost only')
parser.add_argument('--custom', action='store_true', default=False,
help='Set to true to only execute playbook in the inventory directory')
args, extras = parser.parse_known_args()
path = os.path.dirname(os.path.realpath(__file__)) + '/'
pathSecrets = os.path.realpath(os.environ['HOME']) + '/.ansible/secrets'
pathCustom = path
# Start building command
cmd = []
# Determine inventory file
if args.local:
inventory = path + 'local.inventory'
pathCustom = False
elif os.path.exists(path + 'inventory/inventory'):
inventory = path + 'inventory/inventory'
pathCustom = path + 'inventory/'
else:
inventory = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/inventory'
pathCustom = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/'
if not os.path.exists(inventory):
raise SystemError('Inventory %s not found' % inventory)
# Check for the vault
if os.path.exists(pathSecrets):
cmd.append('-e')
cmd.append('@' + pathSecrets)
else:
cmd.append('--ask-sudo-pass')
# Append more CLI options
for extra in extras:
cmd.append(extra)
# Run the main playbook if not in custom mode
if not args.custom:
callAnsible(args.playbook, inventory, cmd)
# Optionally also run the custom playbook
if pathCustom and os.path.exists(pathCustom + args.playbook + '.yml'):
callAnsible(pathCustom + args.playbook, inventory, cmd)
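As a rough usage sketch of the Python playbook wrapper above (the file name ansible-playbook.py and the playbook name deploy are assumptions, not part of this commit); any extra options survive parse_known_args and are passed straight to ansible-playbook:
# Hypothetical invocations of the playbook wrapper
./ansible-playbook.py deploy               # run deploy.yml against the detected inventory
./ansible-playbook.py deploy --local       # use local.inventory and skip the custom playbook
./ansible-playbook.py deploy --custom -vv  # run only the playbook from the inventory directory, passing -vv through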
#!/bin/bash
if [ -d ~/.ansible ]
then
SECRETS="$( cd ~/.ansible && pwd )/secrets"
fi
cd $( cd $(dirname $(readlink -f $0)) ; pwd )
if [ "$1" == "local" ]
then
CUSTOM_DIR=false
INVENTORY=./local.inventory
shift
else
if [ -f ./inventory/inventory ]
then
CUSTOM_DIR=./inventory
else
CUSTOM_DIR=./inventory/$ANSIBLE_COMPANY
fi
INVENTORY=$CUSTOM_DIR/inventory
fi
if [ "$1" == "" ]
then
echo "Usage"
echo "ansible-playbook.sh PLAYBOOK ..."
exit 101;
fi
if [ "$1" == "custom" ]
then
shift
PLAYBOOK_PREFIX=$CUSTOM_DIR/
else
PLAYBOOK_PREFIX=""
fi
PLAYBOOK=$PLAYBOOK_PREFIX$1.yml
shift
if [ ! -f $INVENTORY ]
then
echo "The inventory $INVENTORY does not exist!"
exit 102;
fi
if [ ! -f $PLAYBOOK ]
then
echo "The playbook $PLAYBOOK does not exist!"
exit 103;
fi
if [ -n "$SECRETS" ] && [ -f "$SECRETS" ]
then
ansible-playbook $PLAYBOOK -i $INVENTORY -e @$SECRETS "$@"
else
ansible-playbook $PLAYBOOK -i $INVENTORY --ask-sudo-pass "$@"
fi
if [ "$CUSTOM_DIR" != "false" ] && [ "$CUSTOM_DIR" != "$PLAYBOOK_PREFIX" ] && [ -f "$CUSTOM_DIR/$PLAYBOOK" ]
then
if [ -n "$SECRETS" ] && [ -f "$SECRETS" ]
then
ansible-playbook $CUSTOM_DIR/$PLAYBOOK -i $INVENTORY -e @$SECRETS "$@"
else
ansible-playbook $CUSTOM_DIR/$PLAYBOOK -i $INVENTORY --ask-sudo-pass "$@"
fi
fi
#!/usr/bin/env python
'''
Wrapper for the official Ansible application
============================================
'''
import os
import argparse
from subprocess import call
parser = argparse.ArgumentParser(description='Ansible wrapper by Paragon')
parser.add_argument('host',
help='Host name or pattern')
args, extras = parser.parse_known_args()
path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
pathSecrets = os.path.realpath(os.environ['HOME']) + '/.ansible/secrets'
# Start building command
cmd = ['ansible', args.host]
# Determine inventory file
if os.path.exists(path + 'inventory/inventory'):
inventory = path + 'inventory/inventory'
else:
inventory = path + 'inventory/' + os.environ['ANSIBLE_COMPANY'] + '/inventory'
if not os.path.exists(inventory):
raise SystemError('Inventory %s not found' % inventory)
cmd.append('-i')
cmd.append(inventory)
# Check for the vault
if os.path.exists(pathSecrets):
cmd.append('-e')
cmd.append('@' + pathSecrets)
# Append more CLI options
for extra in extras:
cmd.append(extra)
call(cmd, cwd=path)
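A minimal usage sketch for the ad-hoc wrapper above (the file name ansible.py and the group name webservers are illustrative assumptions); everything after the host pattern is handed through to ansible unchanged:
# Hypothetical invocations of the ad-hoc Ansible wrapper
./ansible.py all -m ping                       # ping every host in the detected inventory
./ansible.py webservers -m shell -a 'uptime'   # run a command against an example group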
#!/bin/bash
if [ -d ~/.ansible ]
then
SECRETS="$( cd ~/.ansible && pwd )/secrets"
fi
cd $( cd $(dirname $(readlink -f $0)) ; pwd )
if [ -f ./inventory/inventory ]
then
INVENTORY=./inventory/inventory
else
INVENTORY=./inventory/$ANSIBLE_COMPANY/inventory
fi
if [ "$3" == "" ]
then
echo "Usage"
echo "ansible.sh HOSTS COMMAND OPTIONS"
exit 101;
fi
HOSTS=$1
shift
if [ ! -f $INVENTORY ]
then
echo "The inventory $INVENTORY does not exist!"
exit 102;
fi
if [ -f "$SECRETS" ]
then
ansible $HOSTS -i $INVENTORY -e @$SECRETS "$@"
else
ansible $HOSTS -i $INVENTORY "$@"
fi
##
# Dummy Ansible playbook
---
# file: cloud.yml
- name: "No cloud action required"
hosts: "localhost"
connection: local
gather_facts: false
sudo: no
tasks: []
# Ansible EC2 external inventory script settings
#
[ec2]
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make requests to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = us-east-1
regions_exclude = us-gov-west-1,cn-north-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
# may optionally be used; however the boto instance variables hold precedence
# in the event of a collision.
destination_variable = public_dns_name
# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
# WARNING: instances in a private VPC _without_ a public IP address
# will not be listed in the inventory until you set:
# vpc_destination_variable = 'private_ip_address'
vpc_destination_variable = AnsibleHost
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# To exclude RDS instances from the inventory, uncomment and set to False.
rds = False
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
elasticache = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# By default, only EC2 instances in the 'running' state are returned. Set
# 'all_instances' to True to return all instances regardless of state.
all_instances = False
# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no nodes will be returned from
# unavailable clusters, regardless of their state and of what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace dashes in tags when creating groups to avoid issues with Ansible
replace_dash_in_groups = False
# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = True
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = False
group_by_region = False
group_by_availability_zone = False
group_by_ami_id = False
group_by_instance_type = False
group_by_key_pair = False
group_by_vpc_id = False
group_by_security_group = False
group_by_tag_keys = False
group_by_tag_values = True
group_by_tag_none = False
group_by_route53_names = False
group_by_rds_engine = False
group_by_rds_parameter_group = False
group_by_elasticache_engine = False
group_by_elasticache_cluster = False
group_by_elasticache_parameter_group = False
group_by_elasticache_replication_group = False
# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*
# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*
# Instance filters can be used to control which instances are retrieved for
# inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '=', to list multiple filters use
# a list separated by commas. See examples below.
# Retrieve only instances with (key=value) env=staging tag
# instance_filters = tag:env=staging
# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers
# Retrieve only t1.micro instances OR instances with tag env=staging
# instance_filters = instance-type=t1.micro,tag:env=staging
# You can use wildcards in filter values as well. The example below lists instances
# whose tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
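This ec2.ini drives the Ansible EC2 external inventory script (ec2.py). A brief sketch of how such an inventory is typically exercised; the inventory/ path is an assumption, and boto credentials are assumed to be available in the environment or a boto profile:
# Hypothetical invocations of the EC2 dynamic inventory
./inventory/ec2.py --list              # dump all matching instances as JSON, using the cache if still valid
./inventory/ec2.py --refresh-cache     # force fresh API calls and rewrite the cache files
ansible -i inventory/ec2.py all -m ping   # use the script directly as an inventory source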
# Ansible JiffyBox external inventory script settings
#
[jiffybox]
cache_max_age = 300
cache_path = ~/.ansible/tmp
expand_csv_tags = True
replace_dash_in_groups = False
#!/usr/bin/env python
'''
JiffyBox external inventory script
==================================
'''
import sys
import os
import argparse
import re
import requests
from time import time
import six
from six.moves import configparser
from ansible.errors import AnsibleError as ae
try:
import json
except ImportError:
import simplejson as json
class JiffyBoxInventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.set_host_groups:
data_to_print = self.set_host_groups()
elif self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on JiffyBox')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to JiffyBox (default: False - use cache files)')
parser.add_argument('--set-host-groups', action='store',
help='Set the inventory groups for the host(s)')
self.args = parser.parse_args()
def read_settings(self):
''' Reads the settings from the jiffybox.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
jiffybox_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jiffybox.ini')
jiffybox_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('JIFFYBOX_INI_PATH', jiffybox_default_ini_path)))
config.read(jiffybox_ini_path)
if config.has_option('jiffybox', 'expand_csv_tags'):
self.expand_csv_tags = config.getboolean('jiffybox', 'expand_csv_tags')
else:
self.expand_csv_tags = False
# Replace dash or not in group names
if config.has_option('jiffybox', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('jiffybox', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Return all JiffyBox instances?
if config.has_option('jiffybox', 'all_instances'):
self.all_instances = config.getboolean('jiffybox', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
jiffybox_valid_instance_states = [
'READY',
'CREATING',
'UPDATING',
'CHANGING PLAN',
'STATUS_READY',
'STATUS_CREATING',
'STATUS_UPDATING'
]
self.jiffybox_instance_states = []
if self.all_instances:
self.jiffybox_instance_states = jiffybox_valid_instance_states
elif config.has_option('jiffybox', 'instance_states'):
for instance_state in config.get('jiffybox', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in jiffybox_valid_instance_states:
continue
self.jiffybox_instance_states.append(instance_state)
else:
self.jiffybox_instance_states = ['READY']
# Cache related
cache_dir = os.path.expanduser(config.get('jiffybox', 'cache_path'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-jiffybox.cache"
self.cache_path_index = cache_dir + "/ansible-jiffybox.index"
self.cache_max_age = config.getint('jiffybox', 'cache_max_age')
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('jiffybox', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get('jiffybox', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
self.get_instances()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_instances(self):
''' Makes a JiffyBox API call to the list of instances '''
try:
conn = self.connect()
instances = conn.get_all_instances()
for instance in instances:
self.add_instance(instances[instance])
except Exception as e:
self.fail_with_error(str(e), 'getting JiffyBox instances')
def connect(self):
connect_args = {}
conn = JiffyBoxConnect()
# JiffyBoxConnect raises an error itself if no credentials can be read; keep a guard anyway
if conn is None:
self.fail_with_error("Something went wrong during connection")
return conn
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def get_instance(self, region, instance_id):
conn = self.connect()
instances = conn.get_all_instances()
for instance in instances:
if int(instance) == instance_id:
return instances[instance]
raise ae('Given host does not exist')
def add_instance(self, instance):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if 'status' not in instance or instance['status'] not in self.jiffybox_instance_states:
return
# Select the best destination address
dest = None
if 'name' in instance:
dest = instance['name']
elif 'ips' in instance and 'public' in instance['ips']:
dest = instance['ips']['public'][0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(dest):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(dest):
return
# Add to index
self.index[dest] = ['all', instance['id']]
# Inventory: Group by tag keys
if 'metadata' in instance and 'ansibleGroups' in instance['metadata']:
v = instance['metadata']['ansibleGroups']
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
self.push(self.inventory, self.to_safe(v), dest)
# Global Tag: tag all JiffyBox instances
self.push(self.inventory, 'jiffybox', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in instance:
value = instance[key]
key = self.to_safe('jiffybox_' + key)
# Handle complex types
if type(value) in [int, bool]:
instance_vars[key] = value
elif key == 'jiffybox_ips':
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'jiffybox_metadata':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = map(lambda x: x.strip(), v.split(','))
key = self.to_safe('jiffybox_metadata_' + k)
instance_vars[key] = v
else:
pass
return instance_vars
def get_host(self, host):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
return self.get_instance(region, instance_id)
def set_host_groups(self):
''' Add host or hosts to a group '''
instance = self.get_host(self.args.host)
connect = self.connect()
connect.set_metadata(instance, 'ansibleGroups', self.args.set_host_groups)
return 'ok'
def get_host_info(self):
''' Get variables about a specific host '''
instance = self.get_host(self.args.host)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
class JiffyBoxConnect(object):
configPath = '/etc/jiffybox.cfg'
def __init__(self):
self._readConfig()
self.url = 'https://api.jiffybox.de/' + self.api_token + '/v1.0/'
def get_all_instances(self):
return self._request('jiffyBoxes')
def set_metadata(self, instance, key, value):
self._request('jiffyBoxes/' + str(instance['id']), {'metadata[' + key + ']': value})
def _readConfig(self):
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
config.read(self.configPath)
# Find the best section
profile = os.environ.get('JIFFYBOX_PROFILE', None)
if profile:
section = 'profile ' + profile
else:
section = 'Credentials'
if config.has_option(section, 'api_token'):
self.api_token = config.get(section, 'api_token')
else:
raise ae('Can not find credentials')
def _request(self, path, data = None, method = 'GET'):
encoder = json.JSONEncoder()
postData = {}
if data:
method = 'POST'
for key in data:
item = data.get(key)
if type(item) is list or type(item) is dict:
if len(item) > 0:
item = encoder.encode(item)
# six.text_type covers unicode on Python 2 and str on Python 3
if isinstance(item, (int, bool)) or isinstance(item, six.text_type):
item = str(item)
if item and isinstance(item, str) and len(item) > 0:
postData[key] = item
request_result = {}
try:
if method == 'GET':
request_result = requests.get(self.url + path)
elif method == 'POST':
request_result = requests.put(self.url + path, data = postData)
elif method == 'DELETE':
request_result = requests.delete(self.url + path)
except requests.exceptions.RequestException:
raise ae('No result from JiffyBox API')
decoder = json.JSONDecoder()
content = decoder.decode(request_result.content)
if not content['result']:
msg = content['messages']
raise ae('%s' % msg)
return content['result']
# Run the script
JiffyBoxInventory()
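A short sketch of how the JiffyBox inventory script above is typically called (the file name jiffybox.py and its location are assumptions; the API token must be present in /etc/jiffybox.cfg, optionally in a section selected via JIFFYBOX_PROFILE):
# Hypothetical invocations of the JiffyBox dynamic inventory
./inventory/jiffybox.py --list                      # full inventory as JSON, cached for cache_max_age seconds
./inventory/jiffybox.py --refresh-cache --list      # bypass the cache and query the API again
./inventory/jiffybox.py --host box01                # variables for one host; 'box01' is an example name
ansible -i inventory/jiffybox.py jiffybox -m ping   # every instance is also pushed into the 'jiffybox' group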
##
# Ansible playbook for managing an ec2 inventory
---
# file: ec2.yml
- name: "EC2: Launch a new host"
hosts: "localhost"
connection: local
gather_facts: false
sudo: no
tasks:
- name: "Create the new instance"
ec2:
instance_tags: '{"Name":"Ansible-Host-{{ host }}","AnsibleHost":"{{ host }}","AnsibleGroups":"{{ initgroups }}"}'
assign_public_ip: yes
group_id: "{{ ec2_group_id }}"
key_name: "{{ ec2_key_name }}"
image: "{{ ec2_ami_id }}"
instance_type: "{{ ec2_instance_type }}"
vpc_subnet_id: "{{ ec2_subnet_id }}"
region: "{{ ec2_region }}"
state: present
wait: yes
register: ec2
- name: "Waiting for the new instance(s) to get up and running"
ec2:
instance_ids: "{{ ec2.instance_ids }}"
instance_type: "{{ ec2_instance_type }}"
region: "{{ ec2_region }}"
state: running
wait: yes
- name: "Add new instance(s) to the inventory"
add_host:
hostname="{{ host }}"
static_ipv4="{{ item.public_ip }}"
groups="{{ initgroups }}"
with_items: ec2.instances
- name: "Waiting for the SSH service to become available"
wait_for:
host="{{ item.public_ip }}"
port=22
delay=10
timeout=120
state=present
with_items: ec2.instances
##
# Ansible playbook to update local hosts file
---
# file: hosts.yml
- name: "Update local hosts file"
hosts: "all"
connection: local
gather_facts: false
sudo: yes
tasks:
- name: "Build hosts file"
lineinfile:
dest=/etc/hosts
regexp='.*{{ inventory_hostname }}$'
line="{{ static_ipv4|default('') }} {{ inventory_hostname }}"
state=present
#!/usr/bin/env python
'''
Script to start the inithost playbook
=====================================
'''
import os
import argparse
from subprocess import call
parser = argparse.ArgumentParser(description='Launch the Ansible playbook inithost')
parser.add_argument('host',
help='Name of the host to be created')
cloudGroup = parser.add_argument_group('Cloud', 'When installing through a cloud provider, use these options')
cloudGroup.add_argument('--cloud', action='store', choices=['ec2', 'jiffybox'],
help='The cloud provider')
cloudGroup.add_argument('--groups', action='store',
help='The inventory groups for the host as comma separated list')
nonCloudGroup = parser.add_argument_group('Non-Cloud', 'When installing directly, use these options')
nonCloudGroup.add_argument('--ip', action='store',
help='The IP address of the host, only required if the host is not installed in the cloud')
parser.add_argument('--user', action='store', default=os.environ['USER'],
help='The username of the first created admin user')
parser.add_argument('--root', action='store', default='root',
help='The username to use initially')
parser.add_argument('--key', action='store',
help='File name with the private key to be used initially')
args, extras = parser.parse_known_args()
path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
extraVars = ['host=' + args.host, 'distribute_keys=true', 'inituser=' + args.root, 'firstuser=' + args.user]
if args.cloud:
extraVars.append('cloud=' + args.cloud)
if args.groups:
extraVars.append('initgroups=' + args.groups)
if args.ip:
extraVars.append('inithostip=' + args.ip)
cmd = [path + 'ansible-playbook.sh', 'inithost', '--extra-vars=' + ' '.join(extraVars)]
if args.key:
cmd.append('--private-key=' + args.key)
else:
cmd.append('--ask-pass')
for extra in extras:
cmd.append(extra)
call(cmd)
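Rough invocation examples for the inithost script above (the file name inithost.py, the host names, groups, and IP are illustrative only):
# Hypothetical invocations of the inithost wrapper
./inithost.py web01 --ip 203.0.113.10 --key ~/.ssh/id_rsa        # existing machine with a known address
./inithost.py web02 --cloud jiffybox --groups webserver,staging  # let the cloud role create the box first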
#!/bin/bash
cd $( cd $(dirname $(realpath $0)) ; pwd )
if [ "$2" == "" ]
then
echo "Usage"
echo "inithost.sh HOST IP [USER [KEYFILE [GROUPS]]]"
exit 101;
fi
CLOUD=cloud
HOST=$1
IP=inithostip=$2
ROOT=root
KEYFILE=--ask-pass
INITGROUPS=inventory
shift
shift
if [ "$1" != "" ]
then
ROOT=$1
shift
fi
if [ "$1" != "" ]
then
KEYFILE=--private-key=$1
shift
fi
if [ "$1" != "" ]
then
INITGROUPS=$1
shift
fi
if [ "$IP" == "inithostip=ec2" ]
then
CLOUD=ec2
IP=
fi
./ansible-playbook.sh inithost --extra-vars="cloud=$CLOUD host=$HOST inituser=$ROOT firstuser=$USER initgroups=$INITGROUPS $IP distribute_keys=true" $KEYFILE "$@"
@@ -4,8 +4,13 @@
---
# file: inithost.yml
# Check if we need to launch an instance there first
- include: "{{ cloud }}.yml"
- name: "Prepare cloud"
hosts: "localhost"
connection: local
gather_facts: false
sudo: no
roles:
- { role: cloud, mode: inithost }
- name: "Prepare 1"
hosts: "{{ host }}"
../jiffybox/action_plugins/jiffybox.py
\ No newline at end of file
../serverdensity/action_plugins/serverdensity.py
\ No newline at end of file
#!/usr/bin/env python
'''
Script to start the removehost playbook
=======================================
'''
import os
import argparse
from subprocess import call
parser = argparse.ArgumentParser(description='Launch the Ansible playbook removehost')
parser.add_argument('host',
help='Name of the host to be removed')
parser.add_argument('--cloud', action='store', choices=['ec2', 'jiffybox'],
help='The cloud provider')
args, extras = parser.parse_known_args()
path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
extraVars = ['host=' + args.host]
if args.cloud:
extraVars.append('cloud=' + args.cloud)
cmd = [path + 'ansible-playbook.sh', 'removehost', '--extra-vars=' + ' '.join(extraVars)]
for extra in extras:
cmd.append(extra)
call(cmd)
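And a matching sketch for the removehost script above (file and host names again assumed):
# Hypothetical invocation of the removehost wrapper
./removehost.py web02 --cloud jiffybox   # runs the removehost playbook, which asks for confirmation first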
##
# Ansible playbook for removing a host from the cloud
---
# file: removehost.yml
- name: "Prepare cloud"
hosts: "{{ host }}"
connection: local
gather_facts: false
sudo: no
vars_prompt:
- name: "are_you_sure"
prompt: "Really?"
roles:
- { role: cloud, mode: removehost }