Correct handling of map in template

Dale McDiarmid 2015-11-25 15:28:09 +00:00
parent d34d5e6103
commit d25fc792b6
19 changed files with 116 additions and 447 deletions

View file

@@ -18,7 +18,6 @@ platforms:
 - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible
 - apt-get update && apt-get -y -q install ansible python-apt python-pycurl
 use_sudo: false
-#run_command: "-e ENV ANSIBLE_HASH_BEHAVIOUR=merge"
 - name: debian-7
 driver_config:
 image: electrical/debian:7.3
@@ -51,8 +50,6 @@ platforms:
 - sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
 - sed -ri 's/^#?UsePAM .*/UsePAM no/' /etc/ssh/sshd_config
 - yum -y install initscripts
-# - BUSSER_ROOT="/tmp/verifier" GEM_HOME="/tmp/verifier/gems" GEM_PATH="/tmp/verifier/gems" GEM_CACHE="/tmp/verifier/gems/cache" gem install --no-rdoc --no-ri rake
-# - chown kitchen:kitchen /tmp/verifier -R
 - yum clean all
 run_command: "/usr/sbin/init"
 privileged: true

View file

@@ -1,2 +1 @@
 [defaults]
-hash_behaviour = merge

View file

@@ -10,8 +10,8 @@ es_templates: false
 es_user: elasticsearch
 es_group: elasticsearch
-es_config: {
-  "cluster_name": elasticsearch,
-  "multicast_enabled": false,
-  "node_name_prefix": ""
-}
+#Need to provide default directories
+es_pid_dir: "/var/run/elasticsearch"
+es_data_dir: "/var/lib/elasticsearch"
+es_log_dir: "/var/log/elasticsearch"
+es_work_dir: "/tmp/elasticsearch"
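With es_config now holding real Elasticsearch setting names and the directory roots exposed as overridable defaults, a play can set both directly. A minimal sketch along the lines of the test playbooks later in this diff (the cluster name and the data-dir override here are purely illustrative):

- hosts: localhost
  roles:
    - role: elasticsearch
      es_instance_name: "node1"
      es_config:
        cluster.name: "my-cluster"
        http.port: 9201
        discovery.zen.ping.multicast.enabled: false
        discovery.zen.ping.unicast.hosts: "localhost:9301"
  vars:
    es_data_dir: "/opt/elasticsearch/data"   # replaces the new default of /var/lib/elasticsearch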

View file

@@ -1,10 +1,16 @@
 # Check for mandatory parameters
-- fail: msg="Parameter 'es_http_port' must be defined when multicast is disabled"
-  when: es_config['multicast_enabled'] == false and es_config['http_port'] is not defined
-- fail: msg="Parameter 'es_transport_tcp_port' must be defined when multicast is disabled"
-  when: es_config['multicast_enabled'] == false and es_config['transport_tcp_port'] is not defined
-- fail: msg="Parameter 'es_unicast_hosts' must be defined when multicast is disabled"
-  when: es_config['multicast_enabled'] == false and es_config['unicast_hosts'] is not defined
+- fail: msg="es_instance_name must be specified"
+  when: es_instance_name is not defined
+- fail: msg="Parameter 'http.port' must be defined when multicast is disabled"
+  when: es_config['discovery.zen.ping.multicast.enabled'] == false and es_config['http.port'] is not defined
+- fail: msg="Parameter 'transport.tcp.port' must be defined when multicast is disabled"
+  when: es_config['discovery.zen.ping.multicast.enabled'] == false and es_config['transport.tcp.port'] is not defined
+- fail: msg="Parameter 'discovery.zen.ping.unicast.hosts' must be defined when multicast is disabled"
+  when: es_config['discovery.zen.ping.multicast.enabled'] == false and es_config['discovery.zen.ping.unicast.hosts'] is not defined
+# If multicast is false OR if > 2.0 AND not defined as true (or explictly false)
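A sketch of how these guards now fire: a role entry that disables multicast but leaves out one of the required keys fails the play at this check (the ports here are only examples):

# Fails with "Parameter 'http.port' must be defined when multicast is disabled"
- role: elasticsearch
  es_instance_name: "node2"
  es_config:
    discovery.zen.ping.multicast.enabled: false
    transport.tcp.port: 9302
    discovery.zen.ping.unicast.hosts: "localhost:9301"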

View file

@@ -2,6 +2,15 @@
 # Configure Elasticsearch Node
+- set_fact: pid_dir={{es_pid_dir}}/{{inventory_hostname}}-{{ es_instance_name }}
+- set_fact: data_dir={{ es_data_dir }}/{{inventory_hostname}}-{{ es_instance_name }}
+- set_fact: log_dir={{ es_log_dir }}/{{inventory_hostname}}-{{ es_instance_name }}
+- set_fact: work_dir={{ es_work_dir }}/{{inventory_hostname}}-{{ es_instance_name }}
 # Create an instance specific default file
 - name: Copy Default File for Instance
   command: creates={{instance_default_file}} cp "{{default_file}}" "{{instance_default_file}}"
@@ -44,46 +53,37 @@
   when: es_max_open_files is defined
   register: elasticsearch_configure
-#For directories we also use the {{ es_config['node_name_prefix'] }}{{inventory_hostname}} - this helps if we have a shared SAN. {{es_config['node_name_prefix']}}{{default_file | basename} could potentially be used -
-#init script effectively means this is the default.
+#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
 #Create PID directory
 - name: Create PID Directory
-  file: path={{pid_dir}}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}} state=directory owner={{ es_user }} group={{ es_group }}
+  file: path={{ pid_dir }} state=directory owner={{ es_user }} group={{ es_group }}
 - name: Configure PID directory
-  lineinfile: dest={{instance_default_file}} regexp="^PID_DIR" insertafter="^#PID_DIR" line="PID_DIR={{pid_dir}}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}}"
+  lineinfile: dest={{instance_default_file}} regexp="^PID_DIR" insertafter="^#PID_DIR" line="PID_DIR={{ pid_dir }}"
   register: elasticsearch_configure
-- set_fact: es_data_dir={{default_data_dir}}
-  when: es_data_dir is undefined
 #include the host name as potentially shared SAN
 - name: Create data dir
-  file: state=directory path={{ es_data_dir }}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }}
+  file: state=directory path={{ data_dir }} owner={{ es_user }} group={{ es_group }}
 - name: Configure data store
-  lineinfile: dest={{instance_default_file}} regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}}"
+  lineinfile: dest={{instance_default_file}} regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ data_dir }}"
   register: elasticsearch_configure
-- set_fact: es_work_dir={{default_work_dir}}
-  when: es_work_dir is undefined
 - name: Create work dir
-  file: state=directory path={{ es_work_dir }}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }}
+  file: state=directory path={{ work_dir }} owner={{ es_user }} group={{ es_group }}
 - name: Configure work directory
-  lineinfile: dest={{instance_default_file}} regexp="^WORK_DIR" insertafter="^#WORK_DIR" line="WORK_DIR={{ es_work_dir }}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}}"
+  lineinfile: dest={{instance_default_file}} regexp="^WORK_DIR" insertafter="^#WORK_DIR" line="WORK_DIR={{ work_dir }}"
   register: elasticsearch_configure
-- set_fact: es_log_dir={{default_log_dir}}
-  when: es_log_dir is undefined
 - name: Create log dir
-  file: state=directory path={{ es_log_dir }}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }}
+  file: state=directory path={{ log_dir }} owner={{ es_user }} group={{ es_group }}
 - name: Configure log directory
-  lineinfile: dest={{instance_default_file}} regexp="^LOG_DIR" insertafter="^#LOG_DIR" line="LOG_DIR={{ es_log_dir }}/{{ es_config['node_name_prefix'] }}{{inventory_hostname}}"
+  lineinfile: dest={{instance_default_file}} regexp="^LOG_DIR" insertafter="^#LOG_DIR" line="LOG_DIR={{ log_dir }}"
   register: elasticsearch_configure
 #required so that the ES_HOME does not change between instances
@@ -104,7 +104,7 @@
 #Apply changes to init script - NAME can be changed in Debian start script
 - name: Name Node in Init Script
-  lineinfile: dest={{instance_init_script}} regexp="^NAME" line="NAME={{es_config['node_name_prefix']}}{{default_file | basename}}"
+  lineinfile: dest={{instance_init_script}} regexp="^NAME" line="NAME={{es_instance_name}}_{{default_file | basename}}"
   register: elasticsearch_configure
   when: ansible_os_family == 'Debian'
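The practical effect of the new set_fact block above: every per-instance path is now keyed by {{inventory_hostname}}-{{ es_instance_name }} rather than by the old node_name_prefix. For the single-node test play (host localhost, instance node1) the facts resolve to the paths the serverspec files further down assert on:

pid_dir:  /var/run/elasticsearch/localhost-node1
data_dir: /var/lib/elasticsearch/localhost-node1
work_dir: /tmp/elasticsearch/localhost-node1
log_dir:  /var/log/elasticsearch/localhost-node1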

View file

@@ -1,4 +1,5 @@
 ---
+#TODO: How to handle in multi node
 - name: Copy templates to elasticsearch
   copy: src=templates dest=/etc/elasticsearch/

View file

@@ -1,10 +1,10 @@
 ---
+- set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
+- set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
+- set_fact: instance_config_directory={{ es_conf_dir }}/{{es_instance_name}}
 - debug: msg="Node configuration {{ es_config }} "
-- set_fact: instance_default_file={{default_file | dirname}}/{{es_config["node_name_prefix"]}}{{default_file | basename}}
-- set_fact: instance_init_script={{init_script | dirname }}/{{es_config['node_name_prefix']}}{{init_script | basename}}
-- set_fact: instance_config_directory={{ es_conf_dir }}/{{es_config['node_name_prefix']}}elasticsearch
 # Install OS specific elasticsearch - this can be abbreviated in version 2.0.0
 - name: Include specific Elasticsearch
@@ -19,6 +19,9 @@
 - name: Elasticsearch configuration
   include: elasticsearch-config.yml
+#We remove the node.name key as it may be set by another node on the same server
 # Make sure the service is started, and restart if necessary
 - name: Start elasticsearch service
   service: name={{instance_init_script | basename}} state=started
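For orientation, with the RedHat vars at the end of this diff (default_file /etc/sysconfig/elasticsearch, init_script /etc/init.d/elasticsearch, es_conf_dir /etc/elasticsearch) and es_instance_name: node1, the three new set_fact lines at the top of this file resolve roughly to:

instance_default_file:     /etc/sysconfig/node1_elasticsearch
instance_init_script:      /etc/init.d/node1_elasticsearch
instance_config_directory: /etc/elasticsearch/node1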

View file

@@ -7,8 +7,8 @@
 - include: elasticsearch.yml
 - include: elasticsearch-plugins.yml
   when: es_plugins is defined
-- include: elasticsearch-scripts.yml
-  when: es_scripts
-- include: elasticsearch-templates.yml
-  when: es_templates
+#- include: elasticsearch-scripts.yml
+# when: es_scripts
+#- include: elasticsearch-templates.yml
+# when: es_templates

View file

@@ -1,387 +1,20 @@
-##################### Elasticsearch Configuration Example #####################
-# This file contains an overview of various configuration settings,
-# targeted at operations staff. Application developers should
-# consult the guide at <http://elasticsearch.org/guide>.
-#
-# The installation procedure is covered at
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
-#
-# Elasticsearch comes with reasonable defaults for most settings,
-# so you can try it out without bothering with configuration.
-#
-# Most of the time, these defaults are just fine for running a production
-# cluster. If you're fine-tuning your cluster, or wondering about the
-# effect of certain configuration option, please _do ask_ on the
-# mailing list or IRC channel [http://elasticsearch.org/community].
-# Any element in the configuration can be replaced with environment variables
-# by placing them in ${...} notation. For example:
-#
-#node.rack: ${RACK_ENV_VAR}
-# For information on supported formats and syntax for the config file, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
-################################### Cluster ###################################
-# Cluster name identifies your cluster for auto-discovery. If you're running
-# multiple clusters on the same network, make sure you're using unique names.
-#
-cluster.name: {{ es_config['cluster_name'] }}
-#################################### Node #####################################
-# Node names are generated dynamically on startup, so you're relieved
-# from configuring them manually. You can tie this node to a specific name:
-#
-node.name: {{ es_config['node_name_prefix'] }}{{inventory_hostname}}
-# Every node can be configured to allow or deny being eligible as the master,
-# and to allow or deny to store the data.
-#
-# Allow this node to be eligible as a master node (enabled by default):
-#
-{% if es_config['master_node'] is defined %}
-node.master: {{ es_config['master_node'] | lower}}
+{{ es_config | to_nice_yaml }}
+{% if es_config['cluster.name'] is not defined %}
+cluster.name: elasticsearch
 {% endif %}
-#
-# Allow this node to store data (enabled by default):
-#
-{% if es_config['data_node'] is defined %}
-node.data: {{ es_config['data_node'] | lower}}
+{% if es_config['node.name'] is not defined %}
+node.name: {{inventory_hostname}}-{{es_instance_name}}
 {% endif %}
-# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
-# Node Info API [http://localhost:9200/_nodes] or GUI tools
-# such as <http://www.elasticsearch.org/overview/marvel/>,
-# <http://github.com/karmi/elasticsearch-paramedic>,
-# <http://github.com/lukas-vlcek/bigdesk> and
-# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
-# A node can have generic attributes associated with it, which can later be used
-# for customized shard allocation filtering, or allocation awareness. An attribute
-# is a simple key value pair, similar to node.key: value, here is an example:
-#
-{% if es_config['node_rack'] is defined %}
-node.rack: {{ es_config['node_rack'] }}
-{% endif %}
-# By default, multiple nodes are allowed to start from the same installation location
-# to disable it, set the following:
-#node.max_local_storage_nodes: 1
-#################################### Index ####################################
-# You can set a number of options (such as shard/replica options, mapping
-# or analyzer definitions, translog settings, ...) for indices globally,
-# in this file.
-#
-# Note, that it makes more sense to configure index settings specifically for
-# a certain index, either when creating it or by using the index templates API.
-#
-# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
-# for more information.
-# Set the number of shards (splits) of an index (5 by default) if provided:
-{% if es_config['index_number_of_shards'] is defined %}
-index.number_of_shards: {{ es_config['index_number_of_shards'] }}
-{% endif %}
-# Set the number of replicas (additional copies) of an index (1 by default) if provided:
-{% if es_config['index_number_of_replicas'] is defined %}
-index.number_of_replicas: {{ es_config['index_number_of_replicas'] }}
-{% endif %}
-# These settings directly affect the performance of index and search operations
-# in your cluster. Assuming you have enough machines to hold shards and
-# replicas, the rule of thumb is:
-#
-# 1. Having more *shards* enhances the _indexing_ performance and allows to
-# _distribute_ a big index across machines.
-# 2. Having more *replicas* enhances the _search_ performance and improves the
-# cluster _availability_.
-#
-# The "number_of_shards" is a one-time setting for an index.
-#
-# The "number_of_replicas" can be increased or decreased anytime,
-# by using the Index Update Settings API.
-#
-# Elasticsearch takes care about load balancing, relocating, gathering the
-# results from nodes, etc. Experiment with different settings to fine-tune
-# your setup.
-# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
-# the index status.
 #################################### Paths ####################################
 # Path to directory containing configuration (this file and logging.yml):
 path.conf: {{ instance_config_directory }}
-# Path to directory where to store index data allocated for this node.
+path.data: {{ data_dir }}
+path.work: {{ work_dir }}
-#
-# Can optionally include more than one location, causing data to be striped across
-# the locations (a la RAID 0) on a file level, favouring locations with most free
-# space on creation. For example:
-#
-{% if es_data_dir is defined %}
-path.data: {{ es_data_dir }}
-{% endif %}
-# Path to temporary files:
+path.logs: {{ log_dir }}
-{% if es_work_dir is defined %}
-path.work: {{ es_work_dir }}
-{% endif %}
-# Path to log files:
-{% if es_log_dir is defined %}
-path.logs: {{ es_log_dir }}
-{% endif %}
-# Path to where plugins are installed:
-#
-#path.plugins: /path/to/plugins
-#################################### Plugin ###################################
-# If a plugin listed here is not installed for current node, the node will not start.
-#
-#plugin.mandatory: mapper-attachments,lang-groovy
-################################### Memory ####################################
-# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
-# it _never_ swaps.
-#
-# Set this property to true to lock the memory:
-#
-{% if es_config['m_lock_enabled'] is defined %}
-bootstrap.mlockall: {{ es_config['m_lock_enabled'] | lower}}
-{% endif %}
-# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
-# to the same value, and that the machine has enough memory to allocate
-# for Elasticsearch, leaving enough memory for the operating system itself.
-#
-# You should also make sure that the Elasticsearch process is allowed to lock
-# the memory, eg. by using `ulimit -l unlimited`.
-############################## Network And HTTP ###############################
-# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
-# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
-# communication. (the range means that if the port is busy, it will automatically
-# try the next port).
-# Set the bind address specifically (IPv4 or IPv6):
-#
-#network.bind_host: 192.168.0.1
-# Set the address other nodes will use to communicate with this node. If not
-# set, it is automatically derived. It must point to an actual IP address.
-#
-#network.publish_host: 192.168.0.1
-# Set both 'bind_host' and 'publish_host':
-#
-{% if es_config['network_host'] is defined %}
-network.host: {{ es_config['network_host'] }}
-{% endif %}
-# Set a custom port for the node to node communication (9300 by default):
-#
-{% if es_config['transport_tcp_port'] is defined %}
-transport.tcp.port: {{ es_config['transport_tcp_port'] }}
-{% endif %}
-# Enable compression for all communication between nodes (disabled by default):
-#
-#transport.tcp.compress: true
-# Set a custom port to listen for HTTP traffic (9200 by default):
-#
-{% if es_config['http_port'] is defined %}
-http.port: {{ es_config['http_port'] }}
-{% endif %}
-# Set a custom allowed content length:
-#
-#http.max_content_length: 100mb
-# Disable HTTP completely:
-#
-#http.enabled: false
-################################### Gateway ###################################
-# The gateway allows for persisting the cluster state between full cluster
-# restarts. Every change to the state (such as adding an index) will be stored
-# in the gateway, and when the cluster starts up for the first time,
-# it will read its state from the gateway.
-# There are several types of gateway implementations. For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
-# The default gateway type is the "local" gateway (recommended):
-#
-#gateway.type: local
-# Settings below control how and when to start the initial recovery process on
-# a full cluster restart (to reuse as much local data as possible when using shared
-# gateway).
-# Allow recovery process after N nodes in a cluster are up:
-#
-#gateway.recover_after_nodes: 1
-# Set the timeout to initiate the recovery process, once the N nodes
-# from previous setting are up (accepts time value):
-#
-#gateway.recover_after_time: 5m
-# Set how many nodes are expected in this cluster. Once these N nodes
-# are up (and recover_after_nodes is met), begin recovery process immediately
-# (without waiting for recover_after_time to expire):
-#
-#gateway.expected_nodes: 2
-############################# Recovery Throttling #############################
-# These settings allow to control the process of shards allocation between
-# nodes during initial recovery, replica allocation, rebalancing,
-# or when adding and removing nodes.
-# Set the number of concurrent recoveries happening on a node:
-#
-# 1. During the initial recovery
-#
-#cluster.routing.allocation.node_initial_primaries_recoveries: 4
-#
-# 2. During adding/removing nodes, rebalancing, etc
-#
-#cluster.routing.allocation.node_concurrent_recoveries: 2
-# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
-#
-#indices.recovery.max_bytes_per_sec: 20mb
-# Set to limit the number of open concurrent streams when
-# recovering a shard from a peer:
-#
-#indices.recovery.concurrent_streams: 5
-################################## Discovery ##################################
-# Discovery infrastructure ensures nodes can be found within a cluster
-# and master node is elected. Multicast discovery is the default.
-# Set to ensure a node sees N other master eligible nodes to be considered
-# operational within the cluster. This should be set to a quorum/majority of
-# the master-eligible nodes in the cluster.
-#
-#discovery.zen.minimum_master_nodes: 1
-# Set the time to wait for ping responses from other nodes when discovering.
-# Set this option to a higher value on a slow or congested network
-# to minimize discovery failures:
-#
-#discovery.zen.ping.timeout: 3s
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
-# Unicast discovery allows to explicitly control which nodes will be used
-# to discover the cluster. It can be used when multicast is not present,
-# or to restrict the cluster communication-wise.
-#
-# 1. Disable multicast discovery (enabled by default):
-#
-discovery.zen.ping.multicast.enabled: {{ es_config['multicast_enabled'] | lower }}
-#
-# 2. Configure an initial list of master nodes in the cluster
-# to perform discovery when new nodes (master or data) are started:
-#
-#We put all the current eligible masters in here. If not specified, we assumes its a master
-{% if es_config['multicast_enabled'] is defined and not es_config['multicast_enabled'] %}
-discovery.zen.ping.unicast.hosts: {{ es_config['unicast_hosts'] }}
-{% endif %}
-# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
-#
-# You have to install the cloud-aws plugin for enabling the EC2 discovery.
-#
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
-#
-# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
-# for a step-by-step tutorial.
-# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
-#
-# You have to install the cloud-gce plugin for enabling the GCE discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
-# Azure discovery allows to use Azure API in order to perform discovery.
-#
-# You have to install the cloud-azure plugin for enabling the Azure discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
-################################## Slow Log ##################################
-# Shard level query and fetch threshold logging.
-index.search.slowlog.threshold.query.warn: 2s
-index.search.slowlog.threshold.query.info: 2s
-index.search.slowlog.threshold.query.debug: 1s
-index.search.slowlog.threshold.query.trace: 500ms
-#index.search.slowlog.threshold.fetch.warn: 1s
-#index.search.slowlog.threshold.fetch.info: 800ms
-#index.search.slowlog.threshold.fetch.debug: 500ms
-#index.search.slowlog.threshold.fetch.trace: 200ms
-#index.indexing.slowlog.threshold.index.warn: 10s
-#index.indexing.slowlog.threshold.index.info: 5s
-#index.indexing.slowlog.threshold.index.debug: 2s
-#index.indexing.slowlog.threshold.index.trace: 500ms
-################################## GC Logging ################################
-#monitor.jvm.gc.young.warn: 1000ms
-#monitor.jvm.gc.young.info: 700ms
-#monitor.jvm.gc.young.debug: 400ms
-#monitor.jvm.gc.old.warn: 10s
-#monitor.jvm.gc.old.info: 5s
-#monitor.jvm.gc.old.debug: 2s
-################################## Security ################################
-# Uncomment if you want to enable JSONP as a valid return transport on the
-# http server. With this enabled, it may pose a security risk, so disabling
-# it unless you need it is recommended (it is disabled by default).
-#
-#http.jsonp.enable: true
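To make the new template concrete: for the single-node config test below (es_instance_name node1 on host localhost), the rendered elasticsearch.yml would look roughly like this. to_nice_yaml emits the es_config map sorted by key, and the cluster.name/node.name fallbacks are skipped because both are set; exact ordering and quoting may differ slightly:

bootstrap.mlockall: true
cluster.name: custom-cluster
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: localhost:9301
http.port: 9201
node.data: false
node.master: true
node.name: node1
transport.tcp.port: 9301
#################################### Paths ####################################
# Path to directory containing configuration (this file and logging.yml):
path.conf: /etc/elasticsearch/node1
path.data: /var/lib/elasticsearch/localhost-node1
path.work: /tmp/elasticsearch/localhost-node1
path.logs: /var/log/elasticsearch/localhost-node1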

View file

@@ -4,9 +4,9 @@
   hosts: localhost
   roles:
     #expand to all available parameters
-    - { role: elasticsearch, es_config: { node_name_prefix: "node1_", cluster_name: "custom-cluster", unicast_hosts: "localhost:9301", http_port: "9201", transport_tcp_port: "9301", data_node: false, master_node: true, m_lock_enabled: true, multicast_enabled: false } }
+    - { role: elasticsearch, es_instance_name: "node1", es_config: {node.name: "node1", cluster.name: "custom-cluster", discovery.zen.ping.unicast.hosts: "localhost:9301", http.port: 9201, transport.tcp.port: 9301, node.data: false, node.master: true, bootstrap.mlockall: true, discovery.zen.ping.multicast.enabled: false } }
   vars:
     es_scripts: false
     es_templates: false
     es_version_lock: false
-    es_config: { "m_lock_enabled": true }
+    es_config: { "discovery.zen.ping.multicast.enabled": true }

View file

@@ -14,20 +14,25 @@ context "basic tests" do
 it { should be_installed }
 end
-describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
 it { should be_file }
 end
 #test configuration parameters have been set - test all appropriately set in config file
-describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
 it { should contain 'http.port: 9201' }
 it { should contain 'transport.tcp.port: 9301' }
 it { should contain 'node.data: false' }
 it { should contain 'node.master: true' }
 it { should contain 'discovery.zen.ping.multicast.enabled: false' }
 it { should contain 'cluster.name: custom-cluster' }
-it { should contain 'node.name: node1_localhost' }
+it { should contain 'node.name: node1' }
+it { should contain 'bootstrap.mlockall: true' }
 it { should contain 'discovery.zen.ping.unicast.hosts: localhost:9301' }
+it { should contain 'path.conf: /etc/elasticsearch/node1' }
+it { should contain 'path.data: /var/lib/elasticsearch/localhost-node1' }
+it { should contain 'path.work: /tmp/elasticsearch/localhost-node1' }
+it { should contain 'path.logs: /var/log/elasticsearch/localhost-node1' }
 end
#test we started on the correct port was used #test we started on the correct port was used

View file

@@ -3,6 +3,6 @@
 - name: Elasticsearch Config tests
   hosts: localhost
   roles:
-    - { role: elasticsearch, es_config: { "multicast_enabled": false, node_name_prefix: "master_", unicast_hosts: "localhost:9300", http_port: "9200", transport_tcp_port: "9300", data_node: false, master_node: true, m_lock_enabled: true,multicast_enabled: false } }
-    - { role: elasticsearch, es_config: { "multicast_enabled": false, node_name_prefix: "node1_", unicast_hosts: "localhost:9300", http_port: "9201", transport_tcp_port: "9301", data_node: true, master_node: false, m_lock_enabled: true,multicast_enabled: false } }
+    - { role: elasticsearch, es_instance_name: "master", es_config: { "discovery.zen.ping.multicast.enabled": false, discovery.zen.ping.unicast.hosts: "localhost:9300", http.port: 9200, transport.tcp.port: 9300, node.data: false, node.master: true, bootstrap.mlockall: true, discovery.zen.ping.multicast.enabled: false } }
+    - { role: elasticsearch, es_instance_name: "node1", es_config: { "discovery.zen.ping.multicast.enabled": false, discovery.zen.ping.unicast.hosts: "localhost:9300", http.port: 9201, transport.tcp.port: 9301, node.data: true, node.master: false, discovery.zen.ping.multicast.enabled: false } }
   vars:

View file

@@ -18,33 +18,43 @@ context "basic tests" do
 it { should be_installed }
 end
-describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
 it { should be_file }
 end
-describe file('/etc/elasticsearch/master_elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/master/elasticsearch.yml') do
 it { should be_file }
 end
 #test configuration parameters have been set - test all appropriately set in config file
-describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
 it { should contain 'http.port: 9201' }
 it { should contain 'transport.tcp.port: 9301' }
 it { should contain 'node.data: true' }
 it { should contain 'node.master: false' }
 it { should contain 'discovery.zen.ping.multicast.enabled: false' }
-it { should contain 'node.name: node1_localhost' }
+it { should contain 'node.name: localhost-node1' }
+it { should_not contain 'bootstrap.mlockall: true' }
+it { should contain 'path.conf: /etc/elasticsearch/node1' }
+it { should contain 'path.data: /var/lib/elasticsearch/localhost-node1' }
+it { should contain 'path.work: /tmp/elasticsearch/localhost-node1' }
+it { should contain 'path.logs: /var/log/elasticsearch/localhost-node1' }
 end
 #test configuration parameters have been set for master - test all appropriately set in config file
-describe file('/etc/elasticsearch/master_elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/master/elasticsearch.yml') do
 it { should contain 'http.port: 9200' }
 it { should contain 'transport.tcp.port: 9300' }
 it { should contain 'node.data: false' }
 it { should contain 'node.master: true' }
 it { should contain 'discovery.zen.ping.multicast.enabled: false' }
-it { should contain 'node.name: master_localhost' }
+it { should contain 'node.name: localhost-master' }
+it { should contain 'bootstrap.mlockall: true' }
+it { should contain 'path.conf: /etc/elasticsearch/master' }
+it { should contain 'path.data: /var/lib/elasticsearch/localhost-master' }
+it { should contain 'path.work: /tmp/elasticsearch/localhost-master' }
+it { should contain 'path.logs: /var/log/elasticsearch/localhost-master' }
 end
 describe 'Master listening' do

View file

@@ -2,5 +2,5 @@
 - name: Elasticsearch Package tests
   hosts: localhost
   roles:
-    - { role: elasticsearch, es_config: { "multicast_enabled": true } }
+    - { role: elasticsearch, es_config: { "discovery.zen.ping.multicast.enabled": true }, es_instance_name: "node1" }
   vars:

View file

@@ -6,7 +6,7 @@ context "basic tests" do
 it { should exist }
 end
-describe service('elasticsearch') do
+describe service('node1_elasticsearch') do
 it { should be_running }
 end
@@ -14,9 +14,15 @@ context "basic tests" do
 it { should be_installed }
 end
-describe file('/etc/elasticsearch/elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
 it { should be_file }
 end
+describe 'Node listening' do
+it 'listening in port 9200' do
+expect(port 9200).to be_listening
+end
+end
 end

View file

@@ -2,7 +2,7 @@
 - name: wrapper playbook for kitchen testing "elasticsearch"
   hosts: localhost
   roles:
-    - { role: elasticsearch, es_config: { "multicast_enabled": true } }
+    - { role: elasticsearch, es_config: { "discovery.zen.ping.multicast.enabled": true }, es_instance_name: "node1" }
   vars:
     es_use_repository: "true"
     es_plugins:

View file

@@ -6,7 +6,7 @@ context "basic tests" do
 it { should exist }
 end
-describe service('elasticsearch') do
+describe service('node1_elasticsearch') do
 it { should be_running }
 end
@@ -14,10 +14,25 @@ context "basic tests" do
 it { should be_installed }
 end
-describe file('/etc/elasticsearch/elasticsearch/elasticsearch.yml') do
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
 it { should be_file }
 end
+describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
+it { should contain 'node.name: localhost-node1' }
+it { should contain 'cluster.name: "elasticsearch"' }
+it { should contain 'path.conf: /etc/elasticsearch/node1' }
+it { should contain 'path.data: /var/lib/elasticsearch/localhost-node1' }
+it { should contain 'path.work: /tmp/elasticsearch/localhost-node1' }
+it { should contain 'path.logs: /var/log/elasticsearch/localhost-node1' }
+end
+describe 'Node listening' do
+it 'listening in port 9200' do
+expect(port 9200).to be_listening
+end
+end
 describe 'plugin' do
 it 'should be reported as existing', :retry => 3, :retry_wait => 10 do

View file

@@ -2,5 +2,5 @@
 java: "java-1.8.0-openjdk.x86_64"
 default_file: "/etc/sysconfig/elasticsearch"
 init_script: "/etc/init.d/elasticsearch"
-es_conf_dir: "/etc/elasticsearch/"
+es_conf_dir: "/etc/elasticsearch"
 es_home: "/usr/share/elasticsearch"

View file

@@ -1,8 +1,2 @@
 ---
 es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch"
-pid_dir: "/var/run/elasticsearch"
-#Needed to provide default directories
-default_data_dir: "/var/lib/elasticsearch"
-default_log_dir: "/var/log/elasticsearch"
-default_work_dir: "/tmp/elasticsearch"