Support for configuration per node and multiple nodes per server
This commit is contained in:
parent
0c6f326bed
commit
d19297adda
30 changed files with 818 additions and 88 deletions
1
.gitignore
vendored
1
.gitignore
vendored
|
|
@ -4,3 +4,4 @@
|
|||
.vendor
|
||||
.bundle
|
||||
Converging
|
||||
TODO
|
||||
|
|
|
|||
10
.kitchen.yml
10
.kitchen.yml
|
|
@ -64,4 +64,14 @@ suites:
|
|||
attributes:
|
||||
provisioner:
|
||||
playbook: test/integration/package.yml
|
||||
- name: config
|
||||
run_list:
|
||||
attributes:
|
||||
provisioner:
|
||||
playbook: test/integration/config.yml
|
||||
- name: multi
|
||||
run_list:
|
||||
attributes:
|
||||
provisioner:
|
||||
playbook: test/integration/multi.yml
|
||||
|
||||
|
|
|
|||
68
README.md
68
README.md
|
|
@ -24,24 +24,32 @@ Then create your playbook yaml adding the role elasticsearch and overriding any
|
|||
---
|
||||
hosts: my_host
|
||||
roles:
|
||||
- elasticsearch
|
||||
- { role: elasticsearch, es_multicast_enabled: true}
|
||||
tasks:
|
||||
- .... your tasks ...
|
||||
```
|
||||
|
||||
or more complex..
|
||||
By default es_multicast_enabled is false. If this is not to true, the user is required to specify the following additional parameters:
|
||||
|
||||
1. es_http_port - the http port for the node
|
||||
2. es_transport_tcp_port - the transport port for the node
|
||||
3. es_unicast_hosts - the unicast discovery list, in the comma separated format "<host>:<port>,<host>:<port>" (typically the clusters dedicated masters)
|
||||
|
||||
|
||||
If set to true, the ports will be auto defined and node discovery will be performed using multicast.
|
||||
|
||||
A more complex example:
|
||||
|
||||
```
|
||||
---
|
||||
hosts: my_host
|
||||
roles:
|
||||
- elasticsearch
|
||||
vars:
|
||||
java_packages:
|
||||
- "oracle-java7-installer"
|
||||
es_major_version: 1.4
|
||||
es_version: 1.4.4
|
||||
hosts: localhost
|
||||
roles:
|
||||
- { role: elasticsearch, es_unicast_hosts: "localhost:9301", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: false, es_master_node: true, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node1_", es_cluster_name: "custom-cluster" }
|
||||
vars:
|
||||
es_scripts: false
|
||||
es_templates: false
|
||||
es_version_lock: false
|
||||
es_m_lock_enabled: true
|
||||
es_start_service: false
|
||||
es_plugins_reinstall: false
|
||||
es_plugins:
|
||||
|
|
@ -57,11 +65,47 @@ hosts: my_host
|
|||
version: latest
|
||||
- plugin: lmenezes/elasticsearch-kopf
|
||||
version: master
|
||||
tasks:
|
||||
tasks:
|
||||
- .... your tasks ...
|
||||
```
|
||||
|
||||
Make sure your hosts are defined in your ```hosts``` file with the appropriate ```ansible_ssh_host```, ```ansible_ssh_user``` and ```ansible_ssh_private_key_file``` values.
|
||||
The above example illustrates the ability to control the configuration.
|
||||
|
||||
The application of a role results in the installation of a node on a host. Multiple roles equates to multiple nodes for a single host. If specifying multiple roles for a host, and thus multiple nodes, the user must:
|
||||
|
||||
1. Provide a es_node_name_prefix. This is used to ensure seperation of data, log, config and init scripts.
|
||||
2. Ensure those playbooks responsible for installing and starting master eligble roles are specified first. These are required for cluster initalization.
|
||||
|
||||
An example of a two server deployment, each with 1 node on one server and 2 nodes on another. The first server holds the master and is thus declared first.
|
||||
|
||||
```
|
||||
---
|
||||
hosts: masters
|
||||
roles:
|
||||
- { role: elasticsearch, es_node_name_prefix: "node1_", es_unicast_hosts: "localhost:9300", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false }
|
||||
vars:
|
||||
es_scripts: false
|
||||
es_templates: false
|
||||
es_version_lock: false
|
||||
es_cluster_name: example-cluster
|
||||
m_lock_enabled: false
|
||||
|
||||
- hosts: data_nodes
|
||||
roles:
|
||||
- { role: elasticsearch, es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node1_" }
|
||||
- { role: elasticsearch, es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node2_" }
|
||||
vars:
|
||||
es_scripts: false
|
||||
es_templates: false
|
||||
es_version_lock: false
|
||||
es_cluster_name: example-cluster
|
||||
m_lock_enabled: true
|
||||
es_plugins:
|
||||
```
|
||||
|
||||
Parameters can additionally be assigned to hosts using the inventory file if desired.
|
||||
|
||||
Make sure your hosts are defined in your ```inventory``` file with the appropriate ```ansible_ssh_host```, ```ansible_ssh_user``` and ```ansible_ssh_private_key_file``` values.
|
||||
|
||||
Then run it:
|
||||
|
||||
|
|
|
|||
|
|
@ -9,3 +9,6 @@ es_scripts: false
|
|||
es_templates: false
|
||||
es_user: elasticsearch
|
||||
es_group: elasticsearch
|
||||
es_cluster_name: elasticsearch
|
||||
es_multicast_enabled: false
|
||||
es_node_name_prefix: ""
|
||||
9
elasticsearch.iml
Normal file
9
elasticsearch.iml
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="RUBY_MODULE" version="4">
|
||||
<component name="NewModuleRootManager" inherit-compiler-output="true">
|
||||
<exclude-output />
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
dependencies: []
|
||||
|
||||
allow_duplicates: yes
|
||||
|
||||
galaxy_info:
|
||||
author: Robin Clarke
|
||||
|
|
|
|||
10
tasks/checkParameters.yml
Normal file
10
tasks/checkParameters.yml
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
# Check for mandatory parameters
|
||||
|
||||
- fail: msg="Parameter 'es_http_port' must be defined when multicast is disabled"
|
||||
when: es_multicast_enabled == false and es_http_port is not defined
|
||||
|
||||
- fail: msg="Parameter 'es_transport_tcp_port' must be defined when multicast is disabled"
|
||||
when: es_multicast_enabled == false and es_transport_tcp_port is not defined
|
||||
|
||||
- fail: msg="Parameter 'es_unicast_hosts' must be defined when multicast is disabled"
|
||||
when: es_multicast_enabled == false and es_unicast_hosts is not defined
|
||||
|
|
@ -20,23 +20,3 @@
|
|||
apt: deb=/tmp/elasticsearch-{{ es_version }}.deb
|
||||
when: not es_use_repository
|
||||
register: elasticsearch_install
|
||||
|
||||
- name: Debian - configure memory
|
||||
lineinfile: dest=/etc/default/elasticsearch regexp="^ES_HEAP_SIZE" insertafter="^#ES_HEAP_SIZE" line="ES_HEAP_SIZE={{ es_heap_size }}"
|
||||
when: es_heap_size is defined
|
||||
register: elasticsearch_configure
|
||||
- name: Debian - configure data store
|
||||
lineinfile: dest=/etc/default/elasticsearch regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}"
|
||||
when: es_data_dir is defined
|
||||
register: elasticsearch_configure
|
||||
- name: Debian - configure elasticsearch user
|
||||
lineinfile: dest=/etc/default/elasticsearch regexp="^ES_USER" insertafter="^#ES_USER" line="ES_USER={{ es_user }}"
|
||||
when: es_user is defined
|
||||
register: elasticsearch_configure
|
||||
- name: Debian - configure elasticsearch group
|
||||
lineinfile: dest=/etc/default/elasticsearch regexp="^ES_GROUP" insertafter="^#ES_GROUP" line="ES_GROUP={{ es_group }}"
|
||||
when: es_group is defined
|
||||
register: elasticsearch_configure
|
||||
- name: Debian - create data dir
|
||||
file: state=directory path={{ es_data_dir }} owner={{ es_user }} group={{ es_group }}
|
||||
when: es_data_dir is defined
|
||||
|
|
|
|||
|
|
@ -20,23 +20,3 @@
|
|||
yum: name={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.noarch.rpm{% endif %} state=present
|
||||
when: not es_use_repository
|
||||
register: elasticsearch_install
|
||||
|
||||
- name: RedHat - configure memory
|
||||
lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^ES_HEAP_SIZE" insertafter="^#ES_HEAP_SIZE" line="ES_HEAP_SIZE={{ es_heap_size }}"
|
||||
when: es_heap_size is defined
|
||||
register: elasticsearch_configure
|
||||
- name: RedHat - configure data store
|
||||
lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}"
|
||||
when: es_data_dir is defined
|
||||
register: elasticsearch_configure
|
||||
- name: RedHat - configure elasticsearch user
|
||||
lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^ES_USER" insertafter="^#ES_USER" line="ES_USER={{ es_user }}"
|
||||
when: es_user is defined
|
||||
register: elasticsearch_configure
|
||||
- name: RedHat - configure elasticsearch group
|
||||
lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^ES_GROUP" insertafter="^#ES_GROUP" line="ES_GROUP={{ es_group }}"
|
||||
when: es_group is defined
|
||||
register: elasticsearch_configure
|
||||
- name: RedHat - create data dir
|
||||
file: state=directory path={{ es_data_dir }} owner={{ es_user }} group={{ es_group }}
|
||||
when: es_data_dir is defined
|
||||
|
|
|
|||
117
tasks/elasticsearch-config.yml
Normal file
117
tasks/elasticsearch-config.yml
Normal file
|
|
@ -0,0 +1,117 @@
|
|||
---
|
||||
|
||||
# Configure Elasticsearch Node
|
||||
|
||||
# Create an instance specific default file
|
||||
- name: Copy Default File for Instance
|
||||
command: creates={{instance_default_file}} cp "{{default_file}}" "{{instance_default_file}}"
|
||||
when: instance_default_file != default_file
|
||||
|
||||
- debug: msg="DEBUG {{ hostvars[inventory_hostname] }}"
|
||||
|
||||
# Create an instance specific init file
|
||||
- name: Copy Init File for Instance
|
||||
command: creates={{instance_init_script}} cp "{{init_script}}" "{{instance_init_script}}"
|
||||
when: instance_init_script != init_script
|
||||
|
||||
|
||||
#Create Config directory
|
||||
- name: Create Config Directory
|
||||
file: path={{ instance_config_directory }} state=directory owner={{ es_user }} group={{ es_group }}
|
||||
|
||||
#Copy the config template
|
||||
- name: Copy configuration file
|
||||
template: src=elasticsearch.yml.j2 dest={{instance_config_directory}}/elasticsearch.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
|
||||
|
||||
# Apply changes to the default file for this instance
|
||||
- name: Configure config directory
|
||||
lineinfile: dest={{instance_default_file}} regexp="^CONF_DIR" insertafter="^#CONF_DIR" line="CONF_DIR={{ instance_config_directory }}"
|
||||
register: elasticsearch_configure
|
||||
|
||||
- name: Configure config file
|
||||
lineinfile: dest={{instance_default_file}} regexp="^CONF_FILE" insertafter="^#CONF_FILE" line="CONF_FILE={{ instance_config_directory }}/elasticsearch.yml"
|
||||
register: elasticsearch_configure
|
||||
|
||||
- name: Configure memory in defaults
|
||||
lineinfile: dest={{instance_default_file}} regexp="^ES_HEAP_SIZE" insertafter="^#ES_HEAP_SIZE" line="ES_HEAP_SIZE={{ es_heap_size }}"
|
||||
when: es_heap_size is defined
|
||||
register: elasticsearch_configure
|
||||
|
||||
#We only have to set these if they are specified. The start scripts will by default use the NAME set later on constructing directory names to avoid collisions.
|
||||
|
||||
- name: Configure max open files
|
||||
lineinfile: dest={{instance_default_file}} regexp="^MAX_OPEN_FILES" insertafter="^#MAX_OPEN_FILES" line="MAX_OPEN_FILES={{ es_max_open_files }}"
|
||||
when: es_max_open_files is defined
|
||||
register: elasticsearch_configure
|
||||
|
||||
#For directories we also use the {{ es_node_name_prefix }}{{inventory_hostname}} - this helps if we have a shared SAN. {{es_node_name_prefix}}{{default_file | basename} could potentially be used -
|
||||
#init script effectively means this is the default.
|
||||
|
||||
#Create PID directory
|
||||
- name: Create PID Directory
|
||||
file: path={{pid_dir}}/{{ es_node_name_prefix }}{{inventory_hostname}} state=directory owner={{ es_user }} group={{ es_group }}
|
||||
|
||||
- name: Configure PID directory
|
||||
lineinfile: dest={{instance_default_file}} regexp="^PID_DIR" insertafter="^#PID_DIR" line="PID_DIR={{pid_dir}}/{{ es_node_name_prefix }}{{inventory_hostname}}"
|
||||
register: elasticsearch_configure
|
||||
|
||||
- set_fact: es_data_dir={{default_data_dir}}
|
||||
when: es_data_dir is undefined
|
||||
|
||||
#include the host name as potentially shared SAN
|
||||
- name: Create data dir
|
||||
file: state=directory path={{ es_data_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }}
|
||||
|
||||
- name: Configure data store
|
||||
lineinfile: dest={{instance_default_file}} regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}}"
|
||||
register: elasticsearch_configure
|
||||
|
||||
- set_fact: es_work_dir={{default_work_dir}}
|
||||
when: es_work_dir is undefined
|
||||
|
||||
- name: Create work dir
|
||||
file: state=directory path={{ es_work_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }}
|
||||
|
||||
- name: Configure work directory
|
||||
lineinfile: dest={{instance_default_file}} regexp="^WORK_DIR" insertafter="^#WORK_DIR" line="WORK_DIR={{ es_work_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}}"
|
||||
register: elasticsearch_configure
|
||||
|
||||
- set_fact: es_log_dir={{default_log_dir}}
|
||||
when: es_log_dir is undefined
|
||||
|
||||
- name: Create log dir
|
||||
file: state=directory path={{ es_log_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }}
|
||||
|
||||
- name: Configure log directory
|
||||
lineinfile: dest={{instance_default_file}} regexp="^LOG_DIR" insertafter="^#LOG_DIR" line="LOG_DIR={{ es_log_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}}"
|
||||
register: elasticsearch_configure
|
||||
|
||||
#required so that the ES_HOME does not change between instances
|
||||
|
||||
- name: Configure elasticsearch home
|
||||
lineinfile: dest={{instance_default_file}} regexp="^ES_HOME" insertafter="^#ES_HOME" line="ES_HOME={{es_home}}"
|
||||
register: elasticsearch_configure
|
||||
|
||||
- name: Configure elasticsearch user
|
||||
lineinfile: dest={{instance_default_file}} regexp="^ES_USER" insertafter="^#ES_USER" line="ES_USER={{ es_user }}"
|
||||
when: es_user is defined
|
||||
register: elasticsearch_configure
|
||||
|
||||
- name: Configure elasticsearch group
|
||||
lineinfile: dest={{instance_default_file}} regexp="^ES_GROUP" insertafter="^#ES_GROUP" line="ES_GROUP={{ es_group }}"
|
||||
when: es_group is defined
|
||||
register: elasticsearch_configure
|
||||
|
||||
#Apply changes to init script - NAME can be changed in Debian start script
|
||||
- name: Name Node in Init Script
|
||||
lineinfile: dest={{instance_init_script}} regexp="^NAME" line="NAME={{es_node_name_prefix}}{{default_file | basename}}"
|
||||
register: elasticsearch_configure
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- debug: msg="For {{ instance_init_script }} using default {{ instance_default_file }}"
|
||||
|
||||
- name: Environment in Init Script
|
||||
lineinfile: dest={{instance_init_script}} regexp="^ES_ENV_FILE" line="ES_ENV_FILE={{instance_default_file}}"
|
||||
register: elasticsearch_configure
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
|
|
@ -1,21 +1,31 @@
|
|||
---
|
||||
# Trigger Debian section
|
||||
- name: Include Debian specific Elasticsearch
|
||||
- set_fact: instance_default_file={{default_file | dirname}}/{{es_node_name_prefix}}{{default_file | basename}}
|
||||
- set_fact: instance_init_script={{init_script | dirname }}/{{es_node_name_prefix}}{{init_script | basename}}
|
||||
- set_fact: instance_config_directory={{ es_conf_dir }}/{{es_node_name_prefix}}elasticsearch
|
||||
|
||||
|
||||
|
||||
# Install OS specific elasticsearch - this can be abbreviated in version 2.0.0
|
||||
- name: Include specific Elasticsearch
|
||||
include: elasticsearch-Debian.yml
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
# Trigger Redhat section
|
||||
- name: Include RedHat specific Elasticsearch
|
||||
- name: Include specific Elasticsearch
|
||||
include: elasticsearch-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
#Configuration file for elasticsearch
|
||||
- name: Elasticsearch configuration
|
||||
include: elasticsearch-config.yml
|
||||
|
||||
# Make sure the service is started, and restart if necessary
|
||||
- name: Start elasticsearch service
|
||||
service: name=elasticsearch state=started
|
||||
service: name={{instance_init_script | basename}} state=started
|
||||
when: es_start_service
|
||||
register: elasticsearch_started
|
||||
|
||||
- name: Restart elasticsearch service if new version installed
|
||||
service: name=elasticsearch state=restarted
|
||||
service: name={{instance_init_script | basename}} state=restarted
|
||||
when: es_start_service and
|
||||
( elasticsearch_install.changed or elasticsearch_configure.changed )
|
||||
and not elasticsearch_started.changed
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
- name: RedHat - Ensure Java is installed
|
||||
yum: name={{ java_rhel }} state=latest
|
||||
yum: name={{ java }} state=latest
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Debian - Ensure Java is installed
|
||||
apt: name={{ java_debian }} state=present update_cache=yes
|
||||
apt: name={{ java }} state=present update_cache=yes force=yes
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
|
@ -1,4 +1,8 @@
|
|||
---
|
||||
- name: check-parameters
|
||||
include: checkParameters.yml
|
||||
- name: os-specific vars
|
||||
include_vars: "{{ansible_os_family}}.yml"
|
||||
- include: java.yml
|
||||
- include: elasticsearch.yml
|
||||
- include: elasticsearch-plugins.yml
|
||||
|
|
|
|||
390
templates/elasticsearch.yml.j2
Normal file
390
templates/elasticsearch.yml.j2
Normal file
|
|
@ -0,0 +1,390 @@
|
|||
##################### Elasticsearch Configuration Example #####################
|
||||
|
||||
# This file contains an overview of various configuration settings,
|
||||
# targeted at operations staff. Application developers should
|
||||
# consult the guide at <http://elasticsearch.org/guide>.
|
||||
#
|
||||
# The installation procedure is covered at
|
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
|
||||
#
|
||||
# Elasticsearch comes with reasonable defaults for most settings,
|
||||
# so you can try it out without bothering with configuration.
|
||||
#
|
||||
# Most of the time, these defaults are just fine for running a production
|
||||
# cluster. If you're fine-tuning your cluster, or wondering about the
|
||||
# effect of certain configuration option, please _do ask_ on the
|
||||
# mailing list or IRC channel [http://elasticsearch.org/community].
|
||||
|
||||
# Any element in the configuration can be replaced with environment variables
|
||||
# by placing them in ${...} notation. For example:
|
||||
#
|
||||
#node.rack: ${RACK_ENV_VAR}
|
||||
|
||||
# For information on supported formats and syntax for the config file, see
|
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
|
||||
|
||||
|
||||
################################### Cluster ###################################
|
||||
|
||||
# Cluster name identifies your cluster for auto-discovery. If you're running
|
||||
# multiple clusters on the same network, make sure you're using unique names.
|
||||
#
|
||||
cluster.name: {{ es_cluster_name }}
|
||||
|
||||
#################################### Node #####################################
|
||||
|
||||
# Node names are generated dynamically on startup, so you're relieved
|
||||
# from configuring them manually. You can tie this node to a specific name:
|
||||
#
|
||||
node.name: {{es_node_name_prefix}}{{inventory_hostname}}
|
||||
|
||||
# Every node can be configured to allow or deny being eligible as the master,
|
||||
# and to allow or deny to store the data.
|
||||
#
|
||||
# Allow this node to be eligible as a master node (enabled by default):
|
||||
#
|
||||
{% if es_master_node is defined %}
|
||||
node.master: {{es_master_node | lower}}
|
||||
{% endif %}
|
||||
|
||||
#
|
||||
# Allow this node to store data (enabled by default):
|
||||
#
|
||||
{% if es_data_node is defined %}
|
||||
node.data: {{es_data_node | lower}}
|
||||
{% endif %}
|
||||
|
||||
# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
|
||||
# Node Info API [http://localhost:9200/_nodes] or GUI tools
|
||||
# such as <http://www.elasticsearch.org/overview/marvel/>,
|
||||
# <http://github.com/karmi/elasticsearch-paramedic>,
|
||||
# <http://github.com/lukas-vlcek/bigdesk> and
|
||||
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
|
||||
|
||||
# A node can have generic attributes associated with it, which can later be used
|
||||
# for customized shard allocation filtering, or allocation awareness. An attribute
|
||||
# is a simple key value pair, similar to node.key: value, here is an example:
|
||||
#
|
||||
{% if es_node_rack is defined %}
|
||||
node.rack: {{ es_node_rack }}
|
||||
{% endif %}
|
||||
|
||||
# By default, multiple nodes are allowed to start from the same installation location
|
||||
# to disable it, set the following:
|
||||
#node.max_local_storage_nodes: 1
|
||||
|
||||
|
||||
#################################### Index ####################################
|
||||
|
||||
# You can set a number of options (such as shard/replica options, mapping
|
||||
# or analyzer definitions, translog settings, ...) for indices globally,
|
||||
# in this file.
|
||||
#
|
||||
# Note, that it makes more sense to configure index settings specifically for
|
||||
# a certain index, either when creating it or by using the index templates API.
|
||||
#
|
||||
# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
|
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
|
||||
# for more information.
|
||||
|
||||
# Set the number of shards (splits) of an index (5 by default) if provided:
|
||||
|
||||
{% if es_index_number_of_shards is defined %}
|
||||
index.number_of_shards: {{ es_index_number_of_shards }}
|
||||
{% endif %}
|
||||
|
||||
# Set the number of replicas (additional copies) of an index (1 by default) if provided:
|
||||
{% if es_index_number_of_replicas is defined %}
|
||||
index.number_of_replicas: {{ es_index_number_of_replicas }}
|
||||
{% endif %}
|
||||
|
||||
|
||||
# These settings directly affect the performance of index and search operations
|
||||
# in your cluster. Assuming you have enough machines to hold shards and
|
||||
# replicas, the rule of thumb is:
|
||||
#
|
||||
# 1. Having more *shards* enhances the _indexing_ performance and allows to
|
||||
# _distribute_ a big index across machines.
|
||||
# 2. Having more *replicas* enhances the _search_ performance and improves the
|
||||
# cluster _availability_.
|
||||
#
|
||||
# The "number_of_shards" is a one-time setting for an index.
|
||||
#
|
||||
# The "number_of_replicas" can be increased or decreased anytime,
|
||||
# by using the Index Update Settings API.
|
||||
#
|
||||
# Elasticsearch takes care about load balancing, relocating, gathering the
|
||||
# results from nodes, etc. Experiment with different settings to fine-tune
|
||||
# your setup.
|
||||
|
||||
# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
|
||||
# the index status.
|
||||
|
||||
|
||||
#################################### Paths ####################################
|
||||
|
||||
# Path to directory containing configuration (this file and logging.yml):
|
||||
#
|
||||
{% if es_conf_dir is defined %}
|
||||
path.conf: {{ es_conf_dir }}
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Path to directory where to store index data allocated for this node.
|
||||
#
|
||||
# Can optionally include more than one location, causing data to be striped across
|
||||
# the locations (a la RAID 0) on a file level, favouring locations with most free
|
||||
# space on creation. For example:
|
||||
#
|
||||
{% if es_data_dir is defined %}
|
||||
path.data: {{ es_data_dir }}
|
||||
{% endif %}
|
||||
|
||||
# Path to temporary files:
|
||||
{% if es_work_dir is defined %}
|
||||
path.work: {{ es_work_dir }}
|
||||
{% endif %}
|
||||
|
||||
# Path to log files:
|
||||
{% if es_log_dir is defined %}
|
||||
path.logs: {{ es_log_dir }}
|
||||
{% endif %}
|
||||
|
||||
# Path to where plugins are installed:
|
||||
#
|
||||
#path.plugins: /path/to/plugins
|
||||
|
||||
|
||||
#################################### Plugin ###################################
|
||||
|
||||
# If a plugin listed here is not installed for current node, the node will not start.
|
||||
#
|
||||
#plugin.mandatory: mapper-attachments,lang-groovy
|
||||
|
||||
|
||||
################################### Memory ####################################
|
||||
|
||||
# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
|
||||
# it _never_ swaps.
|
||||
#
|
||||
# Set this property to true to lock the memory:
|
||||
#
|
||||
{% if es_m_lock_enabled is defined %}
|
||||
bootstrap.mlockall: {{es_m_lock_enabled | lower}}
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
|
||||
# to the same value, and that the machine has enough memory to allocate
|
||||
# for Elasticsearch, leaving enough memory for the operating system itself.
|
||||
#
|
||||
# You should also make sure that the Elasticsearch process is allowed to lock
|
||||
# the memory, eg. by using `ulimit -l unlimited`.
|
||||
|
||||
|
||||
############################## Network And HTTP ###############################
|
||||
|
||||
# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
|
||||
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
|
||||
# communication. (the range means that if the port is busy, it will automatically
|
||||
# try the next port).
|
||||
|
||||
# Set the bind address specifically (IPv4 or IPv6):
|
||||
#
|
||||
#network.bind_host: 192.168.0.1
|
||||
|
||||
# Set the address other nodes will use to communicate with this node. If not
|
||||
# set, it is automatically derived. It must point to an actual IP address.
|
||||
#
|
||||
#network.publish_host: 192.168.0.1
|
||||
|
||||
# Set both 'bind_host' and 'publish_host':
|
||||
#
|
||||
{% if es_network_host is defined %}
|
||||
network.host: {{ es_network_host }}
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Set a custom port for the node to node communication (9300 by default):
|
||||
#
|
||||
{% if es_transport_tcp_port is defined %}
|
||||
transport.tcp.port: {{ es_transport_tcp_port }}
|
||||
{% endif %}
|
||||
|
||||
# Enable compression for all communication between nodes (disabled by default):
|
||||
#
|
||||
#transport.tcp.compress: true
|
||||
|
||||
# Set a custom port to listen for HTTP traffic (9200 by default):
|
||||
#
|
||||
{% if es_http_port is defined %}
|
||||
http.port: {{ es_http_port }}
|
||||
{% endif %}
|
||||
|
||||
# Set a custom allowed content length:
|
||||
#
|
||||
#http.max_content_length: 100mb
|
||||
|
||||
# Disable HTTP completely:
|
||||
#
|
||||
#http.enabled: false
|
||||
|
||||
|
||||
################################### Gateway ###################################
|
||||
|
||||
# The gateway allows for persisting the cluster state between full cluster
|
||||
# restarts. Every change to the state (such as adding an index) will be stored
|
||||
# in the gateway, and when the cluster starts up for the first time,
|
||||
# it will read its state from the gateway.
|
||||
|
||||
# There are several types of gateway implementations. For more information, see
|
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
|
||||
|
||||
# The default gateway type is the "local" gateway (recommended):
|
||||
#
|
||||
#gateway.type: local
|
||||
|
||||
# Settings below control how and when to start the initial recovery process on
|
||||
# a full cluster restart (to reuse as much local data as possible when using shared
|
||||
# gateway).
|
||||
|
||||
# Allow recovery process after N nodes in a cluster are up:
|
||||
#
|
||||
#gateway.recover_after_nodes: 1
|
||||
|
||||
# Set the timeout to initiate the recovery process, once the N nodes
|
||||
# from previous setting are up (accepts time value):
|
||||
#
|
||||
#gateway.recover_after_time: 5m
|
||||
|
||||
# Set how many nodes are expected in this cluster. Once these N nodes
|
||||
# are up (and recover_after_nodes is met), begin recovery process immediately
|
||||
# (without waiting for recover_after_time to expire):
|
||||
#
|
||||
#gateway.expected_nodes: 2
|
||||
|
||||
|
||||
############################# Recovery Throttling #############################
|
||||
|
||||
# These settings allow to control the process of shards allocation between
|
||||
# nodes during initial recovery, replica allocation, rebalancing,
|
||||
# or when adding and removing nodes.
|
||||
|
||||
# Set the number of concurrent recoveries happening on a node:
|
||||
#
|
||||
# 1. During the initial recovery
|
||||
#
|
||||
#cluster.routing.allocation.node_initial_primaries_recoveries: 4
|
||||
#
|
||||
# 2. During adding/removing nodes, rebalancing, etc
|
||||
#
|
||||
#cluster.routing.allocation.node_concurrent_recoveries: 2
|
||||
|
||||
# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
|
||||
#
|
||||
#indices.recovery.max_bytes_per_sec: 20mb
|
||||
|
||||
# Set to limit the number of open concurrent streams when
|
||||
# recovering a shard from a peer:
|
||||
#
|
||||
#indices.recovery.concurrent_streams: 5
|
||||
|
||||
|
||||
################################## Discovery ##################################
|
||||
|
||||
# Discovery infrastructure ensures nodes can be found within a cluster
|
||||
# and master node is elected. Multicast discovery is the default.
|
||||
|
||||
# Set to ensure a node sees N other master eligible nodes to be considered
|
||||
# operational within the cluster. This should be set to a quorum/majority of
|
||||
# the master-eligible nodes in the cluster.
|
||||
#
|
||||
#discovery.zen.minimum_master_nodes: 1
|
||||
|
||||
# Set the time to wait for ping responses from other nodes when discovering.
|
||||
# Set this option to a higher value on a slow or congested network
|
||||
# to minimize discovery failures:
|
||||
#
|
||||
#discovery.zen.ping.timeout: 3s
|
||||
|
||||
# For more information, see
|
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
|
||||
|
||||
# Unicast discovery allows to explicitly control which nodes will be used
|
||||
# to discover the cluster. It can be used when multicast is not present,
|
||||
# or to restrict the cluster communication-wise.
|
||||
#
|
||||
# 1. Disable multicast discovery (enabled by default):
|
||||
#
|
||||
discovery.zen.ping.multicast.enabled: {{es_multicast_enabled | lower }}
|
||||
|
||||
#
|
||||
# 2. Configure an initial list of master nodes in the cluster
|
||||
# to perform discovery when new nodes (master or data) are started:
|
||||
#
|
||||
|
||||
#We put all the current eligible masters in here. If not specified, we assumes its a master
|
||||
|
||||
{% if es_multicast_enabled is defined and not es_multicast_enabled %}
|
||||
discovery.zen.ping.unicast.hosts: {{es_unicast_hosts}}
|
||||
{% endif %}
|
||||
|
||||
|
||||
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
|
||||
#
|
||||
# You have to install the cloud-aws plugin for enabling the EC2 discovery.
|
||||
#
|
||||
# For more information, see
|
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
|
||||
#
|
||||
# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
|
||||
# for a step-by-step tutorial.
|
||||
|
||||
# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
|
||||
#
|
||||
# You have to install the cloud-gce plugin for enabling the GCE discovery.
|
||||
#
|
||||
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
|
||||
|
||||
# Azure discovery allows to use Azure API in order to perform discovery.
|
||||
#
|
||||
# You have to install the cloud-azure plugin for enabling the Azure discovery.
|
||||
#
|
||||
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
|
||||
|
||||
################################## Slow Log ##################################
|
||||
|
||||
# Shard level query and fetch threshold logging.
|
||||
|
||||
index.search.slowlog.threshold.query.warn: 2s
|
||||
index.search.slowlog.threshold.query.info: 2s
|
||||
index.search.slowlog.threshold.query.debug: 1s
|
||||
index.search.slowlog.threshold.query.trace: 500ms
|
||||
|
||||
#index.search.slowlog.threshold.fetch.warn: 1s
|
||||
#index.search.slowlog.threshold.fetch.info: 800ms
|
||||
#index.search.slowlog.threshold.fetch.debug: 500ms
|
||||
#index.search.slowlog.threshold.fetch.trace: 200ms
|
||||
|
||||
#index.indexing.slowlog.threshold.index.warn: 10s
|
||||
#index.indexing.slowlog.threshold.index.info: 5s
|
||||
#index.indexing.slowlog.threshold.index.debug: 2s
|
||||
#index.indexing.slowlog.threshold.index.trace: 500ms
|
||||
|
||||
################################## GC Logging ################################
|
||||
|
||||
#monitor.jvm.gc.young.warn: 1000ms
|
||||
#monitor.jvm.gc.young.info: 700ms
|
||||
#monitor.jvm.gc.young.debug: 400ms
|
||||
|
||||
#monitor.jvm.gc.old.warn: 10s
|
||||
#monitor.jvm.gc.old.info: 5s
|
||||
#monitor.jvm.gc.old.debug: 2s
|
||||
|
||||
################################## Security ################################
|
||||
|
||||
# Uncomment if you want to enable JSONP as a valid return transport on the
|
||||
# http server. With this enabled, it may pose a security risk, so disabling
|
||||
# it unless you need it is recommended (it is disabled by default).
|
||||
#
|
||||
#http.jsonp.enable: true
|
||||
12
test/integration/config.yml
Normal file
12
test/integration/config.yml
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
#Test explicit setting of parameters and variables
|
||||
- name: Elasticsearch Package tests
|
||||
hosts: localhost
|
||||
roles:
|
||||
#expand to all available parameters
|
||||
- { role: elasticsearch, es_unicast_hosts: "localhost:9301", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: false, es_master_node: true, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node1_", es_cluster_name: "custom-cluster" }
|
||||
vars:
|
||||
es_scripts: false
|
||||
es_templates: false
|
||||
es_version_lock: false
|
||||
es_m_lock_enabled: true
|
||||
2
test/integration/config/config.yml
Normal file
2
test/integration/config/config.yml
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
---
|
||||
- host: test-kitchen
|
||||
42
test/integration/config/serverspec/default_spec.rb
Normal file
42
test/integration/config/serverspec/default_spec.rb
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
require 'spec_helper'
|
||||
|
||||
context "basic tests" do
|
||||
|
||||
describe user('elasticsearch') do
|
||||
it { should exist }
|
||||
end
|
||||
|
||||
describe service('node1_elasticsearch') do
|
||||
it { should be_running }
|
||||
end
|
||||
|
||||
describe package('elasticsearch') do
|
||||
it { should be_installed }
|
||||
end
|
||||
|
||||
describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
|
||||
it { should be_file }
|
||||
end
|
||||
|
||||
#test configuration parameters have been set - test all appropriately set in config file
|
||||
describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
|
||||
it { should contain 'http.port: 9201' }
|
||||
it { should contain 'transport.tcp.port: 9301' }
|
||||
it { should contain 'node.data: false' }
|
||||
it { should contain 'node.master: true' }
|
||||
it { should contain 'discovery.zen.ping.multicast.enabled: false' }
|
||||
it { should contain 'cluster.name: custom-cluster' }
|
||||
it { should contain 'node.name: node1_localhost' }
|
||||
it { should contain 'discovery.zen.ping.unicast.hosts: localhost:9301' }
|
||||
end
|
||||
|
||||
#test we started on the correct port was used
|
||||
describe command('curl "localhost:9201" | grep status') do
|
||||
#TODO: This is returning an empty string
|
||||
#its(:stdout) { should match /\"status\" : 200/ }
|
||||
its(:exit_status) { should eq 0 }
|
||||
end
|
||||
|
||||
|
||||
end
|
||||
|
||||
2
test/integration/config/serverspec/spec_helper.rb
Normal file
2
test/integration/config/serverspec/spec_helper.rb
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
require 'serverspec'
|
||||
set :backend, :exec
|
||||
8
test/integration/multi.yml
Normal file
8
test/integration/multi.yml
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
#Test ability to deploy multiple instances to a machine
|
||||
- name: Elasticsearch Config tests
|
||||
hosts: localhost
|
||||
roles:
|
||||
- { role: elasticsearch, es_node_name_prefix: "node1_", es_unicast_hosts: "localhost:9300", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false }
|
||||
- { role: elasticsearch, es_node_name_prefix: "master_", es_unicast_hosts: "localhost:9300", es_http_port: "9200", es_transport_tcp_port: "9300", es_data_node: false, es_master_node: true, es_m_lock_enabled: true, es_multicast_enabled: false }
|
||||
vars:
|
||||
2
test/integration/multi/multi.yml
Normal file
2
test/integration/multi/multi.yml
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
---
|
||||
- host: test-kitchen
|
||||
81
test/integration/multi/serverspec/default_spec.rb
Normal file
81
test/integration/multi/serverspec/default_spec.rb
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
require 'spec_helper'
|
||||
|
||||
context "basic tests" do
|
||||
|
||||
describe user('elasticsearch') do
|
||||
it { should exist }
|
||||
end
|
||||
|
||||
describe service('node1_elasticsearch') do
|
||||
it { should be_running }
|
||||
end
|
||||
|
||||
describe service('master_elasticsearch') do
|
||||
it { should be_running }
|
||||
end
|
||||
|
||||
describe package('elasticsearch') do
|
||||
it { should be_installed }
|
||||
end
|
||||
|
||||
describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
|
||||
it { should be_file }
|
||||
end
|
||||
|
||||
describe file('/etc/elasticsearch/master_elasticsearch/elasticsearch.yml') do
|
||||
it { should be_file }
|
||||
end
|
||||
|
||||
#test configuration parameters have been set - test all appropriately set in config file
|
||||
describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do
|
||||
it { should contain 'http.port: 9201' }
|
||||
it { should contain 'transport.tcp.port: 9301' }
|
||||
it { should contain 'node.data: true' }
|
||||
it { should contain 'node.master: false' }
|
||||
it { should contain 'discovery.zen.ping.multicast.enabled: false' }
|
||||
it { should contain 'node.name: node1_localhost' }
|
||||
end
|
||||
|
||||
|
||||
#test configuration parameters have been set for master - test all appropriately set in config file
|
||||
describe file('/etc/elasticsearch/master_elasticsearch/elasticsearch.yml') do
|
||||
it { should contain 'http.port: 9200' }
|
||||
it { should contain 'transport.tcp.port: 9300' }
|
||||
it { should contain 'node.data: false' }
|
||||
it { should contain 'node.master: true' }
|
||||
it { should contain 'discovery.zen.ping.multicast.enabled: false' }
|
||||
it { should contain 'node.name: master_localhost' }
|
||||
end
|
||||
|
||||
describe 'Master listening' do
|
||||
it 'listening in port 9200' do
|
||||
expect(port 9200).to be_listening
|
||||
end
|
||||
end
|
||||
|
||||
describe 'Node listening' do
|
||||
it 'node should be listening in port 9201' do
|
||||
expect(port 9201).to be_listening
|
||||
end
|
||||
end
|
||||
|
||||
#test we started on the correct port was used for master
|
||||
describe 'master started' do
|
||||
it 'master node should be running', :retry => 3, :retry_wait => 10 do
|
||||
command = command('curl "localhost:9200" | grep name')
|
||||
#expect(command.stdout).should match '/*master_localhost*/'
|
||||
expect(command.exit_status).to eq(0)
|
||||
end
|
||||
end
|
||||
|
||||
#test we started on the correct port was used for node 1
|
||||
describe 'node1 started' do
|
||||
it 'node should be running', :retry => 3, :retry_wait => 10 do
|
||||
command = command('curl "localhost:9201" | grep name')
|
||||
#expect(command.stdout).should match '/*node1_localhost*/'
|
||||
expect(command.exit_status).to eq(0)
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
2
test/integration/multi/serverspec/spec_helper.rb
Normal file
2
test/integration/multi/serverspec/spec_helper.rb
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
require 'serverspec'
|
||||
set :backend, :exec
|
||||
|
|
@ -2,5 +2,5 @@
|
|||
- name: Elasticsearch Package tests
|
||||
hosts: localhost
|
||||
roles:
|
||||
- elasticsearch
|
||||
- { role: elasticsearch, es_multicast_enabled: true}
|
||||
vars:
|
||||
|
|
@ -14,7 +14,7 @@ context "basic tests" do
|
|||
it { should be_installed }
|
||||
end
|
||||
|
||||
describe file('/etc/elasticsearch/elasticsearch.yml') do
|
||||
describe file('/etc/elasticsearch/elasticsearch/elasticsearch.yml') do
|
||||
it { should be_file }
|
||||
end
|
||||
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
- name: wrapper playbook for kitchen testing "elasticsearch"
|
||||
hosts: localhost
|
||||
roles:
|
||||
- elasticsearch
|
||||
- { role: elasticsearch, es_multicast_enabled: true}
|
||||
vars:
|
||||
es_use_repository: "true"
|
||||
es_plugins:
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ context "basic tests" do
|
|||
it { should be_installed }
|
||||
end
|
||||
|
||||
describe file('/etc/elasticsearch/elasticsearch.yml') do
|
||||
describe file('/etc/elasticsearch/elasticsearch/elasticsearch.yml') do
|
||||
it { should be_file }
|
||||
end
|
||||
|
||||
|
|
|
|||
11
vars/Debian.yml
Normal file
11
vars/Debian.yml
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
java: "openjdk-7-jre-headless"
|
||||
default_file: "/etc/default/elasticsearch"
|
||||
init_script: "/etc/init.d/elasticsearch"
|
||||
es_conf_dir: "/etc/elasticsearch"
|
||||
es_home: "/usr/share/elasticsearch"
|
||||
|
||||
default_data_dir: "/var/lib/elasticsearch"
|
||||
default_log_dir: "/var/log/elasticsearch"
|
||||
default_work_dir: "/tmp/elasticsearch"
|
||||
default_pid_dir: "/var/run/elasticsearch"
|
||||
|
|
@ -1 +0,0 @@
|
|||
---
|
||||
6
vars/RedHat.yml
Normal file
6
vars/RedHat.yml
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
java: "java-1.8.0-openjdk.x86_64"
|
||||
default_file: "/etc/sysconfig/elasticsearch"
|
||||
init_script: "/etc/init.d/elasticsearch"
|
||||
es_conf_dir: "/etc/elasticsearch/"
|
||||
es_home: "/usr/share/elasticsearch"
|
||||
|
|
@ -1,4 +1,8 @@
|
|||
---
|
||||
java_debian: "openjdk-7-jre-headless"
|
||||
java_rhel: "java-1.8.0-openjdk.x86_64"
|
||||
es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch"
|
||||
pid_dir: "/var/run/elasticsearch"
|
||||
|
||||
#Needed to provide default directories
|
||||
default_data_dir: "/var/lib/elasticsearch"
|
||||
default_log_dir: "/var/log/elasticsearch"
|
||||
default_work_dir: "/tmp/elasticsearch"
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue