diff --git a/.gitignore b/.gitignore index 625f5a1..0155da6 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ .vendor .bundle Converging +TODO diff --git a/.kitchen.yml b/.kitchen.yml index 488db67..1122b95 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -64,4 +64,14 @@ suites: attributes: provisioner: playbook: test/integration/package.yml + - name: config + run_list: + attributes: + provisioner: + playbook: test/integration/config.yml + - name: multi + run_list: + attributes: + provisioner: + playbook: test/integration/multi.yml diff --git a/README.md b/README.md index 063700d..f8b5517 100644 --- a/README.md +++ b/README.md @@ -24,44 +24,88 @@ Then create your playbook yaml adding the role elasticsearch and overriding any --- hosts: my_host roles: - - elasticsearch + - { role: elasticsearch, es_multicast_enabled: true} tasks: - .... your tasks ... ``` -or more complex.. +By default es_multicast_enabled is false. If this is not set to true, the user is required to specify the following additional parameters: +1. es_http_port - the http port for the node +2. es_transport_tcp_port - the transport port for the node +3. es_unicast_hosts - the unicast discovery list, in the comma-separated format "<host>:<port>,<host>:<port>" (typically the cluster's dedicated masters) + + +If set to true, the ports will be auto defined and node discovery will be performed using multicast. + +A more complex example: ``` --- -hosts: my_host - roles: - - elasticsearch - vars: - java_packages: - - "oracle-java7-installer" - es_major_version: 1.4 - es_version: 1.4.4 - es_start_service: false - es_plugins_reinstall: false - es_plugins: - - plugin: elasticsearch/elasticsearch-cloud-aws - version: 2.5.0 - - plugin: elasticsearch/marvel - version: latest - - plugin: elasticsearch/license - version: latest - - plugin: elasticsearch/shield - version: latest - - plugin: elasticsearch/elasticsearch-support-diagnostics - version: latest - - plugin: lmenezes/elasticsearch-kopf - version: master - tasks: - - .... 
your tasks ... +hosts: localhost +roles: + - { role: elasticsearch, es_unicast_hosts: "localhost:9301", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: false, es_master_node: true, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node1_", es_cluster_name: "custom-cluster" } +vars: + es_scripts: false + es_templates: false + es_version_lock: false + es_m_lock_enabled: true + es_start_service: false + es_plugins_reinstall: false + es_plugins: + - plugin: elasticsearch/elasticsearch-cloud-aws + version: 2.5.0 + - plugin: elasticsearch/marvel + version: latest + - plugin: elasticsearch/license + version: latest + - plugin: elasticsearch/shield + version: latest + - plugin: elasticsearch/elasticsearch-support-diagnostics + version: latest + - plugin: lmenezes/elasticsearch-kopf + version: master +tasks: + - .... your tasks ... ``` -Make sure your hosts are defined in your ```hosts``` file with the appropriate ```ansible_ssh_host```, ```ansible_ssh_user``` and ```ansible_ssh_private_key_file``` values. +The above example illustrates the ability to control the configuration. + +The application of a role results in the installation of a node on a host. Multiple roles equate to multiple nodes for a single host. If specifying multiple roles for a host, and thus multiple nodes, the user must: + +1. Provide an es_node_name_prefix. This is used to ensure separation of data, log, config and init scripts. +2. Ensure those playbooks responsible for installing and starting master eligible roles are specified first. These are required for cluster initialization. + +An example of a two server deployment, with 1 node on one server and 2 nodes on another. The first server holds the master and is thus declared first. 
+ +``` +--- +hosts: masters +roles: + - { role: elasticsearch, es_node_name_prefix: "node1_", es_unicast_hosts: "localhost:9300", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false } +vars: + es_scripts: false + es_templates: false + es_version_lock: false + es_cluster_name: example-cluster + m_lock_enabled: false + +- hosts: data_nodes + roles: + - { role: elasticsearch, es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node1_" } + - { role: elasticsearch, es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node2_" } + vars: + es_scripts: false + es_templates: false + es_version_lock: false + es_cluster_name: example-cluster + m_lock_enabled: true + es_plugins: +``` + +Parameters can additionally be assigned to hosts using the inventory file if desired. + +Make sure your hosts are defined in your ```inventory``` file with the appropriate ```ansible_ssh_host```, ```ansible_ssh_user``` and ```ansible_ssh_private_key_file``` values. 
Then run it: diff --git a/defaults/main.yml b/defaults/main.yml index 6b00692..ff4abc0 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -9,3 +9,6 @@ es_scripts: false es_templates: false es_user: elasticsearch es_group: elasticsearch +es_cluster_name: elasticsearch +es_multicast_enabled: false +es_node_name_prefix: "" \ No newline at end of file diff --git a/elasticsearch.iml b/elasticsearch.iml new file mode 100644 index 0000000..af03825 --- /dev/null +++ b/elasticsearch.iml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/meta/main.yml b/meta/main.yml index 357f0fe..4f961d5 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -1,5 +1,6 @@ --- -dependencies: [] + +allow_duplicates: yes galaxy_info: author: Robin Clarke diff --git a/tasks/checkParameters.yml b/tasks/checkParameters.yml new file mode 100644 index 0000000..242c6a6 --- /dev/null +++ b/tasks/checkParameters.yml @@ -0,0 +1,10 @@ +# Check for mandatory parameters + +- fail: msg="Parameter 'es_http_port' must be defined when multicast is disabled" + when: es_multicast_enabled == false and es_http_port is not defined + +- fail: msg="Parameter 'es_transport_tcp_port' must be defined when multicast is disabled" + when: es_multicast_enabled == false and es_transport_tcp_port is not defined + +- fail: msg="Parameter 'es_unicast_hosts' must be defined when multicast is disabled" + when: es_multicast_enabled == false and es_unicast_hosts is not defined \ No newline at end of file diff --git a/tasks/elasticsearch-Debian.yml b/tasks/elasticsearch-Debian.yml index ea10dc8..15e697f 100644 --- a/tasks/elasticsearch-Debian.yml +++ b/tasks/elasticsearch-Debian.yml @@ -19,24 +19,4 @@ - name: Debian - Ensure elasticsearch is installed from downloaded package apt: deb=/tmp/elasticsearch-{{ es_version }}.deb when: not es_use_repository - register: elasticsearch_install - -- name: Debian - configure memory - lineinfile: dest=/etc/default/elasticsearch regexp="^ES_HEAP_SIZE" 
insertafter="^#ES_HEAP_SIZE" line="ES_HEAP_SIZE={{ es_heap_size }}" - when: es_heap_size is defined - register: elasticsearch_configure -- name: Debian - configure data store - lineinfile: dest=/etc/default/elasticsearch regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}" - when: es_data_dir is defined - register: elasticsearch_configure -- name: Debian - configure elasticsearch user - lineinfile: dest=/etc/default/elasticsearch regexp="^ES_USER" insertafter="^#ES_USER" line="ES_USER={{ es_user }}" - when: es_user is defined - register: elasticsearch_configure -- name: Debian - configure elasticsearch group - lineinfile: dest=/etc/default/elasticsearch regexp="^ES_GROUP" insertafter="^#ES_GROUP" line="ES_GROUP={{ es_group }}" - when: es_group is defined - register: elasticsearch_configure -- name: Debian - create data dir - file: state=directory path={{ es_data_dir }} owner={{ es_user }} group={{ es_group }} - when: es_data_dir is defined + register: elasticsearch_install \ No newline at end of file diff --git a/tasks/elasticsearch-RedHat.yml b/tasks/elasticsearch-RedHat.yml index e256293..d01163d 100644 --- a/tasks/elasticsearch-RedHat.yml +++ b/tasks/elasticsearch-RedHat.yml @@ -19,24 +19,4 @@ - name: RedHat - Install Elasticsearch from url yum: name={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.noarch.rpm{% endif %} state=present when: not es_use_repository - register: elasticsearch_install - -- name: RedHat - configure memory - lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^ES_HEAP_SIZE" insertafter="^#ES_HEAP_SIZE" line="ES_HEAP_SIZE={{ es_heap_size }}" - when: es_heap_size is defined - register: elasticsearch_configure -- name: RedHat - configure data store - lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}" - when: es_data_dir is defined - register: elasticsearch_configure -- name: 
RedHat - configure elasticsearch user - lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^ES_USER" insertafter="^#ES_USER" line="ES_USER={{ es_user }}" - when: es_user is defined - register: elasticsearch_configure -- name: RedHat - configure elasticsearch group - lineinfile: dest=/etc/sysconfig/elasticsearch regexp="^ES_GROUP" insertafter="^#ES_GROUP" line="ES_GROUP={{ es_group }}" - when: es_group is defined - register: elasticsearch_configure -- name: RedHat - create data dir - file: state=directory path={{ es_data_dir }} owner={{ es_user }} group={{ es_group }} - when: es_data_dir is defined + register: elasticsearch_install \ No newline at end of file diff --git a/tasks/elasticsearch-config.yml b/tasks/elasticsearch-config.yml new file mode 100644 index 0000000..98f7a68 --- /dev/null +++ b/tasks/elasticsearch-config.yml @@ -0,0 +1,117 @@ +--- + +# Configure Elasticsearch Node + +# Create an instance specific default file +- name: Copy Default File for Instance + command: creates={{instance_default_file}} cp "{{default_file}}" "{{instance_default_file}}" + when: instance_default_file != default_file + +- debug: msg="DEBUG {{ hostvars[inventory_hostname] }}" + +# Create an instance specific init file +- name: Copy Init File for Instance + command: creates={{instance_init_script}} cp "{{init_script}}" "{{instance_init_script}}" + when: instance_init_script != init_script + + +#Create Config directory +- name: Create Config Directory + file: path={{ instance_config_directory }} state=directory owner={{ es_user }} group={{ es_group }} + +#Copy the config template +- name: Copy configuration file + template: src=elasticsearch.yml.j2 dest={{instance_config_directory}}/elasticsearch.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes + +# Apply changes to the default file for this instance +- name: Configure config directory + lineinfile: dest={{instance_default_file}} regexp="^CONF_DIR" insertafter="^#CONF_DIR" line="CONF_DIR={{ 
instance_config_directory }}" + register: elasticsearch_configure + +- name: Configure config file + lineinfile: dest={{instance_default_file}} regexp="^CONF_FILE" insertafter="^#CONF_FILE" line="CONF_FILE={{ instance_config_directory }}/elasticsearch.yml" + register: elasticsearch_configure + +- name: Configure memory in defaults + lineinfile: dest={{instance_default_file}} regexp="^ES_HEAP_SIZE" insertafter="^#ES_HEAP_SIZE" line="ES_HEAP_SIZE={{ es_heap_size }}" + when: es_heap_size is defined + register: elasticsearch_configure + +#We only have to set these if they are specified. The start scripts will by default use the NAME set later on constructing directory names to avoid collisions. + +- name: Configure max open files + lineinfile: dest={{instance_default_file}} regexp="^MAX_OPEN_FILES" insertafter="^#MAX_OPEN_FILES" line="MAX_OPEN_FILES={{ es_max_open_files }}" + when: es_max_open_files is defined + register: elasticsearch_configure + +#For directories we also use the {{ es_node_name_prefix }}{{inventory_hostname}} - this helps if we have a shared SAN. {{es_node_name_prefix}}{{default_file | basename} could potentially be used - +#init script effectively means this is the default. 
+ +#Create PID directory +- name: Create PID Directory + file: path={{pid_dir}}/{{ es_node_name_prefix }}{{inventory_hostname}} state=directory owner={{ es_user }} group={{ es_group }} + +- name: Configure PID directory + lineinfile: dest={{instance_default_file}} regexp="^PID_DIR" insertafter="^#PID_DIR" line="PID_DIR={{pid_dir}}/{{ es_node_name_prefix }}{{inventory_hostname}}" + register: elasticsearch_configure + +- set_fact: es_data_dir={{default_data_dir}} + when: es_data_dir is undefined + +#include the host name as potentially shared SAN +- name: Create data dir + file: state=directory path={{ es_data_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }} + +- name: Configure data store + lineinfile: dest={{instance_default_file}} regexp="^DATA_DIR" insertafter="^#DATA_DIR" line="DATA_DIR={{ es_data_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}}" + register: elasticsearch_configure + +- set_fact: es_work_dir={{default_work_dir}} + when: es_work_dir is undefined + +- name: Create work dir + file: state=directory path={{ es_work_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }} + +- name: Configure work directory + lineinfile: dest={{instance_default_file}} regexp="^WORK_DIR" insertafter="^#WORK_DIR" line="WORK_DIR={{ es_work_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}}" + register: elasticsearch_configure + +- set_fact: es_log_dir={{default_log_dir}} + when: es_log_dir is undefined + +- name: Create log dir + file: state=directory path={{ es_log_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}} owner={{ es_user }} group={{ es_group }} + +- name: Configure log directory + lineinfile: dest={{instance_default_file}} regexp="^LOG_DIR" insertafter="^#LOG_DIR" line="LOG_DIR={{ es_log_dir }}/{{ es_node_name_prefix }}{{inventory_hostname}}" + register: elasticsearch_configure + +#required so that the ES_HOME does not change between instances + +- name: 
Configure elasticsearch home + lineinfile: dest={{instance_default_file}} regexp="^ES_HOME" insertafter="^#ES_HOME" line="ES_HOME={{es_home}}" + register: elasticsearch_configure + +- name: Configure elasticsearch user + lineinfile: dest={{instance_default_file}} regexp="^ES_USER" insertafter="^#ES_USER" line="ES_USER={{ es_user }}" + when: es_user is defined + register: elasticsearch_configure + +- name: Configure elasticsearch group + lineinfile: dest={{instance_default_file}} regexp="^ES_GROUP" insertafter="^#ES_GROUP" line="ES_GROUP={{ es_group }}" + when: es_group is defined + register: elasticsearch_configure + +#Apply changes to init script - NAME can be changed in Debian start script +- name: Name Node in Init Script + lineinfile: dest={{instance_init_script}} regexp="^NAME" line="NAME={{es_node_name_prefix}}{{default_file | basename}}" + register: elasticsearch_configure + when: ansible_os_family == 'Debian' + +- debug: msg="For {{ instance_init_script }} using default {{ instance_default_file }}" + +- name: Environment in Init Script + lineinfile: dest={{instance_init_script}} regexp="^ES_ENV_FILE" line="ES_ENV_FILE={{instance_default_file}}" + register: elasticsearch_configure + when: ansible_os_family == 'RedHat' + diff --git a/tasks/elasticsearch.yml b/tasks/elasticsearch.yml index 1d4073c..aca22c7 100644 --- a/tasks/elasticsearch.yml +++ b/tasks/elasticsearch.yml @@ -1,21 +1,31 @@ --- -# Trigger Debian section -- name: Include Debian specific Elasticsearch +- set_fact: instance_default_file={{default_file | dirname}}/{{es_node_name_prefix}}{{default_file | basename}} +- set_fact: instance_init_script={{init_script | dirname }}/{{es_node_name_prefix}}{{init_script | basename}} +- set_fact: instance_config_directory={{ es_conf_dir }}/{{es_node_name_prefix}}elasticsearch + + + +# Install OS specific elasticsearch - this can be abbreviated in version 2.0.0 +- name: Include specific Elasticsearch include: elasticsearch-Debian.yml when: ansible_os_family == 
'Debian' -# Trigger Redhat section -- name: Include RedHat specific Elasticsearch +- name: Include specific Elasticsearch include: elasticsearch-RedHat.yml when: ansible_os_family == 'RedHat' +#Configuration file for elasticsearch +- name: Elasticsearch configuration + include: elasticsearch-config.yml + # Make sure the service is started, and restart if necessary - name: Start elasticsearch service - service: name=elasticsearch state=started + service: name={{instance_init_script | basename}} state=started when: es_start_service register: elasticsearch_started + - name: Restart elasticsearch service if new version installed - service: name=elasticsearch state=restarted + service: name={{instance_init_script | basename}} state=restarted when: es_start_service and ( elasticsearch_install.changed or elasticsearch_configure.changed ) and not elasticsearch_started.changed diff --git a/tasks/java.yml b/tasks/java.yml index 7b042b0..05fb5e7 100644 --- a/tasks/java.yml +++ b/tasks/java.yml @@ -1,8 +1,8 @@ --- - name: RedHat - Ensure Java is installed - yum: name={{ java_rhel }} state=latest + yum: name={{ java }} state=latest when: ansible_os_family == 'RedHat' - name: Debian - Ensure Java is installed - apt: name={{ java_debian }} state=present update_cache=yes + apt: name={{ java }} state=present update_cache=yes force=yes when: ansible_os_family == 'Debian' \ No newline at end of file diff --git a/tasks/main.yml b/tasks/main.yml index b492c29..5791593 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -1,4 +1,8 @@ --- +- name: check-parameters + include: checkParameters.yml +- name: os-specific vars + include_vars: "{{ansible_os_family}}.yml" - include: java.yml - include: elasticsearch.yml - include: elasticsearch-plugins.yml diff --git a/templates/elasticsearch.yml.j2 b/templates/elasticsearch.yml.j2 new file mode 100644 index 0000000..16237f6 --- /dev/null +++ b/templates/elasticsearch.yml.j2 @@ -0,0 +1,390 @@ +##################### Elasticsearch Configuration 
Example ##################### + +# This file contains an overview of various configuration settings, +# targeted at operations staff. Application developers should +# consult the guide at . +# +# The installation procedure is covered at +# . +# +# Elasticsearch comes with reasonable defaults for most settings, +# so you can try it out without bothering with configuration. +# +# Most of the time, these defaults are just fine for running a production +# cluster. If you're fine-tuning your cluster, or wondering about the +# effect of certain configuration option, please _do ask_ on the +# mailing list or IRC channel [http://elasticsearch.org/community]. + +# Any element in the configuration can be replaced with environment variables +# by placing them in ${...} notation. For example: +# +#node.rack: ${RACK_ENV_VAR} + +# For information on supported formats and syntax for the config file, see +# + + +################################### Cluster ################################### + +# Cluster name identifies your cluster for auto-discovery. If you're running +# multiple clusters on the same network, make sure you're using unique names. +# +cluster.name: {{ es_cluster_name }} + +#################################### Node ##################################### + +# Node names are generated dynamically on startup, so you're relieved +# from configuring them manually. You can tie this node to a specific name: +# +node.name: {{es_node_name_prefix}}{{inventory_hostname}} + +# Every node can be configured to allow or deny being eligible as the master, +# and to allow or deny to store the data. 
+# +# Allow this node to be eligible as a master node (enabled by default): +# +{% if es_master_node is defined %} +node.master: {{es_master_node | lower}} +{% endif %} + +# +# Allow this node to store data (enabled by default): +# +{% if es_data_node is defined %} +node.data: {{es_data_node | lower}} +{% endif %} + +# Use the Cluster Health API [http://localhost:9200/_cluster/health], the +# Node Info API [http://localhost:9200/_nodes] or GUI tools +# such as , +# , +# and +# to inspect the cluster state. + +# A node can have generic attributes associated with it, which can later be used +# for customized shard allocation filtering, or allocation awareness. An attribute +# is a simple key value pair, similar to node.key: value, here is an example: +# +{% if es_node_rack is defined %} +node.rack: {{ es_node_rack }} +{% endif %} + +# By default, multiple nodes are allowed to start from the same installation location +# to disable it, set the following: +#node.max_local_storage_nodes: 1 + + +#################################### Index #################################### + +# You can set a number of options (such as shard/replica options, mapping +# or analyzer definitions, translog settings, ...) for indices globally, +# in this file. +# +# Note, that it makes more sense to configure index settings specifically for +# a certain index, either when creating it or by using the index templates API. +# +# See and +# +# for more information. + +# Set the number of shards (splits) of an index (5 by default) if provided: + +{% if es_index_number_of_shards is defined %} +index.number_of_shards: {{ es_index_number_of_shards }} +{% endif %} + +# Set the number of replicas (additional copies) of an index (1 by default) if provided: +{% if es_index_number_of_replicas is defined %} +index.number_of_replicas: {{ es_index_number_of_replicas }} +{% endif %} + + +# These settings directly affect the performance of index and search operations +# in your cluster. 
Assuming you have enough machines to hold shards and +# replicas, the rule of thumb is: +# +# 1. Having more *shards* enhances the _indexing_ performance and allows to +# _distribute_ a big index across machines. +# 2. Having more *replicas* enhances the _search_ performance and improves the +# cluster _availability_. +# +# The "number_of_shards" is a one-time setting for an index. +# +# The "number_of_replicas" can be increased or decreased anytime, +# by using the Index Update Settings API. +# +# Elasticsearch takes care about load balancing, relocating, gathering the +# results from nodes, etc. Experiment with different settings to fine-tune +# your setup. + +# Use the Index Status API () to inspect +# the index status. + + +#################################### Paths #################################### + +# Path to directory containing configuration (this file and logging.yml): +# +{% if es_conf_dir is defined %} +path.conf: {{ es_conf_dir }} +{% endif %} + + +# Path to directory where to store index data allocated for this node. +# +# Can optionally include more than one location, causing data to be striped across +# the locations (a la RAID 0) on a file level, favouring locations with most free +# space on creation. For example: +# +{% if es_data_dir is defined %} +path.data: {{ es_data_dir }} +{% endif %} + +# Path to temporary files: +{% if es_work_dir is defined %} +path.work: {{ es_work_dir }} +{% endif %} + +# Path to log files: +{% if es_log_dir is defined %} +path.logs: {{ es_log_dir }} +{% endif %} + +# Path to where plugins are installed: +# +#path.plugins: /path/to/plugins + + +#################################### Plugin ################################### + +# If a plugin listed here is not installed for current node, the node will not start. 
+# +#plugin.mandatory: mapper-attachments,lang-groovy + + +################################### Memory #################################### + +# Elasticsearch performs poorly when JVM starts swapping: you should ensure that +# it _never_ swaps. +# +# Set this property to true to lock the memory: +# +{% if es_m_lock_enabled is defined %} +bootstrap.mlockall: {{es_m_lock_enabled | lower}} +{% endif %} + + +# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set +# to the same value, and that the machine has enough memory to allocate +# for Elasticsearch, leaving enough memory for the operating system itself. +# +# You should also make sure that the Elasticsearch process is allowed to lock +# the memory, eg. by using `ulimit -l unlimited`. + + +############################## Network And HTTP ############################### + +# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens +# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node +# communication. (the range means that if the port is busy, it will automatically +# try the next port). + +# Set the bind address specifically (IPv4 or IPv6): +# +#network.bind_host: 192.168.0.1 + +# Set the address other nodes will use to communicate with this node. If not +# set, it is automatically derived. It must point to an actual IP address. 
+# +#network.publish_host: 192.168.0.1 + +# Set both 'bind_host' and 'publish_host': +# +{% if es_network_host is defined %} +network.host: {{ es_network_host }} +{% endif %} + + +# Set a custom port for the node to node communication (9300 by default): +# +{% if es_transport_tcp_port is defined %} +transport.tcp.port: {{ es_transport_tcp_port }} +{% endif %} + +# Enable compression for all communication between nodes (disabled by default): +# +#transport.tcp.compress: true + +# Set a custom port to listen for HTTP traffic (9200 by default): +# +{% if es_http_port is defined %} +http.port: {{ es_http_port }} +{% endif %} + +# Set a custom allowed content length: +# +#http.max_content_length: 100mb + +# Disable HTTP completely: +# +#http.enabled: false + + +################################### Gateway ################################### + +# The gateway allows for persisting the cluster state between full cluster +# restarts. Every change to the state (such as adding an index) will be stored +# in the gateway, and when the cluster starts up for the first time, +# it will read its state from the gateway. + +# There are several types of gateway implementations. For more information, see +# . + +# The default gateway type is the "local" gateway (recommended): +# +#gateway.type: local + +# Settings below control how and when to start the initial recovery process on +# a full cluster restart (to reuse as much local data as possible when using shared +# gateway). + +# Allow recovery process after N nodes in a cluster are up: +# +#gateway.recover_after_nodes: 1 + +# Set the timeout to initiate the recovery process, once the N nodes +# from previous setting are up (accepts time value): +# +#gateway.recover_after_time: 5m + +# Set how many nodes are expected in this cluster. 
Once these N nodes +# are up (and recover_after_nodes is met), begin recovery process immediately +# (without waiting for recover_after_time to expire): +# +#gateway.expected_nodes: 2 + + +############################# Recovery Throttling ############################# + +# These settings allow to control the process of shards allocation between +# nodes during initial recovery, replica allocation, rebalancing, +# or when adding and removing nodes. + +# Set the number of concurrent recoveries happening on a node: +# +# 1. During the initial recovery +# +#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +# +# 2. During adding/removing nodes, rebalancing, etc +# +#cluster.routing.allocation.node_concurrent_recoveries: 2 + +# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): +# +#indices.recovery.max_bytes_per_sec: 20mb + +# Set to limit the number of open concurrent streams when +# recovering a shard from a peer: +# +#indices.recovery.concurrent_streams: 5 + + +################################## Discovery ################################## + +# Discovery infrastructure ensures nodes can be found within a cluster +# and master node is elected. Multicast discovery is the default. + +# Set to ensure a node sees N other master eligible nodes to be considered +# operational within the cluster. This should be set to a quorum/majority of +# the master-eligible nodes in the cluster. +# +#discovery.zen.minimum_master_nodes: 1 + +# Set the time to wait for ping responses from other nodes when discovering. +# Set this option to a higher value on a slow or congested network +# to minimize discovery failures: +# +#discovery.zen.ping.timeout: 3s + +# For more information, see +# + +# Unicast discovery allows to explicitly control which nodes will be used +# to discover the cluster. It can be used when multicast is not present, +# or to restrict the cluster communication-wise. +# +# 1. 
Disable multicast discovery (enabled by default): +# +discovery.zen.ping.multicast.enabled: {{es_multicast_enabled | lower }} + +# +# 2. Configure an initial list of master nodes in the cluster +# to perform discovery when new nodes (master or data) are started: +# + +#We put all the current eligible masters in here. If not specified, we assumes its a master + +{% if es_multicast_enabled is defined and not es_multicast_enabled %} +discovery.zen.ping.unicast.hosts: {{es_unicast_hosts}} +{% endif %} + + +# EC2 discovery allows to use AWS EC2 API in order to perform discovery. +# +# You have to install the cloud-aws plugin for enabling the EC2 discovery. +# +# For more information, see +# +# +# See +# for a step-by-step tutorial. + +# GCE discovery allows to use Google Compute Engine API in order to perform discovery. +# +# You have to install the cloud-gce plugin for enabling the GCE discovery. +# +# For more information, see . + +# Azure discovery allows to use Azure API in order to perform discovery. +# +# You have to install the cloud-azure plugin for enabling the Azure discovery. +# +# For more information, see . + +################################## Slow Log ################################## + +# Shard level query and fetch threshold logging. 
+ +index.search.slowlog.threshold.query.warn: 2s +index.search.slowlog.threshold.query.info: 2s +index.search.slowlog.threshold.query.debug: 1s +index.search.slowlog.threshold.query.trace: 500ms + +#index.search.slowlog.threshold.fetch.warn: 1s +#index.search.slowlog.threshold.fetch.info: 800ms +#index.search.slowlog.threshold.fetch.debug: 500ms +#index.search.slowlog.threshold.fetch.trace: 200ms + +#index.indexing.slowlog.threshold.index.warn: 10s +#index.indexing.slowlog.threshold.index.info: 5s +#index.indexing.slowlog.threshold.index.debug: 2s +#index.indexing.slowlog.threshold.index.trace: 500ms + +################################## GC Logging ################################ + +#monitor.jvm.gc.young.warn: 1000ms +#monitor.jvm.gc.young.info: 700ms +#monitor.jvm.gc.young.debug: 400ms + +#monitor.jvm.gc.old.warn: 10s +#monitor.jvm.gc.old.info: 5s +#monitor.jvm.gc.old.debug: 2s + +################################## Security ################################ + +# Uncomment if you want to enable JSONP as a valid return transport on the +# http server. With this enabled, it may pose a security risk, so disabling +# it unless you need it is recommended (it is disabled by default). 
+# +#http.jsonp.enable: true diff --git a/test/integration/config.yml b/test/integration/config.yml new file mode 100644 index 0000000..da421b5 --- /dev/null +++ b/test/integration/config.yml @@ -0,0 +1,12 @@ +--- +#Test explicit setting of parameters and variables +- name: Elasticsearch Package tests + hosts: localhost + roles: + #expand to all available parameters + - { role: elasticsearch, es_unicast_hosts: "localhost:9301", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: false, es_master_node: true, es_m_lock_enabled: true, es_multicast_enabled: false, es_node_name_prefix: "node1_", es_cluster_name: "custom-cluster" } + vars: + es_scripts: false + es_templates: false + es_version_lock: false + es_m_lock_enabled: true \ No newline at end of file diff --git a/test/integration/config/config.yml b/test/integration/config/config.yml new file mode 100644 index 0000000..a3c37e1 --- /dev/null +++ b/test/integration/config/config.yml @@ -0,0 +1,2 @@ +--- +- host: test-kitchen diff --git a/test/integration/config/serverspec/default_spec.rb b/test/integration/config/serverspec/default_spec.rb new file mode 100644 index 0000000..9a616bd --- /dev/null +++ b/test/integration/config/serverspec/default_spec.rb @@ -0,0 +1,42 @@ +require 'spec_helper' + +context "basic tests" do + + describe user('elasticsearch') do + it { should exist } + end + + describe service('node1_elasticsearch') do + it { should be_running } + end + + describe package('elasticsearch') do + it { should be_installed } + end + + describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do + it { should be_file } + end + + #test configuration parameters have been set - test all appropriately set in config file + describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do + it { should contain 'http.port: 9201' } + it { should contain 'transport.tcp.port: 9301' } + it { should contain 'node.data: false' } + it { should contain 'node.master: true' } + it { 
should contain 'discovery.zen.ping.multicast.enabled: false' } + it { should contain 'cluster.name: custom-cluster' } + it { should contain 'node.name: node1_localhost' } + it { should contain 'discovery.zen.ping.unicast.hosts: localhost:9301' } + end + + #test the node started on the correct port + describe command('curl "localhost:9201" | grep status') do + #TODO: This is returning an empty string + #its(:stdout) { should match /\"status\" : 200/ } + its(:exit_status) { should eq 0 } + end + + +end + diff --git a/test/integration/config/serverspec/spec_helper.rb b/test/integration/config/serverspec/spec_helper.rb new file mode 100644 index 0000000..590c2fa --- /dev/null +++ b/test/integration/config/serverspec/spec_helper.rb @@ -0,0 +1,2 @@ +require 'serverspec' +set :backend, :exec diff --git a/test/integration/multi.yml b/test/integration/multi.yml new file mode 100644 index 0000000..2a1447c --- /dev/null +++ b/test/integration/multi.yml @@ -0,0 +1,8 @@ +--- +#Test ability to deploy multiple instances to a machine +- name: Elasticsearch Multi tests + hosts: localhost + roles: + - { role: elasticsearch, es_node_name_prefix: "node1_", es_unicast_hosts: "localhost:9300", es_http_port: "9201", es_transport_tcp_port: "9301", es_data_node: true, es_master_node: false, es_m_lock_enabled: true, es_multicast_enabled: false } + - { role: elasticsearch, es_node_name_prefix: "master_", es_unicast_hosts: "localhost:9300", es_http_port: "9200", es_transport_tcp_port: "9300", es_data_node: false, es_master_node: true, es_m_lock_enabled: true, es_multicast_enabled: false } + vars: diff --git a/test/integration/multi/multi.yml b/test/integration/multi/multi.yml new file mode 100644 index 0000000..a3c37e1 --- /dev/null +++ b/test/integration/multi/multi.yml @@ -0,0 +1,2 @@ +--- +- host: test-kitchen diff --git a/test/integration/multi/serverspec/default_spec.rb b/test/integration/multi/serverspec/default_spec.rb new file mode 100644 index 0000000..5269486 --- /dev/null +++ 
b/test/integration/multi/serverspec/default_spec.rb @@ -0,0 +1,81 @@ +require 'spec_helper' + +context "basic tests" do + + describe user('elasticsearch') do + it { should exist } + end + + describe service('node1_elasticsearch') do + it { should be_running } + end + + describe service('master_elasticsearch') do + it { should be_running } + end + + describe package('elasticsearch') do + it { should be_installed } + end + + describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do + it { should be_file } + end + + describe file('/etc/elasticsearch/master_elasticsearch/elasticsearch.yml') do + it { should be_file } + end + + #test configuration parameters have been set - test all appropriately set in config file + describe file('/etc/elasticsearch/node1_elasticsearch/elasticsearch.yml') do + it { should contain 'http.port: 9201' } + it { should contain 'transport.tcp.port: 9301' } + it { should contain 'node.data: true' } + it { should contain 'node.master: false' } + it { should contain 'discovery.zen.ping.multicast.enabled: false' } + it { should contain 'node.name: node1_localhost' } + end + + + #test configuration parameters have been set for master - test all appropriately set in config file + describe file('/etc/elasticsearch/master_elasticsearch/elasticsearch.yml') do + it { should contain 'http.port: 9200' } + it { should contain 'transport.tcp.port: 9300' } + it { should contain 'node.data: false' } + it { should contain 'node.master: true' } + it { should contain 'discovery.zen.ping.multicast.enabled: false' } + it { should contain 'node.name: master_localhost' } + end + + describe 'Master listening' do + it 'listening in port 9200' do + expect(port 9200).to be_listening + end + end + + describe 'Node listening' do + it 'node should be listening in port 9201' do + expect(port 9201).to be_listening + end + end + + #test we started on the correct port was used for master + describe 'master started' do + it 'master node should be running', 
:retry => 3, :retry_wait => 10 do + command = command('curl "localhost:9200" | grep name') + #expect(command.stdout).should match '/*master_localhost*/' + expect(command.exit_status).to eq(0) + end + end + + #test we started on the correct port was used for node 1 + describe 'node1 started' do + it 'node should be running', :retry => 3, :retry_wait => 10 do + command = command('curl "localhost:9201" | grep name') + #expect(command.stdout).should match '/*node1_localhost*/' + expect(command.exit_status).to eq(0) + end + end + +end + diff --git a/test/integration/multi/serverspec/spec_helper.rb b/test/integration/multi/serverspec/spec_helper.rb new file mode 100644 index 0000000..590c2fa --- /dev/null +++ b/test/integration/multi/serverspec/spec_helper.rb @@ -0,0 +1,2 @@ +require 'serverspec' +set :backend, :exec diff --git a/test/integration/package.yml b/test/integration/package.yml index 6b0c7e2..23fd197 100644 --- a/test/integration/package.yml +++ b/test/integration/package.yml @@ -2,5 +2,5 @@ - name: Elasticsearch Package tests hosts: localhost roles: - - elasticsearch - vars: + - { role: elasticsearch, es_multicast_enabled: true} + vars: \ No newline at end of file diff --git a/test/integration/package/serverspec/default_spec.rb b/test/integration/package/serverspec/default_spec.rb index 10a66d3..31d025a 100644 --- a/test/integration/package/serverspec/default_spec.rb +++ b/test/integration/package/serverspec/default_spec.rb @@ -14,7 +14,7 @@ context "basic tests" do it { should be_installed } end - describe file('/etc/elasticsearch/elasticsearch.yml') do + describe file('/etc/elasticsearch/elasticsearch/elasticsearch.yml') do it { should be_file } end diff --git a/test/integration/standard.yml b/test/integration/standard.yml index da56a19..c12deb7 100644 --- a/test/integration/standard.yml +++ b/test/integration/standard.yml @@ -2,7 +2,7 @@ - name: wrapper playbook for kitchen testing "elasticsearch" hosts: localhost roles: - - elasticsearch + - { role: 
elasticsearch, es_multicast_enabled: true} vars: es_use_repository: "true" es_plugins: diff --git a/test/integration/standard/serverspec/default_spec.rb b/test/integration/standard/serverspec/default_spec.rb index 115a81a..d7471a3 100644 --- a/test/integration/standard/serverspec/default_spec.rb +++ b/test/integration/standard/serverspec/default_spec.rb @@ -14,7 +14,7 @@ context "basic tests" do it { should be_installed } end - describe file('/etc/elasticsearch/elasticsearch.yml') do + describe file('/etc/elasticsearch/elasticsearch/elasticsearch.yml') do it { should be_file } end diff --git a/vars/Debian.yml b/vars/Debian.yml new file mode 100644 index 0000000..5c07034 --- /dev/null +++ b/vars/Debian.yml @@ -0,0 +1,11 @@ +--- +java: "openjdk-7-jre-headless" +default_file: "/etc/default/elasticsearch" +init_script: "/etc/init.d/elasticsearch" +es_conf_dir: "/etc/elasticsearch" +es_home: "/usr/share/elasticsearch" + +default_data_dir: "/var/lib/elasticsearch" +default_log_dir: "/var/log/elasticsearch" +default_work_dir: "/tmp/elasticsearch" +default_pid_dir: "/var/run/elasticsearch" \ No newline at end of file diff --git a/vars/Linux.yml b/vars/Linux.yml deleted file mode 100644 index ed97d53..0000000 --- a/vars/Linux.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/vars/RedHat.yml b/vars/RedHat.yml new file mode 100644 index 0000000..c7be31d --- /dev/null +++ b/vars/RedHat.yml @@ -0,0 +1,6 @@ +--- +java: "java-1.8.0-openjdk.x86_64" +default_file: "/etc/sysconfig/elasticsearch" +init_script: "/etc/init.d/elasticsearch" +es_conf_dir: "/etc/elasticsearch/" +es_home: "/usr/share/elasticsearch" \ No newline at end of file diff --git a/vars/main.yml b/vars/main.yml index 7a6e693..d5bd5cf 100644 --- a/vars/main.yml +++ b/vars/main.yml @@ -1,4 +1,8 @@ --- -java_debian: "openjdk-7-jre-headless" -java_rhel: "java-1.8.0-openjdk.x86_64" -es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch" \ No newline at end of file +es_package_url: 
"https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch" +pid_dir: "/var/run/elasticsearch" + +#Needed to provide default directories +default_data_dir: "/var/lib/elasticsearch" +default_log_dir: "/var/log/elasticsearch" +default_work_dir: "/tmp/elasticsearch"