merge upstream

Jan Dreyer 2019-09-21 23:36:21 +02:00
commit 14d3a41edd
66 changed files with 963 additions and 1643 deletions

.ci/jobs/defaults.yml Normal file

@ -0,0 +1,67 @@
---
##### GLOBAL METADATA
- meta:
    cluster: devops-ci

##### JOB DEFAULTS

- job:
    project-type: matrix
    logrotate:
      daysToKeep: 30
      numToKeep: 100
    parameters:
      - string:
          name: branch_specifier
          default: master
          description: the Git branch specifier to build (<branchName>, <tagName>, <commitId>, etc.)
    properties:
      - github:
          url: https://github.com/elastic/ansible-elasticsearch/
      - inject:
          properties-content: HOME=$JENKINS_HOME
    concurrent: true
    node: master
    scm:
      - git:
          name: origin
          credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba
          reference-repo: /var/lib/jenkins/.git-references/ansible-elasticsearch.git
          branches:
            - ${branch_specifier}
          url: git@github.com:elastic/ansible-elasticsearch.git
          basedir: elasticsearch
          wipe-workspace: 'False'
    axes:
      - axis:
          type: slave
          name: label
          values:
            - linux
      - axis:
          name: VERSION
          filename: elasticsearch/test/matrix.yml
          type: yaml
      - axis:
          name: OS
          filename: elasticsearch/test/matrix.yml
          type: yaml
      - axis:
          name: TEST_TYPE
          filename: elasticsearch/test/matrix.yml
          type: yaml
    vault:
      role_id: cff5d4e0-61bf-2497-645f-fcf019d10c13
    wrappers:
      - ansicolor
      - timeout:
          type: absolute
          timeout: 360
          fail: true
      - timestamps
    publishers:
      - email:
          recipients: infra-root+build@elastic.co


@ -0,0 +1,26 @@
---
- job:
    name: elastic+ansible-elasticsearch+master
    display-name: elastic / ansible-elasticsearch - master
    description: Master branch testing with test kitchen
    triggers:
      - timed: H H(02-04) * * *
    builders:
      - shell: |-
          #!/usr/local/bin/runbld
          set -euo pipefail
          export RBENV_VERSION='2.3.0'
          export PATH="$HOME/.rbenv/bin:$PATH"
          eval "$(rbenv init -)"
          rbenv local $RBENV_VERSION
          export ES_XPACK_LICENSE_FILE="$(pwd)/license.json"
          echo "Getting xpack_license from secrets service"
          set +x
          VAULT_TOKEN=$( curl -s -X POST -H "Content-Type: application/json" -L -d "{\"role_id\":\"$VAULT_ROLE_ID\",\"secret_id\":\"$VAULT_SECRET_ID\"}" $VAULT_ADDR/v1/auth/approle/login | jq -r '.auth.client_token' )
          curl -s -L -H "X-Vault-Token:$VAULT_TOKEN" $VAULT_ADDR/v1/secret/devops-ci/ansible-elasticsearch/xpack_license | jq -r '.data.value' > ${ES_XPACK_LICENSE_FILE}
          set -x
          echo "Finished getting xpack_license from secrets service"
          make setup
          make verify VERSION=$VERSION PATTERN=$TEST_TYPE-$OS


@ -0,0 +1,39 @@
---
- job:
    name: elastic+ansible-elasticsearch+pull-request
    display-name: elastic / ansible-elasticsearch - pull-request
    description: Pull request testing with test kitchen
    project-type: matrix
    parameters: []
    scm:
      - git:
          branches:
            - $ghprbActualCommit
          refspec: +refs/pull/*:refs/remotes/origin/pr/*
    triggers:
      - github-pull-request:
          github-hooks: true
          org-list:
            - elastic
          allow-whitelist-orgs-as-admins: true
          cancel-builds-on-update: true
          status-context: devops-ci
    builders:
      - shell: |-
          #!/usr/local/bin/runbld
          set -euo pipefail
          export RBENV_VERSION='2.3.0'
          export PATH="$HOME/.rbenv/bin:$PATH"
          eval "$(rbenv init -)"
          rbenv local $RBENV_VERSION
          export ES_XPACK_LICENSE_FILE="$(pwd)/license.json"
          echo "Getting xpack_license from secrets service"
          set +x
          VAULT_TOKEN=$( curl -s -X POST -H "Content-Type: application/json" -L -d "{\"role_id\":\"$VAULT_ROLE_ID\",\"secret_id\":\"$VAULT_SECRET_ID\"}" $VAULT_ADDR/v1/auth/approle/login | jq -r '.auth.client_token' )
          curl -s -L -H "X-Vault-Token:$VAULT_TOKEN" $VAULT_ADDR/v1/secret/devops-ci/ansible-elasticsearch/xpack_license | jq -r '.data.value' > ${ES_XPACK_LICENSE_FILE}
          set -x
          echo "Finished getting xpack_license from secrets service"
          make setup
          make verify VERSION=$VERSION PATTERN=$TEST_TYPE-$OS


@ -1,4 +1,3 @@
-<!
<!--
** Please read the guidelines below. **

.github/stale.yml vendored Normal file

@ -0,0 +1,33 @@
---
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 90
# Number of days of inactivity before an stale issue is closed
daysUntilClose: 30
# Label to use when marking an issue as stale
staleLabel: triage/stale
issues:
  # Comment to post when marking an issue as stale.
  markComment: |-
    This issue has been automatically marked as stale because it has not had
    recent activity. It will be closed if no further activity occurs. Thank you
    for your contributions.
  # Comment to post when closing a stale issue.
  closeComment: |-
    This issue has been automatically closed because it has not had recent
    activity since being marked as stale.
pulls:
  # Comment to post when marking a PR as stale.
  markComment: |-
    This PR has been automatically marked as stale because it has not had
    recent activity. It will be closed if no further activity occurs. Thank you
    for your contributions.
    To track this PR (even if closed), please open a corresponding issue if one does not already exist.
  # Comment to post when closing a stale PR.
  closeComment: |-
    This PR has been automatically closed because it has not had recent
    activity since being marked as stale.
    Please reopen when work resumes.


@ -6,11 +6,9 @@ provisioner:
  name: ansible_playbook
  hosts: localhost
  roles_path: ../
-  require_ansible_repo: false
+  require_ansible_repo: true
  require_ansible_omnibus: false
  require_ansible_source: false
-  require_pip: true
-  ansible_version: 2.4.3.0
  http_proxy: <%= ENV['HTTP_PROXY'] %>
  https_proxy: <%= ENV['HTTPS_PROXY'] %>
  no_proxy: localhost,127.0.0.1
@ -20,8 +18,8 @@ provisioner:
  attributes:
    extra_vars:
      es_major_version: "<%= ENV['VERSION'] %>"
-      <% if ENV['VERSION'] == '5.x' %>
-      es_version: '5.6.11'
+      <% if ENV['VERSION'] == '6.x' %>
+      es_version: '6.8.0'
      <% end %>
      <% end %>
@ -34,12 +32,11 @@ platforms:
      image: ubuntu:14.04
      privileged: true
      provision_command:
-        - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible && add-apt-repository -y ppa:openjdk-r/ppa
+        - apt-get update -q && apt-get install -y -q software-properties-common && add-apt-repository -y ppa:ansible/ansible && add-apt-repository -y ppa:openjdk-r/ppa
-        - apt-get update && apt-get -y -q install python-apt python-pycurl python-pip python-openssl build-essential libssl-dev libffi-dev python-dev locales openjdk-8-jre
+        - apt-get update -q && apt-get -y -q install ansible openjdk-8-jre python-jmespath
        - locale-gen en_US.UTF-8 && localedef -i en_US -c -f UTF-8 en_US.UTF-8
-        - pip install jmespath pyOpenSSL ndg-httpsclient cryptography==1.8.1
      use_sudo: false
      volume:
        - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
        - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
- name: ubuntu-16.04 - name: ubuntu-16.04
@ -47,13 +44,11 @@ platforms:
      image: ubuntu:16.04
      privileged: true
      provision_command:
-        - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible
+        - apt-get update -q && apt-get install -y -q iproute locales software-properties-common && add-apt-repository -y ppa:ansible/ansible
-        - apt-get install -y -q net-tools
+        - apt-get update -q && apt-get install -y -q ansible python-jmespath
-        - apt-get update && apt-get -y -q install python-apt python-pycurl python-pip locales
        - locale-gen en_US.UTF-8 && localedef -i en_US -c -f UTF-8 en_US.UTF-8
-        - pip install jmespath
      use_sudo: false
      volume:
        - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
        - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
      run_command: "/sbin/init"
@ -62,11 +57,9 @@ platforms:
      image: ubuntu:18.04
      privileged: true
      provision_command:
-        - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible
+        - apt-get install -y -q ansible iproute2 python-jmespath
-        - apt-get update && apt-get -y -q install python-apt python-pycurl python-pip net-tools iproute2
-        - pip install jmespath
      use_sudo: false
      volume:
        - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
        - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
      run_command: "/sbin/init"
@ -75,14 +68,14 @@ platforms:
      image: debian:8
      privileged: true
      provision_command:
-        - echo "deb http://http.debian.net/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list
+        - apt-get update -q && apt-get install -y -q gnupg2 python-jmespath
+        - echo "deb http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list
+        - echo 'Acquire::Check-Valid-Until "false";' > /etc/apt/apt.conf
        - apt-get update && apt-get -y install -t jessie-backports openjdk-8-jre-headless
-        - apt-get update && apt-get -y install python python-dev python-pip build-essential libyaml-dev python-yaml curl wget net-tools
+        - echo "deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main" > /etc/apt/sources.list.d/ansible.list
-        - sed -ri 's/^#?PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config
+        - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367
-        - sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+        - apt-get update -q && apt-get install -y -q ansible
-        - sed -ri 's/^#?UsePAM .*/UsePAM no/' /etc/ssh/sshd_config
-        - pip install jmespath setuptools --upgrade
      volume:
        - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
        - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
      use_sudo: false
@ -92,13 +85,11 @@ platforms:
      image: debian:9
      privileged: true
      provision_command:
-        - apt-get update && apt-get -y install python python-dev python-pip build-essential libyaml-dev python-yaml curl wget systemd-sysv
+        - apt-get update -q && apt-get install -y -q gnupg2 python-jmespath systemd-sysv
-        - apt-get install -y -q net-tools
+        - echo "deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main" > /etc/apt/sources.list.d/ansible.list
-        - sed -ri 's/^#?PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config
+        - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367
-        - sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+        - apt-get update -q && apt-get install -y -q ansible
-        - sed -ri 's/^#?UsePAM .*/UsePAM no/' /etc/ssh/sshd_config
-        - pip install jmespath
      volume:
        - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
        - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
      use_sudo: false
@ -107,14 +98,9 @@ platforms:
    driver_config:
      image: centos:7
      provision_command:
-        - sed -ri 's/^#?PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config
-        - sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
-        - sed -ri 's/^#?UsePAM .*/UsePAM no/' /etc/ssh/sshd_config
        - yum -y install epel-release
-        - yum -y install initscripts python-pip
+        - yum -y install ansible iproute python2-jmespath
-        - yum clean all
-        - pip install jmespath
      volume:
        - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
        - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
      run_command: "/usr/sbin/init"
@ -142,10 +128,6 @@ suites:
    provisioner:
      playbook: test/integration/xpack-upgrade.yml
      idempotency_test: false
-  - name: multi
-    provisioner:
-      playbook: test/integration/multi.yml
-      idempotency_test: true
  - name: issue-test
    provisioner:
      playbook: test/integration/issue-test.yml

.ruby-version Normal file

@ -0,0 +1 @@
2.3.0


@ -1,3 +1,56 @@
## 7.1.1 - 2019/06/04
### Breaking changes
#### End of multi-instance support
* Starting with ansible-elasticsearch:7.1.1, installing more than one instance of Elasticsearch **on the same host** is no longer supported.
* Configuration, datas, logs and PID directories are now using standard paths like in the official Elasticsearch packages.
* If you use only one instance but want to upgrade from an older ansible-elasticsearch version, follow [upgrade procedure](./docs/multi-instance.md#upgrade-procedure)
* If you install more than one instance of Elasticsearch on the same host (with different ports, directory and config files), **do not update to ansible-elasticsearch >= 7.1.1**, please follow this [workaround](./docs/multi-instance.md#workaround) instead.
* For multi-instances use cases, we are now recommending Docker containers using our official images (https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
#### Moved some security features to basic
You can now using basic authentication by overriding `es_api_basic_auth_username` and `es_api_basic_auth_password` variables without providing a license file.
### Features
* 7.1.1 as default Elasticsearch version
* [#539](https://github.com/elastic/ansible-elasticsearch/pull/539) and [#542](https://github.com/elastic/ansible-elasticsearch/pull/542) - @grzegorznowak - Make ansible role compatible with ansible [check mode](https://docs.ansible.com/ansible/latest/user_guide/playbooks_checkmode.html)
* [#558](https://github.com/elastic/ansible-elasticsearch/pull/558) - @jmlrt - Add support for Elasticsearch 7.x, remove 5.x support and update tests
* [#560](https://github.com/elastic/ansible-elasticsearch/pull/560) - @jmlrt - Use default xpack features and remove system_key deprecated feature
* [#562](https://github.com/elastic/ansible-elasticsearch/pull/562) - @hamishforbes - Allow to customize instance suffix
* [#566](https://github.com/elastic/ansible-elasticsearch/pull/566) - @jmlrt - Remove multi-instances support
* [#567](https://github.com/elastic/ansible-elasticsearch/pull/567) - @jmlrt - Remove file scripts deprecated feature
* [#568](https://github.com/elastic/ansible-elasticsearch/pull/568) - @jmlrt - Skip Java install for Elasticsearch 7.x (java is now embeded)
### Fixes
* [#543](https://github.com/elastic/ansible-elasticsearch/pull/543) - @victorgs - Fix typo in Makefile
* [#546](https://github.com/elastic/ansible-elasticsearch/pull/546) - @thiagonache - Fix README example
* [#550](https://github.com/elastic/ansible-elasticsearch/pull/550) - @pemontto - Fix template conditional
* [#556](https://github.com/elastic/ansible-elasticsearch/pull/556) - @jmlrt - Fix debian-8 test
* [#557](https://github.com/elastic/ansible-elasticsearch/pull/557) - @jmlrt - Bump gem dependencies to fix [CVE-2018-1000544](https://nvd.nist.gov/vuln/detail/CVE-2018-1000544) and [CVE-2018-1000201](https://nvd.nist.gov/vuln/detail/CVE-2018-1000201)
* [#564](https://github.com/elastic/ansible-elasticsearch/pull/564) - @jmlrt - Bump all gem dependencies to fix kitchen tests
## 6.6.0 - 2019/01/29
### Features
* 6.6.0 as default Elasticsearch version
* [#521](https://github.com/elastic/ansible-elasticsearch/pull/521) - @Crazybus - Allow switching between oss and standard packages
* [#528](https://github.com/elastic/ansible-elasticsearch/pull/528) - @Fra-nk - Use systemd's RequiresMountsFor
* [#530](https://github.com/elastic/ansible-elasticsearch/pull/530) - @lde - Use dpkg_selections to lock Elasticsearch version
### Fixes
* [#513](https://github.com/elastic/ansible-elasticsearch/pull/513) - @kakoni - Fix typo in elasticsearch-parameters.yml
* [#522](https://github.com/elastic/ansible-elasticsearch/pull/522) - @SlothOfAnarchy - Fix package download URL
* [#526](https://github.com/elastic/ansible-elasticsearch/pull/526) - @Fra-nk - Allow not installing Elasticsearch deb repository key
* [#527](https://github.com/elastic/ansible-elasticsearch/pull/527) - @katsukamaru - Execute java version check in check mode
## 6.5.1.1 - 2018/11/27
### Fixes
@ -14,7 +67,7 @@
* [#487](https://github.com/elastic/ansible-elasticsearch/pull/487) - @lazouz - Disable check mode to make install plugins idempotent
* [#501](https://github.com/elastic/ansible-elasticsearch/pull/501) - @kaxil - Make the order of configs consistent for comparing
* [#497](https://github.com/elastic/ansible-elasticsearch/pull/497) - @Crazybus - Document es_use_repository and es_custom_package_url
* [#504](https://github.com/elastic/ansible-elasticsearch/pull/504) - @victorgs - Using tests as filters is deprecated
* [#493](https://github.com/elastic/ansible-elasticsearch/pull/493) - @Crazybus - Only use the first found java version if there are multiple installed
@ -56,12 +109,12 @@ When upgrading from module versions prior to 6.3, there are a number of upgrade
* oss to oss
* oss to xpack
* xpack to xpack
* X-Pack configuration files which used to be in `${ES_PATH_CONF}/x-pack` are now in `${ES_PATH_CONF}/`. If you have any configuration files in this directory not managed by ansible you will need to move them manually.
#### Features
* Integration testing has been refactored in [#457](https://github.com/elastic/ansible-elasticsearch/pull/457). This removed a lot of duplicate tests and added new tests to make sure all upgrade paths work.
-* It is now possible to test elasticsearch snapshot builds by setting `es_use_snapshot_release` to `true`
+* It is now possible to test Elasticsearch snapshot builds by setting `es_use_snapshot_release` to `true`
#### Fixes
@ -76,14 +129,14 @@ When upgrading from module versions prior to 6.3, there are a number of upgrade
## 6.2.4.1 - 2018/06/14
Patch release requested by @average-joe in #453
#### Pull requests
* [#445](https://github.com/elastic/ansible-elasticsearch/pull/445) - @gekkeharry13 - Added configuration options for configuring x-pack notifications via email with some other nice fixes.
* [#450](https://github.com/elastic/ansible-elasticsearch/pull/450) - @Crazybus - improving some flakey tests which were randomly failing.
* [#447](https://github.com/elastic/ansible-elasticsearch/pull/447) - @chaintng - Fix to make sure sudo is used when running `update-alternatives` for java.
* [#423](https://github.com/elastic/ansible-elasticsearch/pull/423) - @eRadical - Fixing the until condition being used when installing rpms from a custom repository.
## 6.2.4 - 2018/04/24
@ -104,9 +157,9 @@ Patch release requested by @average-joe in #453
## 6.1.3 - 2018/02/01
* `6.x` is now the default `es_major_version` with `6.1.3` as the default `es_version`
* Special thanks to @shribigb, @toddlers and @remil1000 for their efforts in getting `6.x` support working!
* `.kitchen.yml` has been updated to allow testing both `6.x` and `5.x` versions
* A new [Jenkins job](https://devops-ci.elastic.co/job/elastic+ansible-elasticsearch+pull-request/) has been added for pull requests to automatically test all combinations of `6.x` and `5.x` on ubuntu-1404, ubuntu-1604, debian-8 and centos-7 with the various test suites.
## 5.5.1 - 2017/08/20


@ -1,6 +1,6 @@
source 'https://rubygems.org'

-gem 'test-kitchen', '1.20.0'
+gem 'test-kitchen'
-gem 'kitchen-docker', '2.6.0'
+gem 'kitchen-docker'
-gem 'kitchen-ansible', '0.48.1'
+gem 'kitchen-ansible'
-gem 'net-ssh', '4.2.0'
+gem 'net-ssh'


@ -1,50 +1,91 @@
GEM
  remote: https://rubygems.org/
  specs:
+    bcrypt_pbkdf (1.0.1)
    builder (3.2.3)
+    ed25519 (1.2.4)
+    equatable (0.5.0)
    erubis (2.7.0)
-    ffi (1.9.18)
+    ffi (1.10.0)
-    gssapi (1.2.0)
+    gssapi (1.3.0)
      ffi (>= 1.0.1)
    gyoku (1.3.1)
      builder (>= 2.1.2)
    httpclient (2.8.3)
-    kitchen-ansible (0.48.1)
+    kitchen-ansible (0.50.0)
      net-ssh (>= 3)
-      test-kitchen (~> 1.4)
+      test-kitchen (>= 1.4)
-    kitchen-docker (2.6.0)
+    kitchen-docker (2.9.0)
      test-kitchen (>= 1.0.0)
+    license-acceptance (1.0.11)
+      pastel (~> 0.7)
+      tomlrb (~> 1.2)
+      tty-box (~> 0.3)
+      tty-prompt (~> 0.18)
    little-plugger (1.1.4)
    logging (2.2.2)
      little-plugger (~> 1.1)
      multi_json (~> 1.10)
-    mixlib-install (3.9.0)
+    mixlib-install (3.11.18)
      mixlib-shellout
      mixlib-versioning
      thor
-    mixlib-shellout (2.3.2)
+    mixlib-shellout (2.4.4)
-    mixlib-versioning (1.2.2)
+    mixlib-versioning (1.2.7)
    multi_json (1.13.1)
-    net-scp (1.2.1)
-      net-ssh (>= 2.6.5)
-    net-ssh (4.2.0)
-    net-ssh-gateway (1.3.0)
-      net-ssh (>= 2.6.5)
+    necromancer (0.4.0)
+    net-scp (2.0.0)
+      net-ssh (>= 2.6.5, < 6.0.0)
+    net-ssh (5.2.0)
+    net-ssh-gateway (2.0.0)
+      net-ssh (>= 4.0.0)
    nori (2.6.0)
+    pastel (0.7.2)
+      equatable (~> 0.5.0)
+      tty-color (~> 0.4.0)
    rubyntlm (0.6.2)
-    rubyzip (1.2.1)
+    rubyzip (1.2.2)
-    test-kitchen (1.20.0)
+    strings (0.1.5)
+      strings-ansi (~> 0.1)
+      unicode-display_width (~> 1.5)
+      unicode_utils (~> 1.4)
+    strings-ansi (0.1.0)
+    test-kitchen (2.2.5)
+      bcrypt_pbkdf (~> 1.0)
+      ed25519 (~> 1.2)
+      license-acceptance (~> 1.0, >= 1.0.11)
      mixlib-install (~> 3.6)
      mixlib-shellout (>= 1.2, < 3.0)
-      net-scp (~> 1.1)
+      net-scp (>= 1.1, < 3.0)
-      net-ssh (>= 2.9, < 5.0)
+      net-ssh (>= 2.9, < 6.0)
-      net-ssh-gateway (~> 1.2)
+      net-ssh-gateway (>= 1.2, < 3.0)
-      thor (~> 0.19, < 0.19.2)
+      thor (~> 0.19)
      winrm (~> 2.0)
      winrm-elevated (~> 1.0)
-      winrm-fs (~> 1.1.0)
+      winrm-fs (~> 1.1)
-    thor (0.19.1)
+    thor (0.20.3)
-    winrm (2.2.3)
+    timers (4.3.0)
+    tomlrb (1.2.8)
+    tty-box (0.3.0)
+      pastel (~> 0.7.2)
+      strings (~> 0.1.4)
+      tty-cursor (~> 0.6.0)
+    tty-color (0.4.3)
+    tty-cursor (0.6.1)
+    tty-prompt (0.18.1)
+      necromancer (~> 0.4.0)
+      pastel (~> 0.7.0)
+      timers (~> 4.0)
+      tty-cursor (~> 0.6.0)
+      tty-reader (~> 0.5.0)
+    tty-reader (0.5.0)
+      tty-cursor (~> 0.6.0)
+      tty-screen (~> 0.6.4)
+      wisper (~> 2.0.0)
+    tty-screen (0.6.5)
+    unicode-display_width (1.6.0)
+    unicode_utils (1.4.0)
+    winrm (2.3.2)
      builder (>= 2.1.2)
      erubis (~> 2.7)
      gssapi (~> 1.2)
@ -53,23 +94,24 @@ GEM
      logging (>= 1.6.1, < 3.0)
      nori (~> 2.0)
      rubyntlm (~> 0.6.0, >= 0.6.1)
-    winrm-elevated (1.1.0)
+    winrm-elevated (1.1.1)
      winrm (~> 2.0)
      winrm-fs (~> 1.0)
-    winrm-fs (1.1.1)
+    winrm-fs (1.3.2)
      erubis (~> 2.7)
      logging (>= 1.6.1, < 3.0)
      rubyzip (~> 1.1)
      winrm (~> 2.0)
+    wisper (2.0.0)

PLATFORMS
  ruby

DEPENDENCIES
-  kitchen-ansible (= 0.48.1)
+  kitchen-ansible
-  kitchen-docker (= 2.6.0)
+  kitchen-docker
-  net-ssh (= 4.2.0)
+  net-ssh
-  test-kitchen (= 1.20.0)
+  test-kitchen

BUNDLED WITH
-   1.16.1
+   1.17.0


@ -1,10 +1,10 @@
default: build
SHELL:=/bin/bash -eux
-VERSION := 6.x
+VERSION := 7.x
PATTERN := xpack-ubuntu-1604
-.PHONY: converge cerify test login destroy list
+.PHONY: converge verify test login destroy list
setup:
	bundle install

README.md

@ -2,18 +2,28 @@
[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+ansible-elasticsearch+master.svg)](https://devops-ci.elastic.co/job/elastic+ansible-elasticsearch+master/)
[![Ansible Galaxy](https://img.shields.io/badge/ansible--galaxy-elastic.elasticsearch-blue.svg)](https://galaxy.ansible.com/elastic/elasticsearch/)
-**THIS ROLE IS FOR 6.x, 5.x. FOR 2.x SUPPORT PLEASE USE THE 2.x BRANCH.**
+**THIS ROLE IS FOR 7.x & 6.x**
-Ansible role for 6.x/5.x Elasticsearch. Currently this works on Debian and RedHat based linux systems. Tested platforms are:
+Ansible role for 7.x/6.x Elasticsearch. Currently this works on Debian and RedHat based linux systems. Tested platforms are:
* Ubuntu 14.04
* Ubuntu 16.04
+* Ubuntu 18.04
* Debian 8
+* Debian 9
* CentOS 7
-The latest Elasticsearch versions of 6.x and 5.x are actively tested. **Only Ansible versions > 2.4.3.0 are supported, as this is currently the only version tested.**
+The latest Elasticsearch versions of 7.x & 6.x are actively tested.
-##### Dependency
+## BREAKING CHANGES
+### Notice about multi-instance support
+* If you use only one instance but want to upgrade from an older ansible-elasticsearch version, follow [upgrade procedure](./docs/multi-instance.md#upgrade-procedure)
+* If you install more than one instance of Elasticsearch on the same host (with different ports, directory and config files), **do not update to ansible-elasticsearch >= 7.1.1**, please follow this [workaround](./docs/multi-instance.md#workaround) instead.
+* For multi-instances use cases, we are now recommending Docker containers using our official images (https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
+## Dependency
This role uses the json_query filter which [requires jmespath](https://github.com/ansible/ansible/issues/24319) on the local machine.
## Usage
@ -21,23 +31,28 @@ This role uses the json_query filter which [requires jmespath](https://github.co
Create your Ansible playbook with your own tasks, and include the role elasticsearch. You will have to have this repository accessible within the context of playbook.
```sh
-ansible-galaxy install elastic.elasticsearch
+ansible-galaxy install elastic.elasticsearch,7.1.1
```
-Then create your playbook yaml adding the role elasticsearch. By default, the user is only required to specify a unique es_instance_name per role application. This should be unique per node.
+Then create your playbook yaml adding the role elasticsearch.
The application of the elasticsearch role results in the installation of a node on a host.
The simplest configuration therefore consists of:
```yaml
- name: Simple Example
  hosts: localhost
  roles:
    - role: elastic.elasticsearch
-     es_instance_name: "node1"
+     vars:
+       es_version: 7.1.1
```
-The above installs a single node 'node1' on the hosts 'localhost'.
+The above installs Elasticsearch 7.1.1 in a single node 'node1' on the hosts 'localhost'.
+**Note**:
+Elasticsearch default version is described in [`es_version`](defaults/main.yml#L2). You can override this variable in your playbook to install another version.
+While we are testing this role only with one 7.x and one 6.x version (respectively [7.1.1](defaults/main.yml#L2) and [6.8.0](.kitchen.yml#L22) at the time of writing), this role should work with others version also in most cases.
This role also uses [Ansible tags](http://docs.ansible.com/ansible/playbooks_tags.html). Run your playbook with the `--list-tasks` flag for more information.
@ -90,9 +105,9 @@ The `PATTERN` is a kitchen pattern which can match multiple suites. To run all t
$ make converge PATTERN=centos-7
```
-The default version is 6.x If you want to test 5.x you can override it with the `VERSION` variable to test 5.x
+The default version is 7.x. If you want to test 6.x you can override it with the `VERSION` variable, for example:
```sh
-$ make converge VERSION=5.x PATTERN=oss-centos-7
+$ make converge VERSION=6.x PATTERN=oss-centos-7
```
When you are finished testing you can clean up everything with
@ -102,7 +117,7 @@ $ make destroy-all
### Basic Elasticsearch Configuration
All Elasticsearch configuration parameters are supported. This is achieved using a configuration map parameter 'es_config' which is serialized into the elasticsearch.yml file.
The use of a map ensures the Ansible playbook does not need to be updated to reflect new/deprecated/plugin configuration parameters.
In addition to the es_config map, several other parameters are supported for additional functions e.g. script installation. These can be found in the role's defaults/main.yml file.
@ -115,22 +130,18 @@ The following illustrates applying configuration parameters to an Elasticsearch
  roles:
    - role: elastic.elasticsearch
      vars:
-       es_instance_name: "node1"
        es_data_dirs:
          - "/opt/elasticsearch/data"
        es_log_dir: "/opt/elasticsearch/logs"
        es_config:
          node.name: "node1"
          cluster.name: "custom-cluster"
-         discovery.zen.ping.unicast.hosts: "localhost:9301"
+         discovery.seed_hosts: "localhost:9301"
          http.port: 9201
-         transport.tcp.port: 9301
+         transport.port: 9301
          node.data: false
          node.master: true
          bootstrap.memory_lock: true
-       es_scripts: false
-       es_templates: false
-       es_version_lock: false
        es_heap_size: 1g
        es_api_port: 9201
```
@ -138,11 +149,11 @@ The following illustrates applying configuration parameters to an Elasticsearch
Whilst the role installs Elasticsearch with the default configuration parameters, the following should be configured to ensure a cluster successfully forms:
* ```es_config['http.port']``` - the http port for the node
-* ```es_config['transport.tcp.port']``` - the transport port for the node
+* ```es_config['transport.port']``` - the transport port for the node
-* ```es_config['discovery.zen.ping.unicast.hosts']``` - the unicast discovery list, in the comma separated format ```"<host>:<port>,<host>:<port>"``` (typically the clusters dedicated masters)
+* ```es_config['discovery.seed_hosts']``` - the unicast discovery list, in the comma separated format ```"<host>:<port>,<host>:<port>"``` (typically the clusters dedicated masters)
* ```es_config['network.host']``` - sets both network.bind_host and network.publish_host to the same host value. The network.bind_host setting allows to control the host different network components will bind on.
The network.publish_host setting allows to control the host the node will publish itself within the cluster so other nodes will be able to connect to it.
See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html for further details on default binding behaviour and available options.
The role makes no attempt to enforce the setting of these are requires users to specify them appropriately. IT is recommended master nodes are listed and thus deployed first where possible.
@ -155,111 +166,85 @@ A more complex example:
  roles:
    - role: elastic.elasticsearch
      vars:
-       es_instance_name: "node1"
        es_data_dirs:
          - "/opt/elasticsearch/data"
        es_log_dir: "/opt/elasticsearch/logs"
        es_config:
          node.name: "node1"
          cluster.name: "custom-cluster"
-         discovery.zen.ping.unicast.hosts: "localhost:9301"
+         discovery.seed_hosts: "localhost:9301"
          http.port: 9201
-         transport.tcp.port: 9301
+         transport.port: 9301
          node.data: false
          node.master: true
          bootstrap.memory_lock: true
-       es_scripts: false
-       es_templates: false
-       es_version_lock: false
        es_heap_size: 1g
        es_start_service: false
-       es_plugins_reinstall: false
        es_api_port: 9201
        es_plugins:
-         - plugin: ingest-geoip
+         - plugin: ingest-attachment
            proxy_host: proxy.example.com
            proxy_port: 8080
```
#### Important Note
**The role uses es_api_host and es_api_port to communicate with the node for actions only achievable via http e.g. to install templates and to check the NODE IS ACTIVE. These default to "localhost" and 9200 respectively.
If the node is deployed to bind on either a different host or port, these must be changed.**
### Multi Node Server Installations
-The application of the elasticsearch role results in the installation of a node on a host. Specifying the role multiple times for a host therefore results in the installation of multiple nodes for the host.
+The application of the elasticsearch role results in the installation of a node on a host.
-An example of a two server deployment is shown below. The first server holds the master and is thus declared first. Whilst not mandatory, this is recommended in any multi node cluster configuration. The second server hosts two data nodes.
+An example of a three server deployment is shown below. The first server holds the master and is thus declared first. Whilst not mandatory, this is recommended in any multi node cluster configuration. The two others servers hosts data nodes.
-**Note the structure of the below playbook for the data nodes. Whilst a more succinct structures are possible which allow the same role to be applied to a host multiple times, we have found the below structure to be the most reliable with respect to var behaviour. This is the tested approach.**
+**Note that we do not support anymore installation of more than one node in the same host**
```yaml
-- hosts: master_nodes
+- hosts: master_node
  roles:
    - role: elastic.elasticsearch
      vars:
-       es_instance_name: "node1"
        es_heap_size: "1g"
        es_config:
          cluster.name: "test-cluster"
-         discovery.zen.ping.unicast.hosts: "elastic02:9300"
+         discovery.seed_hosts: "elastic02:9300"
          http.port: 9200
-         transport.tcp.port: 9300
          node.data: false
          node.master: true
          bootstrap.memory_lock: false
-       es_scripts: false
-       es_templates: false
-       es_version_lock: false
-       ansible_user: ansible
        es_plugins:
-         - plugin: ingest-geoip
+         - plugin: ingest-attachment
-- hosts: data_nodes
+- hosts: data_node_1
  roles:
    - role: elastic.elasticsearch
      vars:
-       es_instance_name: "node1"
        es_data_dirs:
          - "/opt/elasticsearch"
        es_config:
          cluster.name: "test-cluster"
-         discovery.zen.ping.unicast.hosts: "elastic02:9300"
+         discovery.seed_hosts: "elastic02:9300"
          http.port: 9200
-         transport.tcp.port: 9300
          node.data: true
          node.master: false
          bootstrap.memory_lock: false
-       es_scripts: false
-       es_templates: false
-       es_version_lock: false
-       ansible_user: ansible
-       es_api_port: 9200
        es_plugins:
-         - plugin: ingest-geoip
+         - plugin: ingest-attachment
-- hosts: data_nodes
+- hosts: data_node_2
  roles:
    - role: elastic.elasticsearch
      vars:
-       es_instance_name: "node2"
-       es_api_port: 9201
        es_config:
-         discovery.zen.ping.unicast.hosts: "elastic02:9300"
+         cluster.name: "test-cluster"
-         http.port: 9201
+         discovery.seed_hosts: "elastic02:9300"
-         transport.tcp.port: 9301
+         http.port: 9200
          node.data: true
          node.master: false
          bootstrap.memory_lock: false
-         cluster.name: "test-cluster"
-       es_scripts: false
-       es_templates: false
-       es_version_lock: false
-       es_api_port: 9201
-       ansible_user: ansible
        es_plugins:
-         - plugin: ingest-geoip
+         - plugin: ingest-attachment
```
Parameters can additionally be assigned to hosts using the inventory file if desired.
@ -274,14 +259,11 @@ ansible-playbook -i hosts ./your-playbook.yml
### Installing X-Pack Features
-X-Pack features, such as Security, are supported. This feature is currently experimental.
+X-Pack features, such as Security, are supported.
-The parameter `es_xpack_features` by default enables all features i.e. it defaults to ["alerting","monitoring","graph","security","ml"]
+The parameter `es_xpack_features` allows to list xpack features to install (example: `["alerting","monitoring","graph","security","ml"]`).
+When the list is empty, it install all features available with the current licence.
-The following additional parameters allow X-Pack to be configured:
-* ```es_message_auth_file``` System Key field to allow message authentication. This file should be placed in the 'files' directory.
-* ```es_xpack_custom_url``` Url from which X-Pack can be downloaded. This can be used for installations in isolated environments where the elastic.co repo is not accessible. e.g. ```es_xpack_custom_url: "https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-5.5.1.zip"```
* ```es_role_mapping``` Role mappings file declared as yml as described [here](https://www.elastic.co/guide/en/x-pack/current/mapping-roles.html)
@ -314,8 +296,8 @@ es_users:
        - power_user
        - user
```
* ```es_roles``` - Elasticsearch roles can be declared here as yml. Two sub keys 'native' and 'file' determine how the role is created i.e. either through a file or http(native) call. Beneath each key list the roles with appropriate permissions, using the file based format described [here] (https://www.elastic.co/guide/en/x-pack/current/file-realm.html) e.g.
```yaml
@ -357,13 +339,13 @@ es_roles:
        - write
        - delete
        - create_index
```
* ```es_xpack_license``` - X-Pack license. The license is a json blob. Set the variable directly (possibly protected by Ansible vault) or from a file in the Ansible project on the control machine via a lookup:
```yaml
es_xpack_license: "{{ lookup('file', playbook_dir + '/files/' + es_cluster_name + '/license.json') }}"
```
X-Pack configuration parameters can be added to the elasticsearch.yml file using the normal `es_config` parameter.
@ -384,8 +366,7 @@ These can either be set to a user declared in the file based realm, with admin p
In addition to es_config, the following parameters allow the customization of the Java and Elasticsearch versions as well as the role behaviour. Options include:
* ```es_enable_xpack``` Default `true`. Setting this to `false` will install the oss release of elasticsearch
-* ```es_major_version``` Should be consistent with es_version. For versions >= 5.0 and < 6.0 this must be "5.x". For versions >= 6.0 this must be "6.x".
-* ```es_version``` (e.g. "6.3.0").
+* ```es_version``` (e.g. "7.1.1").
* ```es_api_host``` The host name used for actions requiring HTTP e.g. installing templates. Defaults to "localhost".
* ```es_api_port``` The port used for actions requiring HTTP e.g. installing templates. Defaults to 9200. **CHANGE IF THE HTTP PORT IS NOT 9200**
* ```es_api_basic_auth_username``` The Elasticsearch username for making admin changing actions. Used if Security is enabled. Ensure this user is admin.
@ -395,23 +376,24 @@ In addition to es_config, the following parameters allow the customization of th
* ```es_plugins``` an array of plugin definitions e.g.:
```yaml
  es_plugins:
-    - plugin: ingest-geoip
+    - plugin: ingest-attachment
```
* ```es_path_repo``` Sets the whitelist for allowing local back-up repositories
* ```es_action_auto_create_index ``` Sets the value for auto index creation, use the syntax below for specifying indexes (else true/false):
    es_action_auto_create_index: '[".watches", ".triggered_watches", ".watcher-history-*"]'
* ```es_allow_downgrades``` For development purposes only. (true or false (default) )
-* ```es_java_install``` If set to false, Java will not be installed. (true (default) or false)
+* ```es_java_install``` If set to true, Java will be installed. (false (default for 7.x) or true (default for 6.x))
* ```update_java``` Updates Java to the latest version. (true or false (default))
* ```es_max_map_count``` maximum number of VMA (Virtual Memory Areas) a process can own. Defaults to 262144.
* ```es_max_open_files``` the maximum file descriptor number that can be opened by this process. Defaults to 65536.
* ```es_max_threads``` the maximum number of threads the process can start. Defaults to 2048 (the minimum required by elasticsearch).
* ```es_debian_startup_timeout``` how long Debian-family SysV init scripts wait for the service to start, in seconds. Defaults to 10 seconds.
-* ```es_use_repository``` Setting this to `false` will stop Ansible from using the official Elastic package repositories.
+* ```es_use_repository``` Setting this to `false` will stop Ansible from using the official Elastic package from any repository configured on the system.
+* ```es_add_repository``` Setting this to `false` will stop Ansible to add the official Elastic package repositories (if es_use_repository is true) if you want to use a repo already present.
* ```es_custom_package_url``` the URL to the rpm or deb package for Ansible to install. When using this you will also need to set `es_use_repository: false` and make sure that the `es_version` matches the version being installed from your custom URL. E.g. `es_custom_package_url: https://downloads.example.com/elasticsearch.rpm`
Earlier examples illustrate the installation of plugins using `es_plugins`. For officially supported plugins no version or source delimiter is required. The plugin script will determine the appropriate plugin version based on the target Elasticsearch version. For community based plugins include the full url. This approach should NOT be used for the X-Pack plugin. See X-Pack below for details here.
If installing Monitoring or Alerting, ensure the license plugin is also specified. Security configuration currently has limited support, but more support is planned for later versions.
To configure X-pack to send mail, the following configuration can be added to the role. When require_auth is true, you will also need to provide the user and password. If not these can be removed:
@ -432,20 +414,12 @@ To configure X-pack to send mail, the following configuration can be added to th
* ```es_user_id``` - default is undefined.
* ```es_group_id``` - default is undefined.
Both ```es_user_id``` and ```es_group_id``` must be set for the user and group ids to be set.
-By default, each node on a host will be installed to use unique pid, plugin, work, data and log directories. These directories are created, using the instance and host name, beneath default locations
-controlled by the following parameters:
-* ```es_pid_dir``` - defaults to "/var/run/elasticsearch".
-* ```es_data_dirs``` - defaults to "/var/lib/elasticsearch". This can be a list or comma separated string e.g. ["/opt/elasticsearch/data-1","/opt/elasticsearch/data-2"] or "/opt/elasticsearch/data-1,/opt/elasticsearch/data-2"
-* ```es_log_dir``` - defaults to "/var/log/elasticsearch".
* ```es_restart_on_change``` - defaults to true. If false, changes will not result in Elasticsearch being restarted.
* ```es_plugins_reinstall``` - defaults to false. If true, all currently installed plugins will be removed from a node. Listed plugins will then be re-installed.
-This role ships with sample scripts and templates located in the [files/scripts/](files/scripts) and [files/templates/](files/templates) directories, respectively. These variables are used with the Ansible [with_fileglob](http://docs.ansible.com/ansible/playbooks_loops.html#id4) loop. When setting the globs, be sure to use an absolute path.
+This role ships with sample templates located in the [files/templates/](files/templates) directory. `es_templates_fileglob` variable (defaults to `<role>/files/templates/`) is used with the Ansible [with_fileglob](http://docs.ansible.com/ansible/playbooks_loops.html#id4) loop. When setting the globs, be sure to use an absolute path.
-* ```es_scripts_fileglob``` - defaults to `<role>/files/scripts/`.
-* ```es_templates_fileglob``` - defaults to `<role>/files/templates/`.
### Proxy
@ -458,7 +432,7 @@ To define proxy only for a particular plugin during its installation:
```yaml
es_plugins:
-  - plugin: ingest-geoip
+  - plugin: ingest-attachment
    proxy_host: proxy.example.com
    proxy_port: 8080
```
@ -469,10 +443,8 @@ To define proxy only for a particular plugin during its installation:
* The role assumes the user/group exists on the server. The elasticsearch packages create the default elasticsearch user. If this needs to be changed, ensure the user exists. * The role assumes the user/group exists on the server. The elasticsearch packages create the default elasticsearch user. If this needs to be changed, ensure the user exists.
* The playbook relies on the inventory_name of each host to ensure its directories are unique * The playbook relies on the inventory_name of each host to ensure its directories are unique
* Changing an instance_name for a role application will result in the installation of a new component. The previous component will remain. * KitchenCI has been used for testing. This is used to confirm images reach the correct state after a play is first applied. We currently test the latest version of 7.x and 6.x on all supported platforms.
* KitchenCI has been used for testing. This is used to confirm images reach the correct state after a play is first applied. We currently test the latest version of 6.x and 5.x on all supported platforms.
* The role aims to be idempotent. Running the role multiple times, with no changes, should result in no state change on the server. If the configuration is changed, these will be applied and Elasticsearch restarted where required.
* Systemd is used for Ubuntu versions >= 15, Debian >=8, Centos >=7. All other versions use init for service scripts.
* In order to run x-pack tests a license file with security enabled is required. A trial license is appropriate. Set the environment variable `ES_XPACK_LICENSE_FILE` to the full path of the license file prior to running tests.
## IMPORTANT NOTES RE PLUGIN MANAGEMENT

View file

@ -1,47 +1,43 @@
--- ---
es_major_version: "6.x" es_version: "7.1.1"
es_version: "6.5.1"
es_use_snapshot_release: false es_use_snapshot_release: false
es_enable_xpack: true es_enable_xpack: true
es_package_name: "elasticsearch" es_package_name: "elasticsearch"
es_version_lock: false es_version_lock: false
es_use_repository: true es_use_repository: true
es_templates_fileglob: "files/templates/*.json" es_add_repository: true
es_apt_key: "https://artifacts.elastic.co/GPG-KEY-elasticsearch" es_templates_fileglob: "files/templates-{{ es_major_version }}/*.json"
es_apt_url: "deb https://artifacts.elastic.co/packages/{{ es_repo_name }}/apt stable main" es_repo_base: "https://artifacts.elastic.co"
es_apt_key: "{{ es_repo_base }}/GPG-KEY-elasticsearch"
es_apt_url: "deb {{ es_repo_base }}/packages/{{ es_repo_name }}/apt stable main"
es_apt_url_old: "deb http://packages.elastic.co/elasticsearch/{{ es_repo_name }}/debian stable main" es_apt_url_old: "deb http://packages.elastic.co/elasticsearch/{{ es_repo_name }}/debian stable main"
es_start_service: true es_start_service: true
es_java_install: true es_java_install: "{{ false if (es_version is version_compare('7.0.0', '>=')) else true }}"
update_java: false update_java: false
es_restart_on_change: true es_restart_on_change: true
es_plugins_reinstall: false es_plugins_reinstall: false
es_scripts: false
es_templates: false es_templates: false
es_user: elasticsearch es_user: elasticsearch
es_group: elasticsearch es_group: elasticsearch
es_config: {} es_config: {}
es_config_log4j2: log4j2.properties.j2 es_config_log4j2: log4j2.properties.j2
#Need to provide default directories #Need to provide default directories
es_conf_dir: "/etc/elasticsearch"
es_pid_dir: "/var/run/elasticsearch" es_pid_dir: "/var/run/elasticsearch"
es_data_dirs: "/var/lib/elasticsearch" es_data_dirs:
- "/var/lib/elasticsearch"
es_log_dir: "/var/log/elasticsearch" es_log_dir: "/var/log/elasticsearch"
es_action_auto_create_index: true es_action_auto_create_index: true
es_max_open_files: 65536 es_max_open_files: 65536
es_max_threads: "{{ 2048 if ( es_version | version_compare('6.0.0', '<')) else 8192 }}" es_max_threads: 8192
es_max_map_count: 262144 es_max_map_count: 262144
es_allow_downgrades: false es_allow_downgrades: false
es_xpack_features: ["alerting","monitoring","graph","ml","security"] es_xpack_features: []
#These are used for internal operations performed by ansible. #These are used for internal operations performed by ansible.
#They do not affect the current configuration #They do not affect the current configuration
es_api_host: "localhost" es_api_host: "localhost"
es_api_port: 9200 es_api_port: 9200
es_debian_startup_timeout: 10 es_debian_startup_timeout: 10
# Since ansible 2.2 the following variables need to be defined
# to allow the role to be conditionally played with a when condition.
pid_dir: ''
log_dir: ''
conf_dir: ''
data_dirs: ''
# JVM custom parameters # JVM custom parameters
es_jvm_custom_parameters: '' es_jvm_custom_parameters: ''

145
docs/multi-instance.md Normal file
View file

@ -0,0 +1,145 @@
# Multi-instance Support
Starting with ansible-elasticsearch:7.1.1, installing more than one instance of Elasticsearch **on the same host** is no longer supported.
See [554#issuecomment-496804929](https://github.com/elastic/ansible-elasticsearch/issues/554#issuecomment-496804929) for more details about why we removed it.
## Upgrade procedure
If you have single-instance hosts and want to upgrade from a previous version of the role:
### Procedure with data move
This procedure will allow you to move your data to the new standard paths (see [#581](https://github.com/elastic/ansible-elasticsearch/issues/581)):
1. Stop Elasticsearch before the migration
2. Migrate your data to the new standard paths:
```
# mv /etc/elasticsearch/${ES_INSTANCE_NAME}/* /etc/elasticsearch/ && rm -fr /etc/elasticsearch/${ES_INSTANCE_NAME}/
mv: overwrite '/etc/elasticsearch/elasticsearch.keystore'? y
# mv /var/lib/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/* /var/lib/elasticsearch/ && rm -fr /var/lib/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/
# ls /var/lib/elasticsearch/
nodes
# mv /var/log/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/* /var/log/elasticsearch/ && rm -fr /var/log/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/
# rm -fr /var/run/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/
```
3. Update the playbook (remove the `es_conf_dir`, `es_data_dirs`, `es_log_dir`, `es_pid_dir` and `es_instance_name` variables; see the sketch after this procedure)
4. Update the role to the new version ([7.1.1](https://github.com/elastic/ansible-elasticsearch/releases/tag/7.1.1) at the time of writing) and deploy it
5. After deploying the new role version, you can clean up the old init and default files:
Example:
```
$ systemctl stop elasticsearch
$ mv /etc/elasticsearch/${ES_INSTANCE_NAME}/* /etc/elasticsearch/ && rm -fr /etc/elasticsearch/${ES_INSTANCE_NAME}/
mv: overwrite '/etc/elasticsearch/elasticsearch.keystore'? y
$ mv /var/lib/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/* /var/lib/elasticsearch/ && rm -fr /var/lib/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/
$ ls /var/lib/elasticsearch/
nodes
$ mv /var/log/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/* /var/log/elasticsearch/ && rm -fr /var/log/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/
$ rm -fr /var/run/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}/
$ ansible-galaxy install --force elastic.elasticsearch,7.1.1
- changing role elastic.elasticsearch from 6.6.0 to 7.1.1
- downloading role 'elasticsearch', owned by elastic
- downloading role from https://github.com/elastic/ansible-elasticsearch/archive/7.1.1.tar.gz
- extracting elastic.elasticsearch to /home/jmlrt/.ansible/roles/elastic.elasticsearch
- elastic.elasticsearch (7.1.1) was installed successfully
$ ansible-playbook playbook.yml
...
TASK [elastic.elasticsearch : Create Directories]
ok: [localhost] => (item=/var/run/elasticsearch)
ok: [localhost] => (item=/var/log/elasticsearch)
changed: [localhost] => (item=/etc/elasticsearch)
ok: [localhost] => (item=/var/lib/elasticsearch)
TASK [elastic.elasticsearch : Copy Configuration File]
changed: [localhost]
TASK [elastic.elasticsearch : Copy Default File]
changed: [localhost]
TASK [elastic.elasticsearch : Copy jvm.options File]
changed: [localhost]
...
RUNNING HANDLER [elastic.elasticsearch : restart elasticsearch]
changed: [localhost]
...
PLAY RECAP
localhost : ok=26 changed=6 unreachable=0 failed=0 skipped=116 rescued=0 ignored=0
$ find /etc -name '${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}*'
/etc/default/node1_elasticsearch
/etc/systemd/system/multi-user.target.wants/node1_elasticsearch.service
```
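For step 3 above, the playbook change amounts to dropping the per-instance variables. A minimal sketch, where the `node1` instance name and path are illustrative values only:
```yaml
---
# Before (older role versions, multi-instance style; values illustrative)
- hosts: localhost
  roles:
    - role: elastic.elasticsearch
  vars:
    es_instance_name: "node1"
    es_data_dirs:
      - "/var/lib/elasticsearch/localhost-node1"
---
# After (role >= 7.1.1): the per-instance variables are simply removed
- hosts: localhost
  roles:
    - role: elastic.elasticsearch
```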
### Procedure without data move
This procedure will allow you to keep your data in the old paths:
1. Override these variables to match previous values:
```yaml
es_conf_dir: /etc/elasticsearch/${ES_INSTANCE_NAME}
es_data_dirs:
- /var/lib/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}
es_log_dir: /var/log/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}
es_pid_dir: /var/run/elasticsearch/${INVENTORY_HOSTNAME}-${ES_INSTANCE_NAME}
```
2. Deploy the role. **Even if these variables are overridden, the Elasticsearch config file and default options file will change, which implies an Elasticsearch restart.**
3. After deploying the new role version, you can clean up the old init and default files.
Example:
```bash
$ ansible-playbook -e '{"es_conf_dir":"/etc/elasticsearch/node1","es_data_dirs":["/var/lib/elasticsearch/localhost-node1"],"es_log_dir":"/var/log/elasticsearch/localhost-node1","es_pid_dir":"/var/run/elasticsearch/localhost-node1"}' playbook.yml
...
TASK [elasticsearch : Create Directories] **********************************************************************************************************************************************************************************************************************
ok: [localhost] => (item=/var/run/elasticsearch/localhost-node1)
ok: [localhost] => (item=/var/log/elasticsearch/localhost-node1)
ok: [localhost] => (item=/etc/elasticsearch/node1)
ok: [localhost] => (item=/var/lib/elasticsearch/localhost-node1)
TASK [elasticsearch : Copy Configuration File] *****************************************************************************************************************************************************************************************************************
changed: [localhost]
TASK [elasticsearch : Copy Default File] ***********************************************************************************************************************************************************************************************************************
changed: [localhost]
...
PLAY RECAP *****************************************************************************************************************************************************************************************************************************************************
localhost : ok=32 changed=3 unreachable=0 failed=0
$ find /etc -name 'node1_elasticsearch*'
/etc/default/node1_elasticsearch
/etc/systemd/system/multi-user.target.wants/node1_elasticsearch.service
$ rm /etc/default/node1_elasticsearch /etc/systemd/system/multi-user.target.wants/node1_elasticsearch.service
```
## Workaround
If you use more than one instance of Elasticsearch on the same host (with different ports, directories and config files), you are still able to install Elasticsearch 6.x and 7.x in multi-instance mode by using ansible-elasticsearch commit [25bd09f](https://github.com/elastic/ansible-elasticsearch/commit/25bd09f6835b476b6a078676a7d614489a6739c5) (the last commit before multi-instance removal) and overriding the `es_version` variable:
```sh
$ cat << EOF >> requirements.yml # require git
- src: https://github.com/elastic/ansible-elasticsearch
version: 25bd09f
name: elasticsearch
EOF
$ ansible-galaxy install -r requirements.yml
$ cat << EOF >> playbook.yml
- hosts: localhost
roles:
- role: elasticsearch
vars:
es_instance_name: "node1"
es_version: 7.1.1 # or 6.8.0 for example
EOF
$ ansible-playbook playbook.yml
```

View file

@ -1 +0,0 @@
log(_score * 2) + my_modifier

Binary file not shown.

View file

@ -0,0 +1,2 @@
[Service]
LimitMEMLOCK=infinity

View file

@ -0,0 +1,9 @@
{
"index_patterns" : "te*",
"settings" : {
"number_of_shards" : 1
},
"mappings" : {
"_source" : { "enabled" : false }
}
}

View file

@ -7,7 +7,7 @@
- name: restart elasticsearch - name: restart elasticsearch
become: yes become: yes
service: name={{instance_init_script | basename}} state=restarted enabled=yes service: name=elasticsearch state=restarted enabled=yes
when: when:
- es_restart_on_change - es_restart_on_change
- es_start_service - es_start_service

View file

@ -7,7 +7,7 @@ galaxy_info:
description: Elasticsearch for Linux description: Elasticsearch for Linux
company: "Elastic.co" company: "Elastic.co"
license: "license (Apache)" license: "license (Apache)"
min_ansible_version: 2.3.2 min_ansible_version: 2.4.2
platforms: platforms:
- name: EL - name: EL
versions: versions:
@ -19,7 +19,9 @@ galaxy_info:
- name: Ubuntu - name: Ubuntu
versions: versions:
- all - all
categories: - galaxy_tags:
- system - elastic
- elasticsearch
- elk
- logging
dependencies: [] dependencies: []

View file

@ -8,31 +8,18 @@
- name: Set the defaults here otherwise they can't be overriden in the same play if the role is called twice - name: Set the defaults here otherwise they can't be overriden in the same play if the role is called twice
set_fact: set_fact:
es_open_xpack: true
es_install_xpack: false
es_users_path: "users"
es_xpack_conf_subdir: ""
es_repo_name: "{{ es_major_version }}" es_repo_name: "{{ es_major_version }}"
es_xpack_users_command: "elasticsearch-users" es_package_name: "elasticsearch"
es_other_package_name: "elasticsearch-oss"
- name: Detect if es_version is before X-Pack was open and included es_other_repo_name: "{{ 'oss-' + es_major_version }}"
set_fact: es_other_apt_url: "deb {{ es_repo_base }}/packages/{{ 'oss-' + es_major_version }}/apt stable main"
es_open_xpack: false
when: "es_version | version_compare('6.3.0', '<')"
- name: If this is an older version we need to install X-Pack as a plugin and use a differet users command
set_fact:
es_install_xpack: true
es_xpack_users_command: "x-pack/users"
es_xpack_conf_subdir: "/x-pack"
when:
- not es_open_xpack
- es_enable_xpack
- name: Use the oss repo and package if xpack is not being used - name: Use the oss repo and package if xpack is not being used
set_fact: set_fact:
es_repo_name: "{{ 'oss-' + es_major_version }}" es_repo_name: "{{ 'oss-' + es_major_version }}"
es_other_repo_name: "{{ es_major_version }}"
es_other_apt_url: "deb {{ es_repo_base }}/packages/{{ es_major_version }}/apt stable main"
es_package_name: "elasticsearch-oss" es_package_name: "elasticsearch-oss"
es_other_package_name: "elasticsearch"
when: when:
- es_open_xpack
- not es_enable_xpack - not es_enable_xpack

View file

@ -1,6 +0,0 @@
---
- name: Debian - hold elasticsearch version
become: yes
command: "apt-mark hold {{ es_package_name }}"
register: hold_elasticsearch_result
changed_when: "hold_elasticsearch_result.stdout != '{{ es_package_name }} was already set on hold.'"

View file

@ -7,7 +7,7 @@
set_fact: force_install=yes set_fact: force_install=yes
when: es_allow_downgrades when: es_allow_downgrades
- name: Debian - Install apt-transport-https to support https APT downloads - name: Gracefully stop and remove elasticsearch package if switching between OSS and standard
become: yes become: yes
apt: name=apt-transport-https state=present apt: name=apt-transport-https state=present
when: es_use_repository when: es_use_repository
@ -31,25 +31,75 @@
- es_package_name == 'elasticsearch-oss' - es_package_name == 'elasticsearch-oss'
block: block:
- name: Check if the elasticsearch package is installed - name: Check if the elasticsearch package is installed
shell: dpkg-query -W -f'${Status}' elasticsearch shell: "dpkg-query -W -f'${Status}' {{ es_other_package_name }}"
register: elasticsearch_package register: elasticsearch_package
failed_when: False failed_when: False
changed_when: False changed_when: False
check_mode: no
- name: unhold elasticsearch package when switching to a different package type
become: yes
dpkg_selections:
name: "{{ es_other_package_name }}"
selection: "install"
when: elasticsearch_package.stdout == 'install ok installed'
- name: stop elasticsearch - name: stop elasticsearch
become: yes
service: service:
name: '{{ instance_init_script | basename }}' name: 'elasticsearch'
state: stopped state: stopped
when: elasticsearch_package.stdout == 'install ok installed' when: elasticsearch_package.stdout == 'install ok installed'
- name: Debian - Remove elasticsearch package if we are installing the oss package - name: Debian - Remove elasticsearch package if we are switching to a different package type
become: yes
apt: apt:
name: 'elasticsearch' name: '{{ es_other_package_name }}'
state: absent state: absent
when: elasticsearch_package.stdout == 'install ok installed' when: elasticsearch_package.stdout == 'install ok installed'
- name: Install Elasticsearch repository
when: es_use_repository
become: yes
block:
- name: Debian - Install apt-transport-https to support https APT downloads
apt:
name: apt-transport-https
state: present
- name: Debian - Add Elasticsearch repository key
apt_key:
url: '{{ es_apt_key }}'
state: present
when: es_add_repository and es_apt_key | string
- name: Debian - Add elasticsearch repository
apt_repository:
repo: '{{ item.repo }}'
state: '{{ item.state }}'
when: es_add_repository
with_items:
- { repo: "{{ es_apt_url_old }}", state: "absent" }
- { repo: "{{ es_apt_url }}", state: "present" }
- { repo: "{{ es_other_apt_url }}", state: "absent" }
- name: Include optional user and group creation.
when: (es_user_id is defined) and (es_group_id is defined)
include: elasticsearch-optional-user.yml
- name: Debian - Get installed elasticsearch version
command: dpkg-query --showformat='${Version}' --show {{ es_package_name }}
register: installed_es_version
failed_when: False
changed_when: False
check_mode: no
- name: Debian - unhold elasticsearch version
become: yes
dpkg_selections:
name: "{{ es_package_name }}"
selection: "install"
when: not es_version_lock or (installed_es_version.stdout and installed_es_version.stdout != es_version)
- name: Debian - Ensure elasticsearch is installed - name: Debian - Ensure elasticsearch is installed
become: yes become: yes
apt: apt:
@ -62,10 +112,13 @@
register: debian_elasticsearch_install_from_repo register: debian_elasticsearch_install_from_repo
notify: restart elasticsearch notify: restart elasticsearch
environment: environment:
ES_PATH_CONF: "/etc/elasticsearch" ES_PATH_CONF: "{{ es_conf_dir }}"
- name: Debian - Include versionlock - name: Debian - hold elasticsearch version
include: elasticsearch-Debian-version-lock.yml become: yes
dpkg_selections:
name: "{{ es_package_name }}"
selection: "hold"
when: es_version_lock when: es_version_lock
- name: Debian - Download elasticsearch from url - name: Debian - Download elasticsearch from url
@ -78,3 +131,5 @@
when: not es_use_repository when: not es_use_repository
register: elasticsearch_install_from_package register: elasticsearch_install_from_package
notify: restart elasticsearch notify: restart elasticsearch
environment:
ES_PATH_CONF: "{{ es_conf_dir }}"

View file

@ -2,6 +2,35 @@
- name: RedHat - install yum-version-lock - name: RedHat - install yum-version-lock
become: yes become: yes
yum: name=yum-plugin-versionlock state=present update_cache=yes yum: name=yum-plugin-versionlock state=present update_cache=yes
- name: RedHat - check if requested elasticsearch version lock exists
become: yes
shell: yum versionlock list | grep -c {{es_package_name}}-{{es_version}}
register: es_requested_version_locked
args:
warn: false
failed_when: False
changed_when: False
- name: RedHat - lock elasticsearch version - name: RedHat - lock elasticsearch version
become: yes become: yes
shell: yum versionlock delete 0:elasticsearch* ; yum versionlock add {{ es_package_name }}{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %} shell: yum versionlock delete 0:elasticsearch* ; yum versionlock add {{ es_package_name }}-{{ es_version }}
args:
warn: false
when: es_version_lock and es_requested_version_locked.stdout|int == 0
- name: RedHat - check if any elasticsearch version lock exists
become: yes
shell: yum versionlock list | grep -c elasticsearch
register: es_version_locked
args:
warn: false
failed_when: False
changed_when: False
- name: RedHat - unlock elasticsearch version
become: yes
shell: yum versionlock delete 0:elasticsearch*
args:
warn: false
when: not es_version_lock and es_version_locked.stdout|int > 0

View file

@ -13,19 +13,30 @@
- name: RedHat - add Elasticsearch repo - name: RedHat - add Elasticsearch repo
become: yes become: yes
template: src=elasticsearch.repo dest=/etc/yum.repos.d/elasticsearch-{{ es_repo_name }}.repo template:
src: 'elasticsearch.repo'
dest: '/etc/yum.repos.d/elasticsearch-{{ es_repo_name }}.repo'
when: es_use_repository and es_add_repository
- name: RedHat - remove unused Elasticsearch repo
become: yes
file:
path: '/etc/yum.repos.d/elasticsearch-{{ es_other_repo_name }}.repo'
state: absent
when: es_use_repository when: es_use_repository
- name: RedHat - include versionlock - name: RedHat - include versionlock
include: elasticsearch-RedHat-version-lock.yml include: elasticsearch-RedHat-version-lock.yml
when: es_version_lock
- name: RedHat - Remove non oss package if the old elasticsearch package is installed - name: RedHat - Remove the other elasticsearch package if switching between OSS and standard
become: yes become: yes
yum: yum:
name: 'elasticsearch' name: '{{ es_other_package_name }}'
state: 'absent' state: 'absent'
when: es_package_name == 'elasticsearch-oss'
- name: Include optional user and group creation.
when: (es_user_id is defined) and (es_group_id is defined)
include: elasticsearch-optional-user.yml
- name: RedHat - Install Elasticsearch - name: RedHat - Install Elasticsearch
become: yes become: yes
@ -41,7 +52,7 @@
retries: 5 retries: 5
delay: 10 delay: 10
environment: environment:
ES_PATH_CONF: "/etc/elasticsearch" ES_PATH_CONF: "{{ es_conf_dir }}"
- name: RedHat - Install Elasticsearch from url - name: RedHat - Install Elasticsearch from url
become: yes become: yes

View file

@ -1,129 +1,57 @@
--- ---
# Configure Elasticsearch Node # Configure Elasticsearch Node
#Create conf directory
- name: Create Configuration Directory
become: yes
file: path={{ es_conf_dir }} state=directory owner=root group={{ es_group }} mode=2750
#Create pid directory
- name: Create PID Directory
become: yes
file: path={{ es_pid_dir }} state=directory owner={{ es_user }} group={{ es_group }} mode=0755
#Create required directories #Create required directories
- name: Create Directories - name: Create Others Directories
become: yes become: yes
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }} file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }} mode=2750
with_items: with_items:
- "{{pid_dir}}" - "{{ es_log_dir }}"
- "{{log_dir}}" - "{{ es_data_dirs }}"
- "{{conf_dir}}"
- name: Create Data Directories
become: yes
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
with_items:
- "{{data_dirs}}"
#Copy the config template #Copy the config template
- name: Copy Configuration File - name: Copy Configuration File
become: yes become: yes
template: src=elasticsearch.yml.j2 dest={{conf_dir}}/elasticsearch.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes template: src=elasticsearch.yml.j2 dest={{ es_conf_dir }}/elasticsearch.yml owner=root group={{ es_group }} mode=0660 force=yes
register: system_change register: system_change
notify: restart elasticsearch notify: restart elasticsearch
#Copy the instance specific default file #Copy the default file
- name: Copy Default File for Instance - name: Copy Default File
become: yes become: yes
template: src=elasticsearch.j2 dest={{instance_default_file}} mode=0644 force=yes template: src=elasticsearch.j2 dest={{ default_file }} owner=root group={{ es_group }} mode=0660 force=yes
notify: restart elasticsearch
#Copy the instance specific init file
- name: Copy Debian Init File for Instance
become: yes
template: src=init/debian/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
when: ansible_os_family == 'Debian' and not use_system_d
notify: restart elasticsearch
#Copy the instance specific init file
- name: Copy Redhat Init File for Instance
become: yes
template: src=init/redhat/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
when: ansible_os_family == 'RedHat' and not use_system_d
notify: restart elasticsearch notify: restart elasticsearch
#Copy the systemd specific file if systemd is installed #Copy the systemd specific file if systemd is installed
- name: Copy Systemd File for Instance - when: use_system_d and m_lock_enabled
become: yes become: yes
template: src=systemd/elasticsearch.j2 dest={{instance_sysd_script}} mode=0644 force=yes block:
when: use_system_d - name: Make sure destination dir exists
notify: file: path={{ sysd_config_file | dirname }} state=directory mode=0755
- reload systemd configuration
- restart elasticsearch - name: Copy specific ElasticSearch Systemd config file
ini_file: path={{ sysd_config_file }} section=Service option=LimitMEMLOCK value=infinity mode=0644
notify:
- reload systemd configuration
- restart elasticsearch
#Copy the logging.yml #Copy the logging.yml
- name: Copy log4j2.properties File for Instance - name: Copy log4j2.properties File
become: yes become: yes
template: src={{es_config_log4j2}} dest={{conf_dir}}/log4j2.properties owner={{ es_user }} group={{ es_group }} mode=0644 force=yes template: src={{ es_config_log4j2 }} dest={{ es_conf_dir }}/log4j2.properties owner=root group={{ es_group }} mode=0660 force=yes
notify: restart elasticsearch notify: restart elasticsearch
- name: Copy jvm.options File for Instance - name: Copy jvm.options File
become: yes become: yes
template: src=jvm.options.j2 dest={{conf_dir}}/jvm.options owner={{ es_user }} group={{ es_group }} mode=0644 force=yes template: src=jvm.options.j2 dest={{ es_conf_dir }}/jvm.options owner=root group={{ es_group }} mode=0660 force=yes
notify: restart elasticsearch notify: restart elasticsearch
#Clean up un-wanted package scripts to avoid confusion
- name: Delete Default Init
become: yes
file: dest=/etc/init.d/elasticsearch state=absent
- name: Create empty default environment file
become: yes
changed_when: False
copy:
dest: /etc/default/elasticsearch
content: ''
when: ansible_os_family == 'Debian'
- name: Create empty default environment file
become: yes
changed_when: False
copy:
dest: /etc/sysconfig/elasticsearch
content: ''
when: ansible_os_family == 'RedHat'
- name: Symlink default systemd service to first instance of elasticsearch
when: use_system_d
block:
- name: Check if default systemd file exists
stat:
path: "{{ sysd_script }}"
register: sysd_stat_result
- name: Remove if it is a normal file
become: yes
file:
path: "{{ sysd_script }}"
state: absent
when: sysd_stat_result.stat.exists and not sysd_stat_result.stat.islnk
- name: Create a symbolic link to the default systemd location to the first instance running on this host
become: yes
file:
state: link
src: "{{ instance_sysd_script }}"
path: "{{ sysd_script }}"
when: sysd_stat_result.stat.exists and not sysd_stat_result.stat.islnk
notify:
- reload systemd configuration
- restart elasticsearch
- name: Delete Default Configuration File
become: yes
file: dest=/etc/elasticsearch/elasticsearch.yml state=absent
- name: Delete Default Logging File
become: yes
file: dest=/etc/elasticsearch/logging.yml state=absent
- name: Delete Default Logging File
become: yes
file: dest=/etc/elasticsearch/log4j2.properties state=absent
- name: Delete Default JVM Options File
become: yes
file: dest=/etc/elasticsearch/jvm.options state=absent

View file

@ -1,25 +1,9 @@
# Check for mandatory parameters # Check for mandatory parameters
- name: fail when es_instance is not defined
fail: msg="es_instance_name must be specified and cannot be blank"
when: es_instance_name is not defined or es_instance_name == ''
- name: fail when es_proxy_port is not defined or is blank - name: fail when es_proxy_port is not defined or is blank
fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined" fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined"
when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '') when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '')
- name: debug message
debug: msg="WARNING - It is recommended you specify the parameter 'http.port'"
when: es_config['http.port'] is not defined
- name: debug message
debug: msg="WARNING - It is recommended you specify the parameter 'transport.tcp.port'"
when: es_config['transport.tcp.port'] is not defined
- name: debug message
debug: msg="WARNING - It is recommended you specify the parameter 'discovery.zen.ping.unicast.hosts'"
when: es_config['discovery.zen.ping.unicast.hosts'] is not defined
#If the user attempts to lock memory they must specify a heap size #If the user attempts to lock memory they must specify a heap size
- name: fail when heap size is not specified when using memory lock - name: fail when heap size is not specified when using memory lock
fail: msg="If locking memory with bootstrap.memory_lock a heap size must be specified" fail: msg="If locking memory with bootstrap.memory_lock a heap size must be specified"
@ -28,48 +12,23 @@
#Check if working with security we have an es_api_basic_auth_username and es_api_basic_auth_username - otherwise any http calls wont work #Check if working with security we have an es_api_basic_auth_username and es_api_basic_auth_username - otherwise any http calls wont work
- name: fail when api credentials are not declared when using security - name: fail when api credentials are not declared when using security
fail: msg="Enabling security requires an es_api_basic_auth_username and es_api_basic_auth_password to be provided to allow cluster operations" fail: msg="Enabling security requires an es_api_basic_auth_username and es_api_basic_auth_password to be provided to allow cluster operations"
when: es_enable_xpack and ("security" in es_xpack_features) and es_api_basic_auth_username is not defined and es_api_basic_auth_password is not defined when:
- es_enable_xpack and "security" in es_xpack_features
- es_api_basic_auth_username is not defined
- es_api_basic_auth_password is not defined
- name: set fact file_reserved_users - name: set fact file_reserved_users
set_fact: file_reserved_users={{ es_users.file.keys() | intersect (reserved_xpack_users) }} set_fact: file_reserved_users={{ es_users.file.keys() | list | intersect (reserved_xpack_users) }}
when: es_users is defined and es_users.file is defined and (es_users.file.keys() | length > 0) and (es_users.file.keys() | intersect (reserved_xpack_users) | length > 0) when: es_users is defined and es_users.file is defined and (es_users.file.keys() | list | length > 0) and (es_users.file.keys() | list | intersect (reserved_xpack_users) | length > 0)
- name: fail when changing users through file realm - name: fail when changing users through file realm
fail: fail:
msg: "ERROR: INVALID CONFIG - YOU CANNOT CHANGE RESERVED USERS THROUGH THE FILE REALM. THE FOLLOWING CANNOT BE CHANGED: {{file_reserved_users}}. USE THE NATIVE REALM." msg: "ERROR: INVALID CONFIG - YOU CANNOT CHANGE RESERVED USERS THROUGH THE FILE REALM. THE FOLLOWING CANNOT BE CHANGED: {{file_reserved_users}}. USE THE NATIVE REALM."
when: file_reserved_users | default([]) | length > 0 when: file_reserved_users | default([]) | length > 0
- name: set fact instance_default_file
set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
- name: set fact instance_init_script
set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
- name: set fact conf_dir
set_fact: conf_dir={{ es_conf_dir }}/{{es_instance_name}}
- name: set fact m_lock_enabled - name: set fact m_lock_enabled
set_fact: m_lock_enabled={{ es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True }} set_fact: m_lock_enabled={{ es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True }}
#TODO - if transport.host is not local maybe error on boostrap checks
#Use systemd for the following distributions:
#Ubuntu 15 and up
#Debian 8 and up
#Centos 7 and up
#Relies on elasticsearch distribution installing a serviced script to determine whether one should be copied.
- name: set fact use_system_d - name: set fact use_system_d
set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('8', '>=')) or (ansible_distribution in ['RedHat','CentOS'] and ansible_distribution_version is version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version_compare('15', '>=')) }} set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('8', '>=')) or (ansible_distribution in ['RedHat','CentOS'] and ansible_distribution_version is version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version_compare('15', '>=')) }}
- name: set fact instance_sysd_script
set_fact: instance_sysd_script={{sysd_script | dirname }}/{{es_instance_name}}_{{sysd_script | basename}}
when: use_system_d
#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
- name: set fact instance_suffix
set_fact: instance_suffix={{inventory_hostname}}-{{ es_instance_name }}
- name: set fact pid_dir
set_fact: pid_dir={{ es_pid_dir }}/{{instance_suffix}}
- name: set fact log_dir
set_fact: log_dir={{ es_log_dir }}/{{instance_suffix}}
- name: set fact log_dir
set_fact: data_dirs={{ es_data_dirs | append_to_list('/'+instance_suffix) }}

View file

@ -17,7 +17,6 @@
file: file:
dest: "{{ es_home }}/plugins/x-pack" dest: "{{ es_home }}/plugins/x-pack"
state: "absent" state: "absent"
when: es_open_xpack
#List currently installed plugins. We have to list the directories as the list commmand fails if the ES version is different than the plugin version. #List currently installed plugins. We have to list the directories as the list commmand fails if the ES version is different than the plugin version.
- name: Check installed elasticsearch plugins - name: Check installed elasticsearch plugins
@ -27,9 +26,9 @@
changed_when: False changed_when: False
ignore_errors: yes ignore_errors: yes
environment: environment:
CONF_DIR: "{{ conf_dir }}" CONF_DIR: "{{ es_conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}" ES_PATH_CONF: "{{ es_conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}" ES_INCLUDE: "{{ default_file }}"
check_mode: no check_mode: no
#if es_plugins_reinstall is set to true we remove ALL plugins #if es_plugins_reinstall is set to true we remove ALL plugins
@ -60,9 +59,9 @@
notify: restart elasticsearch notify: restart elasticsearch
register: plugin_removed register: plugin_removed
environment: environment:
CONF_DIR: "{{ conf_dir }}" CONF_DIR: "{{ es_conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}" ES_PATH_CONF: "{{ es_conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}" ES_INCLUDE: "{{ default_file }}"
- name: Install elasticsearch plugins - name: Install elasticsearch plugins
become: yes become: yes
@ -73,15 +72,10 @@
when: item.plugin in plugins_to_install when: item.plugin in plugins_to_install
notify: restart elasticsearch notify: restart elasticsearch
environment: environment:
CONF_DIR: "{{ conf_dir }}" CONF_DIR: "{{ es_conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}" ES_PATH_CONF: "{{ es_conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}" ES_INCLUDE: "{{ default_file }}"
ES_JAVA_OPTS: "{% if item.proxy_host is defined and item.proxy_host != '' and item.proxy_port is defined and item.proxy_port != ''%} -Dhttp.proxyHost={{ item.proxy_host }} -Dhttp.proxyPort={{ item.proxy_port }} -Dhttps.proxyHost={{ item.proxy_host }} -Dhttps.proxyPort={{ item.proxy_port }} {% elif es_proxy_host is defined and es_proxy_host != '' %} -Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }} {% endif %}" ES_JAVA_OPTS: "{% if item.proxy_host is defined and item.proxy_host != '' and item.proxy_port is defined and item.proxy_port != ''%} -Dhttp.proxyHost={{ item.proxy_host }} -Dhttp.proxyPort={{ item.proxy_port }} -Dhttps.proxyHost={{ item.proxy_host }} -Dhttps.proxyPort={{ item.proxy_port }} {% elif es_proxy_host is defined and es_proxy_host != '' %} -Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }} {% endif %}"
until: plugin_installed.rc == 0 until: plugin_installed.rc == 0
retries: 5 retries: 5
delay: 5 delay: 5
#Set permissions on plugins directory
- name: Set Plugin Directory Permissions
become: yes
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes

View file

@ -1,26 +0,0 @@
---
- name: set fact es_script_dir
set_fact: es_script_dir={{ es_conf_dir }}/{{es_instance_name}}
tags:
- always
- name: set fact es_script_dir when path.scripts
set_fact: es_script_dir={{es_config['path.scripts']}}
when: es_config['path.scripts'] is defined
tags:
- always
- name: Create script dir
become: yes
file: state=directory path={{ es_script_dir }} owner={{ es_user }} group={{ es_group }} recurse=yes
- name: Copy default scripts to elasticsearch
become: yes
copy: src=scripts dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }}
when: es_scripts_fileglob is not defined
- name: Copy scripts to elasticsearch
become: yes
copy: src={{ item }} dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }}
with_fileglob: "{{ es_scripts_fileglob | default('') }}"

View file

@ -2,40 +2,29 @@
- name: ensure templates dir is created - name: ensure templates dir is created
file: file:
path: /etc/elasticsearch/templates path: "{{ es_conf_dir }}/templates"
state: directory state: directory
owner: "{{ es_user }}" owner: root
group: "{{ es_group }}" group: "{{ es_group }}"
mode: 2750
- name: Copy templates to elasticsearch - name: Copy templates to elasticsearch
copy: src={{ item }} dest=/etc/elasticsearch/templates owner={{ es_user }} group={{ es_group }} copy: src={{ item }} dest={{ es_conf_dir }}/templates owner=root group={{ es_group }} mode=0660
register: load_templates register: load_templates
with_fileglob: with_fileglob:
- "{{ es_templates_fileglob | default('') }}" - "{{ es_templates_fileglob | default('') }}"
- name: Install templates without auth - name: Install templates
uri: uri:
url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}" url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}"
method: PUT method: PUT
status_code: 200 status_code: 200
body_format: json user: "{{es_api_basic_auth_username | default(omit)}}"
body: "{{ lookup('file', item) }}" password: "{{es_api_basic_auth_password | default(omit)}}"
when: load_templates.changed and es_start_service and not es_enable_xpack or not es_xpack_features is defined or "security" not in es_xpack_features
with_fileglob:
- "{{ es_templates_fileglob | default('') }}"
run_once: True
- name: Install templates with auth
uri:
url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}"
method: PUT
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes force_basic_auth: yes
body_format: json body_format: json
body: "{{ lookup('file', item) }}" body: "{{ lookup('file', item) }}"
when: load_templates.changed and es_start_service and es_enable_xpack and es_xpack_features is defined and "security" in es_xpack_features when: load_templates.changed and es_start_service
with_fileglob: with_fileglob:
- "{{ es_templates_fileglob | default('') }}" - "{{ es_templates_fileglob | default('') }}"
run_once: True run_once: True

View file

@ -1,9 +1,5 @@
--- ---
- name: Include optional user and group creation.
when: (es_user_id is defined) and (es_group_id is defined)
include: elasticsearch-optional-user.yml
- name: Include specific Elasticsearch - name: Include specific Elasticsearch
include: elasticsearch-Debian.yml include: elasticsearch-Debian.yml
when: ansible_os_family == 'Debian' when: ansible_os_family == 'Debian'

View file

@ -18,6 +18,7 @@
register: java_full_path register: java_full_path
failed_when: False failed_when: False
changed_when: False changed_when: False
check_mode: no
when: ansible_os_family == 'RedHat' when: ansible_os_family == 'RedHat'
- name: correct java version selected - name: correct java version selected
@ -43,6 +44,7 @@
register: open_jdk register: open_jdk
ignore_errors: yes ignore_errors: yes
changed_when: false changed_when: false
check_mode: no
#https://github.com/docker-library/openjdk/issues/19 - ensures tests pass due to java 8 broken certs #https://github.com/docker-library/openjdk/issues/19 - ensures tests pass due to java 8 broken certs
- name: refresh the java ca-certificates - name: refresh the java ca-certificates

View file

@ -1,4 +1,11 @@
--- ---
- set_fact: "es_major_version={{ es_version.split('.')[0] }}.x"
when:
- es_major_version is undefined
tags:
- always
- name: os-specific vars - name: os-specific vars
include_vars: "{{ansible_os_family}}.yml" include_vars: "{{ansible_os_family}}.yml"
tags: tags:
@ -34,12 +41,6 @@
tags: tags:
- config - config
- name: include elasticsearch-scripts.yml
include: elasticsearch-scripts.yml
when: es_scripts
tags:
- scripts
- name: include elasticsearch-plugins.yml - name: include elasticsearch-plugins.yml
include: elasticsearch-plugins.yml include: elasticsearch-plugins.yml
when: es_plugins is defined or es_plugins_reinstall when: es_plugins is defined or es_plugins_reinstall
@ -57,7 +58,7 @@
- name: Make sure elasticsearch is started - name: Make sure elasticsearch is started
become: yes become: yes
service: name={{instance_init_script | basename}} state=started enabled=yes service: name=elasticsearch state=started enabled=yes
when: es_start_service when: es_start_service
- name: Wait for elasticsearch to startup - name: Wait for elasticsearch to startup
@ -69,11 +70,14 @@
- name: set fact manage_native_realm to true - name: set fact manage_native_realm to true
set_fact: manage_native_realm=true set_fact: manage_native_realm=true
when: es_start_service and (es_enable_xpack and "security" in es_xpack_features) and ((es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined)) when:
- es_start_service
- es_enable_xpack
- (es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined)
# If playbook runs too fast, Native commands could fail as the Native Realm is not yet up # If playbook runs too fast, Native commands could fail as the Native Realm is not yet up
- name: Wait 15 seconds for the Native Relm to come up - name: Wait 15 seconds for the Native Relm to come up
pause: seconds=15 command: sleep 15
when: manage_native_realm when: manage_native_realm
- name: activate-license - name: activate-license

View file

@ -27,6 +27,7 @@
delay: 1 delay: 1
ignore_errors: true ignore_errors: true
until: "'status' in snapshots and snapshots.status == 200" until: "'status' in snapshots and snapshots.status == 200"
check_mode: no
- name: use the custom package url instead of the repository - name: use the custom package url instead of the repository
set_fact: set_fact:

View file

@ -1,68 +0,0 @@
---
#Test if feature is installed
- name: Test if x-pack is installed
shell: "{{es_home}}/bin/elasticsearch-plugin list | grep x-pack"
become: yes
register: x_pack_installed
changed_when: False
failed_when: "'ERROR' in x_pack_installed.stdout"
check_mode: no
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Remove X-Pack if installed and its not been requested or the ES version has changed
- name: Remove x-pack plugin
become: yes
command: "{{es_home}}/bin/elasticsearch-plugin remove x-pack"
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: x_pack_installed.rc == 0 and (not es_enable_xpack or es_version_changed)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Install plugin if not installed, or the es version has changed (so removed above), and its been requested
- name: Download x-pack from url
get_url: url={{ es_xpack_custom_url }} dest=/tmp/x-pack-{{ es_version }}.zip
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is defined)
- name: Install x-pack plugin from local
become: yes
command: >
{{es_home}}/bin/elasticsearch-plugin install --silent --batch file:///tmp/x-pack-{{ es_version }}.zip
register: xpack_state
changed_when: xpack_state.rc == 0
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is defined)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
- name: Delete x-pack zip file
file: dest=/tmp/x-pack-{{ es_version }}.zip state=absent
when: es_xpack_custom_url is defined
- name: Install x-pack plugin from elastic.co
become: yes
command: >
{{es_home}}/bin/elasticsearch-plugin install --silent --batch x-pack
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is not defined)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
ES_JAVA_OPTS: "{% if es_proxy_host is defined and es_proxy_host != '' %}-Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }}{% endif %}"

View file

@ -1,23 +1,11 @@
--- ---
- name: set fact es_version_changed
set_fact: es_version_changed={{ ((elasticsearch_install_from_package is defined and (debian_elasticsearch_install_from_repo.changed or redhat_elasticsearch_install_from_repo.changed)) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) }}
- name: include elasticsearch-xpack-install.yml
include: elasticsearch-xpack-install.yml
when: es_install_xpack
#Security configuration #Security configuration
- name: include security/elasticsearch-security.yml - name: include security/elasticsearch-security.yml
include: security/elasticsearch-security.yml include: security/elasticsearch-security.yml
when: es_enable_xpack
#Add any feature specific configuration here
- name: Set Plugin Directory Permissions
become: yes
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
#Make sure elasticsearch.keystore has correct Permissions #Make sure elasticsearch.keystore has correct Permissions
- name: Set elasticsearch.keystore Permissions - name: Set elasticsearch.keystore Permissions
become: yes become: yes
file: state=file path={{ conf_dir }}/elasticsearch.keystore owner={{ es_user }} group={{ es_group }} file: state=file path={{ es_conf_dir }}/elasticsearch.keystore owner=root group={{ es_group }} mode=0660
when: es_enable_xpack and "security" in es_xpack_features and (es_version | version_compare('6.0.0', '>'))

View file

@ -1,83 +1,82 @@
--- ---
- name: set fact manage_file_users - set_fact: manage_file_users=false
set_fact: manage_file_users=es_users is defined and es_users.file is defined and es_users.file.keys() | length > 0
- set_fact: manage_file_users=true
when: es_users is defined and es_users.file is defined and es_users.file.keys() | list | length > 0
# Users migration from elasticsearch < 6.3 versions
- name: Check if old users file exists - name: Check if old users file exists
stat: stat:
path: '{{ conf_dir }}/x-pack/users' path: '{{ es_conf_dir }}/x-pack/users'
register: old_users_file register: old_users_file
check_mode: no
- name: Copy the old users file from the old depreacted location - name: Copy the old users file from the old depreacted location
copy: copy:
remote_src: yes remote_src: yes
force: no # only copy it if the new path doesn't exist yet force: no # only copy it if the new path doesn't exist yet
src: "{{ conf_dir }}/x-pack/users" src: "{{ es_conf_dir }}/x-pack/users"
dest: "{{ conf_dir }}{{ es_xpack_conf_subdir }}/users" dest: "{{ es_conf_dir }}/users"
when: old_users_file.stat.exists
- name: Create the users file if it doesn't exist
copy:
content: ""
dest: "{{ conf_dir }}{{ es_xpack_conf_subdir }}/users"
force: no # this ensures it only creates it if it does not exist
group: "{{ es_group }}" group: "{{ es_group }}"
owner: "{{ es_user }}" owner: root
mode: 0555 when: old_users_file.stat.exists
# End of users migrations
#List current users #List current users
- name: List Users - name: List Users
become: yes become: yes
shell: cat {{conf_dir}}{{es_xpack_conf_subdir}}/users | awk -F':' '{print $1}' shell: cat {{ es_conf_dir }}/users | awk -F':' '{print $1}'
register: current_file_users register: current_file_users
when: manage_file_users when: manage_file_users
changed_when: False changed_when: False
check_mode: no
- name: set fact users_to_remove - name: set fact users_to_remove
set_fact: users_to_remove={{ current_file_users.stdout_lines | difference (es_users.file.keys()) }} set_fact: users_to_remove={{ current_file_users.stdout_lines | difference (es_users.file.keys() | list) }}
when: manage_file_users when: manage_file_users
#Remove users #Remove users
- name: Remove Users - name: Remove Users
become: yes become: yes
command: > command: >
{{es_home}}/bin/{{es_xpack_users_command}} userdel {{item}} {{es_home}}/bin/elasticsearch-users userdel {{item}}
with_items: "{{users_to_remove | default([])}}" with_items: "{{users_to_remove | default([])}}"
when: manage_file_users when: manage_file_users
environment: environment:
CONF_DIR: "{{ conf_dir }}" CONF_DIR: "{{ es_conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}" ES_PATH_CONF: "{{ es_conf_dir }}"
ES_HOME: "{{es_home}}" ES_HOME: "{{es_home}}"
- name: set fact users_to_add - name: set fact users_to_add
set_fact: users_to_add={{ es_users.file.keys() | difference (current_file_users.stdout_lines) }} set_fact: users_to_add={{ es_users.file.keys() | list | difference (current_file_users.stdout_lines) }}
when: manage_file_users when: manage_file_users
#Add users #Add users
- name: Add Users - name: Add Users
become: yes become: yes
command: > command: >
{{es_home}}/bin/{{es_xpack_users_command}} useradd {{item}} -p {{es_users.file[item].password}} {{es_home}}/bin/elasticsearch-users useradd {{item}} -p {{es_users.file[item].password}}
with_items: "{{ users_to_add | default([]) }}" with_items: "{{ users_to_add | default([]) }}"
when: manage_file_users when: manage_file_users
no_log: True no_log: True
environment: environment:
CONF_DIR: "{{ conf_dir }}" CONF_DIR: "{{ es_conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}" ES_PATH_CONF: "{{ es_conf_dir }}"
ES_HOME: "{{es_home}}" ES_HOME: "{{es_home}}"
#Set passwords for all users declared - Required as the useradd will not change existing user passwords #Set passwords for all users declared - Required as the useradd will not change existing user passwords
- name: Set User Passwords - name: Set User Passwords
become: yes become: yes
command: > command: >
{{es_home}}/bin/{{es_xpack_users_command}} passwd {{ item }} -p {{es_users.file[item].password}} {{es_home}}/bin/elasticsearch-users passwd {{ item }} -p {{es_users.file[item].password}}
with_items: "{{ es_users.file.keys() | default([]) }}" with_items: "{{ es_users.file.keys() | list }}"
when: manage_file_users when: manage_file_users
#Currently no easy way to figure out if the password has changed or to know what it currently is so we can skip. #Currently no easy way to figure out if the password has changed or to know what it currently is so we can skip.
changed_when: False changed_when: False
no_log: True no_log: True
environment: environment:
CONF_DIR: "{{ conf_dir }}" CONF_DIR: "{{ es_conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}" ES_PATH_CONF: "{{ es_conf_dir }}"
ES_HOME: "{{es_home}}" ES_HOME: "{{es_home}}"
- name: set fact users_roles - name: set fact users_roles
@ -87,16 +86,11 @@
#Copy Roles files #Copy Roles files
- name: Copy roles.yml File for Instance - name: Copy roles.yml File for Instance
become: yes become: yes
template: src=security/roles.yml.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/roles.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes template: src=security/roles.yml.j2 dest={{ es_conf_dir }}/roles.yml owner=root group={{ es_group }} mode=0660 force=yes
when: es_roles is defined and es_roles.file is defined when: es_roles is defined and es_roles.file is defined
#Overwrite users_roles file #Overwrite users_roles file
- name: Copy User Roles - name: Copy User Roles
become: yes become: yes
template: src=security/users_roles.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/users_roles mode=0644 force=yes template: src=security/users_roles.j2 dest={{ es_conf_dir }}/users_roles owner=root group={{ es_group }} mode=0660 force=yes
when: manage_file_users and users_roles | length > 0 when: manage_file_users and users_roles | length > 0
#Set permission on security directory. E.g. if 2 nodes are installed on the same machine, the second node will not get the users file created at install, causing the files being created at es_users call and then having the wrong Permissions.
- name: Set Security Directory Permissions Recursive
become: yes
file: state=directory path={{conf_dir}}{{es_xpack_conf_subdir}}/ owner={{ es_user }} group={{ es_group }} recurse=yes

View file

@ -7,14 +7,14 @@
- name: set fact manage_native_users to true
set_fact: manage_native_users=true
-when: es_users is defined and es_users.native is defined and es_users.native.keys() | length > 0
+when: es_users is defined and es_users.native is defined and es_users.native.keys() | list | length > 0
- name: set fact manage_native_role to false
set_fact: manage_native_roles=false
- name: set fact manange_native_roles to true
set_fact: manage_native_roles=true
-when: es_roles is defined and es_roles.native is defined and es_roles.native.keys() | length > 0
+when: es_roles is defined and es_roles.native is defined and es_roles.native.keys() | list | length > 0
#If the node has just has security installed it maybe either stopped or started 1. if stopped, we need to start to load native realms 2. if started, we need to restart to load
@ -29,6 +29,7 @@
status_code: 200
register: user_list_response
when: manage_native_users
+check_mode: no
- name: set fact reserved_users equals user_list_response.json
set_fact: reserved_users={{ user_list_response.json | filter_reserved }}
@ -36,7 +37,7 @@
#Current users not inc. those reserved
- name: set fact current_users equals user_list_response.json.keys not including reserved
-set_fact: current_users={{ user_list_response.json.keys() | difference (reserved_users) }}
+set_fact: current_users={{ user_list_response.json.keys() | list | difference (reserved_users) }}
when: manage_native_users
#We are changing the es_api_basic_auth_username password, so we need to do it first and update the param
@ -66,7 +67,7 @@
#Identify users that are present in ES but not declared and thus should be removed
- name: set fact users_to_remove
-set_fact: users_to_remove={{ current_users | difference ( native_users.keys() ) }}
+set_fact: users_to_remove={{ current_users | difference ( native_users.keys() | list) }}
when: manage_native_users
#Delete all non required users NOT inc. reserved
@ -82,7 +83,7 @@
with_items: "{{ users_to_remove | default([]) }}"
- name: set fact users_to_ignore
-set_fact: users_to_ignore={{ native_users.keys() | intersect (reserved_users) }}
+set_fact: users_to_ignore={{ native_users.keys() | list | intersect (reserved_users) }}
when: manage_native_users
- name: debug message
@ -106,7 +107,7 @@
with_items: "{{ users_to_ignore | default([]) }}"
- name: set fact users_to_modify
-set_fact: users_to_modify={{ native_users.keys() | difference (reserved_users) }}
+set_fact: users_to_modify={{ native_users.keys() | list | difference (reserved_users) }}
when: manage_native_users
#Overwrite all other users NOT inc. those reserved
@ -138,17 +139,18 @@
status_code: 200
register: role_list_response
when: manage_native_roles
+check_mode: no
- name: set fact reserved roles
set_fact: reserved_roles={{ role_list_response.json | filter_reserved }}
when: manage_native_roles
- name: set fact current roles
-set_fact: current_roles={{ role_list_response.json.keys() | difference (reserved_roles) }}
+set_fact: current_roles={{ role_list_response.json.keys() | list | difference (reserved_roles) }}
when: manage_native_roles
- name: set fact roles to ignore
-set_fact: roles_to_ignore={{ es_roles.native.keys() | intersect (reserved_roles) | default([]) }}
+set_fact: roles_to_ignore={{ es_roles.native.keys() | list | intersect (reserved_roles) | default([]) }}
when: manage_native_roles
- name: debug message
@ -157,7 +159,7 @@
when: manage_native_roles and roles_to_ignore | length > 0
- name: set fact roles_to_remove
-set_fact: roles_to_remove={{ current_roles | difference ( es_roles.native.keys() ) }}
+set_fact: roles_to_remove={{ current_roles | difference ( es_roles.native.keys() | list) }}
when: manage_native_roles
#Delete all non required roles NOT inc. reserved
@ -173,7 +175,7 @@
with_items: "{{roles_to_remove | default([]) }}"
- name: set fact roles_to_modify
-set_fact: roles_to_modify={{ es_roles.native.keys() | difference (reserved_roles) }}
+set_fact: roles_to_modify={{ es_roles.native.keys() | list | difference (reserved_roles) }}
when: manage_native_roles
#Update other roles - NOT inc. reserved roles
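Note on the `| list` casts added throughout this file: they look like the usual Python 3 compatibility fix, since `dict.keys()` there returns a view object rather than a list and Jinja2 set filters such as `difference` and `intersect` can fail on it. A minimal sketch of the pattern with made-up values, not taken from the role:

# Illustrative only: cast dict_keys to a list before applying set filters,
# so the expression behaves the same under Python 2 and Python 3.
- name: illustrate keys() | list with the difference filter (hypothetical example)
  set_fact:
    users_to_remove: "{{ current_users | difference(native_users.keys() | list) }}"
  vars:
    current_users: ['alice', 'bob', 'carol']
    native_users:
      alice: {}
      bob: {}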

View file

@ -3,27 +3,18 @@
#TODO: 1. Skip users with no password defined or error 2. Passwords | length > 6
-#Ensure x-pack conf directory is created if necessary
-- name: Ensure x-pack conf directory exists (file)
-file: path={{ conf_dir }}{{ es_xpack_conf_subdir }} state=directory owner={{ es_user }} group={{ es_group }}
-changed_when: False
-when:
-- es_enable_xpack and "security" in es_xpack_features
-- (es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined) or (es_role_mapping is defined)
#-----------------------------Create Bootstrap User-----------------------------------
### START BLOCK elasticsearch keystore ###
- name: create the elasticsearch keystore
-when: (es_enable_xpack and "security" in es_xpack_features) and (es_version | version_compare('6.0.0', '>'))
block:
- name: create the keystore if it doesn't exist yet
become: yes
command: >
{{es_home}}/bin/elasticsearch-keystore create
args:
-creates: "{{ conf_dir }}/elasticsearch.keystore"
+creates: "{{ es_conf_dir }}/elasticsearch.keystore"
environment:
-ES_PATH_CONF: "{{ conf_dir }}"
+ES_PATH_CONF: "{{ es_conf_dir }}"
- name: Check if bootstrap password is set
become: yes
@ -32,7 +23,8 @@
register: list_keystore
changed_when: False
environment:
-ES_PATH_CONF: "{{ conf_dir }}"
+ES_PATH_CONF: "{{ es_conf_dir }}"
+check_mode: no
- name: Create Bootstrap password for elastic user
become: yes
@ -40,35 +32,19 @@
when:
- es_api_basic_auth_username is defined and list_keystore is defined and es_api_basic_auth_username == 'elastic' and 'bootstrap.password' not in list_keystore.stdout_lines
environment:
-ES_PATH_CONF: "{{ conf_dir }}"
+ES_PATH_CONF: "{{ es_conf_dir }}"
no_log: true
### END BLOCK elasticsearch keystore ###
#-----------------------------FILE BASED REALM----------------------------------------
- include: elasticsearch-security-file.yml
-when: (es_enable_xpack and "security" in es_xpack_features) and ((es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined))
+when: (es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined)
#-----------------------------ROLE MAPPING ----------------------------------------
#Copy Roles files
- name: Copy role_mapping.yml File for Instance
become: yes
-template: src=security/role_mapping.yml.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/role_mapping.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
+template: src=security/role_mapping.yml.j2 dest={{ es_conf_dir }}/role_mapping.yml owner=root group={{ es_group }} mode=0660 force=yes
when: es_role_mapping is defined
-#-----------------------------AUTH FILE----------------------------------------
-- name: Copy message auth key to elasticsearch
-become: yes
-copy: src={{ es_message_auth_file }} dest={{conf_dir}}{{es_xpack_conf_subdir}}/system_key owner={{ es_user }} group={{ es_group }} mode=0600 force=yes
-when: es_message_auth_file is defined
-#------------------------------------------------------------------------------------
-#Ensure security conf directory is created
-- name: Ensure security conf directory exists
-become: yes
-file: path={{ conf_dir }}/security state=directory owner={{ es_user }} group={{ es_group }}
-changed_when: False
-when: es_enable_xpack and "security" in es_xpack_features
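The actual command behind "Create Bootstrap password for elastic user" sits outside the hunks shown above; a hedged sketch of the usual shape of that step (the exact invocation in the role may differ):

- name: Create Bootstrap password for elastic user (illustrative sketch only)
  become: yes
  shell: echo "{{ es_api_basic_auth_password }}" | {{ es_home }}/bin/elasticsearch-keystore add -x 'bootstrap.password'
  environment:
    ES_PATH_CONF: "{{ es_conf_dir }}"
  no_log: true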

View file

@ -1,33 +1,16 @@
---
-- name: Activate ES license (without security authentication)
-uri:
-method: PUT
-url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license?acknowledge=true"
-body_format: json
-body: "{{ es_xpack_license }}"
-return_content: yes
-register: license_activated
-no_log: True
-when: not "security" in es_xpack_features
-failed_when: >
-license_activated.status != 200 or
-license_activated.json.license_status is not defined or
-license_activated.json.license_status != 'valid'
- name: Activate ES license (with security authentication)
uri:
method: PUT
url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license?acknowledge=true"
-user: "{{es_api_basic_auth_username}}"
+user: "{{es_api_basic_auth_username | default(omit)}}"
-password: "{{es_api_basic_auth_password}}"
+password: "{{es_api_basic_auth_password | default(omit)}}"
body_format: json
+force_basic_auth: yes
body: "{{ es_xpack_license }}"
return_content: yes
-force_basic_auth: yes
register: license_activated
no_log: True
-when: "'security' in es_xpack_features"
failed_when: >
license_activated.status != 200 or
license_activated.json.license_status is not defined or
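Since the single consolidated task now handles both the authenticated and unauthenticated cases, one quick way to confirm activation is to read the license back over the same connection details; a hedged verification sketch (not part of the role, and the response field names assume the license API used by the task above):

- name: Check the active license (illustrative verification)
  uri:
    method: GET
    url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license"
    user: "{{es_api_basic_auth_username | default(omit)}}"
    password: "{{es_api_basic_auth_password | default(omit)}}"
    force_basic_auth: yes
    return_content: yes
  register: license_check
  failed_when: license_check.json.license.status != 'active'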

View file

@ -9,19 +9,19 @@ ES_HOME={{es_home}}
#JAVA_HOME=
# Elasticsearch configuration directory
-CONF_DIR={{conf_dir}}
+CONF_DIR={{ es_conf_dir }}
-ES_PATH_CONF={{conf_dir}}
+ES_PATH_CONF={{ es_conf_dir }}
# Elasticsearch data directory
-DATA_DIR={{ data_dirs | array_to_str }}
+DATA_DIR={{ es_data_dirs | array_to_str }}
# Elasticsearch logs directory
-LOG_DIR={{log_dir}}
+LOG_DIR={{ es_log_dir }}
# Elasticsearch PID directory
-PID_DIR={{pid_dir}}
+PID_DIR={{ es_pid_dir }}
-ES_JVM_OPTIONS={{conf_dir}}/jvm.options
+ES_JVM_OPTIONS={{ es_conf_dir }}/jvm.options
# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#ES_RESTART_ON_UPGRADE=true
@ -33,16 +33,6 @@ ES_JVM_OPTIONS={{conf_dir}}/jvm.options
# Elasticsearch service
################################
-# SysV init.d
-#
-# When executing the init script, this user will be used to run the elasticsearch service.
-# The default value is 'elasticsearch' and is declared in the init.d file.
-# Note that this setting is only used by the init script. If changed, make sure that
-# the configured user can read and write into the data, work, plugins and log directories.
-# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
-ES_USER={{es_user}}
-ES_GROUP={{es_group}}
# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
ES_STARTUP_SLEEP_TIME=5

View file

@ -1,8 +1,8 @@
[elasticsearch-{{ es_repo_name }}]
name=Elasticsearch repository for {{ es_repo_name }} packages
-baseurl=https://artifacts.elastic.co/packages/{{ es_repo_name }}/yum
+baseurl={{ es_repo_base }}/packages/{{ es_repo_name }}/yum
gpgcheck=1
-gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
+gpgkey={{ es_repo_base }}/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
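Templating the base URL means a single variable is enough to point installs at an internal mirror; a sketch, assuming es_repo_base keeps the previously hardcoded https://artifacts.elastic.co as its default:

# Hypothetical group_vars override; the same value feeds both baseurl and gpgkey above.
es_repo_base: "https://mirror.example.internal/elastic-artifacts"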

View file

@ -8,20 +8,16 @@ cluster.name: elasticsearch
{% endif %}
{% if es_config['node.name'] is not defined %}
-node.name: {{inventory_hostname}}-{{es_instance_name}}
+node.name: {{inventory_hostname}}
{% endif %}
#################################### Paths ####################################
# Path to directory containing configuration (this file and logging.yml):
-{% if (es_version | version_compare('6.0.0', '<')) %}
-path.conf: {{ conf_dir }}
-{% endif %}
-path.data: {{ data_dirs | array_to_str }}
-path.logs: {{ log_dir }}
+path.data: {{ es_data_dirs | array_to_str }}
+path.logs: {{ es_log_dir }}
{% if es_path_repo is defined %}
path.repo: {{ es_path_repo }}
@ -35,26 +31,8 @@ action.auto_create_index: false
action.auto_create_index: {{ es_action_auto_create_index }}
{% endif %}
-{% if es_enable_xpack %}
-{% if not "security" in es_xpack_features %}
-xpack.security.enabled: false
-{% endif %}
-{% if not "monitoring" in es_xpack_features %}
-xpack.monitoring.enabled: false
-{% endif %}
-{% if not "alerting" in es_xpack_features %}
-xpack.watcher.enabled: false
-{% endif %}
-{% if not "ml" in es_xpack_features %}
-xpack.ml.enabled: false
-{% endif %}
-{% if not "graph" in es_xpack_features %}
-xpack.graph.enabled: false
-{% endif %}
+{% if es_enable_xpack and es_api_basic_auth_username is defined and es_api_basic_auth_password is defined %}
+xpack.security.enabled: true
{% endif %}
{% if es_mail_config is defined %}
@ -68,8 +46,8 @@ xpack.notification.email:
auth: {{ es_mail_config['require_auth'] }}
host: {{ es_mail_config['host'] }}
port: {{ es_mail_config['port'] }}
-{% if es_mail_config['require_auth'] == true %}
+{% if es_mail_config['require_auth'] == true -%}
user: {{ es_mail_config['user'] }}
password: {{ es_mail_config['pass'] }}
-{% endif %}
+{%- endif %}
{% endif %}
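With the template above, xpack.security.enabled: true is only rendered when X-Pack is enabled and API credentials are supplied; a minimal sketch of vars that satisfy that condition (placeholder values, matching the test playbooks elsewhere in this change):

es_enable_xpack: true
es_api_basic_auth_username: elastic
es_api_basic_auth_password: changeme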

View file

@ -1,229 +0,0 @@
#!/bin/bash
#
# /etc/init.d/elasticsearch -- startup script for Elasticsearch
#
### BEGIN INIT INFO
# Provides: elasticsearch
# Required-Start: $network $remote_fs $named
# Required-Stop: $network $remote_fs $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts elasticsearch
# Description: Starts elasticsearch using start-stop-daemon
### END INIT INFO
PATH=/bin:/usr/bin:/sbin:/usr/sbin
NAME={{es_instance_name}}_{{default_file | basename}}
{% if es_config['node.name'] is defined %}
DESC="Elasticsearch Server - {{es_config['node.name']}}"
{% else %}
DESC="Elasticsearch Server - {{es_instance_name}}"
{% endif %}
DEFAULT=/etc/default/$NAME
if [ `id -u` -ne 0 ]; then
echo "You need root privileges to run this script"
exit 1
fi
. /lib/lsb/init-functions
if [ -r /etc/default/rcS ]; then
. /etc/default/rcS
fi
# The following variables can be overwritten in $DEFAULT
# Run Elasticsearch as this user ID and group ID
ES_USER={{es_user}}
ES_GROUP={{es_group}}
# Directory where the Elasticsearch binary distribution resides
ES_HOME={{es_home}}
# Maximum number of open files
{% if es_max_open_files is defined %}
MAX_OPEN_FILES={{es_max_open_files}}
{% endif %}
# Maximum amount of locked memory
#MAX_LOCKED_MEMORY=
{% if m_lock_enabled %}
MAX_LOCKED_MEMORY=unlimited
{% endif %}
# Elasticsearch log directory
LOG_DIR={{log_dir}}
# Elasticsearch data directory
DATA_DIR={{ data_dirs | array_to_str }}
# Elasticsearch configuration directory
CONF_DIR={{conf_dir}}
ES_PATH_CONF={{ conf_dir }}
# Maximum number of VMA (Virtual Memory Areas) a process can own
{% if es_max_map_count is defined %}
MAX_MAP_COUNT={{es_max_map_count}}
{% endif %}
# Elasticsearch PID file directory
PID_DIR={{pid_dir}}
ES_JVM_OPTIONS="{{conf_dir}}/jvm.options"
# End of variables that can be overwritten in $DEFAULT
# overwrite settings from default file
if [ -f "$DEFAULT" ]; then
. "$DEFAULT"
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
if [ "$ES_USER" != "elasticsearch" ] || [ "$ES_GROUP" != "elasticsearch" ]; then
echo "WARNING: ES_USER and ES_GROUP are deprecated and will be removed in the next major version of Elasticsearch, got: [$ES_USER:$ES_GROUP]"
fi
# Define other required variables
PID_FILE="$PID_DIR/$NAME.pid"
DAEMON=$ES_HOME/bin/elasticsearch
{% if (es_version | version_compare('6.0.0', '<')) %}
DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR"
{% else %}
DAEMON_OPTS="-d -p $PID_FILE"
{% endif %}
export ES_JAVA_OPTS
export JAVA_HOME
export ES_INCLUDE
export ES_JVM_OPTIONS
export ES_PATH_CONF
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi
if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi
if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi
if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi
if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi
if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi
if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi
# Check DAEMON exists
if [ ! -x "$DAEMON" ]; then
echo "The elasticsearch startup script does not exists or it is not executable, tried: $DAEMON"
exit 1
fi
checkJava() {
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
}
case "$1" in
start)
checkJava
log_daemon_msg "Starting $DESC"
pid=`pidofproc -p $PID_FILE elasticsearch`
if [ -n "$pid" ] ; then
log_begin_msg "Already running."
log_end_msg 0
exit 0
fi
# Ensure that the PID_DIR exists (it is cleaned at OS startup time)
if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
fi
if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then
touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
fi
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
if [ -n "$MAX_LOCKED_MEMORY" ]; then
ulimit -l $MAX_LOCKED_MEMORY
fi
if [ -n "$MAX_THREADS" ]; then
ulimit -u $MAX_THREADS
fi
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
fi
# Start Daemon
start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
return=$?
if [ $return -eq 0 ]; then
i=0
timeout={{es_debian_startup_timeout}}
# Wait for the process to be properly started before exiting
until { kill -0 `cat "$PID_FILE"`; } >/dev/null 2>&1
do
sleep 1
i=$(($i + 1))
if [ $i -gt $timeout ]; then
log_end_msg 1
exit 1
fi
done
fi
log_end_msg $return
exit $return
;;
stop)
log_daemon_msg "Stopping $DESC"
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" \
--user "$ES_USER" \
--quiet \
--retry forever/TERM/20 > /dev/null
if [ $? -eq 1 ]; then
log_progress_msg "$DESC is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop $DESC (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_progress_msg "(not running)"
fi
log_end_msg 0
;;
status)
status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $?
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
fi
$0 start
;;
*)
log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

View file

@ -1,217 +0,0 @@
#!/bin/bash
#
# elasticsearch <summary>
#
# chkconfig: 2345 80 20
# description: Starts and stops a single elasticsearch instance on this system
#
### BEGIN INIT INFO
# Provides: Elasticsearch
# Required-Start: $network $named
# Required-Stop: $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: This service manages the elasticsearch daemon
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.
### END INIT INFO
#
# init.d / servicectl compatibility (openSUSE)
#
if [ -f /etc/rc.status ]; then
. /etc/rc.status
rc_reset
fi
#
# Source function library.
#
if [ -f /etc/rc.d/init.d/functions ]; then
. /etc/rc.d/init.d/functions
fi
# Sets the default values for elasticsearch variables used in this script
ES_USER="{{es_user}}"
ES_GROUP="{{es_group}}"
ES_HOME="{{es_home}}"
{% if es_max_open_files is defined %}
MAX_OPEN_FILES={{es_max_open_files}}
{% endif %}
# Maximum number of VMA (Virtual Memory Areas) a process can own
{% if es_max_map_count is defined %}
MAX_MAP_COUNT={{es_max_map_count}}
{% endif %}
LOG_DIR="{{log_dir}}"
DATA_DIR={{ data_dirs | array_to_str }}
CONF_DIR="{{conf_dir}}"
ES_PATH_CONF="{{ conf_dir }}"
PID_DIR="{{pid_dir}}"
# Source the default env file
ES_ENV_FILE="{{instance_default_file}}"
if [ -f "$ES_ENV_FILE" ]; then
. "$ES_ENV_FILE"
fi
if [ "$ES_USER" != "elasticsearch" ] || [ "$ES_GROUP" != "elasticsearch" ]; then
echo "WARNING: ES_USER and ES_GROUP are deprecated and will be removed in the next major version of Elasticsearch, got: [$ES_USER:$ES_GROUP]"
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
exec="$ES_HOME/bin/elasticsearch"
prog="{{es_instance_name}}_{{default_file | basename}}"
pidfile="$PID_DIR/${prog}.pid"
export ES_JAVA_OPTS
export JAVA_HOME
export ES_INCLUDE
export ES_JVM_OPTIONS
export ES_STARTUP_SLEEP_TIME
export ES_PATH_CONF
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi
if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi
if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi
if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi
if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi
if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi
if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi
lockfile=/var/lock/subsys/$prog
# backwards compatibility for old config sysconfig files, pre 0.90.1
if [ -n $USER ] && [ -z $ES_USER ] ; then
ES_USER=$USER
fi
if [ ! -x "$exec" ]; then
echo "The elasticsearch startup script does not exists or it is not executable, tried: $exec"
exit 1
fi
checkJava() {
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
}
start() {
checkJava
[ -x $exec ] || exit 5
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
if [ -n "$MAX_LOCKED_MEMORY" ]; then
ulimit -l $MAX_LOCKED_MEMORY
fi
if [ -n "$MAX_THREADS" ]; then
ulimit -u $MAX_THREADS
fi
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
fi
# Ensure that the PID_DIR exists (it is cleaned at OS startup time)
if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
fi
if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
fi
cd $ES_HOME
echo -n $"Starting $prog: "
# if not running, start it up here, usually something like "daemon $exec"
{% if (es_version | version_compare('6.0.0', '<')) %}
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR
{% else %}
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d
{% endif %}
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $prog: "
# stop it here, often "killproc $prog"
killproc -p $pidfile -d 86400 $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View file

@ -103,14 +103,6 @@
#-XX:+UseGCLogFileRotation
#-XX:NumberOfGCLogFiles=32
#-XX:GCLogFileSize=128M
-# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
-# If documents were already indexed with unquoted fields in a previous version
-# of Elasticsearch, some operations may throw errors.
-#
-# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
-# only for migration purposes.
-#-Delasticsearch.json.allow_unquoted_field_names=true
{% if es_jvm_custom_parameters !='' %}
{% for item in es_jvm_custom_parameters %}
{{ item }}

View file

@ -11,23 +11,14 @@ appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.rolling.fileName = ${sys:es.logs}.log
-{% else %}
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
-{% endif %}
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
-{% else %}
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
-{% endif %}
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
-{% if (es_version | version_compare('6.0.0', '>')) %}
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
@ -38,25 +29,16 @@ appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
-{% endif %}
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
-{% else %}
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
-{% endif %}
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
-{% else %}
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
-{% endif %}
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
@ -70,18 +52,12 @@ logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
-{% else %}
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
-{% endif %}
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
-{% else %}
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
-{% endif %}
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
@ -94,18 +70,10 @@ logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
-{% else %}
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
-{% endif %}
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
-{% if (es_version | version_compare('6.0.0', '<')) %}
-appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
-{% else %}
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
-{% endif %}
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1

View file

@ -1,76 +0,0 @@
[Unit]
Description=Elasticsearch-{{es_instance_name}}
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
Environment=ES_HOME={{es_home}}
Environment=CONF_DIR={{conf_dir}}
Environment=ES_PATH_CONF={{conf_dir}}
Environment=DATA_DIR={{ data_dirs | array_to_str }}
Environment=LOG_DIR={{log_dir}}
Environment=PID_DIR={{pid_dir}}
EnvironmentFile=-{{instance_default_file}}
WorkingDirectory={{es_home}}
User={{es_user}}
Group={{es_group}}
{% if (es_version | version_compare('6.0.0', '<')) %}
ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec
{% endif %}
ExecStart={{es_home}}/bin/elasticsearch \
-p ${PID_DIR}/elasticsearch.pid \
{% if (es_version | version_compare('6.0.0', '<')) %}
-Edefault.path.logs=${LOG_DIR} \
-Edefault.path.data=${DATA_DIR} \
-Edefault.path.conf=${CONF_DIR} \
{% endif %}
--quiet
# StandardOutput is configured to redirect to journalctl since
# some error messages may be logged in standard output before
# elasticsearch logging system is initialized. Elasticsearch
# stores its logs in /var/log/elasticsearch and does not use
# journalctl by default. If you also want to enable journalctl
# logging, you can simply remove the "quiet" option from ExecStart.
StandardOutput=journal
StandardError=inherit
# Specifies the maximum file descriptor number that can be opened by this process
{% if es_max_open_files is defined %}
LimitNOFILE={{es_max_open_files}}
{% endif %}
# Specifies the maximum number of bytes of memory that may be locked into RAM
# Set to "infinity" if you use the 'bootstrap.memory_lock: true' option
# in elasticsearch.yml and 'MAX_LOCKED_MEMORY=unlimited' in {{instance_default_file}}
{% if m_lock_enabled %}
LimitMEMLOCK=infinity
{% endif %}
# Specifies the maximum number of threads that can be started. Elasticsearch requires a
# minimum of 2048.
LimitNPROC={{ es_max_threads }}
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=0
# SIGTERM signal is used to stop the Java process
KillSignal=SIGTERM
# Send the signal only to the JVM rather than its control group
KillMode=process
# Java process is never killed
SendSIGKILL=no
# When a JVM receives a SIGTERM signal it exits with code 143
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

View file

@ -1,139 +0,0 @@
require 'spec_helper'
require 'json'
vars = JSON.parse(File.read('/tmp/vars.json'))
shared_examples 'multi::init' do |vars|
describe service('master_elasticsearch') do
it { should be_running }
end
#test configuration parameters have been set - test all appropriately set in config file
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
it { should be_file }
it { should contain 'http.port: 9201' }
it { should contain 'transport.tcp.port: 9301' }
it { should contain 'node.data: true' }
it { should contain 'node.master: false' }
it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
it { should_not contain 'bootstrap.memory_lock: true' }
if vars['es_major_version'] == '6.x'
it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
else
it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
end
it { should contain "path.data: /opt/elasticsearch/data-1/localhost-#{vars['es_instance_name']},/opt/elasticsearch/data-2/localhost-#{vars['es_instance_name']}" }
it { should contain "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
end
#test configuration parameters have been set for master - test all appropriately set in config file
describe file('/etc/elasticsearch/master/elasticsearch.yml') do
it { should be_file }
it { should contain 'http.port: 9200' }
it { should contain 'transport.tcp.port: 9300' }
it { should contain 'node.data: false' }
it { should contain 'node.master: true' }
it { should contain 'node.name: localhost-master' }
it { should contain 'bootstrap.memory_lock: true' }
if vars['es_major_version'] == '6.x'
it { should_not contain 'path.conf: /etc/elasticsearch/master' }
else
it { should contain 'path.conf: /etc/elasticsearch/master' }
end
it { should contain 'path.data: /opt/elasticsearch/master/localhost-master' }
it { should contain 'path.logs: /var/log/elasticsearch/localhost-master' }
end
describe 'Master listening' do
it 'listening in port 9200' do
expect(port 9200).to be_listening
end
end
#test we started on the correct port was used for master
describe 'master started' do
it 'master node should be running', :retry => 3, :retry_wait => 10 do
expect(curl_json('http://localhost:9200')['name']).to eq('localhost-master')
end
end
#test we started on the correct port was used for node 1
describe "#{vars['es_instance_name']} started" do
it 'node should be running', :retry => 3, :retry_wait => 10 do
expect(curl_json('http://localhost:9201')['name']).to eq("localhost-#{vars['es_instance_name']}")
end
end
#Confirm scripts are on both nodes
describe file('/etc/elasticsearch/master/scripts') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file('/etc/elasticsearch/master/scripts/calculate-score.groovy') do
it { should be_file }
it { should be_owned_by 'elasticsearch' }
end
#Confirm that the data directory has only been set for the first node
describe file('/opt/elasticsearch/master/localhost-master') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file("/opt/elasticsearch/data-1/localhost-#{vars['es_instance_name']}") do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file("/opt/elasticsearch/data-2/localhost-#{vars['es_instance_name']}") do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
#test to make sure mlock was applied
describe command('curl -s "localhost:9200/_nodes/localhost-master/process?pretty=true" | grep mlockall') do
its(:stdout) { should match /true/ }
its(:exit_status) { should eq 0 }
end
#test to make sure mlock was not applied
describe command("curl -s 'localhost:9201/_nodes/localhost-#{vars['es_instance_name']}/process?pretty=true' | grep mlockall") do
its(:stdout) { should match /false/ }
its(:exit_status) { should eq 0 }
end
describe 'version check on master' do
it 'should be reported as version '+vars['es_version'] do
command = command('curl -s localhost:9200 | grep number')
expect(command.stdout).to match(vars['es_version'])
expect(command.exit_status).to eq(0)
end
end
describe 'version check on data' do
it 'should be reported as version '+vars['es_version'] do
command = command('curl -s localhost:9201 | grep number')
expect(command.stdout).to match(vars['es_version'])
expect(command.exit_status).to eq(0)
end
end
for plugin in vars['es_plugins']
plugin = plugin['plugin']
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true | grep '+plugin) do
its(:exit_status) { should eq 0 }
end
describe command('curl -s localhost:9201/_nodes/plugins?pretty=true | grep '+plugin) do
its(:exit_status) { should eq 0 }
end
describe file('/usr/share/elasticsearch/plugins/'+plugin) do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
end
end

View file

@ -1,13 +1,13 @@
require 'spec_helper'
shared_examples 'oss::init' do |vars|
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/log4j2.properties") do
+describe file("/etc/elasticsearch/log4j2.properties") do
it { should be_file }
-it { should be_owned_by 'elasticsearch' }
+it { should be_owned_by 'root' }
it { should_not contain 'CUSTOM LOG4J FILE' }
end
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/jvm.options") do
+describe file("/etc/elasticsearch/jvm.options") do
it { should be_file }
-it { should be_owned_by vars['es_user'] }
+it { should be_owned_by 'root' }
end
end

View file

@ -21,6 +21,22 @@ es_api_url = "http://localhost:#{vars['es_api_port']}"
username = vars['es_api_basic_auth_username']
password = vars['es_api_basic_auth_password']
+# Sample of default features status
+features = {
+'monitoring' => {
+'enabled' => 'true',
+'available' => 'true'
+},
+'ml' => {
+'enabled' => 'true',
+'available' => 'false'
+},
+'sql' => {
+'enabled' => 'true',
+'available' => 'true'
+}
+}
shared_examples 'shared::init' do |vars|
describe 'version check' do
it 'should be reported as version '+vars['es_version'] do
@ -35,37 +51,33 @@ shared_examples 'shared::init' do |vars|
it 'xpack should be activated' do
expect(curl_json("#{es_api_url}/_license", username=username, password=password)['license']['status']).to eq('active')
end
-features = curl_json("#{es_api_url}/_xpack", username=username, password=password)
+if vars.key?('es_xpack_features')
curl_json("#{es_api_url}/_xpack", username=username, password=password)['features'].each do |feature,values|
enabled = vars['es_xpack_features'].include? feature
status = if enabled then 'enabled' else 'disabled' end
it "the xpack feature '#{feature}' to be #{status}" do
expect(values['enabled'] = enabled)
end
+end
end
-end
-# X-Pack is no longer installed as a plugin in elasticsearch
-if vars['es_major_version'] == '5.x'
-describe file('/usr/share/elasticsearch/plugins/x-pack') do
-it { should be_directory }
-it { should be_owned_by vars['es_user'] }
-end
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/x-pack") do
-it { should be_directory }
-it { should be_owned_by vars['es_user'] }
-end
-describe 'x-pack-core plugin' do
-it 'should be installed with the correct version' do
-plugins = curl_json("#{es_api_url}/_nodes/plugins", username=username, password=password)
-node, data = plugins['nodes'].first
-version = 'plugin not found'
-name = 'x-pack'
-data['plugins'].each do |plugin|
-if plugin['name'] == name
-version = plugin['version']
-end
-end
-expect(version).to eql(vars['es_version'])
+else
+features.each do |feature, status|
+feature_available = curl_json("#{es_api_url}/_xpack", username=username, password=password)['features'][feature]['available']
+if feature_available == "true"
+status = "available"
+else
+status = "unavailable"
+end
+it "the xpack feature '#{feature}' to be #{status}" do
+expect(feature_available = status['available'])
+end
+feature_enabled = curl_json("#{es_api_url}/_xpack", username=username, password=password)['features'][feature]['enabled']
+if feature_enabled == "true"
+status = "enabled"
+else
+status = "disabled"
+end
+it "the xpack feature '#{feature}' to be #{status}" do
+expect(feature_available = status['enabled'])
end
end
end
@ -85,7 +97,7 @@ shared_examples 'shared::init' do |vars|
it { should be_installed }
end
-describe service("#{vars['es_instance_name']}_elasticsearch") do
+describe service("elasticsearch") do
it { should be_running }
end
@ -96,11 +108,11 @@ shared_examples 'shared::init' do |vars|
if vars['es_templates']
describe file('/etc/elasticsearch/templates') do
it { should be_directory }
-it { should be_owned_by vars['es_user'] }
+it { should be_owned_by 'root' }
end
describe file('/etc/elasticsearch/templates/basic.json') do
it { should be_file }
-it { should be_owned_by vars['es_user'] }
+it { should be_owned_by 'root' }
end
#This is possibly subject to format changes in the response across versions so may fail in the future
describe 'Template Contents Correct' do
@ -108,42 +120,25 @@ shared_examples 'shared::init' do |vars|
template = curl_json("#{es_api_url}/_template/basic", username=username, password=password)
expect(template.key?('basic'))
expect(template['basic']['settings']['index']['number_of_shards']).to eq("1")
-expect(template['basic']['mappings']['type1']['_source']['enabled']).to eq(false)
+if vars['es_major_version'] == '7.x'
+expect(template['basic']['mappings']['_source']['enabled']).to eq(false)
+else
+expect(template['basic']['mappings']['type1']['_source']['enabled']).to eq(false)
+end
end
end
end
-if vars['es_scripts']
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/scripts") do
-it { should be_directory }
-it { should be_owned_by 'elasticsearch' }
-end
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/scripts/calculate-score.groovy") do
-it { should be_file }
-it { should be_owned_by 'elasticsearch' }
-end
-end
-describe file('/etc/init.d/elasticsearch') do
-it { should_not exist }
-end
describe file(family['defaults_path']) do
its(:content) { should match '' }
end
-describe file('/etc/elasticsearch/elasticsearch.yml') do
-it { should_not exist }
-end
-describe file('/etc/elasticsearch/logging.yml') do
-it { should_not exist }
-end
if vars.key?('es_plugins')
vars['es_plugins'].each do |plugin|
name = plugin['plugin']
describe file('/usr/share/elasticsearch/plugins/'+name) do
it { should be_directory }
-it { should be_owned_by vars['es_user'] }
+it { should be_owned_by 'root' }
end
it 'should be installed and the right version' do
plugins = curl_json("#{es_api_url}/_nodes/plugins", username=username, password=password)
@ -156,15 +151,37 @@ shared_examples 'shared::init' do |vars|
end
end
end
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
-it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
+describe file("/etc/elasticsearch/elasticsearch.yml") do
+it { should be_owned_by 'root' }
+it { should contain "node.name: localhost" }
it { should contain 'cluster.name: elasticsearch' }
-if vars['es_major_version'] == '6.x'
-it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
-else
-it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
+it { should_not contain "path.conf: /etc/elasticsearch" }
+its(:content) { should match "path.data: #{vars['es_data_dirs'].join(',')}" }
+its(:content) { should match "path.logs: /var/log/elasticsearch" }
+end
+if vars['es_use_repository']
+if vars['ansible_os_family'] == 'RedHat'
+describe file("/etc/yum.repos.d/elasticsearch-#{vars['es_repo_name']}.repo") do
+it { should exist }
+end
+describe yumrepo("elasticsearch-#{vars['es_repo_name']}") do
+it { should exist }
+it { should be_enabled }
+end
+describe file("/etc/yum.repos.d/elasticsearch-#{vars['es_other_repo_name']}.repo") do
+it { should_not exist }
+end
+describe yumrepo("elasticsearch-#{vars['es_other_repo_name']}") do
+it { should_not exist }
+it { should_not be_enabled }
+end
+end
+if vars['ansible_os_family'] == 'Debian'
+describe command('apt-cache policy') do
+its(:stdout) { should match /elastic.co.*\/#{Regexp.quote(vars['es_repo_name'])}\//}
+its(:stdout) { should_not match /elastic.co.*\/#{Regexp.quote(vars['es_other_repo_name'])}\//}
+end
+end
end
-its(:content) { should match "path.data: #{vars['data_dirs'].join(',')}" }
-its(:content) { should match "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
end
end

View file

@ -1,17 +0,0 @@
require 'spec_helper'
shared_examples 'xpack::init' do |vars|
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
it { should contain 'cluster.name: elasticsearch' }
if vars['es_major_version'] == '6.x'
it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
else
it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
end
it { should contain "path.data: /var/lib/elasticsearch/localhost-#{vars['es_instance_name']}" }
it { should contain "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
it { should contain 'xpack.security.enabled: false' }
it { should contain 'xpack.watcher.enabled: false' }
end
end

View file

@ -3,27 +3,15 @@ require 'json'
vars = JSON.parse(File.read('/tmp/vars.json'))
shared_examples 'xpack_upgrade::init' do |vars|
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
-it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
-it { should contain 'cluster.name: elasticsearch' }
-if vars['es_major_version'] == '6.x'
-it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
-else
-it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
-end
-it { should contain "path.data: /var/lib/elasticsearch/localhost-#{vars['es_instance_name']}" }
-it { should contain "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
-end
#Test users file, users_roles and roles.yml
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}#{vars['es_xpack_conf_subdir']}/users_roles") do
-it { should be_owned_by 'elasticsearch' }
+describe file("/etc/elasticsearch/users_roles") do
+it { should be_owned_by 'root' }
it { should contain 'admin:es_admin' }
it { should contain 'power_user:testUser' }
end
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}#{vars['es_xpack_conf_subdir']}/users") do
-it { should be_owned_by 'elasticsearch' }
+describe file("/etc/elasticsearch/users") do
+it { should be_owned_by 'root' }
it { should contain 'testUser:' }
it { should contain 'es_admin:' }
end
@ -35,16 +23,21 @@ shared_examples 'xpack_upgrade::init' do |vars|
end
end
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
-it { should contain 'security.authc.realms.file1.order: 0' }
-it { should contain 'security.authc.realms.file1.type: file' }
-it { should contain 'security.authc.realms.native1.order: 1' }
-it { should contain 'security.authc.realms.native1.type: native' }
+describe file("/etc/elasticsearch/elasticsearch.yml") do
+if vars['es_major_version'] == '7.x'
+it { should contain 'security.authc.realms.file.file1.order: 0' }
+it { should contain 'security.authc.realms.native.native1.order: 1' }
+else
+it { should contain 'security.authc.realms.file1.order: 0' }
+it { should contain 'security.authc.realms.file1.type: file' }
+it { should contain 'security.authc.realms.native1.order: 1' }
+it { should contain 'security.authc.realms.native1.type: native' }
+end
end
#Test contents of role_mapping.yml
-describe file("/etc/elasticsearch/#{vars['es_instance_name']}#{vars['es_xpack_conf_subdir']}/role_mapping.yml") do
-it { should be_owned_by 'elasticsearch' }
+describe file("/etc/elasticsearch/role_mapping.yml") do
+it { should be_owned_by 'root' }
it { should contain 'power_user:' }
it { should contain '- cn=admins,dc=example,dc=com' }
it { should contain 'user:' }
@ -91,13 +84,4 @@ shared_examples 'xpack_upgrade::init' do |vars|
expect(command.exit_status).to eq(0)
end
end
-if vars['es_major_version'] == '5.x' # kibana default password has been removed in 6.x
-describe 'kibana access check' do
-it 'should be reported as version '+vars['es_version'] do
-result = curl_json('http://localhost:9200/', username='kibana', password='changeme')
-expect(result['version']['number']).to eq(vars['es_version'])
-end
-end
-end
end

View file

@ -10,18 +10,13 @@
roles:
- elasticsearch
vars:
-es_instance_name: "security_node"
es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
es_config:
-xpack.security.enabled: True
-xpack.security.authc.realms.file1.type: "file"
-xpack.security.authc.realms.file1.order: 1
-xpack.security.authc.realms.native1.type: "native"
-xpack.security.authc.realms.native1.order: 0
+xpack.security.authc.realms.file.file1.order: 1
+xpack.security.authc.realms.native.native1.type: "native"
es_heap_size: "1g"
-es_enable_xpack: true
es_plugins:
-- plugin: ingest-geoip
+- plugin: ingest-attachment
es_xpack_features:
- security
- alerting

View file

@ -1,51 +0,0 @@
---
# Test ability to deploy multiple instances to a machine
- name: Elasticsearch Multi test - master on 9200
hosts: localhost
post_tasks:
- include: elasticsearch/test/integration/debug.yml
roles:
- elasticsearch
vars:
es_instance_name: "master"
es_data_dirs:
- "/opt/elasticsearch/master"
es_config:
discovery.zen.ping.unicast.hosts: "localhost:9300"
http.port: 9200
transport.tcp.port: 9300
node.data: false
node.master: true
bootstrap.memory_lock: true
es_enable_xpack: false
es_scripts: true
es_templates: true
es_heap_size: "1g"
es_api_port: 9200
es_plugins:
- plugin: ingest-geoip
- name: Elasticsearch Multi test - data on 9201
hosts: localhost
post_tasks:
- include: elasticsearch/test/integration/debug.yml
roles:
- elasticsearch
vars:
es_enable_xpack: false
es_scripts: true
es_templates: true
es_heap_size: "1g"
es_api_port: 9201
es_plugins:
- plugin: ingest-geoip
es_instance_name: "node1"
es_data_dirs:
- "/opt/elasticsearch/data-1"
- "/opt/elasticsearch/data-2"
es_config:
discovery.zen.ping.unicast.hosts: "localhost:9300"
http.port: 9201
transport.tcp.port: 9301
node.data: true
node.master: false
View file
@ -1,2 +0,0 @@
---
- host: test-kitchen
View file
@ -1,9 +0,0 @@
require 'multi_spec'
require 'shared_spec'
require 'json'
vars = JSON.parse(File.read('/tmp/vars.json'))
describe 'Multi Tests' do
include_examples 'shared::init', vars
include_examples 'multi::init', vars
end
View file
@ -6,8 +6,7 @@
   roles:
     - elasticsearch
   vars:
-    es_instance_name: "node1"
-    es_version: "{{ '6.2.4' if es_major_version == '6.x' else '5.6.9' }}" # This is set to an older version than the current default to force an upgrade
+    es_version: "{{ '7.0.0' if es_major_version == '7.x' else '6.7.1' }}" # This is set to an older version than the current default to force an upgrade
     es_enable_xpack: false
     es_heap_size: "1g"
@ -18,10 +17,9 @@
   roles:
     - elasticsearch
   vars:
-    es_instance_name: "node1"
     es_enable_xpack: true
-    es_api_basic_auth_username: elastic
-    es_api_basic_auth_password: changeme
     es_heap_size: "1g"
     es_xpack_features:
-      - security
+      - monitoring
+      - graph
+      - ml
View file
@ -6,8 +6,7 @@
   roles:
     - elasticsearch
   vars:
-    es_instance_name: "node1"
-    es_version: "{{ '6.2.4' if es_major_version == '6.x' else '5.6.9' }}" # This is set to an older version than the current default to force an upgrade
+    es_version: "{{ '7.0.0' if es_major_version == '7.x' else '6.7.1' }}" # This is set to an older version than the current default to force an upgrade
     es_enable_xpack: false
     es_heap_size: "1g"
@ -18,6 +17,5 @@
   roles:
     - elasticsearch
   vars:
-    es_instance_name: "node1"
     es_enable_xpack: false
     es_heap_size: "1g"
View file
@ -6,10 +6,9 @@
   roles:
     - elasticsearch
   vars:
-    es_instance_name: "node1"
     es_enable_xpack: false
     es_heap_size: "1g"
     es_plugins:
-      - plugin: ingest-geoip
+      - plugin: ingest-attachment
 #Do not add tests here. This test is run twice and confirms idempotency.
View file
@ -6,29 +6,27 @@
   roles:
     - elasticsearch
   vars:
-    es_instance_name: "node1"
-    es_api_port: 9200
-    es_config:
-      http.port: 9200
-      transport.tcp.port: 9300
-      discovery.zen.ping.unicast.hosts: "localhost:9300"
-      xpack.security.authc.realms.file1.type: "file"
-      xpack.security.authc.realms.file1.order: 0
-      xpack.security.authc.realms.native1.type: "native"
-      xpack.security.authc.realms.native1.order: 1
+    es_config_6x:
+      xpack.security.authc.realms.file1.order: 0
+      xpack.security.authc.realms.file1.type: file
+      xpack.security.authc.realms.native1.order: 1
+      xpack.security.authc.realms.native1.type: native
+    es_config_7x:
+      xpack.security.authc.realms.file.file1.order: 0
+      xpack.security.authc.realms.native.native1.order: 1
+    es_config: "{{ es_config_7x if es_major_version == '7.x' else es_config_6x }}"
     es_heap_size: "1g"
     es_templates: true
-    es_version: "{{ '6.2.4' if es_major_version == '6.x' else '5.6.9' }}" # This is set to an older version than the current default to force an upgrade
-    es_enable_xpack: true
+    es_major_version: "7.x"
+    es_version: "{{ '7.0.0' if es_major_version == '7.x' else '6.7.1' }}" # This is set to an older version than the current default to force an upgrade
     es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
     es_plugins:
-      - plugin: ingest-geoip
+      - plugin: ingest-attachment
     es_xpack_features:
       - security
       - alerting
     es_api_basic_auth_username: elastic
     es_api_basic_auth_password: changeme
-    es_message_auth_file: system_key
     es_role_mapping:
       power_user:
         - "cn=admins,dc=example,dc=com"
@ -116,20 +114,17 @@
   roles:
     - elasticsearch
   vars:
-    es_api_port: 9200
-    es_instance_name: "node1"
-    es_config:
-      http.port: 9200
-      transport.tcp.port: 9300
-      discovery.zen.ping.unicast.hosts: "localhost:9300"
-      xpack.security.enabled: True
-      xpack.security.authc.realms.file1.type: "file"
-      xpack.security.authc.realms.file1.order: 0
-      xpack.security.authc.realms.native1.type: "native"
-      xpack.security.authc.realms.native1.order: 1
+    es_config_6x:
+      xpack.security.authc.realms.file1.order: 0
+      xpack.security.authc.realms.file1.type: file
+      xpack.security.authc.realms.native1.order: 1
+      xpack.security.authc.realms.native1.type: native
+    es_config_7x:
+      xpack.security.authc.realms.file.file1.order: 0
+      xpack.security.authc.realms.native.native1.order: 1
+    es_config: "{{ es_config_7x if es_major_version == '7.x' else es_config_6x }}"
     es_heap_size: "1g"
     es_templates: true
-    es_enable_xpack: true
     es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
     es_plugins:
       - plugin: ingest-attachment
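For orientation, the es_config_6x / es_config_7x pair above feeds a single Jinja2 conditional, so each play picks the whole settings block at runtime; a minimal sketch using the same variable names (values abbreviated):

vars:
  es_config_6x:
    xpack.security.authc.realms.file1.order: 0
  es_config_7x:
    xpack.security.authc.realms.file.file1.order: 0
  es_config: "{{ es_config_7x if es_major_version == '7.x' else es_config_6x }}"

With es_major_version set to "7.x" the nested 7.x realm keys are applied; any other value falls back to the flat 6.x block.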
View file
@ -7,16 +7,4 @@
   roles:
     - elasticsearch
   vars:
-    es_api_port: 9200
-    es_instance_name: "node1"
-    es_config:
-      http.port: 9200
-      transport.tcp.port: 9300
-      discovery.zen.ping.unicast.hosts: "localhost:9300"
-    es_xpack_custom_url: "https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{{ es_version }}.zip"
     es_heap_size: 2g
-    es_enable_xpack: true
-    es_xpack_features:
-      - monitoring
-      - graph
-      - ml
View file
@ -1,9 +1,7 @@
-require 'xpack_spec'
 require 'shared_spec'
 require 'json'
 vars = JSON.parse(File.read('/tmp/vars.json'))
 describe 'Xpack upgrade Tests' do
   include_examples 'shared::init', vars
-  include_examples 'xpack::init', vars
 end
View file
@ -1,6 +1,6 @@
 VERSION:
+  - 7.x
   - 6.x
-  - 5.x
 OS:
   - ubuntu-1404
   - ubuntu-1604
@ -14,4 +14,3 @@ TEST_TYPE:
   - oss-to-xpack-upgrade
   - xpack
   - xpack-upgrade
-  - multi
View file
@ -1,8 +1,4 @@
 ---
-es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch"
-es_conf_dir: "/etc/elasticsearch"
-sysd_script: "/usr/lib/systemd/system/elasticsearch.service"
-init_script: "/etc/init.d/elasticsearch"
-#add supported features here
-supported_xpack_features: ["alerting","monitoring","graph","security"]
+es_package_url: "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch"
 reserved_xpack_users: ["elastic","kibana","logstash_system"]
+sysd_config_file: "/etc/systemd/system/elasticsearch.service.d/override.conf"