This commit is contained in:
Dimitrios Liappis 2016-07-25 15:32:36 +03:00
commit 74cf95caa3
39 changed files with 796 additions and 201 deletions

View file

@ -8,6 +8,7 @@ provisioner:
roles_path: ../
require_ansible_repo: true
ansible_verbose: true
ansible_version: 2.0.2
http_proxy: <%= ENV['HTTP_PROXY'] %>
https_proxy: <%= ENV['HTTPS_PROXY'] %>
no_proxy: localhost,127.0.0.1
@ -19,7 +20,7 @@ platforms:
privileged: true
provision_command:
- apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible
- apt-get update && apt-get -y -q install ansible python-apt python-pycurl
- apt-get update && apt-get -y -q install python-apt python-pycurl
use_sudo: false
- name: debian-7
driver_config:
@ -27,7 +28,6 @@ platforms:
privileged: true
provision_command:
- apt-get update && apt-get -y install python python-dev python-pip build-essential libyaml-dev python-yaml
- pip install ansible
- apt-get install -y -q net-tools
use_sudo: false
- name: debian-8
@ -36,7 +36,6 @@ platforms:
privileged: true
provision_command:
- apt-get update && apt-get -y install python python-dev python-pip build-essential libyaml-dev python-yaml curl wget
- pip install ansible
- apt-get install -y -q net-tools
- sed -ri 's/^#?PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config
- sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
@ -75,8 +74,6 @@ suites:
es_plugins:
- plugin: lmenezes/elasticsearch-kopf
version: master
- plugin: license
- plugin: marvel-agent
provisioner:
playbook: test/integration/package.yml
- name: config-2x
@ -91,8 +88,6 @@ suites:
es_plugins:
- plugin: lmenezes/elasticsearch-kopf
version: master
- plugin: license
- plugin: marvel-agent
provisioner:
playbook: test/integration/multi.yml
- name: standard-1x
@ -137,3 +132,8 @@ suites:
version: latest
provisioner:
playbook: test/integration/multi.yml
- name: xpack-2x
run_list:
attributes:
provisioner:
playbook: test/integration/xpack.yml

View file

@ -1,6 +1,6 @@
source 'https://rubygems.org'
gem 'test-kitchen', '1.4.2'
gem "kitchen-docker", '2.1.0'
gem 'kitchen-ansible', '0.40.1'
gem 'net-ssh', '~> 2.0'
gem 'test-kitchen', '1.8.0'
gem "kitchen-docker", '2.5.0'
gem 'kitchen-ansible', '0.44.6'
gem 'net-ssh', '~> 3.0'

View file

@ -1,30 +1,27 @@
GEM
remote: https://rubygems.org/
specs:
faraday (0.9.2)
multipart-post (>= 1.2, < 3)
highline (1.7.8)
kitchen-ansible (0.40.1)
librarian-ansible
artifactory (2.3.3)
kitchen-ansible (0.44.6)
net-ssh (~> 3.0)
test-kitchen (~> 1.4)
kitchen-docker (2.1.0)
kitchen-docker (2.5.0)
test-kitchen (>= 1.0.0)
librarian (0.1.2)
highline
thor (~> 0.15)
librarian-ansible (3.0.0)
faraday
librarian (~> 0.1.0)
mixlib-install (1.1.0)
artifactory
mixlib-shellout
mixlib-versioning
mixlib-shellout (2.2.6)
multipart-post (2.0.0)
mixlib-versioning (1.1.0)
net-scp (1.2.1)
net-ssh (>= 2.6.5)
net-ssh (2.9.4)
net-ssh (3.2.0)
safe_yaml (1.0.4)
test-kitchen (1.4.2)
test-kitchen (1.8.0)
mixlib-install (~> 1.0, >= 1.0.4)
mixlib-shellout (>= 1.2, < 3.0)
net-scp (~> 1.1)
net-ssh (~> 2.7, < 2.10)
net-ssh (>= 2.9, < 4.0)
safe_yaml (~> 1.0)
thor (~> 0.18)
thor (0.19.1)
@ -33,10 +30,10 @@ PLATFORMS
ruby
DEPENDENCIES
kitchen-ansible (= 0.40.1)
kitchen-docker (= 2.1.0)
net-ssh (~> 2.0)
test-kitchen (= 1.4.2)
kitchen-ansible (= 0.44.6)
kitchen-docker (= 2.5.0)
net-ssh (~> 3.0)
test-kitchen (= 1.8.0)
BUNDLED WITH
1.11.2

View file

@ -251,7 +251,6 @@ controlled by the following parameters:
* ```es_data_dirs``` - defaults to "/var/lib/elasticsearch". This can be a list or comma separated string e.g. ["/opt/elasticsearch/data-1","/opt/elasticsearch/data-2"] or "/opt/elasticsearch/data-1,/opt/elasticsearch/data-2"
* ```es_log_dir``` - defaults to "/var/log/elasticsearch".
* ```es_work_dir``` - defaults to "/tmp/elasticsearch".
* ```es_plugin_dir``` - defaults to "/usr/share/elasticsearch/plugins".
* ```es_restart_on_change``` - defaults to true. If false, changes will not result in Elasticsearch being restarted.
* ```es_plugins_reinstall``` - defaults to false. If true, all currently installed plugins will be removed from a node. Listed plugins will then be re-installed.

View file

@ -1,6 +1,6 @@
---
es_major_version: "2.x"
es_version: "2.2.0"
es_version: "2.3.4"
es_version_lock: false
es_use_repository: true
es_start_service: true
@ -13,13 +13,16 @@ es_templates: false
es_user: elasticsearch
es_group: elasticsearch
es_config: {}
es_install_shield: false
#Need to provide default directories
es_pid_dir: "/var/run/elasticsearch"
es_data_dirs: "/var/lib/elasticsearch"
es_log_dir: "/var/log/elasticsearch"
es_work_dir: "/tmp/elasticsearch"
es_plugin_dir: "/usr/share/elasticsearch/plugins"
es_max_open_files: 65536
es_allow_downgrades: false
es_enable_xpack: false
es_xpack_features: []
#These are used for internal operations performed by ansible.
#They do not effect the current configuration
es_api_host: "localhost"
es_api_port: 9200

View file

@ -19,8 +19,18 @@ def append_to_list(values=[], suffix=''):
def array_to_str(values=[],separator=','):
return separator.join(values)
def extract_role_users(users={}):
role_users=[]
for user,details in users.iteritems():
if "roles" in details:
for role in details["roles"]:
role_users.append(role+":"+user)
return role_users
class FilterModule(object):
def filters(self):
return {'modify_list': modify_list,
'append_to_list':append_to_list,
'array_to_str':array_to_str}
'array_to_str':array_to_str,
'extract_role_users':extract_role_users}

View file

@ -0,0 +1,34 @@
---
- name: Ensure elasticsearch is started
service: name={{instance_init_script | basename}} state=started enabled=yes
- name: Wait for elasticsearch to startup
wait_for: port={{es_api_port}} delay=10
- name: Get template files
shell: find . -maxdepth 1 -type f | sed "s#\./##" | sed "s/.json//" chdir=/etc/elasticsearch/templates
register: resultstemplate
- name: Install templates without auth
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_template/{{item}}
method: PUT
status_code: 200
body_format: json
body: "{{ lookup('file', '/etc/elasticsearch/templates/'+item+'.json') }}"
when: not es_enable_xpack or not es_xpack_features is defined or not '"shield" in es_xpack_features'
with_items: "{{ resultstemplate.stdout_lines }}"
- name: Install templates with auth
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_template/{{item}}
method: PUT
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
body_format: json
body: "{{ lookup('file', '/etc/elasticsearch/templates/'+item+'.json') }}"
when: es_enable_xpack and es_xpack_features is defined and '"shield" in es_xpack_features'
with_items: "{{ resultstemplate.stdout_lines }}"

View file

@ -1,4 +1,17 @@
- name: restart elasticsearch
service: name={{instance_init_script | basename}} state=restarted enabled=yes
when: es_restart_on_change and es_start_service and not elasticsearch_started.changed and ((plugin_installed is defined and plugin_installed.changed) or (elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed))
when: es_restart_on_change and es_start_service and not elasticsearch_started.changed and ((plugin_installed is defined and plugin_installed.changed) or (xpack_state.changed) or (elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed))
- name: load-native-realms
include: ./handlers/shield/elasticsearch-shield-native.yml
when: (es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined)
#Templates are a handler as they need to come after a restart e.g. suppose user removes shield on a running node and doesn't
#specify es_api_basic_auth_username and es_api_basic_auth_password. The templates will subsequently not be removed if we don't wait for the node to restart.
#Templates done after restart therefore - as a handler.
- name: load-templates
include: ./handlers/elasticsearch-templates.yml
when: es_templates

View file

@ -0,0 +1,121 @@
---
- name: Ensure elasticsearch is started
service: name={{instance_init_script | basename}} state=started enabled=yes
- name: Wait for elasticsearch to startup
wait_for: port={{es_api_port}} delay=10
- set_fact: manage_native_users=false
- set_fact: manage_native_users=true
when: es_users is defined and es_users.native is defined
- set_fact: manage_native_roles=false
- set_fact: manage_native_roles=true
when: es_roles is defined and es_roles.native is defined
#If the node has just has shield installed it maybe either stopped or started 1. if stopped, we need to start to load native realms 2. if started, we need to restart to load
#List current users
- name: List Native Users
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_shield/user
method: GET
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
status_code: 200
register: user_list_response
when: manage_native_users
- set_fact: current_users={{user_list_response.json.keys() | list}}
when: manage_native_users
#Identify non declared users
- set_fact: users_to_remove={{ current_users | difference ( es_users.native.keys() ) }}
when: manage_native_users
#Delete all non required users
- name: Delete Native Users
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_shield/user/{{item}}
method: DELETE
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_users and users_to_remove | length > 0
with_items: "{{users_to_remove}}"
#Overwrite all other users
- name: Update Native Users
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_shield/user/{{item.key}}
method: POST
body_format: json
body: "{{item.value | to_json}}"
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_users and es_users.native.keys() > 0
with_dict: "{{es_users.native}}"
#List current roles
- name: List Native Roles
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_shield/role
method: GET
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
status_code: 200
register: role_list_response
when: manage_native_roles
#Identify undeclared roles
- set_fact: current_roles={{role_list_response.json.keys() | list}}
when: manage_native_users
- debug: msg="{{current_roles}}"
- set_fact: roles_to_remove={{ current_roles | difference ( es_roles.native.keys() ) }}
when: manage_native_roles
#Delete all non required roles
- name: Delete Native Roles
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_shield/role/{{item}}
method: DELETE
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_roles and roles_to_remove | length > 0
with_items: "{{roles_to_remove}}"
#Update other roles
- name: Update Native Roles
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_shield/role/{{item.key}}
method: POST
body_format: json
body: "{{item.value | to_json}}"
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_roles and es_roles.native.keys() > 0
with_dict: "{{es_roles.native}}"

View file

@ -8,7 +8,7 @@ galaxy_info:
company: "Elastic.co"
license: "license (Apache)"
# Require 1.6 for apt deb install
min_ansible_version: 1.6
min_ansible_version: 2.0
platforms:
- name: EL
versions:

View file

@ -1,22 +0,0 @@
# Check for mandatory parameters
- fail: msg="es_instance_name must be specified and cannot be blank"
when: es_instance_name is not defined or es_instance_name == ''
- fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined"
when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '')
- set_fact: multi_cast={{ (es_version | version_compare('2.0', '<') and es_config['discovery.zen.ping.multicast.enabled'] is not defined) or (es_config['discovery.zen.ping.multicast.enabled'] is defined and es_config['discovery.zen.ping.multicast.enabled'])}}
- debug: msg="WARNING - It is recommended you specify the parameter 'http.port' when multicast is disabled"
when: not multi_cast and es_config['http.port'] is not defined
- debug: msg="WARNING - It is recommended you specify the parameter 'transport.tcp.port' when multicast is disabled"
when: not multi_cast and es_config['transport.tcp.port'] is not defined
- debug: msg="WARNING - It is recommended you specify the parameter 'discovery.zen.ping.unicast.hosts' when multicast is disabled"
when: not multi_cast and es_config['discovery.zen.ping.unicast.hosts'] is not defined
#If the user attempts to lock memory they must specify a heap size
- fail: msg="If locking memory with bootstrap.mlockall a heap size must be specified"
when: es_config['bootstrap.mlockall'] is defined and es_config['bootstrap.mlockall'] == True and es_heap_size is not defined

View file

@ -26,3 +26,7 @@
apt: deb=/tmp/elasticsearch-{{ es_version }}.deb
when: not es_use_repository
register: elasticsearch_install_from_package
# ansible uri module requires httplib2
- name: pip httplib2
pip: name=httplib2 extra_args="--user"

View file

@ -20,3 +20,7 @@
yum: name={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.noarch.rpm{% endif %} state=present
when: not es_use_repository
register: elasticsearch_install_from_package
# ansible uri module requires httplib2
- name: pip httplib2
pip: name=httplib2 extra_args="--user"

View file

@ -1,42 +1,6 @@
---
# Configure Elasticsearch Node
#Use systemd for the following distributions:
#
#Ubuntu 15 and up
#Debian 8 and up
#Centos 7 and up
#Relies on elasticsearch distribution installing a serviced script to determine whether one should be copied.
- set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version | version_compare('8', '>=')) or (ansible_distribution == 'CentOS' and ansible_distribution_version | version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version | version_compare('15', '>=')) }}
tags:
- always
- set_fact: instance_sysd_script={{sysd_script | dirname }}/{{es_instance_name}}_{{sysd_script | basename}}
when: use_system_d
tags:
- always
#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
- set_fact: instance_suffix={{inventory_hostname}}-{{ es_instance_name }}
tags:
- always
- set_fact: pid_dir={{ es_pid_dir }}/{{instance_suffix}}
tags:
- always
- set_fact: log_dir={{ es_log_dir }}/{{instance_suffix}}
tags:
- always
- set_fact: work_dir={{ es_work_dir }}/{{instance_suffix}}
tags:
- always
#Create required directories
- name: Create Directories
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
@ -45,11 +9,6 @@
- "{{work_dir}}"
- "{{log_dir}}"
- "{{conf_dir}}"
- "{{plugin_dir}}"
- set_fact: data_dirs={{ es_data_dirs | append_to_list('/'+instance_suffix) }}
tags:
- always
- name: Create Data Directories
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
@ -112,4 +71,3 @@
- name: Delete Default Logging File
file: dest=/etc/elasticsearch/logging.yml state=absent
- debug: msg="Data Dirs {{data_dirs}}"

View file

@ -0,0 +1,54 @@
# Check for mandatory parameters
- fail: msg="es_instance_name must be specified and cannot be blank"
when: es_instance_name is not defined or es_instance_name == ''
- fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined"
when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '')
- set_fact: multi_cast={{ (es_version | version_compare('2.0', '<') and es_config['discovery.zen.ping.multicast.enabled'] is not defined) or (es_config['discovery.zen.ping.multicast.enabled'] is defined and es_config['discovery.zen.ping.multicast.enabled'])}}
- debug: msg="WARNING - It is recommended you specify the parameter 'http.port' when multicast is disabled"
when: not multi_cast and es_config['http.port'] is not defined
- debug: msg="WARNING - It is recommended you specify the parameter 'transport.tcp.port' when multicast is disabled"
when: not multi_cast and es_config['transport.tcp.port'] is not defined
- debug: msg="WARNING - It is recommended you specify the parameter 'discovery.zen.ping.unicast.hosts' when multicast is disabled"
when: not multi_cast and es_config['discovery.zen.ping.unicast.hosts'] is not defined
#If the user attempts to lock memory they must specify a heap size
- fail: msg="If locking memory with bootstrap.mlockall a heap size must be specified"
when: es_config['bootstrap.mlockall'] is defined and es_config['bootstrap.mlockall'] == True and es_heap_size is not defined
#Don't support xpack on versions < 2.0
- fail: msg="Use of the xpack notation is not supported on versions < 2.0. Marvel-agent and watcher can be installed as plugins. Version > 2.0 is required for shield."
when: es_enable_xpack and es_version | version_compare('2.0', '<')
#Check if working with shield we have an es_api_basic_auth_username and es_api_basic_auth_username - otherwise any http calls wont work
- fail: msg="Enabling shield requires an es_api_basic_auth_username and es_api_basic_auth_password to be provided to allow cluster operations"
when: es_enable_xpack and '"shield" in es_xpack_features' and es_api_basic_auth_username is not defined and es_api_basic_auth_password is not defined
- set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
- set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
- set_fact: conf_dir={{ es_conf_dir }}/{{es_instance_name}}
- set_fact: m_lock_enabled={{ es_config['bootstrap.mlockall'] is defined and es_config['bootstrap.mlockall'] == True }}
#Use systemd for the following distributions:
#Ubuntu 15 and up
#Debian 8 and up
#Centos 7 and up
#Relies on elasticsearch distribution installing a serviced script to determine whether one should be copied.
- set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version | version_compare('8', '>=')) or (ansible_distribution == 'CentOS' and ansible_distribution_version | version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version | version_compare('15', '>=')) }}
- set_fact: instance_sysd_script={{sysd_script | dirname }}/{{es_instance_name}}_{{sysd_script | basename}}
when: use_system_d
#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
- set_fact: instance_suffix={{inventory_hostname}}-{{ es_instance_name }}
- set_fact: pid_dir={{ es_pid_dir }}/{{instance_suffix}}
- set_fact: log_dir={{ es_log_dir }}/{{instance_suffix}}
- set_fact: work_dir={{ es_work_dir }}/{{instance_suffix}}
- set_fact: data_dirs={{ es_data_dirs | append_to_list('/'+instance_suffix) }}

View file

@ -2,26 +2,24 @@
# es_plugins_reinstall will be set to true if elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed
# i.e. we have changed ES version(or we have clean installation of ES), or if no plugins listed. Otherwise it is false and requires explicitly setting.
- set_fact: es_plugins_reinstall=true
when: ((elasticsearch_install_from_package is defined and elasticsearch_install_from_repo.changed) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) or es_plugins is not defined or es_plugins is none
tags:
- always
- set_fact: es_plugins_reinstall={{ ((elasticsearch_install_from_package is defined and elasticsearch_install_from_repo.changed) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) or es_plugins is not defined or es_plugins is none }}
- debug: msg="{{es_plugins_reinstall}}"
- set_fact: list_command="list"
tags:
- always
- set_fact: list_command="--list"
when: es_version | version_compare('2.0', '<')
tags:
- always
#List currently installed plugins
- shell: "{{es_home}}/bin/plugin {{list_command}} | sed -n '1!p' | cut -d '-' -f2-"
#List currently installed plugins - ignore xpack if > v 2.0
- shell: "{{es_home}}/bin/plugin {{list_command}} | sed -n '1!p' | cut -d '-' -f2-{% if es_version | version_compare('2.0', '>') %} | grep -vE '{{supported_xpack_features | join('|')}}|license'{% endif %}"
register: installed_plugins
failed_when: "'ERROR' in installed_plugins.stdout"
changed_when: False
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#This needs to removes any currently installed plugins
- name: Remove elasticsearch plugins
@ -30,6 +28,7 @@
with_items: "{{ installed_plugins.stdout_lines }}"
when: es_plugins_reinstall and installed_plugins.stdout_lines | length > 0 and not 'No plugin detected' in installed_plugins.stdout_lines[0]
notify: restart elasticsearch
register: plugin_installed
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
@ -50,4 +49,5 @@
#Set permissions on plugins directory
- name: Set Plugin Directory Permissions
file: state=directory path={{ plugin_dir }} owner={{ es_user }} group={{ es_group }} recurse=yes
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
when: es_enable_xpack

View file

@ -1,3 +0,0 @@
---

View file

@ -4,30 +4,12 @@
- name: Copy default templates to elasticsearch
copy: src=templates dest=/etc/elasticsearch/ owner={{ es_user }} group={{ es_group }}
notify: load-templates
when: es_templates_fileglob is not defined
- name: Copy templates to elasticsearch
copy: src={{ item }} dest=/etc/elasticsearch/templates owner={{ es_user }} group={{ es_group }}
when: es_templates_fileglob is defined
notify: load-templates
with_fileglob:
- "{{ es_templates_fileglob }}"
- set_fact: http_port=9200
tags:
- always
- set_fact: http_port={{es_config['http.port']}}
when: es_config['http.port'] is defined
tags:
- always
- name: Wait for elasticsearch to startup
wait_for: port={{http_port}} delay=10
- name: Get template files
shell: find . -maxdepth 1 -type f | sed "s#\./##" | sed "s/.json//" chdir=/etc/elasticsearch/templates
register: resultstemplate
- name: Install template(s)
command: "curl -sL -XPUT http://localhost:{{http_port}}/_template/{{item}} -d @/etc/elasticsearch/templates/{{item}}.json"
with_items: "{{ resultstemplate.stdout_lines }}"
- "{{ es_templates_fileglob }}"

View file

@ -1,24 +1,6 @@
---
- set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
tags:
- always
- set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
tags:
- always
- set_fact: conf_dir={{ es_conf_dir }}/{{es_instance_name}}
tags:
- always
- set_fact: plugin_dir={{ es_plugin_dir }}/{{es_instance_name}}
tags:
- always
- set_fact: m_lock_enabled={{ es_config['bootstrap.mlockall'] is defined and es_config['bootstrap.mlockall'] == True }}
tags:
- always
- debug: msg="Node configuration {{ es_config }} "
- name: Include optional user and group creation.
- name: Include optional user and group creation.
when: (es_user_id is defined) and (es_group_id is defined)
include: elasticsearch-optional-user.yml

View file

@ -1,12 +1,14 @@
---
- name: check-parameters
include: checkParameters.yml
tags:
- check
- name: os-specific vars
include_vars: "{{ansible_os_family}}.yml"
tags:
- always
- name: check-set-parameters
include: elasticsearch-parameters.yml
tags:
- always
- include: java.yml
when: es_java_install
tags:
@ -25,10 +27,10 @@
when: es_plugins is defined or es_plugins_reinstall
tags:
- plugins
- include: elasticsearch-shield.yml
when: es_install_shield
#We always execute xpack as we may need to remove features
- include: xpack/elasticsearch-xpack.yml
tags:
- shield
- xpack
- include: elasticsearch-service.yml
tags:
- service

View file

@ -0,0 +1,38 @@
---
#Test if feature is installed
- shell: "{{es_home}}/bin/plugin list | sed -n '1!p' | grep {{item}}"
register: feature_installed
changed_when: False
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Remove Plugin if installed and its not been requested or the ES version has changed
- name: Remove {{item}} plugin
command: >
{{es_home}}/bin/plugin remove shield
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: feature_installed.rc == 0 and (not es_enable_xpack or not '"{{item}}" in es_xpack_features' or es_version_changed)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Install plugin if not installed, or the es version has changed (so removed above), and its been requested
- name: Install {{item}} plugin
command: >
{{es_home}}/bin/plugin install {{item}}
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: (feature_installed.rc == 1 or es_version_changed) and es_enable_xpack and "{{item}}" in es_xpack_features
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"

View file

@ -0,0 +1,55 @@
---
- set_fact: es_version_changed={{ ((elasticsearch_install_from_package is defined and elasticsearch_install_from_repo.changed) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) }}
#enabling xpack installs the license. Not a xpack feature and does not need to be specified - TODO: we should append it to the list if xpack is enabled and remove this
#Check if license is installed
- name: Check License is installed
shell: >
{{es_home}}/bin/plugin list | tail -n +2 | grep license
register: license_installed
ignore_errors: yes
failed_when: "'ERROR' in license_installed.stdout"
changed_when: False
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Remove license if installed and xpack not enabled
- name: Remove license plugin
command: >
{{es_home}}/bin/plugin remove license
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: license_installed.rc == 0 and (not es_enable_xpack or es_version_changed)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Install License if not installed, or it needs to be reinstalled due to ES change (above task will have removed), and its been requested.
- name: Install license plugin
command: >
{{es_home}}/bin/plugin install license
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: (license_installed.rc == 1 or es_version_changed) and es_enable_xpack
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#We loop on all as we may need to remove some features.
- include: elasticsearch-xpack-install.yml
with_items: "{{supported_xpack_features}}"
#Shield configuration
- include: shield/elasticsearch-shield.yml
#Add any feature specific configuration here
- name: Set Plugin Directory Permissions
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes

View file

@ -0,0 +1,64 @@
---
- set_fact: manage_file_users=es_users is defined and es_users.file is defined
#List current users
- name: List Users
shell: cat {{conf_dir}}/shield/users | awk -F':' '{print $1}'
register: current_file_users
when: manage_file_users
changed_when: False
- set_fact: users_to_remove={{ current_file_users.stdout_lines | difference (es_users.file.keys()) }}
when: manage_file_users
#Remove users
- name: Remove Users
command: >
{{es_home}}/bin/shield/esusers userdel {{item}}
when: manage_file_users and (users_to_remove | length > 0)
with_items: "{{users_to_remove}}"
environment:
CONF_DIR: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
- set_fact: users_to_add={{ es_users.file.keys() | difference (current_file_users.stdout_lines) }}
when: manage_file_users
#Add users
- name: Add Users
command: >
{{es_home}}/bin/shield/esusers useradd {{item}} -p {{es_users.file[item].password}}
with_items: "{{users_to_add}}"
when: manage_file_users and users_to_add | length > 0
environment:
CONF_DIR: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
#Set passwords for all users declared - Required as the useradd will not change existing user passwords
- name: Set User Passwords
command: >
{{es_home}}/bin/shield/esusers passwd {{item.key}} -p {{item.value.password}}
with_dict: "{{es_users.file}}"
when: manage_file_users and es_users.file.keys() | length > 0
#Currently no easy way to figure out if the password has changed or to know what it currently is so we can skip.
changed_when: False
environment:
CONF_DIR: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
- set_fact: users_roles={{es_users.file | extract_role_users}}
when: manage_file_users
#Copy Roles files
- name: Copy roles.yml File for Instance
template: src=shield/roles.yml.j2 dest={{conf_dir}}/shield/roles.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
when: es_roles is defined and es_roles.file is defined
#Overwrite users_roles file
- name: Copy User Roles
template: src=shield/users_roles.j2 dest={{conf_dir}}/shield/users_roles mode=0644 force=yes
when: manage_file_users and users_roles | length > 0
#TODO: Support for mapping file

View file

@ -0,0 +1,23 @@
---
#Shield specific configuration done here
#TODO: 1. Skip users with no password defined or error 2. Passwords | length > 6
#-----------------------------FILE BASED REALM----------------------------------------
- include: elasticsearch-shield-file.yml
when: (es_enable_xpack and '"shield" in es_xpack_features') and ((es_users is defined and es_users.file) or (es_roles is defined and es_roles.file is defined))
#-----------------------------NATIVE BASED REALM----------------------------------------
# The native realm requires the node to be started so we do as a handler
- command: /bin/true
notify: load-native-realms
when: (es_enable_xpack and '"shield" in es_xpack_features') and ((es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined))
#---------------------------------------------------------------------
#Ensure shield conf directory is created
- name: Ensure shield conf directory exists
file: path={{ conf_dir }}/shield state=directory owner={{ es_user }} group={{ es_group }}
changed_when: False
when: es_enable_xpack and '"shield" in es_xpack_features'

View file

@ -20,6 +20,4 @@ path.data: {{ data_dirs | array_to_str }}
path.work: {{ work_dir }}
path.logs: {{ log_dir }}
path.plugins: {{ plugin_dir }}
path.logs: {{ log_dir }}

View file

@ -0,0 +1 @@
{{ es_roles.file | to_nice_yaml }}

View file

@ -0,0 +1 @@
{{users_roles | join("\n") }}

View file

@ -1,6 +1,6 @@
require 'config_spec'
describe 'Config Tests v 2.x' do
include_examples 'config::init', "2.2.0"
include_examples 'config::init', "2.3.4"
end

View file

@ -173,28 +173,7 @@ shared_examples 'multi::init' do |es_version,plugins|
end
end
#Multi node plugin tests
describe file('/opt/elasticsearch/plugins/node1') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file('/opt/elasticsearch/plugins/master') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
for plugin in plugins
describe file('/opt/elasticsearch/plugins/node1/'+plugin) do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file('/opt/elasticsearch/plugins/master/'+plugin) do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true | grep '+plugin) do
its(:exit_status) { should eq 0 }
@ -203,6 +182,11 @@ shared_examples 'multi::init' do |es_version,plugins|
describe command('curl -s localhost:9201/_nodes/plugins?pretty=true | grep '+plugin) do
its(:exit_status) { should eq 0 }
end
describe file('/usr/share/elasticsearch/plugins/'+plugin) do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
end
describe file('/etc/init.d/elasticsearch') do

View file

@ -16,7 +16,6 @@ shared_examples 'package::init' do |es_version,plugins|
describe file('/etc/elasticsearch/node1/elasticsearch.yml') do
it { should be_file }
it { should contain 'path.plugins: /usr/share/elasticsearch/plugins/node1' }
it { should contain 'http.port: 9200' }
it { should contain 'transport.tcp.port: 9300' }
it { should contain 'discovery.zen.ping.unicast.hosts: localhost:9300' }
@ -27,8 +26,6 @@ shared_examples 'package::init' do |es_version,plugins|
it { should be_owned_by 'elasticsearch' }
end
describe file('/etc/elasticsearch/node1/scripts/calculate-score.groovy') do
it { should be_file }
it { should be_owned_by 'elasticsearch' }
@ -66,14 +63,14 @@ shared_examples 'package::init' do |es_version,plugins|
end
end
describe file('/usr/share/elasticsearch/plugins/node1') do
describe file('/usr/share/elasticsearch/plugins') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
for plugin in plugins
describe file('/usr/share/elasticsearch/plugins/node1/'+plugin) do
describe file('/usr/share/elasticsearch/plugins/'+plugin) do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end

View file

@ -0,0 +1,215 @@
require 'spec_helper'
shared_examples 'xpack::init' do |es_version|
describe user('elasticsearch') do
it { should exist }
end
describe service('shield_node_elasticsearch') do
it { should be_running }
end
describe package('elasticsearch') do
it { should be_installed }
end
describe file('/etc/elasticsearch/shield_node/elasticsearch.yml') do
it { should be_file }
it { should be_owned_by 'elasticsearch' }
end
describe file('/etc/elasticsearch/shield_node/logging.yml') do
it { should be_file }
it { should be_owned_by 'elasticsearch' }
end
describe file('/etc/elasticsearch/shield_node/elasticsearch.yml') do
it { should contain 'node.name: localhost-shield_node' }
it { should contain 'cluster.name: elasticsearch' }
it { should contain 'path.conf: /etc/elasticsearch/shield_node' }
it { should contain 'path.data: /var/lib/elasticsearch/localhost-shield_node' }
it { should contain 'path.work: /tmp/elasticsearch/localhost-shield_node' }
it { should contain 'path.logs: /var/log/elasticsearch/localhost-shield_node' }
end
describe 'Node listening' do
it 'listening in port 9200' do
expect(port 9200).to be_listening
end
end
describe 'version check' do
it 'should be reported as version '+es_version do
command = command('curl -s localhost:9200 -u es_admin:changeMe | grep number')
expect(command.stdout).to match(es_version)
expect(command.exit_status).to eq(0)
end
end
describe file('/etc/init.d/elasticsearch') do
it { should_not exist }
end
describe file('/etc/default/elasticsearch') do
it { should_not exist }
end
describe file('/etc/sysconfig/elasticsearch') do
it { should_not exist }
end
describe file('/usr/lib/systemd/system/elasticsearch.service') do
it { should_not exist }
end
describe file('/etc/elasticsearch/elasticsearch.yml') do
it { should_not exist }
end
describe file('/etc/elasticsearch/logging.yml') do
it { should_not exist }
end
#Xpack specific tests
describe file('/usr/share/elasticsearch/plugins') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
#Check shield,watcher and license plugins are installed
describe file('/usr/share/elasticsearch/plugins/license') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true -u es_admin:changeMe | grep license') do
its(:exit_status) { should eq 0 }
end
describe file('/usr/share/elasticsearch/plugins/shield') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true -u es_admin:changeMe | grep shield') do
its(:exit_status) { should eq 0 }
end
describe file('/etc/elasticsearch/shield_node/shield') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file('/usr/share/elasticsearch/plugins/watcher') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true -u es_admin:changeMe | grep watcher') do
its(:exit_status) { should eq 0 }
end
describe file('/usr/share/elasticsearch/plugins/kopf') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true -u es_admin:changeMe | grep kopf') do
its(:exit_status) { should eq 0 }
end
#test we haven't installed graph or marvel-agent
describe file('/usr/share/elasticsearch/plugins/graph') do
it { should_not exist }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true -u es_admin:changeMe | grep graph') do
its(:exit_status) { should eq 1 }
end
describe file('/usr/share/elasticsearch/plugins/marvel-agent') do
it { should_not exist }
end
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true -u es_admin:changeMe | grep marvel-agent') do
its(:exit_status) { should eq 1 }
end
#Test users file, users_roles and roles.yml
describe file('/etc/elasticsearch/shield_node/shield/users_roles') do
it { should be_owned_by 'elasticsearch' }
it { should contain 'admin:es_admin' }
it { should contain 'power_user:testUser' }
end
describe file('/etc/elasticsearch/shield_node/shield/users') do
it { should be_owned_by 'elasticsearch' }
it { should contain 'testUser:' }
it { should contain 'es_admin:' }
end
describe file('/etc/elasticsearch/shield_node/shield/roles.yml') do
it { should be_owned_by 'elasticsearch' }
#Test contents as expected
its(:md5sum) { should eq '7800182547287abd480c8b095bf26e9e' }
end
#Test native roles and users are loaded
describe command('curl -s localhost:9200/_shield/user -u es_admin:changeMe | md5sum | grep 557a730df7136694131b5b7012a5ffad') do
its(:exit_status) { should eq 0 }
end
describe command('curl -s localhost:9200/_shield/user -u es_admin:changeMe | grep "{\"kibana4_server\":{\"username\":\"kibana4_server\",\"roles\":\[\"kibana4_server\"\],\"full_name\":null,\"email\":null,\"metadata\":{}}}"') do
its(:exit_status) { should eq 0 }
end
describe command('curl -s localhost:9200/_shield/role -u es_admin:changeMe | grep "{\"logstash\":{\"cluster\":\[\"manage_index_templates\"\],\"indices\":\[{\"names\":\[\"logstash-\*\"\],\"privileges\":\[\"write\",\"delete\",\"create_index\"\]}\],\"run_as\":\[\]}}"') do
its(:exit_status) { should eq 0 }
end
describe command('curl -s localhost:9200/_shield/role -u es_admin:changeMe | md5sum | grep 6d14f09ef1eea64adf4d4a9c04229629') do
its(:exit_status) { should eq 0 }
end
describe file('/etc/elasticsearch/templates') do
it { should be_directory }
it { should be_owned_by 'elasticsearch' }
end
describe file('/etc/elasticsearch/templates/basic.json') do
it { should be_file }
it { should be_owned_by 'elasticsearch' }
end
describe 'Template Installed' do
it 'should be reported as being installed', :retry => 3, :retry_wait => 10 do
command = command('curl -s "localhost:9200/_template/basic" -u es_admin:changeMe')
expect(command.stdout).to match(/basic/)
expect(command.exit_status).to eq(0)
end
end
#This is possibly subject to format changes in the response across versions so may fail in the future
describe 'Template Contents Correct' do
it 'should be reported as being installed', :retry => 3, :retry_wait => 10 do
command = command('curl -s "localhost:9200/_template/basic" -u es_admin:changeMe | md5sum')
expect(command.stdout).to match(/153b1a45daf48ccee80395b85c61e332/)
end
end
#Test contents of Elasticsearch.yml file
describe file('/etc/elasticsearch/shield_node/elasticsearch.yml') do
it { should contain 'shield.authc.realms.file1.order: 0' }
it { should contain 'shield.authc.realms.file1.type: file' }
it { should contain 'shield.authc.realms.native1.order: 1' }
it { should contain 'shield.authc.realms.native1.type: native' }
end
end

View file

@ -2,7 +2,7 @@ require 'multi_spec'
describe 'Multi Tests v 2.x' do
include_examples 'multi::init', "2.2.0", ["kopf","license","marvel-agent"]
include_examples 'multi::init', "2.3.4", ["kopf"]
end

View file

@ -8,5 +8,4 @@
vars:
es_scripts: true
es_templates: true
es_plugin_dir: "/opt/elasticsearch/plugins"
# Plugins installed for this test are specified in .kitchen.yml under the suite definition

View file

@ -2,5 +2,5 @@ require 'package_spec'
describe 'Package Tests v 2.x' do
include_examples 'package::init', "2.2.0", ["kopf","license","marvel-agent"]
include_examples 'package::init', "2.3.4", ["kopf"]
end

View file

@ -2,7 +2,7 @@ require 'standard_spec'
describe 'Standard Tests v 2.x' do
include_examples 'standard::init', "2.2.0"
include_examples 'standard::init', "2.3.4"
end

View file

@ -0,0 +1,5 @@
require 'xpack_spec'
describe 'Xpack Tests v 2.x' do
include_examples 'xpack::init', "2.3.4"
end

View file

@ -0,0 +1,2 @@
---
- host: test-kitchen

View file

@ -0,0 +1,73 @@
---
- name: Elasticsearch Xpack tests
hosts: localhost
roles:
- { role: elasticsearch, es_config: { "http.port": 9200, "transport.tcp.port":9300, discovery.zen.ping.unicast.hosts: "localhost:9300",
"shield.authc.realms.file1.type": "file","shield.authc.realms.file1.order": 0, "shield.authc.realms.native1.type": "native","shield.authc.realms.native1.order": 1 },
es_instance_name: "shield_node" }
vars:
es_templates: true
es_enable_xpack: true
es_plugins:
- plugin: lmenezes/elasticsearch-kopf
version: master
es_xpack_features:
- shield
- watcher
es_api_basic_auth_username: es_admin
es_api_basic_auth_password: changeMe
es_users:
native:
kibana4_server:
password: changeMe
roles:
- kibana4_server
file:
es_admin:
password: changeMe
roles:
- admin
testUser:
password: changeMeAlso!
roles:
- power_user
- user
es_roles:
file:
admin:
cluster:
- all
indices:
- names: '*'
privileges:
- all
power_user:
cluster:
- monitor
indices:
- names: '*'
privileges:
- all
user:
indices:
- names: '*'
privileges:
- read
kibana4_server:
cluster:
- monitor
indices:
- names: '.kibana'
privileges:
- all
native:
logstash:
cluster:
- manage_index_templates
indices:
- names: 'logstash-*'
privileges:
- write
- delete
- create_index

View file

@ -2,4 +2,6 @@
es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch"
es_conf_dir: "/etc/elasticsearch"
sysd_script: "/usr/lib/systemd/system/elasticsearch.service"
init_script: "/etc/init.d/elasticsearch"
init_script: "/etc/init.d/elasticsearch"
# Add newly supported X-Pack features to this list.
supported_xpack_features: ["watcher","marvel-agent","graph","shield"]