Adding 6.x support with bootstrap user creation

This commit is contained in:
Shri Bodas 2018-01-08 16:59:44 -08:00 committed by Michael Russell
parent db1079ee4d
commit 9115bb4dff
13 changed files with 119 additions and 8 deletions
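
For context, a minimal usage sketch (not part of this commit) of how the change would typically be exercised: a play that pins `es_version` to a 6.x release and enables the security feature so the new `version_compare` branches and bootstrap-password tasks take effect. The hosts group, role name, instance name, and password are placeholder assumptions; the variable names are the ones that appear in the diff below.

```yaml
# Hedged sketch, not part of this commit: apply the role against a 6.x release.
# Hosts group, role name, instance name, and password value are placeholders.
- hosts: elasticsearch
  roles:
    - role: elasticsearch
      es_instance_name: "node1"
      es_version: "6.1.1"
      es_enable_xpack: true
      es_xpack_features: ["security"]
      es_api_basic_auth_username: "elastic"
      es_api_basic_auth_password: "changeMe"
```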

View file

@ -23,7 +23,7 @@ es_pid_dir: "/var/run/elasticsearch"
es_data_dirs: "/var/lib/elasticsearch"
es_log_dir: "/var/log/elasticsearch"
es_max_open_files: 65536
es_max_threads: 2048
es_max_threads: "{{ 2048 if ( es_version | version_compare('6.0.0', '<')) else 8192 }}"
es_max_map_count: 262144
es_allow_downgrades: false
es_enable_xpack: false

View file

@ -19,6 +19,7 @@
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#if es_plugins_reinstall is set to true we remove ALL plugins
@ -47,6 +48,7 @@
register: plugin_removed
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
- name: Install elasticsearch plugins
@ -60,6 +62,7 @@
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
ES_JAVA_OPTS: "{% if item.proxy_host is defined and item.proxy_host != '' and item.proxy_port is defined and item.proxy_port != ''%} -Dhttp.proxyHost={{ item.proxy_host }} -Dhttp.proxyPort={{ item.proxy_port }} -Dhttps.proxyHost={{ item.proxy_host }} -Dhttps.proxyPort={{ item.proxy_port }} {% elif es_proxy_host is defined and es_proxy_host != '' %} -Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }} {% endif %}"
until: plugin_installed.rc == 0
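
The `ES_JAVA_OPTS` expression above prefers per-plugin `proxy_host`/`proxy_port` fields and falls back to the role-wide proxy variables; a hedged sketch of the role-wide fallback (host and port are placeholder values, not part of this commit):

```yaml
# Hedged example values: role-wide proxy settings that the ES_JAVA_OPTS template
# above renders into -Dhttp(s).proxyHost / -Dhttp(s).proxyPort flags.
es_proxy_host: "proxy.example.com"
es_proxy_port: 3128
```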

View file

@ -10,6 +10,12 @@
yum: name={{ java }} state={{java_state}}
when: ansible_os_family == 'RedHat'
- name: correct java version selected
alternatives:
name: java
path: /usr/bin/java8
link: /usr/bin/java
- name: Refresh java repo
become: yes
apt: update_cache=yes

View file

@ -10,6 +10,7 @@
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
@ -24,6 +25,7 @@
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
@ -42,6 +44,7 @@
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
- name: Delete x-pack zip file
@ -59,5 +62,6 @@
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
ES_JAVA_OPTS: "{% if es_proxy_host is defined and es_proxy_host != '' %}-Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }}{% endif %}"

View file

@ -11,3 +11,8 @@
- name: Set Plugin Directory Permissions
become: yes
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
#Make sure elasticsearch.keystore has correct permissions
- name: Set elasticsearch.keystore Permissions
become: yes
file: state=file path={{ conf_dir }}/elasticsearch.keystore owner={{ es_user }} group={{ es_group }}

View file

@ -21,6 +21,7 @@
when: manage_file_users
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
- set_fact: users_to_add={{ es_users.file.keys() | difference (current_file_users.stdout_lines) }}
@ -36,6 +37,7 @@
no_log: True
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
#Set passwords for all users declared - required because useradd will not change existing user passwords
@ -50,6 +52,7 @@
no_log: True
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
- set_fact: users_roles={{es_users.file | extract_role_users () }}

View file

@ -11,6 +11,23 @@
- es_enable_xpack and '"security" in es_xpack_features'
- (es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined) or (es_role_mapping is defined)
#-----------------------------Create Bootstrap User-----------------------------------
- name: Check if bootstrap password is set
command: >
{{es_home}}/bin/elasticsearch-keystore list
register: list_keystore
environment:
ES_PATH_CONF: "{{ conf_dir }}"
- name: Create Bootstrap password for elastic user
shell: echo "{{es_api_basic_auth_password}}" | {{es_home}}/bin/elasticsearch-keystore add -x 'bootstrap.password'
when:
- es_api_basic_auth_username == 'elastic' and 'bootstrap.password' not in list_keystore.stdout_lines
environment:
ES_PATH_CONF: "{{ conf_dir }}"
no_log: true
#-----------------------------FILE BASED REALM----------------------------------------
- include: elasticsearch-security-file.yml
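
As a follow-up check (not part of this commit), a hedged sketch of confirming the bootstrap password took effect by authenticating as the `elastic` user against the node's HTTP API; the host and port are assumptions.

```yaml
# Hedged verification sketch: authenticate as the elastic user with the bootstrap
# password once the node is up. localhost:9200 is an assumption; adjust to the
# instance's http settings.
- name: Verify elastic bootstrap password
  uri:
    url: "http://localhost:9200/_xpack/security/_authenticate"
    user: "{{ es_api_basic_auth_username }}"
    password: "{{ es_api_basic_auth_password }}"
    force_basic_auth: yes
    status_code: 200
  no_log: true
```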

View file

@ -10,6 +10,7 @@ ES_HOME={{es_home}}
# Elasticsearch configuration directory
CONF_DIR={{conf_dir}}
ES_PATH_CONF={{conf_dir}}
# Elasticsearch data directory
DATA_DIR={{ data_dirs | array_to_str }}
@ -74,3 +75,9 @@ MAX_LOCKED_MEMORY=unlimited
{% if es_max_map_count is defined %}
MAX_MAP_COUNT={{es_max_map_count}}
{% endif %}
# Specifies the maximum number of threads that can be started.
# Elasticsearch requires a minimum of 2048.
{% if es_max_threads is defined %}
MAX_THREADS={{ es_max_threads }}
{% endif %}

View file

@ -14,7 +14,10 @@ node.name: {{inventory_hostname}}-{{es_instance_name}}
#################################### Paths ####################################
# Path to directory containing configuration (this file and logging.yml):
{% if (es_version | version_compare('6.0.0', '<')) %}
path.conf: {{ conf_dir }}
{% endif %}
path.data: {{ data_dirs | array_to_str }}

View file

@ -60,6 +60,7 @@ DATA_DIR={{ data_dirs | array_to_str }}
# Elasticsearch configuration directory
CONF_DIR={{conf_dir}}
ES_PATH_CONF={{ conf_dir }}
# Maximum number of VMA (Virtual Memory Areas) a process can own
{% if es_max_map_count is defined %}
@ -91,7 +92,11 @@ fi
# Define other required variables
PID_FILE="$PID_DIR/$NAME.pid"
DAEMON=$ES_HOME/bin/elasticsearch
{% if (es_version | version_compare('6.0.0', '<')) %}
DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR"
{% else %}
DAEMON_OPTS="-d -p $PID_FILE"
{% endif %}
export ES_JAVA_OPTS
export JAVA_HOME
@ -156,6 +161,10 @@ case "$1" in
ulimit -l $MAX_LOCKED_MEMORY
fi
if [ -n "$MAX_THREADS" ]; then
ulimit -u $MAX_THREADS
fi
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
fi

View file

@ -46,6 +46,7 @@ MAX_MAP_COUNT={{es_max_map_count}}
LOG_DIR="{{log_dir}}"
DATA_DIR={{ data_dirs | array_to_str }}
CONF_DIR="{{conf_dir}}"
ES_PATH_CONF="{{ conf_dir }}"
PID_DIR="{{pid_dir}}"
@ -74,6 +75,7 @@ export JAVA_HOME
export ES_INCLUDE
export ES_JVM_OPTIONS
export ES_STARTUP_SLEEP_TIME
export ES_PATH_CONF
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
@ -120,6 +122,9 @@ start() {
if [ -n "$MAX_LOCKED_MEMORY" ]; then
ulimit -l $MAX_LOCKED_MEMORY
fi
if [ -n "$MAX_THREADS" ]; then
ulimit -u $MAX_THREADS
fi
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
fi
@ -135,7 +140,11 @@ start() {
cd $ES_HOME
echo -n $"Starting $prog: "
# if not running, start it up here, usually something like "daemon $exec"
{% if (es_version | version_compare('6.0.0', '<')) %}
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR
{% else %}
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d
{% endif %}
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile

View file

@ -11,25 +11,52 @@ appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.rolling.fileName = ${sys:es.logs}.log
{% else %}
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
{% endif %}
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
{% else %}
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
{% endif %}
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
{% if not (es_version | version_compare('6.0.0', '<')) %}
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
{% endif %}
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
{% else %}
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
{% endif %}
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
{% else %}
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
{% endif %}
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
@ -43,10 +70,18 @@ logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
{% else %}
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
{% endif %}
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
{% else %}
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
{% endif %}
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
@ -59,10 +94,18 @@ logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
{% else %}
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
{% endif %}
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
{% else %}
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
{% endif %}
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1

View file

@ -7,6 +7,7 @@ After=network-online.target
[Service]
Environment=ES_HOME={{es_home}}
Environment=CONF_DIR={{conf_dir}}
Environment=ES_PATH_CONF={{conf_dir}}
Environment=DATA_DIR={{ data_dirs | array_to_str }}
Environment=LOG_DIR={{log_dir}}
Environment=PID_DIR={{pid_dir}}
@ -17,14 +18,18 @@ WorkingDirectory={{es_home}}
User={{es_user}}
Group={{es_group}}
{% if (es_version | version_compare('6.0.0', '<')) %}
ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec
{% endif %}
ExecStart={{es_home}}/bin/elasticsearch \
-p ${PID_DIR}/elasticsearch.pid \
--quiet \
{% if (es_version | version_compare('6.0.0', '<')) %}
-Edefault.path.logs=${LOG_DIR} \
-Edefault.path.data=${DATA_DIR} \
-Edefault.path.conf=${CONF_DIR}
-Edefault.path.conf=${CONF_DIR} \
{% endif %}
--quiet
# StandardOutput is configured to redirect to journalctl since
@ -41,9 +46,6 @@ StandardError=inherit
LimitNOFILE={{es_max_open_files}}
{% endif %}
# Specifies the maximum number of processes
LimitNPROC=2048
# Specifies the maximum number of bytes of memory that may be locked into RAM
# Set to "infinity" if you use the 'bootstrap.memory_lock: true' option
# in elasticsearch.yml and 'MAX_LOCKED_MEMORY=unlimited' in {{instance_default_file}}