logstash.yml
# Settings file in YAML
#
# Settings can be specified either in hierarchical form, e.g.:
#
#   pipeline:
#     batch:
#       size: 125
#       delay: 5
#
# Or as flat keys:
#
#   pipeline.batch.size: 125
#   pipeline.batch.delay: 5
#
#------------Node Identity------------
#
# Use a descriptive name for the node:
#
# node.name: test
#
# If omitted the node name will default to the machine's host name
#
#------------Data Path------------------
#
# Which directory should be used by Logstash and its plugins
# for any persistent needs. Defaults to LOGSTASH_HOME/data
#
# Data location
path.data: /var/lib/logstash
#
#------------Pipeline Settings--------------
#
# The ID of the pipeline.
#
# pipeline.id: main
#
# Set the number of workers that will, in parallel, execute the filters+outputs
# stage of the pipeline.
#
# This defaults to the number of the host's CPU cores.
#
# pipeline.workers: 2
#
# How many events to retrieve from inputs before sending to filters+workers
#
# pipeline.batch.size: 125
#
# How long to wait in milliseconds while polling for the next event
# before dispatching an undersized batch to filters+outputs
#
# pipeline.batch.delay: 50
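#
# As an illustrative tuning sketch (not from the stock file): on a 4-core host
# you might raise workers and batch size together; workers * batch.size bounds
# the number of in-flight events, so larger values also raise heap usage.
#
# pipeline.workers: 4
# pipeline.batch.size: 250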
#
# Force Logstash to exit during shutdown even if there are still inflight
# events in memory. By default, Logstash will refuse to quit until all
# received events have been pushed to the outputs.
#
# WARNING: enabling this can lead to data loss during shutdown
#
# pipeline.unsafe_shutdown: false
#
#------------Pipeline Configuration Settings--------------
#
# Where to fetch the pipeline configuration for the main pipeline
#
# path.config:
#
# Pipeline configuration string for the main pipeline
#
# config.string:
#
# At startup, test if the configuration is valid and exit (dry run)
#
# config.test_and_exit: false
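#
# As a sketch, a one-off config check could set the following (illustrative;
# the same check is usually run with the -t / --config.test_and_exit
# command-line flag instead):
#
# path.config: /etc/logstash/conf.d/*.conf
# config.test_and_exit: true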
#
# Periodically check if the configuration has changed and reload the pipeline
# This can also be triggered manually through the SIGHUP signal
#
# config.reload.automatic: true
#
# How often to check if the pipeline configuration has changed (in seconds)
#
# config.reload.interval: 3s
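#
# For example, to have a running Logstash pick up pipeline edits without a
# restart (illustrative values, not the stock defaults):
#
# config.reload.automatic: true
# config.reload.interval: 10s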
#
# Show fully compiled configuration as debug log message
# NOTE: --log.level must be 'debug'
#
# config.debug: false
#
# When enabled, process escaped characters such as \n and \" in strings in the
# pipeline configuration files.
#
# config.support_escapes: false
#
#------------Module Settings---------------
# Define modules here. Module definitions must be defined as an array.
# The simple way to see this is to prepend each 'name' with a '-', and keep
# all associated variables under the 'name' they are associated with, and
# above the next, like this:
#
# modules:
#   - name: MODULE_NAME
#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
#
# Module variable names must be in the format of
#
# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
#
# modules:
#
#------------Cloud Settings---------------
# Define Elastic Cloud settings here.
# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
# and it may have a label prefix e.g. staging:dXMtZ...
# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
# cloud.id: <identifier>
#
# Format of cloud.auth is: <user>:<pass>
# This is optional
# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
# cloud.auth: elastic:<password>
#
#------------Queuing Settings--------------
#
# Internal queuing model, "memory" for legacy in-memory based queuing and
# "persisted" for disk-based acked queueing. Default is memory
#
# queue.type: memory
#
# If using queue.type: persisted, the directory path where the data files will be stored.
# Default is path.data/queue
#
# path.queue:
#
# If using queue.type: persisted, the page data files size. The queue data consists of
# append-only data files separated into pages. Default is 64mb
#
# queue.page_capacity: 64mb
#
# If using queue.type: persisted, the maximum number of unread events in the queue.
# Default is 0 (unlimited)
#
# queue.max_events: 0
#
# If using queue.type: persisted, the total capacity of the queue in number of bytes.
# If you would like more unacked events to be buffered in Logstash, you can increase the
# capacity using this setting. Please make sure your disk drive has capacity greater than
# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
# whichever criteria is reached first
# Default is 1024mb or 1gb
#
# queue.max_bytes: 1024mb
#
# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.acks: 1024
#
# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.writes: 1024
#
# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
# Default is 1000, 0 for no periodic checkpoint.
#
# queue.checkpoint.interval: 1000
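#
# Pulling the queue settings together, an illustrative persisted-queue setup
# (the path and sizes are examples; make sure the disk actually has the
# headroom):
#
# queue.type: persisted
# path.queue: /var/lib/logstash/queue
# queue.max_bytes: 4gb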
#
#------------Dead-letter Queue Settings--------------
# Flag to turn on dead-letter queue.
#
# dead_letter_queue.enable: false
#
# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
# will be dropped if they would increase the size of the dead letter queue beyond this setting.
# Default is 1024mb
#
# dead_letter_queue.max_bytes: 1024mb
#
# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
# Default is path.data/dead_letter_queue
#
# path.dead_letter_queue:
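#
# A minimal sketch of turning the dead-letter queue on (the path is
# illustrative); queued entries are read back with the dead_letter_queue
# input plugin:
#
# dead_letter_queue.enable: true
# path.dead_letter_queue: /var/lib/logstash/dead_letter_queue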
#
#------------Metrics Settings--------------
#
# Bind address for the metrics REST endpoint
#
# http.host: "127.0.0.1"
#
# Bind port for the metrics REST endpoint, this option also accepts a range
# (9600-9700) and Logstash will pick up the first available port.
#
# http.port: 9600-9700
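#
# An illustrative override: bind the metrics API to all interfaces on a fixed
# port (anything beyond 127.0.0.1 makes the endpoint reachable remotely, so
# restrict access accordingly):
#
# http.host: "0.0.0.0"
# http.port: 9600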
#
#------------Debugging Settings--------------
#
# Options for log.level:
#   * fatal
#   * error
#   * warn
#   * info (default)
#   * debug
#   * trace
#
# log.level: info
#
# Log location
path.logs: /var/log/logstash
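#
# For troubleshooting, log.level can be raised temporarily; per the note in
# the configuration section above, config.debug only prints when the level is
# debug. An illustrative combination:
#
# log.level: debug
# config.debug: true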
#
#------------Other Settings--------------
#
# Where to find custom plugins
# path.plugins: []
# xpack.monitoring.elasticsearch.url: http://192.168.10.196:9200
# xpack.monitoring.elasticsearch.username: logstash_system
# xpack.monitoring.elasticsearch.password: changeme
jvm.options
## JVM configuration
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
-Xms512m
-Xmx512m
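# As a sizing sketch (illustrative values, not part of the stock file): keep
# Xms and Xmx equal so the heap is never resized at runtime, and raise both
# together if Logstash hits OutOfMemoryError or spends its time in GC, e.g.:
#-Xms1g
#-Xmx1g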
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
-XX:+UseParNewGC
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
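# Note (not part of the stock file): these ParNew/CMS flags assume the
# Java 8 era JVMs Logstash shipped against; CMS is deprecated in later JDKs
# and these flags are removed in JDK 14+, so drop them on newer runtimes.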
## Optimizations
# Disable calls to System#gc
-XX:+DisableExplicitGC
## Locale
# Set the locale language
#-Duser.language=en
# Set the locale country
#-Duser.country=US
# Set the locale variant, if any
#-Duser.variant=
## Basic
# Set the I/O temp directory
#-Djava.io.tmpdir=$HOME
# Set to headless, just in case
-Djava.awt.headless=true
# Ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# Use our provided JNA always versus the system one
#-Djna.nosys=true
## Heap dumps
# Generate a heap dump when an allocation from the Java heap fails
# Heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# Specify an alternative path for heap dumps
# Ensure the directory exists and has sufficient space
#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof
## GC logging
#-XX:+PrintGCDetails
#-XX:+PrintGCTimeStamps
#-XX:+PrintGCDateStamps
#-XX:+PrintClassHistogram
#-XX:+PrintTenuringDistribution
#-XX:+PrintGCApplicationStoppedTime
# Log GC status to a file with time stamps
# Ensure the directory exists
#-Xloggc:${LS_GC_LOG_FILE}