1. Service allocation
es1: 192.168.90.22 (Elasticsearch + Kibana)
es2: 192.168.90.23 (Elasticsearch + Cerebro)
## Modify the hosts file so the nodes can reach each other by domain name, as shown below
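## A minimal /etc/hosts addition for both machines, assuming the hostnames es1/es2 map to the IPs above:
vim /etc/hosts
192.168.90.22 es1
192.168.90.23 es2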
2. Before setting up, raise the user limits for the maximum number of open files, maximum number of threads, maximum memory, and other resources
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 131072
* soft nproc 4096
* hard nproc 4096
vim /etc/security/limits.d/90-nproc.conf
* soft nproc 4096
Note: if these values are not set large enough, Elasticsearch will report an error at startup; adjust them according to the error message.
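## A quick sanity check (log in again as the user that will run Elasticsearch so the new limits apply):
ulimit -Sn   ## soft open-file limit, should show 65536
ulimit -Hn   ## hard open-file limit, should show 131072
ulimit -Su   ## soft process/thread limit, should show 4096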
3. Installing the Java Environment
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel java-1.8.0-openjdk-headless
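## Confirm the JDK is usable before continuing:
java -version   ## should report something like openjdk version "1.8.0"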
4. Deploying Elasticsearch
yum localinstall -y elasticsearch-6.2.2.rpm
mkdir -pv /data/elk/{data,logs}
chown -R elasticsearch.elasticsearch /data/elk/
## I downloaded all the rpm packages in advance from https://www.elastic.co/downloads
vim /etc/elasticsearch/elasticsearch.yml
cluster.name: elk
node.name: es1    ## named differently on the two nodes; I just use the hostname
path.data: /data/elk/data
path.logs: /data/elk/logs
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["es1", "es2"]
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
(CentOS 6 does not support seccomp, and bootstrap.system_call_filter defaults to true, so the bootstrap check fails and Elasticsearch refuses to start; that is why it is disabled here.)
## Just start it with the service command: service elasticsearch start
## If Elasticsearch fails to start, http://blog.csdn.net/qq942477618/article/details/53414983 may give you a clue, but base the actual fix on your own error message.
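## Once both nodes are up, a quick way to confirm the cluster formed (either IP works):
curl http://192.168.90.22:9200/_cluster/health?pretty   ## expect "number_of_nodes": 2 and status green or yellow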
5. Install Kibana on es1
yum localinstall -y kibana-6.2.2-x86_64.rpm
## Edit the configuration file and then you can start it directly
[root@es1 ~]# vim /etc/kibana/kibana.yml
server.port: 5601
server.host: "192.168.90.22"
elasticsearch.url: "http://localhost:9200"
[root@es1 ~]# /etc/init.d/kibana start
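## A quick check that Kibana came up (it can take a little while to start listening):
netstat -lntp | grep 5601   ## should show the node process bound to 192.168.90.22:5601
curl -I http://192.168.90.22:5601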
6. Install Cerebro on es2 for visual cluster management
wget https://github.com/lmenezes/cerebro/releases/download/v0.7.2/cerebro-0.7.2.zip
[root@es2 ~]# unzip cerebro-0.7.2.zip && cd cerebro-0.7.2
[root@es2 cerebro-0.7.2]# vim conf/application.conf
hosts = [
  {
    host = "http://192.168.90.23:9200"
    name = "elk"
  }
]
## Start it
./bin/cerebro -Dhttp.port=1234 -Dhttp.address=192.168.90.23 &
## Then access it through port 1234
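## Quick check that Cerebro is listening, then open http://192.168.90.23:1234 in a browser and connect it to http://192.168.90.23:9200:
netstat -lntp | grep 1234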
7. Installing Logstash
## Logstash is usually installed on the hosts whose logs you want to collect, but since this is just an experiment I only installed it on es1.
yum localinstall -y logstash-6.2.2.rpm
## The index here is only for testing, so the config is kept simple; in practice, write it according to the actual log format on the host.
vim /etc/logstash/conf.d/test.conf
input { stdin { } }
output {
  elasticsearch {
    action => "index"
    hosts  => "192.168.90.22:9200"
    index  => "test"
  }
}
## If you have nginx installed, you can write it like this instead
input {
  file {
    path => "/var/log/nginx/*.log"        ## log location; * matches all logs, or you can pick only access.log
    type => "test"
    start_position => "beginning"         ## read the file from the beginning
  }
}
filter {
  grok {
    match => {
      "message" => "%{IPORHOST:ip} - %{USER:user} \[%{HTTPDATE:time}\] \"%{WORD:http_method} %{NOTSPACE:request} HTTP/%{NUMBER:http_version}\" %{NUMBER:status} (?:%{NUMBER:bytes}|-) \"(?:%{URI:http_referer}|-)\" \"%{GREEDYDATA:user_agent}\""   ## nginx default log format
    }
  }
}
output {
  elasticsearch {
    action => "index"
    hosts  => "192.168.90.22:9200"
    index  => "nginx"                     ## index name
  }
  stdout {
    codec => rubydebug
  }
}
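## Before starting, you can ask Logstash to validate whichever config you ended up with (syntax-check flag available in Logstash 6.x):
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/test.conf --config.test_and_exit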
## Start Logstash
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/test.conf
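## With the stdin config, type a few lines into the running Logstash, then quickly confirm the documents reached Elasticsearch:
curl 'http://192.168.90.22:9200/_cat/indices?v'          ## the test (or nginx) index should appear
curl 'http://192.168.90.22:9200/test/_search?pretty'     ## documents typed on stdin show up here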
8. Visit Kibana
## Open http://192.168.90.22:5601 and finally build charts as needed
9. Firewall ports
## By default:
Elasticsearch nodes talk to each other on 9300
Logstash writes to Elasticsearch on 9200 (the HTTP API)
The Kibana page is on 5601
Cerebro listens on whatever port you passed at startup (1234 here)
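## On CentOS 6 the firewall is iptables; a minimal sketch to open the ports used above (adjust to your own policy):
iptables -I INPUT -p tcp --dport 9200 -j ACCEPT
iptables -I INPUT -p tcp --dport 9300 -j ACCEPT
iptables -I INPUT -p tcp --dport 5601 -j ACCEPT
iptables -I INPUT -p tcp --dport 1234 -j ACCEPT
service iptables save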