Latest ELK Stack Version Test (2): Configuration
For background, read this article:
http://blog.chinaunix.net/uid-25057421-id-5567766.html
The detailed configuration is as follows:
1. Client Configuration
1. nginx log format
log_format logstash_json '{"@timestamp":"$time_iso8601",'
                         '"host":"$server_addr",'
                         '"clientip":"$remote_addr",'
                         '"size":$body_bytes_sent,'
                         '"responsetime":$request_time,'
                         '"upstreamtime":"$upstream_response_time",'
                         '"upstreamhost":"$upstream_addr",'
                         '"http_host":"$host",'
                         '"url":"$uri",'
                         '"referrer":"$http_referer",'
                         '"xff":"$http_x_forwarded_for",'
                         '"agent":"$http_user_agent",'
                         '"status":"$status"}';

access_log /data/wwwlogs/access_jerrymin.test.com.log logstash_json;
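With this format each request is written as one JSON object per line, which Logstash can parse directly with the json codec. A quick sanity check after editing the vhost (a minimal sketch; the binary path assumes a /usr/local/nginx source install, adjust to your environment):

# verify the configuration, reload, then look at the newest log entry
/usr/local/nginx/sbin/nginx -t
/usr/local/nginx/sbin/nginx -s reload
tail -n 1 /data/wwwlogs/access_jerrymin.test.com.log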
2. filebeat configuration file
filebeat:
  prospectors:
    -
      paths:
        - /data/wwwlogs/access_jerrymin.test.com.log
      document_type: jerrymin.test.com
output:
  logstash:
    enabled: true
    hosts: ["192.168.0.58:5044"]
shipper:
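Before starting the shipper it is worth validating the YAML (a minimal sketch; the -configtest flag and the /etc/filebeat/filebeat.yml path assume a Filebeat 1.x package install):

# check the configuration, then start filebeat as a service
filebeat -configtest -c /etc/filebeat/filebeat.yml
service filebeat start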
3. topbeat configuration file
input:
  # In seconds, defines how often to read server statistics
  period: 10
  # Regular expression to match the processes that are monitored
  # By default, all the processes are monitored
  procs: [".*"]
  # Statistics to collect (all enabled by default)
  stats:
    system: true
    proc: true
    filesystem: true
output:
  ### Elasticsearch as output
  elasticsearch:
    hosts: ["192.168.0.58:9200"]
shipper:
logging:
  files:
    rotateeverybytes: 10485760 # = 10 MB
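Topbeat writes straight to Elasticsearch, so a quick way to confirm data is flowing is to list the indices on the server (topbeat creates a daily topbeat-YYYY.MM.dd index by default; adjust the host if yours differs):

# list indices; a topbeat-* index should appear once topbeat is running
curl 'http://192.168.0.58:9200/_cat/indices?v'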
2. Server Configuration
1. logstash configuration file
[root@localhost logstash]# cat /etc/logstash/conf.d/nginxconf.json
input {
    beats {
        port => 5044
        codec => json
    }
}
filter {
    mutate {
        split => ["upstreamtime", ","]
    }
    mutate {
        convert => ["upstreamtime", "float"]
    }
}
output {
    elasticsearch {
        hosts => "192.168.0.58:9200"
        sniffing => true
        manage_template => false
        #index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
        index => "filebeat-%{type}-%{+YYYY.MM.dd}"
        document_type => "%{[@metadata][type]}"
    }
}
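The filter splits the comma-separated $upstream_response_time values and converts them to floats so they can be aggregated in Kibana. To validate the pipeline file before (re)starting the service (a sketch; the /opt/logstash path assumes a Logstash 2.x RPM/DEB install):

# syntax-check the pipeline, then restart logstash
/opt/logstash/bin/logstash --configtest -f /etc/logstash/conf.d/nginxconf.json
service logstash restart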
2. elasticsearch configuration file
[root@localhost logstash]# cat /etc/elasticsearch.yml | grep -Ev "^#|^$"
path.data: /data
path.logs: /data/elklogs
network.host: 192.168.0.58
http.port: 9200
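With network.host bound to 192.168.0.58, a simple check that the node is up is to query it directly over the standard Elasticsearch HTTP API:

# returns the node name, cluster name and version as JSON if the node is healthy
curl http://192.168.0.58:9200/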
3. kibana configuration file
[root@localhost config]# cat /var/kibana/config/kibana.yml
# Kibana is served by a back end server. This controls which port to use.
server.port: 5601
# The host to bind the server.
server.host: "0.0.0.0"
# The Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://192.168.0.58:9200"
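After starting Kibana it listens on port 5601; checking it locally confirms the service is reachable before putting the proxy in front of it (a sketch; the start command assumes the archive was unpacked under /var/kibana, matching the config path above):

# start kibana in the background and verify it answers on 5601
nohup /var/kibana/bin/kibana > /dev/null 2>&1 &
curl -I http://localhost:5601/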
3. Tengine reverse proxy configuration
cat /usr/local/nginx/conf/vhosts_all/kibana.conf
server
{
    listen 8888;
    server_name 192.168.0.58;
    index index.html index.shtml;
    location / {
        proxy_pass http://localhost:5601;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        auth_basic "Please input Username and Password";
        auth_basic_user_file /usr/local/nginx/conf/.pass_file;
    }
    access_log /data/wwwlogs/access.kibana.log access;
}
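The auth_basic directives require the password file to exist before the vhost is loaded. One way to create it and apply the configuration (a sketch; htpasswd comes from the httpd-tools package, and the user name admin is only an example):

# create the basic-auth credentials, check the config, then reload Tengine
htpasswd -c /usr/local/nginx/conf/.pass_file admin
/usr/local/nginx/sbin/nginx -t
/usr/local/nginx/sbin/nginx -s reload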
4. Log in to the platform
http://192.168.0.58:8888
1. Create an index pattern
2. Create a visualization
3. Create a dashboard