### 1. ELK 简介
### 2. 工作流程
这里我们还采用了一个与 Logstash 配合的小插件 Filebeat(可通过链接点过去了解一下),它可以动态监测日志文件的变化。所以整个流程是:
### 3. 关键配置及整合配置文件
#=========================== Filebeat prospectors =============================
# Filebeat side of the pipeline: tail the sample log files and ship each line
# to the local Logstash instance on port 5044.
filebeat.prospectors:
  # Each - is a prospector. Most options can be set at the prospector level, so
  # you can use different prospectors for various configurations.
  # Below are the prospector specific configurations.
  - type: log
    # Change to true to enable this prospector configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      #- /var/log/*.log
      # Single-quoted so the Windows backslashes and the `*` glob survive as a
      # literal string regardless of the YAML parser.
      - 'D:\ELK\sampleLog\*.log'

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: true
  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts (must match the `beats` input port in the Logstash
  # pipeline below).
  hosts: ["localhost:5044"]
# ---- Logstash pipeline (Logstash config DSL, not YAML; e.g. logstash.conf) ----

# Input: accept events from Filebeat over the Beats protocol on port 5044
# (matches the Filebeat `output.logstash.hosts` setting above).
input {
beats {
port => "5044"
}
}

# Filter: parse incoming log lines with grok.
# NOTE(review): the second pattern matches a field named `message1`, which
# Filebeat does not emit by default — it likely never applies; verify against
# the actual event structure. `remoteUer` also looks like a typo for
# `remoteUser`, but renaming it would change the indexed field name, so it is
# left as-is here.
filter {
grok {
match => {
"message" => "%{COMBINEDAPACHELOG}"
"message1" => "(?<remoteUer>=[A-Z]+[0-9]+)
(?<request>Begin\sRequest\[(\/[a-zA-Z]*)+\.do)
(?<time>[0-9]+\/[0-9]+ [0-9]+:[0-9]+:[0-9]+)" }
}
}

# Output: index parsed events into the local Elasticsearch node, using one
# index per day ("aa-YYYY.MM.dd").
# NOTE(review): `document_type` relies on mapping types, which were deprecated
# in Elasticsearch 6 and removed later — confirm the target ES version still
# accepts it.
output {
elasticsearch {
hosts => "localhost:9200"
index => "aa-%{+YYYY.MM.dd}"
document_type => "wjb_log"
}
}
# ---------------------------------- Cluster -----------------------------------
cluster.name: elasticsearch

# ------------------------------------ Node ------------------------------------
node.name: lcc_node
# This node may be elected master.
node.master: true
# This node stores index data (the default is true).
node.data: true

# ---------------------------------- Network -----------------------------------
network.host: 127.0.0.1
http.port: 9200

# Allow cross-origin HTTP requests from any origin (needed by browser-based
# tools such as elasticsearch-head); quoted because a bare `*` would be read
# as a YAML alias indicator.
http.cors.enabled: true
http.cors.allow-origin: "*"

# Deleting data with a single command can have disastrous consequences, so
# require destructive actions to name indices explicitly — `_all` and
# wildcard deletes are refused.
action.destructive_requires_name: true