简介
随着机房内的服务器和网络设备增加,日志管理和查询就成了让系统管理员头疼的事。系统管理员通常面临问题如下:
基于上述原因,在当前的网络环境中搭建一台用于日志集中管理的Rsyslog日志服务器就显得十分有必要了——没有日志集中管理和分析系统,设备发出的告警信息就难以被及时发现和处理。
部署架构
主机清单
hostname | ip | software |
---|---|---|
syslog | 192.168.99.50 | Rsyslog Filebeat |
elk-node1 | 192.168.99.185 | Elasticsearch Logstash Kibana |
elk-node2 | 192.168.99.186 | Elasticsearch |
系统环境及软件版本
CentOS Linux release 7.5.1804 (Core)
Elasticsearch-6.8.4
Kibana-6.8.4
Logstash-6.8.4
Filebeat-6.8.4
Rsyslog-8.24.0
# setenforce 0
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
firewall-cmd --add-service=syslog --permanent
firewall-cmd --reload
[root@ZABBIX-Server ~]# rpm -qa |grep rsyslog
rsyslog-8.24.0-16.el7.x86_64
centos 7 默认会安装Rsyslog服务
Rsyslog 配置
[root@ZABBIX-Server ~]# egrep -v "^#|^$" /etc/rsyslog.conf
# Receive remote syslog over UDP on port 514.
$ModLoad imudp
$UDPServerRun 514
# Receive remote syslog over TCP on port 514.
$ModLoad imtcp
$InputTCPServerRun 514
# Working directory for rsyslog queue/state files.
$WorkDirectory /var/lib/rsyslog
# Use the traditional timestamp format when writing log files.
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
# Include any additional configuration snippets.
$IncludeConfig /etc/rsyslog.d/*.conf
# Local messages come in via imjournal, not the local log socket.
$OmitLocalLogging on
$IMJournalStateFile imjournal.state
# local6/local5/local4 are excluded (".none") so network-device logs go
# only to their per-vendor files below and are not duplicated in
# /var/log/messages.
*.info;mail.none;authpriv.none;cron.none;local6.none;local5.none;local4.none /var/log/messages
authpriv.* /var/log/secure
mail.* -/var/log/maillog
cron.* /var/log/cron
*.emerg :omusrmsg:*
uucp,news.crit /var/log/spooler
local7.* /var/log/boot.log
# Dynamic-file templates: one log file per sending device IP.
# H3C devices send with facility local6.
$template h3c,"/mnt/h3c/%FROMHOST-IP%.log"
local6.* ?h3c
# Huawei devices send with facility local5.
$template huawei,"/mnt/huawei/%FROMHOST-IP%.log"
local5.* ?huawei
# Ruijie devices send with facility local4.
$template ruijie,"/mnt/ruijie/%FROMHOST-IP%.log"
local4.* ?ruijie
Rsyslog参数介绍
$ModLoad imtcp # imtcp是模块名,支持tcp协议
$ModLoad imudp # imudp是模块名,支持udp协议
$InputTCPServerRun 514
$UDPServerRun 514 #允许514端口接收使用UDP和TCP协议转发过来的日志
*.info;mail.none;authpriv.none;cron.none;local6.none;local5.none;local4.none /var/log/messages
#系统默认没有添加local6.none;local5.none;local4.none 命令,网络日志在写入对应的文件的同时会写入/var/log/messages 中
检查rsyslog服务状态并重启服务
[root@ZABBIX-Server ~]# netstat -auntlp |grep -w 514
tcp 0 0 0.0.0.0:514 0.0.0.0:* LISTEN 25339/rsyslogd
tcp6 0 0 :::514 :::* LISTEN 25339/rsyslogd
udp 0 0 0.0.0.0:514 0.0.0.0:* 25339/rsyslogd
udp6 0 0 :::514 :::* 25339/rsyslogd
[root@ZABBIX-Server ~]# systemctl restart rsyslog
[root@ZABBIX-Server ~]# ls /mnt/huawei/ /mnt/h3c/ /mnt/ruijie/
/mnt/h3c/:
172.16.9.253.log 172.17.1.199.log 172.17.1.21.log 172.17.1.22.log 172.17.1.30.log 192.168.99.253.log 192.168.99.254.log
/mnt/huawei/:
192.168.99.1.log 192.168.99.21.log 192.168.99.22.log
/mnt/ruijie/:
192.168.99.22.log
网络设备将日志指向syslog服务器,注意不同厂商的设备对应的local 不同,对应关系如下:
/mnt/h3c --- local6
/mnt/huawei --- local5
/mnt/ruijie --- local4
网络设备配置
Huawei:
info-center loghost source Vlanif99
info-center loghost 192.168.99.50 facility local5
H3C:
info-center loghost source Vlan-interface99
info-center loghost 192.168.99.50 facility local6
Ruijie:
logging buffered warnings
logging source interface VLAN 99
logging facility local4
logging server 192.168.99.50
在rsyslog 日志服务器上安装filebeat ,通过 filebeat 读取不同设备目录下的日志文件进行关键字过滤 、标签封装输出到logstash进行数据格式清洗。
[root@ZABBIX-Server ~]# egrep -v "^#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
# One input per vendor directory; the tag drives the vendor-specific grok
# parsing in Logstash. include_lines ships only lines matching the
# failure / link-state keyword regexes; everything else is discarded.
- type: log
  enabled: true
  paths:
    - /mnt/huawei/*
  tags: ["huawei"]
  include_lines: ['Failed','failed','error','ERROR','\bDOWN\b','\bdown\b','\bUP\b','\bup\b']
  # drop_fields is a processor, so it must be nested under `processors`
  # (per-input processors are supported since Filebeat 6.7); a bare
  # drop_fields key at input level is rejected by the config parser.
  processors:
    - drop_fields:
        fields: ["beat","input_type","source","offset","prospector"]
- type: log
  paths:
    - /mnt/h3c/*
  tags: ["h3c"]
  include_lines: ['Failed','failed','error','ERROR','\bDOWN\b','\bdown\b','\bUP\b','\bup\b']
  processors:
    - drop_fields:
        fields: ["beat","input_type","source","offset","prospector"]
- type: log
  paths:
    - /mnt/ruijie/*
  tags: ["ruijie"]
  include_lines: ['Failed','failed','error','ERROR','\bDOWN\b','\bdown\b','\bUP\b','\bup\b']
  processors:
    - drop_fields:
        fields: ["beat","input_type","source","offset","prospector"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
# Ship events to the Logstash beats input on elk-node1.
output.logstash:
  hosts: ["192.168.99.185:5044"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
logstash 配置
对filebeat传来的日志根据标签进行数据格式清洗,将处理完成的日志数据传到es上存储,并在kibana上做进一步的可视化展示。
[root@elk-node1 ~]# egrep -v "^#|^$" /etc/logstash/conf.d/networklog.conf
# Logstash pipeline: Beats in -> per-vendor grok parse -> Elasticsearch out.
input {
# Receive events from Filebeat on the default Beats port.
beats {
port => 5044
}
}
filter {
# Parse according to the vendor tag set by Filebeat. H3C messages carry
# an extra year token after the syslog timestamp, hence its own pattern.
if "huawei" in [tags] {
grok{
match => {"message" => "%{SYSLOGTIMESTAMP:time} %{DATA:hostname} %{GREEDYDATA:info}"}
}
}
else if "h3c" in [tags] {
grok{
match => {"message" => "%{SYSLOGTIMESTAMP:time} %{YEAR:year} %{DATA:hostname} %{GREEDYDATA:info}"}
}
}
else if "ruijie" in [tags] {
grok{
match => {"message" => "%{SYSLOGTIMESTAMP:time} %{DATA:hostname} %{GREEDYDATA:info}"}
}
}
# Drop the raw message and Beats bookkeeping fields once hostname/info
# have been extracted; only the cleaned fields are indexed.
mutate {
remove_field => ["message","time","year","offset","tags","path","host","@version","[log]","[prospector]","[beat]","[input][type]","[source]"]
}
}
output{
# rubydebug stdout output is for debugging; consider removing in production.
stdout{codec => rubydebug}
elasticsearch{
# Daily indices; the Kibana index pattern must match "networklogs-*".
index => "networklogs-%{+YYYY.MM.dd}"
hosts => ["192.168.99.185:9200"]
sniffing => false
}
}
Kibana配置
创建名为 “networklogs-*” 的索引模式匹配存储的网络设备日志索引
Discover 数据
创建可视化数据表
kibana的数据表可导出为CSV文件
创建可视化饼图
查看每台设备日志量占比,日志量大的设备要重点关注
网络设备日志分析监控可视化仪表盘