
Distributed Log Collection


Use the Logstash filter stage to convert nginx's default access log and error log into JSON and write them to Elasticsearch.

Install nginx

tar xf nginx-1.22.1.tar.gz
cd nginx-1.22.1/
./configure --prefix=/app/nginx
make && make install

Logstash grok patterns

 root@awen:~# vim /usr/share/logstash/vendor/bundle/jruby/2.6.0/gems/logstash-patterns-core-4.3.4/patterns/legacy/grok-patterns 
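The built-in patterns used below (IPORHOST, HTTPDATE, GREEDYDATA and so on) are defined in this file. A custom pattern can be appended here and then referenced by name in a grok filter; a hypothetical example (the name NGINXERRORDATE is made up and not part of the stock pattern set):

# appended to grok-patterns (hypothetical custom pattern)
NGINXERRORDATE %{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}
# it could then replace the inline (?<timestamp>...) capture in the error-log grok below:
#   match => { "message" => "%{NGINXERRORDATE:timestamp} ..." }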

 

root@awen:/etc/logstash/conf.d# cat nginxlog-to-es.conf
input {
file {
path => "/app/nginx/logs/access.log"
type => "nginx-accesslog"
stat_interval => "1"
start_position => "beginning"
}

file {
path => "/app/nginx/logs/error.log"
type => "nginx-errorlog"
stat_interval => "1"
start_position => "beginning"
}
}


filter {
if [type] == "nginx-accesslog" {
grok {
match => { "message" => ["%{IPORHOST:clientip} - %{DATA:username} \[%{HTTPDATE:request-time}\] \"%{WORD:request-method} %{DATA:request-uri} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent_bytes} \"%{DATA:referrer}\" \"%{DATA:useragent}\""] }
remove_field => "message"
add_field => { "project" => "magedu"}
}
mutate {
convert => [ "[response_code]", "integer"]
}
}
if [type] == "nginx-errorlog" {
grok {
match => { "message" => ["(?<timestamp>%{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}) \[%{LOGLEVEL:loglevel}\] %{POSINT:pid}#%{NUMBER:threadid}\: \*%{NUMBER:connectionid} %{GREEDYDATA:message}, client: %{IPV4:clientip}, server: %{GREEDYDATA:server}, request: \"(?:%{WORD:request-method} %{NOTSPACE:request-uri}(?: HTTP/%{NUMBER:httpversion}))\", host: %{GREEDYDATA:domainname}"]}
remove_field => "message"
}
}
}

output {
if [type] == "nginx-accesslog" {
elasticsearch {
hosts => ["10.4.7.134:9200"]
index => "magedu-nginx-accesslog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}

if [type] == "nginx-errorlog" {
elasticsearch {
hosts => ["10.4.7.134:9200"]
index => "magedu-nginx-errorlog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}

}
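Before starting the service, the pipeline syntax can be checked with Logstash's standard test flag, run against this conf file:

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginxlog-to-es.conf -t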

JSON-format logs

root@awen:/etc/logstash/conf.d# cat nginx-json-log-to-es.conf
input {
file {
path => "/var/log/nginx/access.log"
start_position => "end"
type => "nginx-json-accesslog"
stat_interval => "1"
codec => json
}
}

filter {
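# note: this error-log grok block appears to be carried over from the previous example; the input above only produces events with type "nginx-json-accesslog", so the "nginx-errorlog" condition never matches in this pipeline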
if [type] == "nginx-errorlog" {
grok {
match => { "message" => ["(?<timestamp>%{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}) \[%{LOGLEVEL:loglevel}\] %{POSINT:pid}#%{NUMBER:threadid}\: \*%{NUMBER:connectionid} %{GREEDYDATA:message}, client: %{IPV4:clientip}, server: %{GREEDYDATA:server}, request: \"(?:%{WORD:request-method} %{NOTSPACE:request-uri}(?: HTTP/%{NUMBER:httpversion}))\", host: %{GREEDYDATA:domainname}"]}
remove_field => "message"
}
}
}


output {
if [type] == "nginx-json-accesslog" {
elasticsearch {
hosts => ["10.4.7.136:9200"]
index => "nginx-accesslog-2.107-%{+YYYY.MM.dd}"
user => "magedu"
password => "123456"
}}
}

cat /app/nginx/conf/nginx.conf
(the nginx.conf output was shown as a screenshot; the access_json log_format it defines is listed in full further below)

root@awen:/etc/logstash/conf.d# cat nginx-json-to-es.conf
input {
file {
path => "/app/nginx/logs/access.log"
start_position => "end"
type => "nginx-json-accesslog0"
stat_interval => "1"
codec => json
}

}


output {
if [type] == "nginx-json-accesslog0" {
elasticsearch {
hosts => ["10.4.7.136:9200"]
index => "magedu-nginx-accesslog1-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}

}

cat /app/nginx/conf/nginx.conf
log_format access_json '{"@timestamp":"$time_iso8601",'
'"host":"$server_addr",'
'"clientip":"$remote_addr",'
'"size":$body_bytes_sent,'
'"responsetime":$request_time,'
'"upstreamtime":"$upstream_response_time",'
'"upstreamhost":"$upstream_addr",'
'"http_host":"$host",'
'"uri":"$uri",'
'"domain":"$host",'
'"xff":"$http_x_forwarded_for",'
'"referer":"$http_referer",'
'"tcp_xff":"$proxy_protocol_addr",'
'"http_user_agent":"$http_user_agent",'
'"status":"$status"}';
access_log logs/access.log access_json;
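For reference, a request logged with this access_json format produces a single JSON line roughly like the following (all field values here are made up), which the json codec in the Logstash input parses directly into event fields:

{"@timestamp":"2022-12-04T10:00:00+08:00","host":"10.4.7.139","clientip":"10.4.7.1","size":612,"responsetime":0.002,"upstreamtime":"-","upstreamhost":"-","http_host":"10.4.7.139","uri":"/index.html","domain":"10.4.7.139","xff":"-","referer":"-","tcp_xff":"","http_user_agent":"curl/7.81.0","status":"200"}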


Collecting Java logs with Logstash and merging multi-line events

 

Logstash collects Java service logs; the Elasticsearch server's own logs are used as the example. Install Logstash on the ES server:

root@es1:~# dpkg -i /usr/local/src/logstash-8.5.1-amd64.deb

multiline (merge) codec plugin documentation: https://www.elastic.co/guide/en/logstash/current/plugins-codecs-multiline.html
The codec is set to multiline so that related lines are merged into a single event:
    input {
      stdin {
        codec => multiline {
          pattern => "pattern, a regexp"   # regex that marks the start (or end) of an event
          negate => "true" or "false"      # merge lines that match, or lines that do not match
          what => "previous" or "next"     # merge direction: into the previous event or the next one
        }
      }
    }

root@awen:/etc/logstash/conf.d# cat es-log-to-es.conf
input {
file {
path => "/data/eslogs/magedu-es-cluster1.log"
type => "eslog"
stat_interval => "1"
start_position => "beginning"
codec => multiline {
#pattern => "^\["
pattern => "^\[[0-9]{4}\-[0-9]{2}\-[0-9]{2}"
negate => "true"
what => "previous"
}
}
}

output {
if [type] == "eslog" {
elasticsearch {
hosts => ["10.4.7.136:9200"]
index => "magedu-eslog-%{+YYYY.ww}"
user => "magedu"
password => "123456"
}}
}
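To illustrate the merge rule: any line that does not start with "[YYYY-MM-DD" fails the pattern and, with negate => true and what => previous, is appended to the previous event, so a Java stack trace stays attached to the log entry that produced it. A made-up example of raw input lines:

[2022-12-04T10:00:00,123][ERROR][o.e.b.Bootstrap] [es1] node validation exception
java.lang.IllegalStateException: failed to obtain node locks
        at org.elasticsearch.env.NodeEnvironment.<init>(NodeEnvironment.java:285)
        ... 6 more
[2022-12-04T10:00:01,456][INFO ][o.e.n.Node] [es1] stopping ...

The first four lines are merged into one event; the last line starts a new event.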

 

Collecting syslog: Logstash collects syslog from network devices (haproxy is used here to simulate a network device). The device sends its logs to Logstash, Logstash forwards them to Elasticsearch, and Kibana displays them.


root@awen:~# apt install haproxy
root@awen:~# cat /etc/haproxy/haproxy.cfg

Modify where haproxy's log is written, so that rsyslog forwards it to Logstash.

In rsyslog forwarding rules, @@ means forward over TCP (a single @ means UDP).
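A minimal sketch of the rsyslog change, assuming haproxy logs to the local0 facility (the Debian/Ubuntu default haproxy.cfg) and that Logstash listens on TCP port 514 on 10.4.7.139; the facility, port, and file name are assumptions, not taken from the original:

# /etc/rsyslog.d/99-haproxy-forward.conf (hypothetical file)
# @@host:port forwards over TCP; a single @ would forward over UDP
local0.* @@10.4.7.139:514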

systemctl restart rsyslog
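The Logstash pipeline for this step is not shown above; a minimal sketch using the syslog input plugin, assuming port 514 and the same Elasticsearch credentials as the other examples (the network-syslog index name matches the one cleaned up by the deletion script further below; binding a port below 1024 needs extra privileges, so a higher port such as 1514 can be used on both sides instead):

input {
  syslog {
    port => 514
    type => "network-syslog"
  }
}

output {
  if [type] == "network-syslog" {
    elasticsearch {
      hosts => ["10.4.7.136:9200"]
      index => "network-syslog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }
  }
}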

tail -f /var/log/logstash/logstash-plain.log 

 

Collecting TCP logs: Logstash listens on a TCP port and forwards the received events to Elasticsearch.


root@awen:/etc/logstash/conf.d# cat tcp-log-to-es.conf
input {
tcp {
port => 9889
host => "10.4.7.139"
type => "magedu-tcplog"
mode => "server"
}
}

output {
if [type] == "magedu-tcplog" {
elasticsearch {
hosts => ["10.4.7.136:9200"]
index => "magedu-tcplog-%{+YYYY.MM.dd}"
user => "magedu"
password => "123456"
}}
}

tail -f /var/log/logstash/logstash-plain.log

 /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp-log-to-es.conf

 systemctl restart logstash

Startup succeeded; send some test data from another host:

root@es3:~# echo "ERROR tcplog message1" > /dev/tcp/10.4.7.139/9889

 

 

root@es3:~# echo "nc test" | nc 10.4.7.139 9889
root@es3:~# nc 10.4.7.139 9889 < /etc/passwd   # redirect a file into nc to send its contents

 

 

Deleting an Elasticsearch index

root@es1:~# curl -u magedu:123456 -X DELETE "http://10.4.7.136:9200/magedu-nginx-accesslog1-2022.12.04?pretty"
{
"acknowledged" : true
}
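Existing index names can be listed first with the _cat API (same credentials as above):

curl -u magedu:123456 "http://10.4.7.136:9200/_cat/indices?v"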


root@es1:~# cat del-index
#!/bin/bash
DATE=`date -d "4 days ago" +%Y.%m.%d`
index="
network-syslog
"
for NAME in ${index};do
  INDEX_NAME="$NAME-$DATE"
  #echo $INDEX_NAME
  curl -u magedu:123456 -XDELETE http://10.4.7.136:9200/${INDEX_NAME}?pretty
  if [ $? -eq 0 ];then
    echo $INDEX_NAME,"deleted"
  else
    echo $INDEX_NAME,"delete failed"
  fi
done
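To run the cleanup automatically, the script can be scheduled with cron (hypothetical schedule and log path, assuming the script sits in root's home directory as shown above):

# crontab -e on es1
30 1 * * * /bin/bash /root/del-index >> /tmp/del-index.log 2>&1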

Logstash collection via Redis:

 

 

Redis is installed on the logstash2 host.

cat /etc/redis/redis.conf
bind 0.0.0.0
requirepass 123456

systemctl restart redis-server.service

Redis now accepts and buffers the service log messages.

root@logstash1:/etc/logstash/conf.d# cat redis.conf
input {
file {
path => "/app/nginx/log/access.log"
type => "magedu-nginx-accesslog"
start_position => "beginning"
stat_interval => "1"
codec => "json" #对json格式日志进行json解析
}

file {
path => "/app/nginx/logs/error.log"
type => "magedu-nginx-errorlog"
start_position => "beginning"
stat_interval => "1"
}
}

filter {
if [type] == "magedu-nginx-errorlog" {
grok {
match => { "message" => ["(?<timestamp>%{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}) \[%{LOGLEVEL:loglevel}\] %{POSINT:pid}#%{NUMBER:threadid}\: \*%{NUMBER:connectionid} %{GREEDYDATA:message}, client: %{IPV4:clientip}, server: %{GREEDYDATA:server}, request: \"(?:%{WORD:request-method} %{NOTSPACE:request-uri}(?: HTTP/%{NUMBER:httpversion}))\", host: %{GREEDYDATA:domainname}"]}
remove_field => "message" #删除源日志
}
}
}


output {
if [type] == "magedu-nginx-accesslog" {
redis {
data_type => "list"
key => "magedu-nginx-accesslog"
host => "10.4.7.132"
port => "6379"
db => "0"
password => "123456"
}
}
if [type] == "magedu-nginx-errorlog" {
redis {
data_type => "list"
key => "magedu-nginx-errorlog"
host => "10.4.7.132"
port => "6379"
db => "0"
password => "123456"
}
}
}

Once the messages have been consumed, they are removed from Redis (the lists are drained).
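Whether events are being queued and then drained can be checked from redis-cli with LLEN on the list keys, for example:

redis-cli -h 10.4.7.132 -a 123456 LLEN magedu-nginx-accesslog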

 

Run a test:

root@logstash2:/etc/logstash/conf.d# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-to-es.conf 

The second Logstash instance consumes the logs from Redis and sends them to Elasticsearch:

root@logstash2:/etc/logstash/conf.d# cat redis-to-es.conf
input {
redis {
data_type => "list"
key => "magedu-nginx-accesslog"
host => "10.4.7.132"
port => "6379"
db => "0"
password => "123456"
}
redis {
data_type => "list"
key => "magedu-nginx-errorlog"
host => "10.4.7.132"
port => "6379"
db => "0"
password => "123456"
}
}

output {
if [type] == "magedu-nginx-accesslog" {
elasticsearch {
hosts => ["10.4.7.136:9200"]
index => "redis-magedu-nginx-accesslog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}

if [type] == "magedu-nginx-errorlog" {
elasticsearch {
hosts => ["10.4.7.136:9200"]
index => "redis-magedu-nginx-errorlog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}
}

Filebeat collects the nginx logs, Redis provides temporary buffering, and the data is then consumed into the ES cluster.

 

dpkg -i filebeat-8.5.1-amd64.deb

root@logstash1:/etc/filebeat# systemctl restart filebeat.service 

root@logstash2:~# redis-cli
127.0.0.1:6379> auth 123456
OK
127.0.0.1:6379> select 1
OK
127.0.0.1:6379[1]> keys *
1) "filebeat-magedu"
127.0.0.1:6379[1]>

The service logs are fed into Filebeat:

root@logstash1:/etc/filebeat# grep -v "#" filebeat.yml | grep -v "^$"
filebeat.inputs:
- type: filestream
  id: magedu-nginx-accesslog-id
  enabled: true
  paths:
    - /app/nginx/logs/access.log
  fields:
    type: magedu-app1-accesslog
    project: magedu
- type: filestream
  id: magedu-nginx-errorlog-id
  enabled: true
  paths:
    - /app/nginx/logs/error.log
  fields:
    type: magedu-app1-errorlog
    project: magedu
- type: filestream
  id: magedu-nginx-syslog-id
  enabled: true
  paths:
    - /var/log/syslog
  fields:
    type: magedu-app1-syslog
    project: magedu
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
output.logstash:
  hosts: ["10.4.7.139:5044"]
  enabled: true
  loadbalance: true
  worker: 1
  compression_level: 3
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
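Filebeat can verify the configuration and the connection to the Logstash output before a restart (standard filebeat subcommands):

filebeat test config -c /etc/filebeat/filebeat.yml
filebeat test output -c /etc/filebeat/filebeat.yml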

Filebeat forwards the logs to Logstash:


root@logstash1:/etc/filebeat# grep -v "#" /etc/logstash/conf.d/beats-magedu-to-redis.conf | grep -v "^$"
input {
beats {
port => 5044
codec => "json"
}
}
output {
if [fields][type] == "magedu-app1-accesslog" {
redis {
host => "10.4.7.132"
password => "123456"
port => "6379"
db => "0"
key => "magedu-app1-accesslog"
data_type => "list"
}
}
if [fields][type] == "magedu-app1-errorlog" {
redis {
host => "10.4.7.132"
password => "123456"
port => "6379"
db => "0"
key => "magedu-app1-errorlog"
data_type => "list"
}
}
if [fields][type] == "magedu-app1-syslog" {
redis {
host => "10.4.7.132"
password => "123456"
port => "6379"
db => "0"
key => "magedu-app1-syslog"
data_type => "list"
}
}
}

 

The second Logstash instance then consumes these Redis lists and forwards the data to Elasticsearch.
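That second pipeline is not shown above; a sketch modeled on the earlier redis-to-es.conf, assuming the same Redis instance and the keys written by beats-magedu-to-redis.conf (the index names here are illustrative):

input {
  redis { host => "10.4.7.132" password => "123456" port => "6379" db => "0" key => "magedu-app1-accesslog" data_type => "list" }
  redis { host => "10.4.7.132" password => "123456" port => "6379" db => "0" key => "magedu-app1-errorlog" data_type => "list" }
  redis { host => "10.4.7.132" password => "123456" port => "6379" db => "0" key => "magedu-app1-syslog" data_type => "list" }
}

output {
  if [fields][type] == "magedu-app1-accesslog" {
    elasticsearch { hosts => ["10.4.7.136:9200"] index => "magedu-app1-accesslog-%{+yyyy.MM.dd}" user => "magedu" password => "123456" }
  }
  if [fields][type] == "magedu-app1-errorlog" {
    elasticsearch { hosts => ["10.4.7.136:9200"] index => "magedu-app1-errorlog-%{+yyyy.MM.dd}" user => "magedu" password => "123456" }
  }
  if [fields][type] == "magedu-app1-syslog" {
    elasticsearch { hosts => ["10.4.7.136:9200"] index => "magedu-app1-syslog-%{+yyyy.MM.dd}" user => "magedu" password => "123456" }
  }
}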

 

Filebeat output directly to Elasticsearch

The configuration is as follows:

output.elasticsearch:
    hosts: ["http://10.4.7.136:9200"]
    index: "-%{[agent.version]}-%{+yyyy.MM.dd}"
    username: "magedu"
    password: "123456"

setup.template.name: "magedu-template"
setup.template.pattern: "magedu-*"
setup.ilm.enabled: false

 

From: https://www.cnblogs.com/tshxawen/p/16937135.html