Architecture:
Completed deployments
1. ES cluster and Kibana deployment
https://blog.51cto.com/yht1990/6080981
2. Kafka + ZooKeeper cluster
https://blog.51cto.com/yht1990/6081518
Prepare the sidecar image (Filebeat)
Build the image on any available server
[root@yw-test filebeat]# cat Dockerfile
FROM docker.elastic.co/beats/filebeat:7.9.0
COPY filebeat.yml /usr/share/filebeat/filebeat.yml
USER root
RUN chown root:filebeat /usr/share/filebeat/filebeat.yml
USER filebeat
[root@yw-test filebeat]#
[root@yw-test filebeat]#
[root@yw-test filebeat]# cat filebeat.yml
filebeat.inputs:
- input_type: log
  paths:
    - /data/logic/log/*.log
  fields:
    log_topic: "${TOPIC_ID}"
  #tail_files: true
  clean_inactive: 48h
  ignore_older: 24h
  close_inactive: 1m
output.kafka:
  hosts: "${KAFKA_SERVER}"
  topic: '%{[fields.log_topic]}'
  partition.round_robin:
    reachable_only: true
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
logging.level: error
[root@yw-test filebeat]# docker build . -t 10.0.7.12/k8s/filebeat/sidecar:7.9.0
[root@yw-test filebeat]# docker push 10.0.7.12/k8s/filebeat/sidecar:7.9.0
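Optionally, the baked-in configuration can be validated with Filebeat's own test subcommand. The TOPIC_ID and KAFKA_SERVER values below are throwaway placeholders, only there so the variable expansion succeeds; this check is a sketch and not part of the original workflow.
# optional: validate the filebeat.yml baked into the image; env values are placeholders
docker run --rm \
  -e TOPIC_ID=placeholder-topic \
  -e KAFKA_SERVER='["127.0.0.1:9092"]' \
  10.0.7.12/k8s/filebeat/sidecar:7.9.0 \
  test config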
Extension: have Filebeat send directly to Logstash
sh-4.2$ cat filebeat.yml
filebeat.inputs:
- input_type: log
  paths:
    - /var/log/pods/test-app*/*/*.log
    - /var/log/pods/uat-app*/*/*.log
    - /var/log/pods/pro-app*/*/*.log
  clean_inactive: 48h
  ignore_older: 24h
  close_inactive: 1m
output.logstash:
  hosts: "${LOGSTASH_SERVER}"
logging.level: error
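For this variant the receiving side needs a Beats input on Logstash, which the post itself does not show. A minimal sketch, assuming the conventional Beats port 5044 and the default conf.d directory of an RPM install:
# minimal beats input to pair with output.logstash above; port 5044 is an assumption
cat > /etc/logstash/conf.d/beats-input.conf <<'EOF'
input {
  beats {
    port => 5044
  }
}
EOF
LOGSTASH_SERVER in the sidecar's environment would then point at that port, e.g. '["<logstash-host>:5044"]'.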
Create the Deployment
[root@k8s-test-manager src]# cat test-clavaplus-consume-sidecar-filebeat.yml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: test-clavaplus-consume-deployment-label
  name: test-clavaplus-consume-deployment
  namespace: test-app
spec:
  replicas: 2
  selector:
    matchLabels:
      app: test-clavaplus-consume-selector
  template:
    metadata:
      labels:
        app: test-clavaplus-consume-selector
    spec:
      imagePullSecrets:
        - name: myregistrykey
      containers:
        - name: test-clavaplus-consume
          image: registry.cn-shenzhen.aliyuncs.com/test/swoole:test-clavaplus-consume-518b279-2023-0227-1833
          imagePullPolicy: IfNotPresent
          #imagePullPolicy: Always
          ports:
            - containerPort: 9501
              protocol: TCP
              name: http
          resources:
            limits:
              cpu: 1
              memory: "512Mi"
            requests:
              cpu: 200m
              memory: "512Mi"
          volumeMounts:
            - name: applogs
              mountPath: /data/logic/log
        - name: sidecar-filebeat
          image: 10.0.7.12/k8s/filebeat/sidecar:7.9.0
          #imagePullPolicy: IfNotPresent
          imagePullPolicy: Always
          env:
            - name: "TOPIC_ID"
              value: "test-clavaplus-consume"
            - name: "KAFKA_SERVER"
              value: '["10.0.7.53:9092", "10.0.7.54:9092", "10.0.7.55:9092"]'
          volumeMounts:
            - name: applogs
              mountPath: /data/logic/log
      volumes:
        - name: applogs   # emptyDir volume shared by the app container and the sidecar, so the sidecar can collect the app container's logs
          emptyDir: {}
Run the Deployment
kubectl apply -f test-clavaplus-consume-sidecar-filebeat.yml
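To confirm that both containers came up and that the sidecar started cleanly (assuming kubectl is pointed at the test cluster):
kubectl -n test-app get pods -l app=test-clavaplus-consume-selector
kubectl -n test-app logs deployment/test-clavaplus-consume-deployment -c sidecar-filebeat --tail=20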
Use a Kafka tool to check whether data is arriving
As you can see, the topic already contains data
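Without a GUI tool, the console consumer works just as well. The /opt/kafka/bin path is an assumption; adjust it to wherever Kafka is installed:
# read a few messages from the topic to confirm the sidecar is shipping logs
/opt/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server 10.0.7.53:9092 \
  --topic test-clavaplus-consume \
  --from-beginning --max-messages 5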
Deploy Logstash
yum -y install java-1.8.0-openjdk
yum -y install https://mirror.tuna.tsinghua.edu.cn/elasticstack/7.x/yum/7.9.0/logstash-7.9.0.rpm
systemctl start logstash
systemctl enable logstash
tail -f /var/log/logstash/logstash-plain.log
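A quick check that the JDK and Logstash installed correctly before wiring up the pipeline:
java -version
/usr/share/logstash/bin/logstash --version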
Create the Logstash configuration file
# Parse the collected JSON log lines as JSON
codec => json {
  charset => "UTF-8"   # so Chinese text can be searched
}
# Use the message field as the source for JSON parsing
filter {
  json {
    source => "message"
  }
  mutate {
    # drop fields we do not need
    remove_field => ["message","@version","agent","ecs"]
  }
}
input {
  kafka {
    bootstrap_servers => "10.0.7.53:9092,10.0.7.54:9092,10.0.7.55:9092"
    topics => ["test-clavaplus-consume"]
    codec => json {
      charset => "UTF-8"
    }
  }
}
filter {
  json {
    source => "message"
  }
  if [fields][log_topic] == "test-clavaplus-consume" {
    mutate {
      remove_field => ["message","@version","agent","ecs"]
    }
  }
}
output {
  if [fields][log_topic] == "test-clavaplus-consume" {
    elasticsearch {
      hosts => ["10.0.7.46:9200","10.0.7.47:9200","10.0.7.48:9200"]
      index => "test-clavaplus-consume-%{+YYYY.MM.dd}"
    }
  }
}
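The post does not name the pipeline file. Assuming it was saved as /etc/logstash/conf.d/kafka-to-es.conf, the syntax can be checked before restarting:
# --config.test_and_exit only validates the pipeline syntax, it does not start Logstash
/usr/share/logstash/bin/logstash --path.settings /etc/logstash \
  --config.test_and_exit -f /etc/logstash/conf.d/kafka-to-es.conf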
Start Logstash
systemctl restart logstash && tail -f /var/log/logstash/logstash-plain.log
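Before switching to Kibana, the daily index can be checked directly against Elasticsearch (assuming the cluster has no authentication enabled):
# confirm that today's index exists and is receiving documents
curl -s 'http://10.0.7.46:9200/_cat/indices/test-clavaplus-consume-*?v'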
Log in to Kibana, create the index pattern, and view the data
Query syntax:
https://www.elastic.co/guide/en/kibana/7.9/kuery-query.html#kuery-query