1. After helm upgrade, Kibana could not collect data; the release had to be deleted and reinstalled before testing. The externalAccess parameter is enabled here.
# helm upgrade kafka -n logging .
# helm -n logging delete kafka                      // Kibana cannot collect container data
# vim /root/EFK/k8s/efk-7.10.2/kafka/kafka/values.yaml
externalAccess:
  ## Enable Kubernetes external cluster access to Kafka brokers
  enabled: true
  service:
    ## Service type. Allowed values: LoadBalancer or NodePort
    type: NodePort
    ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount
    ## Example:
    ## nodePorts:
    ##   - 30001
    ##   - 30002
    nodePorts: [30001]
    ## Use worker host ips
    useHostIPs: true
# helm -n logging install kafka .                   // immediately after deployment, Kibana can collect container data
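Before falling back to delete/install, it can be worth confirming which values the live release actually carries; a quick check with stock Helm subcommands (release name and namespace as above):
# helm -n logging get values kafka                  // user-supplied values of the deployed release
# helm -n logging get values kafka -a               // all computed values, including chart defaults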
[root@k8s-master01 kafka]# k -n logging get svc
NAME               TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
kafka              ClusterIP   10.16.35.19    <none>        9092/TCP            19m
kafka-0-external   NodePort    10.16.102.31   <none>        9094:30001/TCP      19m
kafka-headless     ClusterIP   None           <none>        9092/TCP,9093/TCP   19m
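Reachability of the external listener can be checked from outside the cluster before pointing Filebeat at it. A minimal sketch with kcat (formerly kafkacat); it assumes kcat is installed on the external host and that 192.168.31.217 is a worker node IP (the address used in step 2 below):
# kcat -b 192.168.31.217:30001 -L                   // list broker metadata; with useHostIPs the advertised address is the node IP, so it stays reachable externally
# kcat -b 192.168.31.217:30001 -t filebeat-mi -C -o -5 -e   // once Filebeat has shipped data: consume the last 5 messages of the topic and exit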
2. On AirNet-FDP1, Filebeat is used to collect logs; hosts outside the cluster connect through the kafka-0-external service.
filebeat.inputs:
- input_type: log
  paths:
    - /var/log/messages
    # - /home/cdatc/AirNet/bin/log/FDP1_scc_20221205_03.log   # ---OK
    # - /home/cdatc/AirNet/bin/log/*                          # ---OK: all files in the directory, including archived tar.gz files
  fields:
    # to_test: "kafka-clusterIP: 10.16.35.19:9092"            # ---filebeat process on node04: NOK
    to_test: "kafka-0-external: 192.168.31.217:30001"         # ---filebeat process on AirNet-FDP1: OK
output.kafka:
  hosts: ["192.168.31.217:30001"]
  topic: "filebeat-mi"
  codec.json:
    pretty: false
  keep_alive: 30s
# ./filebeat -e -d "*"                              // ---Kibana collects the logs normally
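Independently of Kibana, delivery into the topic can also be verified from inside the cluster; a sketch assuming the Bitnami Kafka image, where the Kafka CLI tools are on the PATH (topic name taken from the Filebeat config above):
# kubectl -n logging exec -it kafka-0 -- kafka-console-consumer.sh --bootstrap-server kafka:9092 --topic filebeat-mi --from-beginning --max-messages 5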