0. Create the ELK configuration directory
mkdir -p /home/songyan/data/docker/elk
1. Deploy Elasticsearch
docker pull elasticsearch:8.7.0
docker network create elastic
docker run -d --name es --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:8.7.0
mkdir -p /home/songyan/data/docker/elk/es/config
mkdir -p /home/songyan/data/docker/elk/es/data
docker cp es:/usr/share/elasticsearch/config /home/songyan/data/docker/elk/es
cd /home/songyan/data/docker/elk/es
chmod 777 -R config/
docker rm -f es
docker run -it --name es --net elastic -p 9200:9200 -p 9300:9300 -p 5601:5601 -e "discovery.type=single-node" -v /home/songyan/data/docker/elk/es/config:/usr/share/elasticsearch/config elasticsearch:8.7.0
docker restart es
On its first start the es container prints the generated credentials and enrollment tokens:
✅ Elasticsearch security features have been automatically configured!
✅ Authentication is enabled and cluster connections are encrypted.
ℹ️ Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`):
Yy4AfQyYo9vSIL2TVXmO
ℹ️ HTTP CA certificate SHA-256 fingerprint:
1ff2312a9c3f6135c9002fe5ca85b7b509982409f1868e41fec917d96d27c631
ℹ️ Configure Kibana to use this cluster:
• Run Kibana and click the configuration link in the terminal when Kibana starts.
• Copy the following enrollment token and paste it into Kibana in your browser (valid for the next 30 minutes):
eyJ2ZXIiOiI4LjcuMCIsImFkciI6WyIxNzIuMTkuMC4yOjkyMDAiXSwiZmdyIjoiMWZmMjMxMmE5YzNmNjEzNWM5MDAyZmU1Y2E4NWI3YjUwOTk4MjQwOWYxODY4ZTQxZmVjOTE3ZDk2ZDI3YzYzMSIsImtleSI6ImJOSkNJNGdCVjZiUEYwcHAzMFREOkFyMFVjQzM0UTFTMC1KdG1PZnVKd0EifQ==
ℹ️ Configure other nodes to join this cluster:
• Copy the following enrollment token and start new Elasticsearch nodes with `bin/elasticsearch --enrollment-token <token>` (valid for the next 30 minutes):
eyJ2ZXIiOiI4LjcuMCIsImFkciI6WyIxNzIuMTkuMC4yOjkyMDAiXSwiZmdyIjoiMWZmMjMxMmE5YzNmNjEzNWM5MDAyZmU1Y2E4NWI3YjUwOTk4MjQwOWYxODY4ZTQxZmVjOTE3ZDk2ZDI3YzYzMSIsImtleSI6ImF0SkNJNGdCVjZiUEYwcHAzMFRCOkRXeE5CalVuVFlPOWlILVRLb0NoY2cifQ==
If you're running in Docker, copy the enrollment token and run:
`docker run -e "ENROLLMENT_TOKEN=<token>" docker.elastic.co/elasticsearch/elasticsearch:8.7.0`
docker exec -it es /bin/bash
./bin/elasticsearch-reset-password -u kibana
Copy the generated password (it will be used in the Kibana configuration later):
kibana/S8wJj_tK3mTP*74BjVa9
Once deployed, test by requesting https://10.8.0.103:9200 (security is enabled, so authenticate as the elastic user); a response like the following indicates success:
{ "name" : "9069bfa0d64b", "cluster_name" : "docker-cluster", "cluster_uuid" : "y8aCwEPTSn66SoA3oFBmLQ", "version" : { "number" : "8.7.0", "build_flavor" : "default", "build_type" : "docker", "build_hash" : "09520b59b6bc1057340b55750186466ea715e30e", "build_date" : "2023-03-27T16:31:09.816451435Z", "build_snapshot" : false, "lucene_version" : "9.5.0", "minimum_wire_compatibility_version" : "7.17.0", "minimum_index_compatibility_version" : "7.0.0" }, "tagline" : "You Know, for Search" }
2. Deploy Logstash (an earlier one-liner run without published ports or mounted config was marked deprecated; the runs below replace it)
docker pull logstash:8.7.0
mkdir -p /home/songyan/data/docker/elk/logstash/config
docker run -it --name logstash \
--network=elastic \
-p 5603:5603 \
-d logstash:8.7.0
docker cp logstash:/usr/share/logstash/config /home/songyan/data/docker/elk/logstash
cd /home/songyan/data/docker/elk/logstash/config
vim logstash.yml
# Replace with the following content:
node.name: logstash-203
# Log file directory
path.logs: /usr/share/logstash/logs
# Validate the config file (and that it exists), then exit
config.test_and_exit: false
# Whether to reload the config file automatically when it changes
config.reload.automatic: false
# Interval between config reload checks
config.reload.interval: 60s
# Debug mode: when enabled, the parsed config is printed, including passwords and other secrets; use with caution
# The log level must also be set to debug
config.debug: true
log.level: debug
# The bind address for the metrics REST endpoint.
http.host: 0.0.0.0

touch logstash-scheduler-instance1.conf
vim logstash-scheduler-instance1.conf
input {
gelf {
host => "0.0.0.0"
port => 5603
use_tcp => true
}
}
filter {
json {
source => "message"
}
grok {
match => { "log" => "%{DATA:log_time1} %{DATA:log_time2} \[%{DATA:thread_info}\] %{DATA:log_level} %{DATA:class_name} - %{GREEDYDATA:message_detail}" }
}
date {
match => [ "timestamp", "yyyy-MM-dd-HH:mm:ss" ]
locale => "zh-CN"
}
}
output {
elasticsearch {
action => "index"
hosts => ["10.8.0.102:9200"]
index => "%{[index]}"
}
}
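With this pipeline in place, a container can ship its logs to the GELF input above through Docker's gelf logging driver. A minimal sketch follows; the image name and tag value are placeholders, the address assumes Logstash's published port 5603 is reachable on that host, and the json filter above additionally assumes the application writes JSON log lines:
# Ship a container's stdout/stderr to the Logstash GELF input over TCP
docker run -d \
  --log-driver gelf \
  --log-opt gelf-address=tcp://<logstash-host>:5603 \
  --log-opt tag=scheduler-instance1 \
  <your-app-image>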
vim pipelines.yml
# Add the following entries:
- pipeline.id: scheduler-instance1
  path.config: "/usr/share/logstash/config/logstash-scheduler-instance1.conf"

docker rm -f logstash
docker run -it --name logstash \
--network=elastic \
-p 5603:5603 \
-v /home/songyan/data/docker/elk/logstash/config:/usr/share/logstash/config \
-v /home/songyan/data/docker/elk/logstash/logs:/usr/share/logstash/logs \
-v /home/songyan/data/docker/elk/logstash/http_ca.crt:/usr/share/logstash/ca.crt \
-v /home/songyan/data/docker/elk/logstash/pipeline:/usr/share/logstash/pipeline \
-d logstash:8.7.0
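To confirm the pipeline was picked up, tail the container log, or query Logstash's monitoring API from inside the container (a sketch; port 9600 is not published by the run above, and it assumes curl is available in the image):
# Watch startup output for pipeline errors
docker logs -f logstash
# List the loaded pipelines via the node API on port 9600
docker exec logstash curl -s http://localhost:9600/_node/pipelines?pretty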
3. Deploy Kibana
docker pull kibana:8.7.0
docker run -it -d --name kibana --network=container:es kibana:8.7.0
mkdir -p /home/songyan/data/docker/elk/kibana/config
cd /home/songyan/data/docker/elk/kibana/config
touch kibana.yml
vim kibana.yml
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "https://172.19.0.2:9200" ]
elasticsearch.ssl.certificateAuthorities: [ "/usr/share/kibana/cert/elasticsearch.crt" ]
elasticsearch.username: "kibana"
elasticsearch.password: "r5k1Xl+_NoqL+I7pAI4J"
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
server.basePath: '/kibana'
server.rewriteBasePath: true
server.publicBaseUrl: 'https://10.8.0.102:5601/kibana'
touch node.options
vim node.options
# Write the following content:
## Node command line options
## See `node --help` and `node --v8-options` for available options
## Please note you should specify one option per line

## max size of old space in megabytes
#--max-old-space-size=4096

## do not terminate process on unhandled promise rejection
--unhandled-rejections=warn

docker rm -f kibana
cp /home/songyan/data/docker/elk/es/config/certs/http_ca.crt /home/songyan/data/docker/elk/kibana/http_ca.crt
docker run -it -d --name kibana --network=container:es \
  -v /home/songyan/data/docker/elk/kibana/config:/usr/share/kibana/config \
  -v /home/songyan/data/docker/elk/kibana/http_ca.crt:/usr/share/kibana/cert/elasticsearch.crt \
  kibana:8.7.0
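A quick liveness check (a sketch; with server.basePath set to /kibana and rewriteBasePath enabled, Kibana's status endpoint sits under that prefix, and since no server.ssl settings are configured it answers over plain HTTP despite the https publicBaseUrl):
# Probe Kibana's status API through the /kibana base path
curl http://10.8.0.102:5601/kibana/api/status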
4. Deploy Filebeat with Docker
docker pull elastic/filebeat:8.7.0
docker run -d --name filebeat \
  -v /home/songyan/data/docker/containers/:/home \
  -v /home/songyan/data/docker/newelk/filebeat/:/usr/share/filebeat/ \
  --user root \
  elastic/filebeat:8.7.0

filebeat.yml contents:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/12c09eda3c592b392f7cc5179b454f5f5de6fcd663d1243e6a59fe24e4e76ca6/*.log
  tags: ["scheduler-instance6"]
  fields:
    server: 192.168.0.104   # custom field, used to tell servers apart
  fields_under_root: true
- type: log
  enabled: true
  paths:
    - /home/33810667db6ee74a27f19c6e06c20ec907a406f6e8e8dc50b903ea8ac8bdf916/*.log
  tags: ["scheduler-instance7"]
  fields:
    server: 192.168.0.104   # custom field, used to tell servers apart
  fields_under_root: true
output.elasticsearch:
  hosts: ["192.168.0.104:9200"]
  indices:
    - index: "logstash-scheduler-logs"
setup.template.name: "docker"
setup.template.pattern: "docker-*"
setup.template.enabled: false
setup.template.overwrite: true

docker restart filebeat
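To verify events are arriving, list the indices on the Elasticsearch node (a sketch; -k skips certificate verification for the self-signed cert, and the password placeholder stands for the one generated for your elastic user):
# List indices and their document counts
curl -k -u elastic:<password> "https://192.168.0.104:9200/_cat/indices?v"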