1. 首先查看etcd集群节点信息
[root@host105 cert]# ETCDCTL_API=3 etcdctl --cacert=/opt/cert//etcd.pem --cert=/opt/cert//etcd.pem --key=/opt/cert//etcd-key.pem --endpoints="https://192.168.0.105:2379,https://192.168.0.106:2379,https://192.168.0.189:2379" endpoint health
https://192.168.0.106:2379 is healthy: successfully committed proposal: took = 13.787391ms
https://192.168.0.105:2379 is healthy: successfully committed proposal: took = 13.955548ms
https://192.168.0.189:2379 is healthy: successfully committed proposal: took = 14.370631ms

检查节点列表
[root@host106 ~]# ETCDCTL_API=3 etcdctl --cacert=/opt/cert//etcd.pem --cert=/opt/cert//etcd.pem --key=/opt/cert//etcd-key.pem --endpoints="https://192.168.0.105:2379,https://192.168.0.106:2379,https://192.168.0.189:2379" endpoint status --write-out='table'
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.0.105:2379 | a1521095cffde44b |   3.5.4 |   20 kB |     false |      false |         5 |         16 |                 16 |        |
| https://192.168.0.106:2379 | eb83c838a536671f |   3.5.4 |   20 kB |      true |      false |         5 |         16 |                 16 |        |
| https://192.168.0.189:2379 | ce9e5937db5e0599 |   3.5.4 |   20 kB |     false |      false |         5 |         16 |                 16 |        |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
2. 在leader节点备份etcd数据
[root@host106 ~]# etcdctl --cacert=/opt/cert//etcd.pem --cert=/opt/cert//etcd.pem --key=/opt/cert//etcd-key.pem --endpoints="https://192.168.0.106:2379" snapshot save etcd02-bak-20230526.db
{"level":"info","ts":"2023-05-26T16:32:39.442+0800","caller":"snapshot/v3_snapshot.go:65","msg":"created temporary db file","path":"etcd02-bak-20230526.db.part"}
{"level":"info","ts":"2023-05-26T16:32:39.448+0800","logger":"client","caller":"v3/maintenance.go:211","msg":"opened snapshot stream; downloading"}
{"level":"info","ts":"2023-05-26T16:32:39.448+0800","caller":"snapshot/v3_snapshot.go:73","msg":"fetching snapshot","endpoint":"https://192.168.0.106:2379"}
{"level":"info","ts":"2023-05-26T16:32:39.477+0800","logger":"client","caller":"v3/maintenance.go:219","msg":"completed snapshot read; closing"}
{"level":"info","ts":"2023-05-26T16:32:39.498+0800","caller":"snapshot/v3_snapshot.go:88","msg":"fetched snapshot","endpoint":"https://192.168.0.106:2379","size":"20 kB","took":"now"}
{"level":"info","ts":"2023-05-26T16:32:39.498+0800","caller":"snapshot/v3_snapshot.go:97","msg":"saved","path":"etcd02-bak-20230526.db"}
Snapshot saved at etcd02-bak-20230526.db
3. 停止etcd节点
# Stop the etcd service before wiping the data directory; run on all 3 nodes.
systemctl stop etcd.service
4. 三个节点上分别都删除数据
# Remove the old etcd data on all three nodes before restoring the snapshot.
# Flags go before the operands ('rm path -rf' is a GNU-only quirk and fails on
# BSD rm), and '--' prevents any entry name from being parsed as an option.
rm -rf -- /opt/etcd-v3.5.4/data/*
5. 复制备份到每个节点
# Copy the snapshot from the leader (192.168.0.106) to BOTH of the other
# members — the restore step runs the same snapshot file on every node.
# Use full IPs: 'root@105' only works if '105' is aliased in /etc/hosts.
scp etcd02-bak-20230526.db [email protected]:/root
scp etcd02-bak-20230526.db [email protected]:/root
6. 恢复etcd数据
节点2恢复命令如下:
# Restore on node 2 (host106, the node the snapshot was taken from).
# --name and --initial-advertise-peer-urls are node-specific; --initial-cluster
# and --initial-cluster-token are identical on all three nodes.
# (etcd 3.5 prints "Deprecated: Use `etcdutl snapshot restore` instead" —
# the etcdutl form takes the same flags.)
ETCDCTL_API=3 etcdctl snapshot restore /root/etcd02-bak-20230526.db \
  --name etcd2 \
  --initial-cluster="etcd1=https://192.168.0.105:2380,etcd2=https://192.168.0.106:2380,etcd3=https://192.168.0.189:2380" \
  --initial-cluster-token=etcd-cluster \
  --initial-advertise-peer-urls=https://192.168.0.106:2380 \
  --data-dir=/opt/etcd-v3.5.4/data/
运行结果如下:
[root@host106 data]# ETCDCTL_API=3 etcdctl snapshot restore /root/etcd02-bak-20230526.db \ > --name etcd2 \ > --initial-cluster="etcd1=https://192.168.0.105:2380,etcd2=https://192.168.0.106:2380,etcd3=https://192.168.0.189:2380" \ > --initial-cluster-token=etcd-cluster \ > --initial-advertise-peer-urls=https://192.168.0.106:2380 \ > --data-dir=/opt/etcd-v3.5.4/data/ Deprecated: Use `etcdutl snapshot restore` instead. 2023-05-29T08:53:54+08:00 info snapshot/v3_snapshot.go:248 restoring snapshot {"path": "/root/etcd02-bak-20230526.db", "wal-dir": "/opt/etcd-v3.5.4/data/member/wal", "data-dir": "/opt/etcd-v3.5.4/data/", "snap-dir": "/opt/etcd-v3.5.4/data/member/snap", "stack": "go.etcd.io/etcd/etcdutl/v3/snapshot.(*v3Manager).Restore\n\t/go/src/go.etcd.io/etcd/release/etcd/etcdutl/snapshot/v3_snapshot.go:254\ngo.etcd.io/etcd/etcdutl/v3/etcdutl.SnapshotRestoreCommandFunc\n\t/go/src/go.etcd.io/etcd/release/etcd/etcdutl/etcdutl/snapshot_command.go:147\ngo.etcd.io/etcd/etcdctl/v3/ctlv3/command.snapshotRestoreCommandFunc\n\t/go/src/go.etcd.io/etcd/release/etcd/etcdctl/ctlv3/command/snapshot_command.go:129\ngithub.com/spf13/cobra.(*Command).execute\n\t/go/pkg/mod/github.com/spf13/[email protected]/command.go:856\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/go/pkg/mod/github.com/spf13/[email protected]/command.go:960\ngithub.com/spf13/cobra.(*Command).Execute\n\t/go/pkg/mod/github.com/spf13/[email protected]/command.go:897\ngo.etcd.io/etcd/etcdctl/v3/ctlv3.Start\n\t/go/src/go.etcd.io/etcd/release/etcd/etcdctl/ctlv3/ctl.go:107\ngo.etcd.io/etcd/etcdctl/v3/ctlv3.MustStart\n\t/go/src/go.etcd.io/etcd/release/etcd/etcdctl/ctlv3/ctl.go:111\nmain.main\n\t/go/src/go.etcd.io/etcd/release/etcd/etcdctl/main.go:59\nruntime.main\n\t/go/gos/go1.16.15/src/runtime/proc.go:225"} 2023-05-29T08:53:54+08:00 info membership/store.go:141 Trimming membership information from the backend... 
2023-05-29T08:53:55+08:00 info membership/cluster.go:421 added member {"cluster-id": "5b962c26198c3782", "local-member-id": "0", "added-peer-id": "a1521095cffde44b", "added-peer-peer-urls": ["https://192.168.0.105:2380"]} 2023-05-29T08:53:55+08:00 info membership/cluster.go:421 added member {"cluster-id": "5b962c26198c3782", "local-member-id": "0", "added-peer-id": "ce9e5937db5e0599", "added-peer-peer-urls": ["https://192.168.0.189:2380"]} 2023-05-29T08:53:55+08:00 info membership/cluster.go:421 added member {"cluster-id": "5b962c26198c3782", "local-member-id": "0", "added-peer-id": "eb83c838a536671f", "added-peer-peer-urls": ["https://192.168.0.106:2380"]} 2023-05-29T08:53:55+08:00 info snapshot/v3_snapshot.go:269 restored snapshot {"path": "/root/etcd02-bak-20230526.db", "wal-dir": "/opt/etcd-v3.5.4/data/member/wal", "data-dir": "/opt/etcd-v3.5.4/data/", "snap-dir": "/opt/etcd-v3.5.4/data/member/snap"}
节点1恢复命令如下:
# Restore on node 1 (host105).
# NOTE: in the original, the reminder comment sat on its own line after a '\'
# continuation — that '#' terminates the command, and the remaining flags then
# run as a bogus separate command. Keep comments OUTSIDE the continued command.
# Per-node changes: --name and --initial-advertise-peer-urls (the
# --initial-cluster list itself is the same on every node).
ETCDCTL_API=3 etcdctl snapshot restore /root/etcd02-bak-20230526.db \
  --name etcd1 \
  --initial-cluster="etcd1=https://192.168.0.105:2380,etcd2=https://192.168.0.106:2380,etcd3=https://192.168.0.189:2380" \
  --initial-cluster-token=etcd-cluster \
  --initial-advertise-peer-urls=https://192.168.0.105:2380 \
  --data-dir=/opt/etcd-v3.5.4/data/
节点3恢复命令如下:
# Restore on node 3 (host189).
# The original embedded '# 这里一定要改' ("must change this here") comments
# inside the backslash-continued command; a '#' there comments out the rest of
# the command and breaks it. The flags that must change per node are --name
# and --initial-advertise-peer-urls.
ETCDCTL_API=3 etcdctl snapshot restore /root/etcd02-bak-20230526.db \
  --name etcd3 \
  --initial-cluster="etcd1=https://192.168.0.105:2380,etcd2=https://192.168.0.106:2380,etcd3=https://192.168.0.189:2380" \
  --initial-cluster-token=etcd-cluster \
  --initial-advertise-peer-urls=https://192.168.0.189:2380 \
  --data-dir=/opt/etcd-v3.5.4/data/
7. 数据恢复后查看etcd数据目录结构
[root@host106 data]# tree ./member/
./member/
├── snap
│   ├── 0000000000000001-0000000000000003.snap
│   └── db
└── wal
    └── 0000000000000000-0000000000000000.wal

2 directories, 3 files
8. 启动etcd集群
[root@host106 data]# systemctl start etcd
[root@host105 ~]# systemctl start etcd
root@mytest:~# systemctl start etcd
9. 节点健康检查
[root@host106 data]# ETCDCTL_API=3 etcdctl --cacert=/opt/cert//etcd.pem --cert=/opt/cert//etcd.pem --key=/opt/cert//etcd-key.pem --endpoints="https://192.168.0.105:2379,https://192.168.0.106:2379,https://192.168.0.189:2379" endpoint health
https://192.168.0.106:2379 is healthy: successfully committed proposal: took = 12.54057ms
https://192.168.0.189:2379 is healthy: successfully committed proposal: took = 13.779164ms
https://192.168.0.105:2379 is healthy: successfully committed proposal: took = 52.266177ms
标签:https,--,恢复,备份,192.168,snapshot,etcd,go From: https://www.cnblogs.com/mjxi/p/17439452.html