1. Environment
Software versions:

| Service        | Version             |
|----------------|---------------------|
| OS             | Ubuntu 22.04.1 LTS  |
| Runtime        | containerd.io 1.6.8 |
| kubeasz        | 3.4.1               |
| k8s            | v1.25.3             |
| Network plugin | calico v3.23.3      |
Node layout:

| Role                 | IP             | Hostname | Services / Components |
|----------------------|----------------|----------|-----------------------|
| Cluster control node | 192.168.33.110 | kubeasz  | kubeasz               |
| Image registry       | 192.168.33.111 | harbor   | myharbor.belkuy.top   |
| Load balancer        | 192.168.33.118 | ha1      | haproxy, keepalived   |
| Load balancer        | 192.168.33.119 | ha2      | haproxy, keepalived   |
| VIP                  | 192.168.33.120 |          |                       |
| k8s master           | 192.168.33.121 | u2m1     | etcd                  |
| k8s master           | 192.168.33.122 | u2m2     | etcd                  |
| k8s master           | 192.168.33.123 | u2m3     | etcd                  |
| k8s node             | 192.168.33.131 | u2n1     |                       |
| k8s node             | 192.168.33.132 | u2n2     |                       |
| k8s node             | 192.168.33.133 | u2n3     |                       |
2. Deploying k8s from binaries with kubeasz
2.1 Deploy a highly available entry point for the cluster API
2.1.1 Install haproxy
Use haproxy to load-balance the kube-apiserver service.
# Kernel parameter: allow binding to a non-local address (the VIP)
cat >> /etc/sysctl.conf <<EOF
net.ipv4.ip_nonlocal_bind = 1
EOF
sysctl -p
# Install haproxy
apt update && apt -y install haproxy
# Configure haproxy by appending the lines below
cat >> /etc/haproxy/haproxy.cfg <<EOF
listen stats
    mode http
    bind 0.0.0.0:8888
    stats enable
    log global
    stats uri /status
    stats auth admin:fgAgh734dsf0

listen kubernetes-api-6443
    bind 192.168.33.120:6443
    mode tcp
    server master1 192.168.33.121:6443 check inter 3s fall 3 rise 3
    server master2 192.168.33.122:6443 check inter 3s fall 3 rise 3
    server master3 192.168.33.123:6443 check inter 3s fall 3 rise 3
EOF
systemctl restart haproxy
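A quick sanity check after the restart (the stats port and credentials come from the listen stats section above):
# validate the configuration file
haproxy -c -f /etc/haproxy/haproxy.cfg
# confirm haproxy is listening on the stats and API ports
ss -tnlp | grep -E ':8888|:6443'
# query the stats page
curl -u admin:fgAgh734dsf0 http://127.0.0.1:8888/status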
2.1.2 Install keepalived
Install keepalived to make haproxy highly available.
# Install keepalived
apt update && apt -y install keepalived
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    # router_id; use u-ha2 on ha2
    router_id u-ha1
    # user (or group) allowed to run the health-check script
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    # health-check script defined below
    script "/etc/keepalived/check_haproxy.sh"
    interval 1
    weight -30
    fall 3
    rise 2
    timeout 2
}
vrrp_instance VI_1 {
    state MASTER                # BACKUP on ha2
    interface ens33
    garp_master_delay 10
    smtp_alert
    virtual_router_id 66        # virtual router ID; must be identical on ha1 and ha2
    priority 100                # 80 on ha2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass GPmH8Ql8      # authentication password; must be identical on ha1 and ha2
    }
    virtual_ipaddress {
        192.168.33.120/24 dev ens33 label ens33:1    # the VIP; must be identical on ha1 and ha2
    }
    track_script {
        check_haproxy           # run the script defined above
    }
}
EOF
cat > /etc/keepalived/check_haproxy.sh <<EOF
#!/bin/bash
LOGFILE="/var/log/keepalived-haproxy-state.log"
date >> \$LOGFILE
counter=\$(ps -C haproxy --no-heading | wc -l)
if [ "\$counter" = "0" ]; then
    echo "failed: check haproxy status!" >> \$LOGFILE
    systemctl restart haproxy
    sleep 2
    counter=\$(ps -C haproxy --no-heading | wc -l)
    if [ "\$counter" = "0" ]; then
        echo "failed after 2s: check haproxy status, stop keepalived" >> \$LOGFILE
        systemctl stop keepalived
    fi
fi
EOF
chmod a+x /etc/keepalived/check_haproxy.sh
systemctl restart keepalived
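The VIP should now be present on whichever node is MASTER (interface name ens33 as configured above):
# the VIP is added as label ens33:1 on the current MASTER
ip addr show ens33 | grep 192.168.33.120
# optionally watch VRRP advertisements while testing a failover
tcpdump -i ens33 -nn vrrp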
2.2 Install k8s with kubeasz
2.2.1 Prepare the environment
Run the following on the cluster control node:
# Install ansible plus sshpass; sshpass is used to push the SSH public key to each k8s host
apt install -y ansible sshpass
ssh-keygen
cat > /opt/pre.sh <<EOF
#!/bin/bash
# target host list
IP="
192.168.33.110
192.168.33.121
192.168.33.122
192.168.33.123
192.168.33.131
192.168.33.132
192.168.33.133
"
for node in \${IP}; do
    sshpass -p 123456 ssh-copy-id \${node} -o StrictHostKeyChecking=no
    echo "\${node}: public key copied"
    ssh \${node} ln -sv /usr/bin/python3 /usr/bin/python
    echo "\${node}: /usr/bin/python symlink created"
done
EOF
# Run the script
bash /opt/pre.sh
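Passwordless login can be spot-checked once the script finishes (a minimal sketch; substitute any host IP from the list above):
# should print the remote hostname and the python symlink target without prompting for a password
ssh -o BatchMode=yes 192.168.33.121 "hostname && readlink /usr/bin/python"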
2.2.2 Download the kubeasz project and components
Official GitHub: https://github.com/easzlab/kubeasz
Download the latest ezdown script to /opt on the cluster control node; after it runs, kubeasz is installed under /etc/kubeasz.
cd /opt
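# Download the ezdown script from the kubeasz releases page
# (sketch: release tag 3.4.1 per the version table above; adjust as needed)
export release=3.4.1
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown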
chmod +x ezdown
# Point the registry references inside ezdown at the private Harbor registry
sed -i "s#http://easzlab.io.local:5000#https://myharbor.belkuy.top#g" ezdown
sed -i "s#easzlab.io.local:5000#myharbor.belkuy.top#g" ezdown
# Download kubeasz code, binaries, and the default container images
./ezdown -D
# Download extra container images (cilium, flannel, prometheus, etc.)
./ezdown -X
2.2.3 Customize the hosts file
cd /etc/kubeasz/
# Create a new cluster configuration
./ezctl new u2-k8s
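ezctl new creates the cluster working directory; it should contain the two files edited in the steps below:
ls /etc/kubeasz/clusters/u2-k8s
# hosts  config.yml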
- Specify the etcd nodes, master nodes, worker nodes, VIP, container runtime, network plugin, service and pod IP ranges, and related settings in the hosts file:
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
192.168.33.121
192.168.33.122
192.168.33.123
# master node(s)
[kube_master]
192.168.33.121
192.168.33.122
192.168.33.123
# work node(s)
[kube_node]
192.168.33.131
192.168.33.132
# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.1.8 NEW_INSTALL=false
# [optional] loadbalance for accessing k8s from outside
[ex_lb]
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
# [optional] ntp server for the cluster
[chrony]
#192.168.1.1
[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"
# Cluster container-runtime supported: docker, containerd
# if k8s version >= 1.24, docker is not supported
CONTAINER_RUNTIME="containerd"
# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"
# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="10.168.0.0/16"
# NodePort Range
NODE_PORT_RANGE="30000-32767"
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local"
# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/local/bin"
# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"
# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/u2-k8s"
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
- The config.yml file:
############################
# prepare
############################
# install system packages from an offline or online source (offline|online)
INSTALL_SOURCE: "online"
# optionally apply OS security hardening, see github.com/dev-sec/ansible-collection-hardening
OS_HARDEN: false
############################
# role:deploy
############################
# default: ca will expire in 100 years
# default: certs issued by the ca will expire in 50 years
CA_EXPIRY: "876000h"
CERT_EXPIRY: "438000h"
# kubeconfig settings
CLUSTER_NAME: "cluster1"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"
# k8s version
K8S_VER: "1.25.3"
############################
# role:etcd
############################
# A separate wal directory avoids disk I/O contention and improves performance
ETCD_DATA_DIR: "/var/lib/etcd"
ETCD_WAL_DIR: ""
############################
# role:runtime [containerd,docker]
############################
# ------------------------------------------- containerd
# [.]enable the container registry mirror
ENABLE_MIRROR_REGISTRY: true
# [containerd]base (pause) container image
SANDBOX_IMAGE: "myharbor.belkuy.top/easzlab/pause:3.8"
# [containerd]container storage directory
CONTAINERD_STORAGE_DIR: "/var/lib/containerd"
# ------------------------------------------- docker
# [docker]container storage directory
DOCKER_STORAGE_DIR: "/var/lib/docker"
# [docker]enable the remote RESTful API
ENABLE_REMOTE_API: false
# [docker]trusted insecure registries
INSECURE_REG: '["https://myharbor.belkuy.top"]'
############################
# role:kube-master
############################
# Certificate SANs for the k8s master nodes; extra IPs and domains (e.g. a public IP and domain) may be added
MASTER_CERT_HOSTS:
- "192.168.33.120"
- "u2k8s.belkuy.top"
#- "www.test.com"
# Pod subnet mask length per node (determines how many pod IPs each node can allocate)
# If flannel runs with --kube-subnet-mgr, it reads this value to assign a pod subnet to each node
# https://github.com/coreos/flannel/issues/847
NODE_CIDR_LEN: 24
############################
# role:kube-node
############################
# kubelet root directory
KUBELET_ROOT_DIR: "/var/lib/kubelet"
# maximum number of pods per node
MAX_PODS: 110
# resources reserved for kube components (kubelet, kube-proxy, dockerd, etc.)
# see templates/kubelet-config.yaml.j2 for the actual values
KUBE_RESERVED_ENABLED: "no"
# Upstream k8s advises against enabling system-reserved casually unless long-term monitoring shows the system's real resource usage;
# the reservation should also grow as the system runs longer; see templates/kubelet-config.yaml.j2 for the values.
# The defaults assume a 4c/8g VM with a minimal OS install; increase them on high-spec physical machines.
# Note that apiserver and friends briefly use a lot of resources during cluster installation, so reserving at least 1g of memory is recommended.
SYS_RESERVED_ENABLED: "no"
############################
# role:network [flannel,calico,cilium,kube-ovn,kube-router]
############################
# ------------------------------------------- flannel
# [flannel]flannel backend, e.g. "host-gw", "vxlan"
FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false
# [flannel]
flannel_ver: "v0.19.2"
# ------------------------------------------- calico
# [calico]IPIP tunnel mode, one of [Always, CrossSubnet, Never]; across subnets use Always or CrossSubnet
# (on public clouds Always is simplest; otherwise the provider's network settings need adjusting, see each cloud's docs).
# CrossSubnet is a mixed tunnel + BGP routing mode that can improve performance; within a single subnet Never is fine.
CALICO_IPV4POOL_IPIP: "Always"
# [calico]host IP used by calico-node; BGP peering is established over this address, set manually or auto-detected
IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"
# [calico]calico network backend: brid, vxlan, none
CALICO_NETWORKING_BACKEND: "brid"
# [calico]whether calico uses route reflectors
# Recommended for clusters with more than 50 nodes
CALICO_RR_ENABLED: false
# CALICO_RR_NODES sets the route reflector nodes; defaults to the cluster master nodes if unset
# CALICO_RR_NODES: ["192.168.1.1", "192.168.1.2"]
CALICO_RR_NODES: []
# [calico]supported calico versions: ["3.19", "3.23"]
calico_ver: "v3.23.3"
# [calico]calico major.minor version
calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
# ------------------------------------------- cilium
# [cilium]image version
cilium_ver: "1.12.2"
cilium_connectivity_check: true
cilium_hubble_enabled: false
cilium_hubble_ui_enabled: false
# ------------------------------------------- kube-ovn
# [kube-ovn]node for the OVN DB and OVN control plane, defaults to the first master node
OVN_DB_NODE: "{{ groups['kube_master'][0] }}"
# [kube-ovn]offline image tarball version
kube_ovn_ver: "v1.5.3"
# ------------------------------------------- kube-router
# [kube-router]public clouds impose restrictions, so ipinip usually needs to stay on; self-hosted environments can use "subnet"
OVERLAY_TYPE: "full"
# [kube-router]NetworkPolicy support switch
FIREWALL_ENABLE: true
# [kube-router]kube-router image version
kube_router_ver: "v0.3.1"
busybox_ver: "1.28.4"
############################
# role:cluster-addon
############################
# install coredns automatically
dns_install: "no"
corednsVer: "1.9.3"
ENABLE_LOCAL_DNS_CACHE: false
dnsNodeCacheVer: "1.22.8"
# local dns cache address
LOCAL_DNS_CACHE: "169.254.20.10"
# install metrics server automatically
metricsserver_install: "no"
metricsVer: "v0.5.2"
# install dashboard automatically
dashboard_install: "no"
dashboardVer: "v2.6.1"
dashboardMetricsScraperVer: "v1.0.8"
# install prometheus automatically
prom_install: "no"
prom_namespace: "monitor"
prom_chart_ver: "39.11.0"
# install nfs-provisioner automatically
nfs_provisioner_install: "no"
nfs_provisioner_namespace: "kube-system"
nfs_provisioner_ver: "v4.0.2"
nfs_storage_class: "managed-nfs-storage"
nfs_server: "192.168.1.10"
nfs_path: "/data/nfs"
# install network-check automatically
network_check_enabled: false
network_check_schedule: "*/5 * * * *"
############################
# role:harbor
############################
# harbor version, full version string
HARBOR_VER: "v2.1.3"
HARBOR_DOMAIN: "harbor.easzlab.io.local"
HARBOR_TLS_PORT: 8443
# if set 'false', you need to put certs named harbor.pem and harbor-key.pem in directory 'down'
HARBOR_SELF_SIGNED_CERT: true
# install extra component
HARBOR_WITH_NOTARY: false
HARBOR_WITH_TRIVY: false
HARBOR_WITH_CLAIR: false
HARBOR_WITH_CHARTMUSEUM: true
2.2.4 Deploy the k8s cluster
Initialize the environment and deploy the highly available k8s cluster step by step via ansible.
cd /etc/kubeasz
# Prepare the CA and base system settings
./ezctl setup u2-k8s 01
# Deploy the etcd cluster
./ezctl setup u2-k8s 02
# Deploy the container runtime
./ezctl setup u2-k8s 03
# Deploy the master nodes
./ezctl setup u2-k8s 04
# Deploy the worker nodes
./ezctl setup u2-k8s 05
# Deploy the network plugin
# First point the image references in the calico template at the private registry
sed -i "s#easzlab.io.local:5000#myharbor.belkuy.top#g" \
roles/calico/templates/calico-v3.23.yaml.j2
./ezctl setup u2-k8s 06
# Add an extra worker node
./ezctl add-node u2-k8s 192.168.33.133
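The numbered steps can also be run in a single pass (per the kubeasz documentation):
# equivalent to running steps 01 through 07 in order
./ezctl setup u2-k8s all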
2.2.5 Deploy the DNS addon
Deploy coredns; the manifest below is adapted from the template in the official Kubernetes repository.
https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns/coredns
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 3
  revisionHistoryLimit: 10
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: myharbor.belkuy.top/coredns/coredns:1.9.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 250Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.68.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
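Save the manifest above to a file (for example coredns.yaml, an illustrative name), apply it, and confirm the pods and the kube-dns service come up:
kubectl apply -f coredns.yaml
kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
kubectl get svc -n kube-system kube-dns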
2.3 Testing and verification
2.3.1 Verify the cluster
Run on the control node:
# kubectl get node
NAME             STATUS                     ROLES    AGE     VERSION
192.168.33.121   Ready,SchedulingDisabled   master   7h55m   v1.25.3
192.168.33.122   Ready,SchedulingDisabled   master   7h55m   v1.25.3
192.168.33.123   Ready,SchedulingDisabled   master   7h55m   v1.25.3
192.168.33.131   Ready                      node     7h50m   v1.25.3
192.168.33.132   Ready                      node     7h50m   v1.25.3
192.168.33.133   Ready                      node     6h51m   v1.25.3
2.3.2 Verify the network
Run on any k8s node to check the calico status:
# calicoctl node status
Calico process is running.
IPv4 BGP status
+----------------+-------------------+-------+----------+-------------+
|  PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+----------------+-------------------+-------+----------+-------------+
| 192.168.33.122 | node-to-node mesh | up    | 05:59:05 | Established |
| 192.168.33.123 | node-to-node mesh | up    | 05:59:04 | Established |
| 192.168.33.131 | node-to-node mesh | up    | 05:59:05 | Established |
| 192.168.33.132 | node-to-node mesh | up    | 05:59:10 | Established |
| 192.168.33.133 | node-to-node mesh | up    | 06:45:50 | Established |
+----------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.
Run on the control node to verify DNS resolution and network access from inside a pod:
root@kubeasz-110:/# kubectl run busybox -it --image=busybox:1.28 --image-pull-policy='IfNotPresent' --restart=Never --rm -- sh
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes.default
Server: 10.68.0.2
Address 1: 10.68.0.2 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default
Address 1: 10.68.0.1 kubernetes.default.svc.cluster.local
/ # ping kubernetes.default -c 2
PING kubernetes.default (10.68.0.1): 56 data bytes
64 bytes from 10.68.0.1: seq=0 ttl=64 time=0.318 ms
64 bytes from 10.68.0.1: seq=1 ttl=64 time=0.087 ms
--- kubernetes.default ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.087/0.202/0.318 ms
/ # nslookup www.baidu.com
Server: 10.68.0.2
Address 1: 10.68.0.2 kube-dns.kube-system.svc.cluster.local
Name: www.baidu.com
Address 1: 14.215.177.39
Address 2: 14.215.177.38
/ # ping www.baidu.com -c 2
PING www.baidu.com (14.215.177.39): 56 data bytes
64 bytes from 14.215.177.39: seq=0 ttl=127 time=7.411 ms
64 bytes from 14.215.177.39: seq=1 ttl=127 time=7.337 ms
--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 7.337/7.374/7.411 ms
/ #
2.4 etcd backup and restore
2.4.1 Using etcdctl
# Take a snapshot on node u2m1
ETCDCTL_API=3 etcdctl snapshot save etcd_snap_save.db \
--endpoints=https://192.168.33.121:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem
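The snapshot can be checked right after it is written:
# show hash, revision, total keys, and size of the snapshot
ETCDCTL_API=3 etcdctl snapshot status etcd_snap_save.db -w table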
# Run the restore on every etcd node
# The example below is for u2m1; adjust --name, the member list, and the URLs for each node
## Move the etcd config aside and delete the old data
mv /etc/kubernetes/manifests/etcd.yaml /opt/
rm -rf /var/lib/etcd/
ETCDCTL_API=3 etcdctl snapshot restore etcd_snap_save.db \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
--data-dir=/var/lib/etcd/ \
--endpoints=https://127.0.0.1:2379,https://192.168.33.121:2379 \
--initial-cluster=u2m2=https://192.168.33.122:2380,u2m3=https://192.168.33.123:2380,u2m1=https://192.168.33.121:2380 \
--name=u2m1 \
--initial-advertise-peer-urls=https://192.168.33.121:2380
#### Put the config back
mv /opt/etcd.yaml /etc/kubernetes/manifests/
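Once etcd is running again on all members, cluster health can be verified with the same certificates used for the backup:
ETCDCTL_API=3 etcdctl endpoint health \
--endpoints=https://192.168.33.121:2379,https://192.168.33.122:2379,https://192.168.33.123:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem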
2.4.2 Using kubeasz
cd /etc/kubeasz
# Backups are written to /etc/kubeasz/clusters/u2-k8s/backup
./ezctl backup u2-k8s
./ezctl restore u2-k8s
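List the snapshot files to confirm the backup succeeded before attempting a restore:
ls -lh /etc/kubeasz/clusters/u2-k8s/backup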