Prerequisites
OS: CentOS Stream 8
[root@localhost ~]# cat /etc/os-release
NAME="CentOS Stream"
| Hostname | IP address | Role |
| --- | --- | --- |
| master-worker-node-1 | 192.168.122.89/24 | control-plane, master, worker |
| master-worker-node-2 | 192.168.122.106/24 | control-plane, master, worker |
| only-worker-node-3 | 192.168.122.132/24 | worker |
| only-worker-node-4 | 192.168.122.182/24 | worker |
kubelet, kubeadm, and kubectl version: 1.25.3
Container runtime: containerd (with runc as the OCI runtime)
Load balancing: nginx and keepalived
VIP: 192.168.122.253/24
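The VIP floats between the two master nodes via keepalived (VRRP), while nginx listens on the VIP at port 16443 and load-balances TCP to both kube-apiservers on 6443, so clients and kubelets always reach a live apiserver through 192.168.122.253:16443.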
Basic configuration
All four nodes need the same basic configuration.
This includes: hostname, IP address, DNS, disabling swap, disabling SELinux, disabling firewalld, enabling IP forwarding, configuring yum repositories, configuring NTP, installing base packages, and setting up SSH trust between hosts.
These steps are the same as in any other deployment and are omitted here.
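For reference, the omitted node preparation looks roughly like this (a minimal sketch; repository setup, NTP, and SSH trust are environment-specific and left out):
# Disable swap (kubelet refuses to run with swap enabled by default)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Disable SELinux and firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
systemctl disable firewalld --now
# Enable IP forwarding and make bridged traffic visible to iptables
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf <<eof
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
eof
sysctl --system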
Installing the container runtime: containerd
# Install containerd
yum install containerd.io-1.6.10 -y
# Recent packages already create a config file under /etc/containerd, but it is empty (the containerd project notes that containerd installed via yum has CNI disabled by default), so generate a full default config instead:
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
[21:55:29 remote-server root ~] # for i in {89,106,132,182};do echo "----->>>> 192.168.122.$i"; ssh 192.168.122.$i containerd config default \> /etc/containerd/config.toml ;done
----->>>> 192.168.122.89
----->>>> 192.168.122.106
----->>>> 192.168.122.132
----->>>> 192.168.122.182
# Adjust a few parameters in the config file: set the cgroup driver to systemd (SystemdCgroup), point sandbox_image at a domestic mirror, and set config_path for per-registry configuration
[root@only-worker-node-4 containerd]# sed -i.bak -e '/SystemdCgroup/s@false@true@g' -e '/sandbox_image/s@registry.k8s.io/pause:3.6@registry.aliyuncs.com/google_containers/pause:3.7@g' -e '/config_path/s@""@"/etc/containerd/cert.d"@g' /etc/containerd/config.toml
[root@only-worker-node-4 containerd]# diff /etc/containerd/config.toml /etc/containerd/config.toml.bak
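A quick sanity check that the three edits took effect:
grep -E 'SystemdCgroup|sandbox_image|config_path' /etc/containerd/config.toml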
Configuring containerd registry mirrors
containerd expects a hosts.toml file (one [host."…"] table per mirror) under each registry directory in cert.d:
[root@master-worker-node-1 ~]# mkdir -p /etc/containerd/cert.d/docker.io
[root@master-worker-node-1 ~]# cat > /etc/containerd/cert.d/docker.io/hosts.toml << eof
> [host."https://4wgtxa6q.mirror.aliyuncs.com"]
>   capabilities = ["pull"]
> [host."https://registry.docker-cn.com"]
>   capabilities = ["pull"]
> [host."https://docker.mirrors.ustc.edu.cn"]
>   capabilities = ["pull"]
> eof
# Restart containerd and enable it at boot
systemctl daemon-reload
systemctl restart containerd
systemctl enable containerd
systemctl status containerd
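Optionally verify that image pulls honor the mirror configuration; the --hosts-dir flag points ctr at the cert.d directory (reachable mirrors are assumed):
ctr --namespace k8s.io images pull --hosts-dir /etc/containerd/cert.d docker.io/library/busybox:1.28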
Configuring nginx and keepalived for kube-apiserver high availability
Run the following script on the master nodes:
#!/bin/bash
# Install nginx and keepalived for master-node high availability
yum install -y nginx nginx-mod-stream keepalived  # nginx-mod-stream provides the stream (L4 load-balancing) module used below
# Multi-master really means building a single highly available apiserver endpoint
# Write the nginx configuration
cat > /etc/nginx/nginx.conf <<eof
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# L4 load balancing for the two master apiservers
stream {
    log_format main '\$remote_addr \$upstream_addr - [\$time_local] \$status \$upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.122.89:6443 weight=5 max_fails=3 fail_timeout=30s;   # Master1 APISERVER IP:PORT
        server 192.168.122.106:6443 weight=5 max_fails=3 fail_timeout=30s;  # Master2 APISERVER IP:PORT
    }
    server {
        listen 16443;  # nginx shares the master nodes with the apiserver, so it cannot listen on 6443
        proxy_pass k8s-apiserver;
    }
}

http {
    log_format main '\$remote_addr - \$remote_user [\$time_local] "\$request" '
                    '\$status \$body_bytes_sent "\$http_referer" '
                    '"\$http_user_agent" "\$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    server {
        listen 80 default_server;
        server_name _;
        location / {
        }
    }
}
eof
# Write the keepalived configuration
cat > /etc/keepalived/keepalived.conf <<eof
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state $1
    interface ens3          # change to the actual NIC name
    virtual_router_id 20    # VRRP router ID; must be unique per VRRP instance
    priority $2             # priority
    advert_int 2            # VRRP advertisement interval in seconds (default 1)
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # Virtual IP
    virtual_ipaddress {
        192.168.122.253/24
    }
    track_script {
        check_nginx
    }
}
eof
cat > /etc/keepalived/check_nginx.sh <<eof
#!/bin/bash
# Count running nginx processes, excluding the grep itself and this script's PID
counter=\$(ps -ef | grep sbin/nginx | egrep -cv "grep|\$\$")
if [ \$counter -eq 0 ]; then
    # nginx is down: try to restart it once
    systemctl restart nginx
    sleep 3
    counter=\$(ps -ef | grep sbin/nginx | egrep -cv "grep|\$\$")
    if [ \$counter -eq 0 ]; then
        # restart failed: stop keepalived so the VIP fails over to the other master
        systemctl stop keepalived
    fi
fi
eof
chmod u+x /etc/keepalived/check_nginx.sh
# Start nginx and keepalived and enable them at boot
systemctl daemon-reload && systemctl enable nginx --now && systemctl enable keepalived --now
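The script above is saved as nginx-keepalive.sh; its two positional arguments fill in the keepalived state ($1) and priority ($2):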
[root@master-worker-node-1 ~]# sh nginx-keepalive.sh MASTER 100
[root@master-worker-node-2 ~]# sh nginx-keepalive.sh BACKUP 80
[root@master-worker-node-2 keepalived]# tcpdump -i ens3 host 224.0.0.18 -nnvvv
dropped privs to tcpdump
tcpdump: listening on ens3, link-type EN10MB (Ethernet), capture size 262144 bytes
23:33:51.162033 IP (tos 0xc0, ttl 255, id 26, offset 0, flags [none], proto VRRP (112), length 40)
192.168.122.89 > 224.0.0.18: vrrp 192.168.122.89 > 224.0.0.18: VRRPv2, Advertisement, vrid 20, prio 100, authtype simple, intvl 2s, length 20, addrs: 192.168.122.253 auth "1111^@^@^@^@"
[root@master-worker-node-1 ~]# ip add show ens3
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:53:50:5f brd ff:ff:ff:ff:ff:ff
inet 192.168.122.89/24 brd 192.168.122.255 scope global noprefixroute ens3
valid_lft forever preferred_lft forever
inet 192.168.122.253/24 scope global secondary ens3
valid_lft forever preferred_lft forever
inet6 fe80::df8:5c2e:6181:c92b/64 scope link noprefixroute
valid_lft forever preferred_lft forever
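An optional failover check: stopping keepalived on the MASTER node should move the VIP to the BACKUP, and starting it again should preempt the VIP back, since the MASTER instance has the higher priority:
# on master-worker-node-1
systemctl stop keepalived
# on master-worker-node-2, the VIP should now show on ens3
ip add show ens3
# on master-worker-node-1, restore; the VIP preempts back
systemctl start keepalived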
Installing the Kubernetes cluster packages
[root@master-worker-node-1 ~]# yum install kubeadm-1.25.3 kubelet-1.25.3 kubectl-1.25.3 -y
[root@master-worker-node-2 ~]# yum install kubeadm-1.25.3 kubelet-1.25.3 kubectl-1.25.3 -y
[root@only-worker-node-3 ~]# yum install kubeadm-1.25.3 kubelet-1.25.3 kubectl-1.25.3 -y
[root@only-worker-node-4 ~]# yum install kubeadm-1.25.3 kubelet-1.25.3 kubectl-1.25.3 -y
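kubeadm expects kubelet to be enabled on every node; until the node is initialized or joined it restarts in a loop, which is normal:
systemctl enable kubelet --now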
Configuring crictl for the containerd runtime
crictl is the CLI for the Container Runtime Interface (CRI); it comes from the cri-tools package, which is installed as a dependency of kubelet.
A Kubernetes cluster does not have to use docker as its container runtime; any runtime that implements CRI will do, and cri-tools manages containers through that interface.
In this example containerd is the runtime, so containers can of course be managed with containerd's own client, ctr.
Alternatively, crictl can manage them by calling containerd's CRI endpoint.
Run this on every node:
[root@only-worker-node-4 ~]# cat > /etc/crictl.yaml << EOF
> runtime-endpoint: unix:///run/containerd/containerd.sock
> image-endpoint: unix:///run/containerd/containerd.sock
> timeout: 10
> debug: false
> EOF
[root@only-worker-node-4 ~]# crictl config runtime-endpoint /run/containerd/containerd.sock
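A quick check that crictl can reach containerd over the configured endpoint:
crictl version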
Initializing the Kubernetes cluster
Generate a default configuration file with kubeadm:
[root@master-worker-node-1 ~]# kubeadm config print init-defaults >> kubeadm-config.yaml
Edit the configuration file:
[root@master-worker-node-1 ~]# sed -i.bak \
-e '/advertiseAddress/s@1.2.3.4@192.168.122.253@g' \
-e '/imageRepository/s@registry.k8s.io@registry.cn-hangzhou.aliyuncs.com/google_containers@g' \
-e 's@1.25.0@1.25.3@g' \
-e '/name/s@node@master-worker-node-1@g' \
-e 's@6443@16443@g' \
-e '/serviceSubnet/i \ \ podSubnet: 10.244.0.0/16' \
-e '24i controlPlaneEndpoint: 192.168.122.253:16443' /root/kubeadm-config.yaml
cat >> /root/kubeadm-config.yaml <<eof
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
eof
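Since kube-proxy is switched to ipvs mode, make sure the IPVS kernel modules load on every node; a minimal sketch (module list per the kube-proxy IPVS documentation):
cat > /etc/modules-load.d/ipvs.conf <<eof
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
eof
systemctl restart systemd-modules-load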
Pre-pull the images listed in the kubeadm config:
[root@master-worker-node-1 ~]# kubeadm config images pull --config kubeadm-config.yaml
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.25.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.25.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.25.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.25.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.9.3
Bringing up the cluster
[root@master-worker-node-1 ~]# kubeadm init --config kubeadm-config.yaml
[init] Using Kubernetes version: v1.25.3
……………………………………………………
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.122.253:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5479784e271e4dc059eb751c980e3ddf725875a8519f371f986a7506c07de706
Adding the second master node
Generate a join token:
[root@master-worker-node-1 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.122.253:6443 --token rjrzu1.n3jbpdmkjmz9fjio --discovery-token-ca-cert-hash sha256:5479784e271e4dc059eb751c980e3ddf725875a8519f371f986a7506c07de706
Copy the certificate files to the new master:
[root@master-worker-node-1 pki]# ssh root@192.168.122.106 mkdir -p /etc/kubernetes/pki/etcd
[root@master-worker-node-1 pki]# scp ca.* root@192.168.122.106:/etc/kubernetes/pki
ca.crt 100% 1099 361.1KB/s 00:00
ca.key 100% 1679 575.1KB/s 00:00
[root@master-worker-node-1 pki]# scp sa.* root@192.168.122.106:/etc/kubernetes/pki
sa.key 100% 1679 471.2KB/s 00:00
sa.pub 100% 451 142.5KB/s 00:00
[root@master-worker-node-1 pki]# scp front-proxy-ca.* root@192.168.122.106:/etc/kubernetes/pki
front-proxy-ca.crt 100% 1115 162.3KB/s 00:00
front-proxy-ca.key 100% 1679 632.3KB/s 00:00
[root@master-worker-node-1 pki]# scp ./etcd/ca.* root@192.168.122.106:/etc/kubernetes/pki/etcd
ca.crt 100% 1086 146.6KB/s 00:00
ca.key 100% 1675 628.9KB/s 00:00
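As an alternative to copying certificates by hand, kubeadm can distribute them through the cluster itself: run the command below on the first master and pass the printed certificate key to the join command as --certificate-key.
kubeadm init phase upload-certs --upload-certs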
Join the cluster as a second control-plane node:
[root@master-worker-node-2 ~]# kubeadm join 192.168.122.253:16443 --token fu4ggk.lt3m35z2n202t6py --discovery-token-ca-cert-hash sha256:fc529c2d5753ac2b951d98882c51d2d0a5e7f975c731d5f49789b2d374b07d56 --control-plane
Joining the worker nodes
[root@only-worker-node-3 ~]# kubeadm join 192.168.122.253:16443 --token trjod3.726itxycytw4z1uy --discovery-token-ca-cert-hash sha256:20a94b48435df84732d62b08e734d6f5ef09f90e4ed1a8d004ab2d75091714ca
[root@only-worker-node-4 ~]# kubeadm join 192.168.122.253:16443 --token 812w68.998dvba7b84xnu1o --discovery-token-ca-cert-hash sha256:20a94b48435df84732d62b08e734d6f5ef09f90e4ed1a8d004ab2d75091714ca
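Label the two worker nodes so their role shows up in kubectl get nodes: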
[root@master-worker-node-1 ~]# kubectl label nodes only-worker-node-3 node-role.kubernetes.io/worker=
node/only-worker-node-3 labeled
[root@master-worker-node-1 ~]# kubectl label nodes only-worker-node-4 node-role.kubernetes.io/worker=
node/only-worker-node-4 labeled
[root@master-worker-node-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-worker-node-1 NotReady control-plane 3h55m v1.25.3
master-worker-node-2 NotReady control-plane 139m v1.25.3
only-worker-node-3 NotReady worker 132m v1.25.3
only-worker-node-4 NotReady worker 131m v1.25.3
Installing the calico network plugin
[root@master-worker-node-1 ~]# kubectl apply -f calico-1.yaml
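calico-1.yaml here is a locally saved Calico manifest; if one is needed, a fetch along these lines works (the pinned version is an assumption):
curl -LO https://raw.githubusercontent.com/projectcalico/calico/v3.24.5/manifests/calico.yaml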
[root@master-worker-node-1 ~]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-6744f6b6d5-k49sk 1/1 Running 0 2m43s
kube-system calico-node-49qt2 1/1 Running 0 2m44s
kube-system calico-node-c28bx 1/1 Running 0 2m44s
kube-system calico-node-q2wpg 1/1 Running 0 2m44s
kube-system calico-node-tpxlp 1/1 Running 0 2m44s
kube-system coredns-7f8cbcb969-cgkjf 1/1 Running 0 4h23m
kube-system coredns-7f8cbcb969-w9ls9 1/1 Running 0 4h23m
kube-system etcd-master-worker-node-1 1/1 Running 5 4h23m
kube-system etcd-master-worker-node-2 1/1 Running 0 167m
kube-system kube-apiserver-master-worker-node-1 1/1 Running 32 4h23m
kube-system kube-apiserver-master-worker-node-2 1/1 Running 1 (167m ago) 167m
kube-system kube-controller-manager-master-worker-node-1 1/1 Running 10 (119s ago) 4h23m
kube-system kube-controller-manager-master-worker-node-2 1/1 Running 0 167m
kube-system kube-proxy-78hcj 1/1 Running 0 161m
kube-system kube-proxy-7gjz9 1/1 Running 0 4h23m
kube-system kube-proxy-c4d2m 1/1 Running 0 167m
kube-system kube-proxy-nqdzd 1/1 Running 0 160m
kube-system kube-scheduler-master-worker-node-1 1/1 Running 8 (116s ago) 4h23m
kube-system kube-scheduler-master-worker-node-2 1/1 Running 0 167m
Testing
Create a pod:
[root@master-worker-node-1 pod]# cat test-busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-busybox
  labels:
    func: test
spec:
  containers:
  - name: test-busybox
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","sleep 123456"]
[root@master-worker-node-1 pod]# kubectl apply -f test-busybox.yaml
pod/test-busybox created
[root@master-worker-node-1 pod]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-busybox 1/1 Running 0 7m56s 10.244.54.4 only-worker-node-4 <none> <none>
[root@master-worker-node-1 pod]# kubectl exec -it test-busybox -- sh
/ # ping -c 2 www.baidu.com
PING www.baidu.com (110.242.68.3): 56 data bytes
64 bytes from 110.242.68.3: seq=0 ttl=49 time=45.571 ms
64 bytes from 110.242.68.3: seq=1 ttl=49 time=45.545 ms
--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 45.545/45.558/45.571 ms
CoreDNS resolution works:
/ # nslookup kubernetes.default.svc.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
Cross-node pod communication over the calico network works:
[root@master-worker-node-1 pod]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-busybox 1/1 Running 0 36m 10.244.54.4 only-worker-node-4 <none> <none>
test-busybox-2 1/1 Running 0 15m 10.244.31.3 only-worker-node-3 <none> <none>
[root@master-worker-node-1 pod]# kubectl exec -it test-busybox -- ping -c 2 10.244.31.3
PING 10.244.31.3 (10.244.31.3): 56 data bytes
64 bytes from 10.244.31.3: seq=0 ttl=62 time=1.672 ms
64 bytes from 10.244.31.3: seq=1 ttl=62 time=1.845 ms
--- 10.244.31.3 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 1.672/1.758/1.845 ms