- Container runtime: Docker (itself backed by containerd), wired to Kubernetes through cri-dockerd
- OS: openEuler-24.03
- Hostnames: see /etc/hosts
- 5 hosts in total (3 masters + 2 workers)
192.168.80.54 lyc-80-54 master
192.168.80.55 lyc-80-55 master
192.168.80.56 lyc-80-56 master
192.168.80.57 lyc-80-57 worker
192.168.80.58 lyc-80-58 worker
- SELinux and firewalld disabled, iptables rules flushed
- Passwordless SSH configured between all nodes
- Architecture: kubeadm HA control plane (stacked etcd) behind a keepalived + HAProxy VIP
I. System Initialization
1. Disable the firewall (all 5 nodes)
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
2. Disable SELinux (all 5 nodes)
vim /etc/selinux/config
Change SELINUX=enforcing to SELINUX=disabled
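If you prefer not to edit the file interactively, the same change can be made from the shell; a minimal sketch (assumes the default SELINUX=enforcing line; setenforce 0 only switches the running session to permissive):
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # persistent after reboot
setenforce 0                                                          # permissive for the current session
getenforce                                                            # verify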
3. Configure the network IP (all 5 nodes)
vim /etc/sysconfig/network-scripts/ifcfg-ens33
4. Edit the hosts file (all 5 nodes)
vim /etc/hosts
5. Configure the openEuler YUM repositories (all 5 nodes)
The repo file below points at the Aliyun mirror for openEuler 22.03-LTS-SP2; adjust the release path in the baseurl/gpgkey lines to match your installed release (24.03 here).
cd /etc/yum.repos.d/
cp openEuler.repo openEuler.repo.bak
vim openEuler.repo
-------------
[OS]
name=OS
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/OS/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/OS/$basearch/RPM-GPG-KEY-openEuler
[everything]
name=everything
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/everything/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/everything/$basearch/RPM-GPG-KEY-openEuler
[EPOL]
name=EPOL
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/EPOL/main/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/OS/$basearch/RPM-GPG-KEY-openEuler
[debuginfo]
name=debuginfo
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/debuginfo/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/debuginfo/$basearch/RPM-GPG-KEY-openEuler
[source]
name=source
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/source/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/source/RPM-GPG-KEY-openEuler
[update]
name=update
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/update/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/OS/$basearch/RPM-GPG-KEY-openEuler
[update-source]
name=update-source
baseurl=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/update/source/
enabled=1
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/openeuler/openEuler-22.03-LTS-SP2/source/RPM-GPG-KEY-openEuler
---------------
yum clean all
yum makecache
6. Install EPEL (optional)
1) Back up the existing repo file:
cp -a /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
If epel.repo does not exist, create it:
cd /etc/yum.repos.d/
touch epel.repo
2) Edit epel.repo: uncomment the lines starting with baseurl, comment out the metalink lines,
and replace http://download.fedoraproject.org/pub in the file with https://mirrors.huaweicloud.com.
The following commands do the same thing:
sed -i "s/#baseurl/baseurl/g" /etc/yum.repos.d/epel.repo
sed -i "s/metalink/#metalink/g" /etc/yum.repos.d/epel.repo
sed -i "s@https?://download.fedoraproject.org/pub@https://mirrors.huaweicloud.com@g" /etc/yum.repos.d/epel.repo
3) Run the update:
yum update
7. Time synchronization (all 5 nodes)
sudo yum install chrony
vim /etc/chrony.conf
-----------------------
Add the NTP servers:
server ntp1.aliyun.com iburst
server ntp.aliyun.com iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
logchange 0.5
logdir /var/log/chrony
----------------------
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
chkconfig chronyd on
Start chronyd and enable it at boot:
systemctl restart chronyd     # restart the time-sync service
systemctl enable chronyd      # enable at boot
Run date to check the time.
timedatectl                   # two "yes" values (NTP enabled / clock synchronized) mean the sync succeeded
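To confirm that chrony is really tracking the Aliyun servers, these standard chronyc checks can be used:
chronyc sources -v     # lists the configured NTP sources and their reachability
chronyc tracking       # shows the current offset, stratum and reference server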
8. Disable swap (all 5 nodes)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
Verify the change:
cat /etc/fstab
Check swap usage (optional):
yum install htop
htop
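htop is optional; swap state can also be verified directly:
swapon --show    # no output means no swap device is active
free -h          # the Swap line should report 0B total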
9. Open the iptables policies (all 5 nodes)
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -F
iptables -L -n
List the rules:
iptables -L
-------------------
Chain INPUT (policy ACCEPT)
10. Install dependency packages (all 5 nodes)
yum -y install wget jq psmisc vim net-tools nfs-utils socat telnet device-mapper-persistent-data lvm2 git network-scripts tar curl
yum install -y conntrack ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
11. Enable IPVS forwarding (all 5 nodes)
yum install ipvsadm ipset sysstat conntrack libseccomp -y
--------------------------
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
---------------------------
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
-----------------------------------
Restart the module-load service:
systemctl restart systemd-modules-load.service
-----------------------------------------------------
You should see output similar to:
lsmod | grep -e ip_vs -e nf_conntrack
-------------------------------
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 180224 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 176128 1 ip_vs
nf_defrag_ipv6 24576 2 nf_conntrack,ip_vs
nf_defrag_ipv4 16384 1 nf_conntrack
libcrc32c 16384 3 nf_conntrack,xfs,ip_vs
-----------------------------
cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack
Run the following on all 5 machines, otherwise kubeadm init will fail its IP-forwarding preflight check:
echo 'net.ipv4.ip_forward=1' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
sudo service network restart
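A quick check that forwarding is actually on:
sysctl net.ipv4.ip_forward           # should print net.ipv4.ip_forward = 1
cat /proc/sys/net/ipv4/ip_forward    # should print 1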
12. Raise the file-descriptor limits (all 5 nodes)
ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
Verify:
ulimit -a
13. Disable NUMA to make full use of resources (all 5 nodes)
vim /etc/default/grub
Append numa=off to the GRUB_CMDLINE_LINUX line.
Verify:
sudo yum install numactl
numactl --show
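Note that editing /etc/default/grub alone does not change the kernel command line; the GRUB configuration has to be regenerated and the node rebooted. A sketch, assuming a BIOS install (on UEFI systems the grub.cfg usually lives under /boot/efi/EFI/<distro>/):
grub2-mkconfig -o /boot/grub2/grub.cfg    # regenerate the GRUB config with numa=off included
reboot                                    # required for the new cmdline to apply
cat /proc/cmdline | grep -o numa=off      # verify after the reboot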
14. Kernel tuning (all 5 nodes)
cat > /etc/sysctl.d/k8s_better.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
modprobe br_netfilter
lsmod |grep conntrack
modprobe ip_conntrack
sysctl -p /etc/sysctl.d/k8s_better.conf
-----------------------------------
# Make sure every machine has a unique UUID; on cloned VMs,
# remove the UUID line from the NIC config file.
cat /sys/class/dmi/id/product_uuid
15. Set up passwordless SSH (all 5 nodes)
Reset the initial password (optional):
echo root | passwd root --stdin    # set the root password to "root" on every node
Press Enter three times to generate the key pair:
ssh-keygen -t rsa
ssh-copy-id lyc-80-54
ssh-copy-id lyc-80-55
ssh-copy-id lyc-80-56
ssh-copy-id lyc-80-57
ssh-copy-id lyc-80-58
----------- Run the commands above on every machine.
Alternatively, use a key-distribution script:
./fgssh -user root -hosts "lyc-80-54 lyc-80-55 lyc-80-56 lyc-80-57 lyc-80-58" -advanced -exverify -confirm
[Initialization is now complete. Shut down all 5 machines and take snapshots!]
II. Download the Installation Packages
Download the following packages (contact the author if you need a copy).
1) Kubernetes 1.28.x server binaries
GitHub changelog / download page: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md
wget https://dl.k8s.io/v1.28.1/kubernetes-server-linux-amd64.tar.gz
2) etcd / etcdctl binaries
GitHub releases: https://github.com/etcd-io/etcd/releases
wget https://github.com/etcd-io/etcd/releases/download/v3.5.5/etcd-v3.5.5-linux-amd64.tar.gz
3) Docker CE static binaries
Download page: https://download.docker.com/linux/static/stable/x86_64/
Use a 20.10.x build:
wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.24.tgz
4) cri-dockerd
GitHub releases: https://github.com/Mirantis/cri-dockerd/releases/ (the install section below actually uses v0.3.1)
wget https://ghproxy.com/https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.6/cri-dockerd-0.2.6.amd64.tgz
5) containerd binaries
GitHub releases: https://github.com/containerd/containerd/releases
Pick the build that bundles the CNI plugins:
wget https://github.com/containerd/containerd/releases/download/v1.6.6/cri-containerd-cni-1.6.6-linux-amd64.tar.gz
6) cfssl binaries
GitHub releases: https://github.com/cloudflare/cfssl/releases
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
7) CNI plugins
GitHub releases: https://github.com/containernetworking/plugins/releases
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
8) crictl client binaries
GitHub releases: https://github.com/kubernetes-sigs/cri-tools/releases
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz
III. Install Docker (all 5 nodes)
1. Install Docker 20.10.24 from the static binaries (all 5 nodes)
Upload the package docker-20.10.24.tgz.
Distribute it to nodes 55-58:
scp docker-20.10.24.tgz lyc-80-55:/root
scp docker-20.10.24.tgz lyc-80-56:/root
scp docker-20.10.24.tgz lyc-80-57:/root
scp docker-20.10.24.tgz lyc-80-58:/root
Run the following on all 5 machines (e.g. with a cluster shell).
Extract the archive:
tar xf docker-20.10.24.tgz
Move the extracted binaries into place:
mv docker/* /usr/bin/
2. Create the docker systemd service file (all 5 nodes)
cat > /etc/systemd/system/docker.service <<"EOF"
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
3. Create the docker socket file (all 5 nodes)
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
-------------
# Create the docker group
groupadd docker
# Start Docker
systemctl enable --now docker.socket && systemctl enable --now docker.service
4. Verify the Docker installation
docker -v
5. Create the containerd service file (all 5 nodes)
-----------------------------------
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
--------------------------
Enable and start it:
systemctl enable --now containerd.service
6. Configure registry mirrors (all 5 nodes)
cat >/etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://docker.1panel.live",
"https://docker.m.daocloud.io",
"https://huecker.io"
],
"insecure-registries":["reg.xiaochun.test"],
"max-concurrent-downloads": 10,
"log-driver": "json-file",
"log-level": "warn",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"data-root": "/var/lib/docker"
}
EOF
----------------------
Enable Docker at boot and restart it:
systemctl enable docker
systemctl restart docker
systemctl status docker
Pull an image as a test:
docker pull nginx
7. Install the cri-dockerd shim (all 5 nodes)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1.amd64.tgz
Extract cri-dockerd:
tar -zxvf cri-dockerd-0.3.1.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd
Write the service unit:
cat > /usr/lib/systemd/system/cri-docker.service <<"EOF"
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
Write the socket unit:
------------------
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
-------------------
# Start cri-docker
systemctl daemon-reload
systemctl enable cri-docker --now
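A quick sanity check that the shim is running and its socket exists (this is the same socket path passed later via --cri-socket):
systemctl is-active cri-docker.socket cri-docker.service
ls -l /var/run/cri-dockerd.sock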
IV. Install the HA Load Balancer
Install keepalived and HAProxy.
1. Configure keepalived (3 master nodes)
Master machines: 54, 55, 56
# Install keepalived and haproxy
yum -y install keepalived haproxy
cd /etc/keepalived/
# Note: rename the sample configuration file
mv keepalived.conf.sample keepalived.conf
chmod 644 keepalived.conf
cp -p keepalived.conf keepalived.conf.bak
Configure master lyc-80-54 (MASTER, priority 100):
------------------------
cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens33
mcast_src_ip 192.168.80.54
virtual_router_id 54
priority 100
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.80.200
}
track_script {
chk_apiserver
}
}
EOF
-----------------
Start the service and enable it at boot:
service keepalived start
systemctl enable keepalived
Configure master lyc-80-55 (BACKUP, priority 90, so the VIP prefers 54):
------------------------
cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
mcast_src_ip 192.168.80.55
virtual_router_id 54
priority 90
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.80.200
}
track_script {
chk_apiserver
}
}
EOF
-----------------
Start the service and enable it at boot:
service keepalived start
systemctl enable keepalived
Configure master lyc-80-56 (BACKUP, priority 80):
------------------------
cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
mcast_src_ip 192.168.80.56
virtual_router_id 54
priority 80
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.80.200
}
track_script {
chk_apiserver
}
}
EOF
-----------------
Start the service and enable it at boot:
service keepalived start
systemctl enable keepalived
2. Configure the health-check script (3 master nodes)
### Create the health-check script on masters 54, 55 and 56
--------
cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
---------
Check which node holds the VIP:
ip addr
3. Test failover (3 master nodes)
Test keepalived:
ping 192.168.80.200    # at this point all 5 machines should be able to ping the VIP
Stop keepalived on lyc-80-54 and the VIP fails over to lyc-80-55 or lyc-80-56;
start keepalived on lyc-80-54 again and the VIP moves back automatically.
Stop keepalived on the node currently holding the VIP, then check again; the VIP floats to one of the other two masters:
service keepalived stop
Stop keepalived on lyc-80-55 as well; the VIP floats to lyc-80-56:
service keepalived stop
Start keepalived again on lyc-80-54:
service keepalived start
Check whether the VIP has moved back:
ip addr
ping 192.168.80.200
4. Configure HAProxy (3 master nodes)
Edit /etc/sysctl.conf (on all 3 master machines):
vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind=1
---
Apply the change:
sysctl -p
Apply the following configuration on all 3 masters, then start HAProxy:
cd /etc/haproxy/
mv haproxy.cfg haproxy.cfg.bak
--------------------------------------
cat > /etc/haproxy/haproxy.cfg << EOF
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 192.168.80.200:16443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-masters
backend k8s-masters
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server lyc-80-54 192.168.80.54:6443 check
server lyc-80-55 192.168.80.55:6443 check
server lyc-80-56 192.168.80.56:6443 check
EOF
-----------------------------------------
service haproxy start
systemctl enable haproxy
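HAProxy can be checked locally on each master via the monitor-uri defined above; the k8s-master backend will only report healthy members once the API servers are up:
curl -i http://127.0.0.1:33305/monitor    # expect HTTP 200 from the monitor-in frontend
ss -lntp | grep 16443                     # confirm HAProxy is listening on the VIP port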
V. Install Kubernetes
1. Configure the Kubernetes YUM repo (all 5 nodes)
cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
-------------------------------
yum makecache
2. Install Kubernetes 1.28.x (all 5 nodes)
Install the latest version available in this repo:
yum -y install kubeadm kubelet kubectl
3. Configure kubelet (all 5 nodes)
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
---------------------------------------------
Verify the setting you just added:
cat /etc/sysconfig/kubelet
Enable kubelet at boot. No configuration file has been generated yet, so it will only start automatically after the cluster is initialized:
systemctl enable kubelet.service
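kubelet is configured above to use the systemd cgroup driver; this must match Docker's driver, which daemon.json already sets via native.cgroupdriver=systemd. A quick cross-check on the Docker side:
docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd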
4. Pre-pull the control-plane images (all 5 nodes)
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --cri-socket=unix:///var/run/cri-dockerd.sock
List the downloaded images:
docker images
Take the time to understand what each of these images does; it gives you a low-level picture of the Kubernetes architecture and will also help if you later install the components from binaries.
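To list exactly which images kubeadm expects for this release without pulling them, the list subcommand can be used (version pinned to the one used at init below):
kubeadm config images list \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.28.13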
5. Initialize the cluster (node 54)
Run the init command on master 54:
kubeadm init --control-plane-endpoint=192.168.80.200:16443 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.13 --service-cidr=10.10.0.0/16 --pod-network-cidr=10.244.0.0/16 --cri-socket unix:///var/run/cri-dockerd.sock
Save the output of the initialization:
[root@lyc-80-54 haproxy]# kubeadm init --control-plane-endpoint=192.168.80.200:16443 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.13 --service-cidr=10.10.0.0/16 --pod-network-cidr=10.244.0.0/16 --cri-socket unix://var/run/cri-dockerd.sock
[init] Using Kubernetes version: v1.28.13
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local lyc-80-54] and IPs [10.10.0.1 192.168.80.54 192.168.80.200]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost lyc-80-54] and IPs [192.168.80.54 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost lyc-80-54] and IPs [192.168.80.54 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
W0913 09:03:06.837044 5183 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
W0913 09:03:06.969643 5183 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
W0913 09:03:07.180958 5183 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
W0913 09:03:07.287731 5183 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 14.521600 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node lyc-80-54 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node lyc-80-54 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 388yeb.mpz8idy4asnv9jr5
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
W0913 09:03:28.790283 5183 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab
Seeing the following line means the Kubernetes initialization succeeded:
Your Kubernetes control-plane has initialized successfully!
----
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab
6. Run the suggested commands on master 54
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
7. Sync the certificates from 54 to the other masters
## Nodes 55 and 56 must complete the required preparation before joining the cluster
# Copy the PKI directory from 54 to 55 and 56
scp -r /etc/kubernetes/pki lyc-80-55:/etc/kubernetes/
scp -r /etc/kubernetes/pki lyc-80-56:/etc/kubernetes/
8. Delete the unneeded certificates (nodes 55/56)
Remove the host-specific certificates on lyc-80-55 and lyc-80-56; they will be regenerated when the nodes join.
cd /etc/kubernetes/pki/
rm -rf apiserver*
rm -rf etcd/peer.*
rm -rf etcd/server.*
9. Join masters 55 and 56
[Master 55]
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab \
--control-plane --cri-socket unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
---------------------------------------------------------------
[Master 56]
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab \
--control-plane --cri-socket unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
10. Join workers 57 and 58
[Node 57]
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab --cri-socket unix:///var/run/cri-dockerd.sock
--------------------------------------------------
[Node 58]
kubeadm join 192.168.80.200:16443 --token 388yeb.mpz8idy4asnv9jr5 \
--discovery-token-ca-cert-hash sha256:b7ba87d8f4bdf81530e62ca9eef0eea1f7993199927617ffb86766c01566e3ab --cri-socket unix:///var/run/cri-dockerd.sock
11. Check the cluster status (node 54)
kubectl get nodes
VI. Install the Calico Network Plugin
1. Download and edit the manifest (node 54)
There are many network plugins; you only need to deploy one, and Calico is recommended.
Calico is a pure layer-3 data-center networking solution that supports a wide range of platforms, including Kubernetes and OpenStack.
On every compute node Calico uses the Linux kernel to implement an efficient virtual router (vRouter) that handles packet forwarding,
and each vRouter advertises the routes of its local workloads to the rest of the Calico network over BGP.
Calico also implements Kubernetes network policy, providing ACL functionality.
***************************************************
Download the Calico manifest:
wget --no-check-certificate https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/calico.yaml
Here I use a manifest prepared in advance.
Remember the CIDR passed during cluster initialization?
--pod-network-cidr=10.244.0.0/16
Calico's IP pool must be set to the same CIDR.
vim calico.yaml
---------------
Pay attention to two places in the file.
Search within the file:
:/10.244   # check the pool CIDR and the NIC name; my first attempt failed to come up because of the NIC name
:/image    # image version v3.28.0
-------------------
The result is the same as the file fetched with wget above, only the version differs; the latest is v3.28.0.
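Before applying the manifest it is worth confirming the pool CIDR really matches the cluster's pod CIDR; in the stock manifest CALICO_IPV4POOL_CIDR is commented out, so after editing it should show up uncommented and set to 10.244.0.0/16:
grep -n -A1 "CALICO_IPV4POOL_CIDR" calico.yaml
grep -n "image:" calico.yaml    # confirm the v3.28.0 image tags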
2. Deploy the Calico network (node 54)
Check which images are required:
cat calico.yaml | grep image
Deploy:
kubectl apply -f calico.yaml
You can also pre-pull the images:
docker pull calico/cni:v3.28.0
docker pull calico/node:v3.28.0
docker pull calico/kube-controllers:v3.28.0
-------------------
3. Check the status after deployment (node 54)
kubectl get node
kubectl get pod -n kube-system
Wait a few minutes, then check the pods again:
kubectl get nodes -o wide
kubectl get pods --all-namespaces -o wide
4. Remove the control-plane taints (node 54)
By default the control-plane nodes carry a taint and do not run regular pods.
If you want the masters to also run a certain amount of workload pods,
you need to remove that taint.
Current scheduling state: NoSchedule.
kubectl describe node lyc-80-54 | grep -i taint
kubectl describe node lyc-80-55 | grep -i taint
kubectl describe node lyc-80-56 | grep -i taint
[Remove the taints]
kubectl taint node lyc-80-54 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node lyc-80-55 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node lyc-80-56 node-role.kubernetes.io/control-plane:NoSchedule-
Check again; the taints should be gone and the nodes schedulable:
kubectl describe node lyc-80-54 | grep -i taint
kubectl describe node lyc-80-55 | grep -i taint
kubectl describe node lyc-80-56 | grep -i taint
5. Install metrics-server (node 54)
metrics-server collects resource metrics from the cluster, similar in spirit to the htop command on Linux.
vim components.yaml
--------------------
Pay close attention to the following lines:
135 - --cert-dir=/tmp
136 - --secure-port=4443
137 - --kubelet-preferred-address-types=InternalIP
138 - --kubelet-use-node-status-port
139 - --metric-resolution=15s
140 - --kubelet-insecure-tls
141 image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.1
142 imagePullPolicy: IfNotPresent
-------------------
Install it:
kubectl apply -f components.yaml
Check the pod:
kubectl get pod -n kube-system
View cluster resource usage:
kubectl top node
kubectl top pod -n kube-system
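components.yaml here is the standard metrics-server manifest (normally fetched from the metrics-server GitHub releases page) with the flags above applied. If kubectl top returns errors, check whether the aggregated metrics API has registered and become Available:
kubectl get apiservice v1beta1.metrics.k8s.io        # AVAILABLE should be True
kubectl -n kube-system get deploy metrics-server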
6. Install the Dashboard
Download the manifest; the latest version at the time of writing is v2.7.0:
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
Edit the file and note the following changes:
vim recommended.yaml
--------------
spec:
 40   ports:
 41     - port: 443
 42       targetPort: 8443
 43       nodePort: 30001          # line 43: add nodePort 30001
 44   type: NodePort               # line 44: change the Service type to NodePort
 45   selector:
 46     k8s-app: kubernetes-dashboard
----------------
193       containers:
194         - name: kubernetes-dashboard
195           image: registry.cn-hangzhou.aliyuncs.com/ywflyfish/dashboard:v2.7.0   # switch to a domestic mirror image
Apply the manifest:
kubectl apply -f recommended.yaml
-------------------
Check the pods:
kubectl get ns
kubectl get pods -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard
7. Create a Dashboard user
Download the manifest:
wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard-user.yaml
vim dashboard-user.yaml
----------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard        # namespace
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding                 # binds the role cluster-wide
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole                      # the role being granted
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user                       # the service account receiving the role
  namespace: kubernetes-dashboard
-----------------------
[Note]
Permission chain: the admin-user ServiceAccount is granted the cluster-admin ClusterRole through the ClusterRoleBinding.
Apply the RBAC objects:
kubectl apply -f dashboard-user.yaml
Check the service account:
kubectl get sa -n kubernetes-dashboard
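To verify the whole permission chain, inspect the binding itself; it should reference the cluster-admin ClusterRole and list the admin-user ServiceAccount as a subject:
kubectl get sa admin-user -n kubernetes-dashboard
kubectl describe clusterrolebinding admin-user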
Create a token for the user:
kubectl create token admin-user -n kubernetes-dashboard
# copy the token below
eyJhbGciOiJSUzI1NiIsImtpZCI6Im1wOE4tZDhsY1A5RGREV25nYWUzakVlTWpEcFFveUtFQUdkSVNUUHFPZWcifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI2MjA5NjI1LCJpYXQiOjE3MjYyMDYwMjUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiZmYxYWFhOTctMjE5Ni00MDBhLThhNTItMWMxNDZmOTE0ZjY2In19LCJuYmYiOjE3MjYyMDYwMjUsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.SjoqE2cOFHptiNkvI_9_a_Fo6takM8Pt75jbhdD9u-Kux4YnaATFItDzMv7qns15XADH6VPOjYNxp9IWZtpwWuMCbq9UFiTvbjezuVNm8zhFmF4nX-DZRsVJTYU-Vwi65cDM-x62PFFtNZHJRdhJRZgMpgRRWb61JD8eB2yC7592_XVPet2B6s1ML7aKzn_OcsH0XHxxG8K1hJnccEShEXPyS4g4CsDSd1-y1U6ZqWP79Zx4FsJvXiBAgHsWGfdGlXpcTnq0DQHZ_AlQf5Vfk9J7q7p4arH-LXFWenaoVf1FnoD6aPeQJqvbPRghI4zEL2OY-5nXzA7JdF406FfoRA
Check the pod status:
kubectl get pod -n kubernetes-dashboard
Check the pod network details:
kubectl get pod -n kubernetes-dashboard -o wide
Check the deployment:
kubectl get deploy -n kubernetes-dashboard
Check the endpoints:
kubectl get ep -n kubernetes-dashboard
Check the service:
kubectl get svc -n kubernetes-dashboard
[Note how the traffic is forwarded across three layers, similar to LVS forwarding]
Check the kube-proxy processes:
ps -ef |grep kube-proxy
# kube-proxy runs on all 5 machines, so the Dashboard NodePort can be reached via any node IP
Filter for the NodePort:
netstat -nutlp |grep 30001
8. Log in to the Dashboard
All 5 node IPs can be tested; if the login page loads, the kube-proxy NodePort is working.
Open the Dashboard:
https://192.168.80.54:30001
Log in with the token:
eyJhbGciOiJSUzI1NiIsImtpZCI6Im1wOE4tZDhsY1A5RGREV25nYWUzakVlTWpEcFFveUtFQUdkSVNUUHFPZWcifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI2MjA5NjI1LCJpYXQiOjE3MjYyMDYwMjUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiZmYxYWFhOTctMjE5Ni00MDBhLThhNTItMWMxNDZmOTE0ZjY2In19LCJuYmYiOjE3MjYyMDYwMjUsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.SjoqE2cOFHptiNkvI_9_a_Fo6takM8Pt75jbhdD9u-Kux4YnaATFItDzMv7qns15XADH6VPOjYNxp9IWZtpwWuMCbq9UFiTvbjezuVNm8zhFmF4nX-DZRsVJTYU-Vwi65cDM-x62PFFtNZHJRdhJRZgMpgRRWb61JD8eB2yC7592_XVPet2B6s1ML7aKzn_OcsH0XHxxG8K1hJnccEShEXPyS4g4CsDSd1-y1U6ZqWP79Zx4FsJvXiBAgHsWGfdGlXpcTnq0DQHZ_AlQf5Vfk9J7q7p4arH-LXFWenaoVf1FnoD6aPeQJqvbPRghI4zEL2OY-5nXzA7JdF406FfoRA
List the namespaces:
[root@lyc-80-54 yaml]# kubectl get ns
NAME STATUS AGE
default Active 6h10m
kube-node-lease Active 6h10m
kube-public Active 6h10m
kube-system Active 6h10m
kubernetes-dashboard Active 54m
9. Smoke-test the cluster
Deploy an nginx:
vim nginx-web.yaml
------------
kind: Deployment                       # the controller
#apiVersion: extensions/v1beta1        # deprecated
apiVersion: apps/v1                    # current API version for Deployments
metadata:
  labels:
    app: nginx-deployment-label
  name: nginx-deployment
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-selector
  template:
    metadata:
      labels:
        app: nginx-selector
    spec:
      containers:
      - name: nginx-container
        image: registry.cn-hangzhou.aliyuncs.com/ywflyfish/nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
---
kind: Service                          # the Service exposing the Deployment
apiVersion: v1
metadata:
  labels:
    app: nginx-service-label
  name: nginx-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 31180
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 31443
  selector:
    app: nginx-selector
------------------------
Apply the manifest:
kubectl apply -f nginx-web.yaml
Check the resources:
kubectl get pod
kubectl get pod -o wide
kubectl get svc
ping 10.244.225.194
Open it in a browser:
http://192.168.80.57:31180
Now scale the deployment to 3 pods (a quick sketch follows) and check again:
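A minimal way to scale the test deployment (the deployment name comes from the manifest above):
kubectl scale deployment nginx-deployment --replicas=3
kubectl get pod -o wide    # the three pods should be spread across the nodes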
Then open these in a browser from other nodes:
http://192.168.80.54:31180
http://192.168.80.58:31180
kubectl get node
kubectl get pod
At this point the Docker-backed Kubernetes HA cluster is confirmed to be working correctly!