kubeadm方式部署
部署准备
改主机名称
#master节点(直接写 /etc/hostname 需重启后生效;也可执行 hostnamectl set-hostname k8s-master 立即生效)
[root@k8s-master ~]# echo "k8s-master" > /etc/hostname
#node01节点
[root@k8s-node01 ~]# echo "k8s-node01" > /etc/hostname
设通信标识
#集群各节点的IP与主机名映射(注意:各node节点的条目也应一并写入所有机器的/etc/hosts,否则后续kubeadm join时会出现"hostname could not be reached"告警)
[root@k8s-master ~]# cat >> /etc/hosts <<EOF
> 192.168.18.244 k8s-master
> EOF
关闭防火墙
[root@k8s-master ~]# systemctl disable firewalld --now
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
关闭selinux
[root@k8s-master ~]# setenforce 0
[root@k8s-master ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
关闭swap分区
#注释fstab中的swap条目,保证重启后不再挂载;当前会话还需执行 swapoff -a 立即关闭,否则kubeadm预检会因swap开启而报错
[root@k8s-master ~]# sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
[root@k8s-master ~]# swapoff -a
更新yum源
#更改CentOS-Base.repo
[root@k8s-master ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
--2023-01-12 17:09:06-- http://mirrors.aliyun.com/repo/Centos-7.repo
Resolving mirrors.aliyun.com (mirrors.aliyun.com)... 119.96.90.238, 119.96.90.243, 119.96.90.241, ...
Connecting to mirrors.aliyun.com (mirrors.aliyun.com)|119.96.90.238|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2523 (2.5K) [application/octet-stream]
Saving to: ‘/etc/yum.repos.d/CentOS-Base.repo’
100%[============================================================================================>] 2,523 --.-K/s in 0.01s
2023-01-12 17:09:06 (207 KB/s) - ‘/etc/yum.repos.d/CentOS-Base.repo’ saved [2523/2523]
#新增docker.repo
[root@k8s-master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
--2023-01-12 17:10:17-- https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Resolving mirrors.aliyun.com (mirrors.aliyun.com)... 119.96.90.239, 119.96.90.240, 119.96.90.241, ...
Connecting to mirrors.aliyun.com (mirrors.aliyun.com)|119.96.90.239|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2081 (2.0K) [application/octet-stream]
Saving to: ‘/etc/yum.repos.d/docker-ce.repo’
100%[============================================================================================>] 2,081 --.-K/s in 0.01s
2023-01-12 17:10:17 (153 KB/s) - ‘/etc/yum.repos.d/docker-ce.repo’ saved [2081/2081]
#新增k8s.repo(此处gpgcheck=0、repo_gpgcheck=0关闭了签名校验以避免同步导致的校验失败,生产环境建议开启)
[root@k8s-master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@k8s-master ~]# yum repolist
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
docker-ce-stable | 3.5 kB 00:00:00
kubernetes | 1.4 kB 00:00:00
(1/3): docker-ce-stable/7/x86_64/updateinfo | 55 B 00:00:00
(2/3): docker-ce-stable/7/x86_64/primary_db | 91 kB 00:00:00
(3/3): kubernetes/primary | 122 kB 00:00:00
kubernetes 906/906
repo id repo name status
base/7/x86_64 CentOS-7 - Base - mirrors.aliyun.com 10,072
docker-ce-stable/7/x86_64 Docker CE Stable - x86_64 194
extras/7/x86_64 CentOS-7 - Extras - mirrors.aliyun.com 515
kubernetes Kubernetes 906
updates/7/x86_64 CentOS-7 - Updates - mirrors.aliyun.com 4,538
repolist: 16,225
时间同步
#安装时间同步服务
[root@k8s-master ~]# yum install -y ntpdate
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
Package ntpdate-4.2.6p5-29.el7.centos.2.x86_64 already installed and latest version
Nothing to do
#设置定时同步任务
[root@k8s-master ~]# crontab -e
[root@k8s-master cron]# cat root
0 */1 * * * /usr/sbin/ntpdate ntp1.aliyun.com
流量桥接
#需先加载br_netfilter内核模块,否则下面的net.bridge.*参数不会生效(对比后面sysctl --system的输出可见并未应用net.bridge条目);并建议写入/etc/modules-load.d/使其开机自动加载
[root@k8s-master cron]# modprobe br_netfilter
[root@k8s-master cron]# cat > /etc/sysctl.d/kubernetes.conf <<EOF
> net.bridge.bridge-nf-call-iptables=1
> net.bridge.bridge-nf-call-ip6tables=1
> net.ipv4.ip_forward=1
> vm.swappiness=0
> EOF
[root@k8s-master cron]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/60-libvirtd.conf ...
fs.aio-max-nr = 1048576
* Applying /usr/lib/sysctl.d/70-dirsrv.conf ...
vm.swappiness = 20
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_fastopen = 1027
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_max_tw_buckets = 262144
net.ipv4.tcp_max_syn_backlog = 4096
net.core.default_qdisc = fq_codel
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/kubernetes.conf ...
net.ipv4.ip_forward = 1
vm.swappiness = 0
* Applying /etc/sysctl.conf ...
部署过程
docker安装
[root@k8s-node02 ~]# yum install docker-ce-19.03.* -y
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
Resolving Dependencies
--> Running transaction check
---> Package docker-ce.x86_64 3:19.03.15-3.el7 will be installed
--> Processing Dependency: container-selinux >= 2:2.74 for package: 3:docker-ce-19.03.15-3.el7.x86_64
--> Processing Dependency: containerd.io >= 1.2.2-3 for package: 3:docker-ce-19.03.15-3.el7.x86_64
--> Processing Dependency: docker-ce-cli for package: 3:docker-ce-19.03.15-3.el7.x86_64
--> Running transaction check
---> Package container-selinux.noarch 2:2.119.2-1.911c772.el7_8 will be installed
---> Package containerd.io.x86_64 0:1.6.15-3.1.el7 will be installed
---> Package docker-ce-cli.x86_64 1:20.10.22-3.el7 will be installed
--> Processing Dependency: docker-scan-plugin(x86-64) for package: 1:docker-ce-cli-20.10.22-3.el7.x86_64
--> Running transaction check
---> Package docker-scan-plugin.x86_64 0:0.23.0-3.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
======================================================================================================================================
Package Arch Version Repository Size
======================================================================================================================================
Installing:
docker-ce x86_64 3:19.03.15-3.el7 docker-ce-stable 24 M
Installing for dependencies:
container-selinux noarch 2:2.119.2-1.911c772.el7_8 extras 40 k
containerd.io x86_64 1.6.15-3.1.el7 docker-ce-stable 33 M
docker-ce-cli x86_64 1:20.10.22-3.el7 docker-ce-stable 30 M
docker-scan-plugin x86_64 0.23.0-3.el7 docker-ce-stable 3.8 M
Transaction Summary
======================================================================================================================================
Install 1 Package (+4 Dependent packages)
Total download size: 91 M
Installed size: 358 M
Downloading packages:
warning: /var/cache/yum/x86_64/7/extras/packages/container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID f4a80eb5: NOKEY
Public key for container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm is not installed
(1/5): container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm | 40 kB 00:00:00
warning: /var/cache/yum/x86_64/7/docker-ce-stable/packages/docker-ce-19.03.15-3.el7.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 621e9f35: NOKEY
Public key for docker-ce-19.03.15-3.el7.x86_64.rpm is not installed
(2/5): docker-ce-19.03.15-3.el7.x86_64.rpm | 24 MB 00:01:31
(3/5): containerd.io-1.6.15-3.1.el7.x86_64.rpm | 33 MB 00:02:08
(4/5): docker-scan-plugin-0.23.0-3.el7.x86_64.rpm | 3.8 MB 00:00:14
(5/5): docker-ce-cli-20.10.22-3.el7.x86_64.rpm | 30 MB 00:01:51
--------------------------------------------------------------------------------------------------------------------------------------
Total 458 kB/s | 91 MB 00:03:22
Retrieving key from https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
Importing GPG key 0x621E9F35:
Userid : "Docker Release (CE rpm) <[email protected]>"
Fingerprint: 060a 61c5 1b55 8a7f 742b 77aa c52f eb6b 621e 9f35
From : https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
Retrieving key from http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
Importing GPG key 0xF4A80EB5:
Userid : "CentOS-7 Key (CentOS 7 Official Signing Key) <[email protected]>"
Fingerprint: 6341 ab27 53d7 8a78 a7c2 7bb1 24c6 a8a7 f4a8 0eb5
From : http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : 2:container-selinux-2.119.2-1.911c772.el7_8.noarch 1/5
Installing : containerd.io-1.6.15-3.1.el7.x86_64 2/5
Installing : docker-scan-plugin-0.23.0-3.el7.x86_64 3/5
Installing : 1:docker-ce-cli-20.10.22-3.el7.x86_64 4/5
Installing : 3:docker-ce-19.03.15-3.el7.x86_64 5/5
Verifying : 1:docker-ce-cli-20.10.22-3.el7.x86_64 1/5
Verifying : 3:docker-ce-19.03.15-3.el7.x86_64 2/5
Verifying : containerd.io-1.6.15-3.1.el7.x86_64 3/5
Verifying : docker-scan-plugin-0.23.0-3.el7.x86_64 4/5
Verifying : 2:container-selinux-2.119.2-1.911c772.el7_8.noarch 5/5
Installed:
docker-ce.x86_64 3:19.03.15-3.el7
Dependency Installed:
container-selinux.noarch 2:2.119.2-1.911c772.el7_8 containerd.io.x86_64 0:1.6.15-3.1.el7 docker-ce-cli.x86_64 1:20.10.22-3.el7
docker-scan-plugin.x86_64 0:0.23.0-3.el7
Complete!
启动docker
[root@k8s-master cron]# systemctl enable docker --now
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
docker镜像加速及修改cgroup方式
[root@k8s-master ~]# cat > /etc/docker/daemon.json <<EOF
> {
> "registry-mirrors": ["https://v16stybc.mirror.aliyuncs.com"],
> "exec-opts":["native.cgroupdriver=systemd"],
> "log-driver":"json-file",
> "log-opts": {
> "max-size": "100m"
> },
> "storage-driver":"overlay2",
> "storage-opts": [
> "overlay2.override_kernel_check=true"
> ]
> }
> EOF
[root@k8s-master ~]# systemctl restart docker
kubernetes安装
[root@k8s-node01 ~]# yum install -y kubelet-1.18.6 kubeadm-1.18.6 kubectl-1.18.6
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.18.6-0 will be installed
--> Processing Dependency: kubernetes-cni >= 0.8.6 for package: kubeadm-1.18.6-0.x86_64
--> Processing Dependency: cri-tools >= 1.13.0 for package: kubeadm-1.18.6-0.x86_64
---> Package kubectl.x86_64 0:1.18.6-0 will be installed
---> Package kubelet.x86_64 0:1.18.6-0 will be installed
--> Processing Dependency: socat for package: kubelet-1.18.6-0.x86_64
--> Processing Dependency: conntrack for package: kubelet-1.18.6-0.x86_64
--> Running transaction check
---> Package conntrack-tools.x86_64 0:1.4.4-7.el7 will be installed
--> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.1)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.0)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cthelper.so.0(LIBNETFILTER_CTHELPER_1.0)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_queue.so.1()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cthelper.so.0()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
---> Package cri-tools.x86_64 0:1.25.0-0 will be installed
---> Package kubernetes-cni.x86_64 0:1.1.1-0 will be installed
---> Package socat.x86_64 0:1.7.3.2-2.el7 will be installed
--> Running transaction check
---> Package libnetfilter_cthelper.x86_64 0:1.0.0-11.el7 will be installed
---> Package libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7 will be installed
---> Package libnetfilter_queue.x86_64 0:1.0.2-2.el7_2 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
=======================================================================================================
Package Arch Version Repository Size
=======================================================================================================
Installing:
kubeadm x86_64 1.18.6-0 kubernetes 8.8 M
kubectl x86_64 1.18.6-0 kubernetes 9.5 M
kubelet x86_64 1.18.6-0 kubernetes 21 M
Installing for dependencies:
conntrack-tools x86_64 1.4.4-7.el7 base 187 k
cri-tools x86_64 1.25.0-0 kubernetes 8.2 M
kubernetes-cni x86_64 1.1.1-0 kubernetes 15 M
libnetfilter_cthelper x86_64 1.0.0-11.el7 base 18 k
libnetfilter_cttimeout x86_64 1.0.0-7.el7 base 18 k
libnetfilter_queue x86_64 1.0.2-2.el7_2 base 23 k
socat x86_64 1.7.3.2-2.el7 base 290 k
Transaction Summary
=======================================================================================================
Install 3 Packages (+7 Dependent packages)
Total download size: 63 M
Installed size: 273 M
Downloading packages:
(1/10): 20eefd52d2aee73b3c52abc3d43ed689cb1d79387f5d627faa4a1acc7b4406f9-kubead | 8.8 MB 00:00:11
(2/10): e382ead81273ab8ebcddf14cc15bf977e44e1fd541a2cfda6ebe5741c255e59f-cri-to | 8.2 MB 00:00:12
(3/10): 9fe14ad1137ad4e42eca5df1df99c735098e1ef43ead5184ee2af108d31ecb44-kubect | 9.5 MB 00:00:09
libnetfilter_cthelper-1.0.0-11 FAILED
http://mirrors.cloud.aliyuncs.com/centos/7/os/x86_64/Packages/libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm: [Errno 14] curl#6 - "Could not resolve host: mirrors.cloud.aliyuncs.com; Unknown error"
Trying other mirror.
(4/10): libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm | 23 kB 00:00:00
(5/10): socat-1.7.3.2-2.el7.x86_64.rpm | 290 kB 00:00:00
(6/10): libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm | 18 kB 00:00:00
conntrack-tools-1.4.4-7.el7.x8 FAILED | 50 MB 00:00:06 ETA
http://mirrors.aliyuncs.com/centos/7/os/x86_64/Packages/conntrack-tools-1.4.4-7.el7.x86_64.rpm: [Errno 12] Timeout on http://mirrors.aliyuncs.com/centos/7/os/x86_64/Packages/conntrack-tools-1.4.4-7.el7.x86_64.rpm: (28, 'Connection timed out after 30002 milliseconds')
Trying other mirror.
(7/10): conntrack-tools-1.4.4-7.el7.x86_64.rpm | 187 kB 00:00:00
(8/10): 14083ac8b11792469524dae98ebb6905b3921923937d6d733b8abb58113082b7-kubern | 15 MB 00:00:12
(9/10): 155c953863e5dc40f1d0cd5010d4df91b45d8c62edc5e93f7fc508516015fcb1-kubele | 21 MB 00:00:24
libnetfilter_cttimeout-1.0.0-7 FAILED
http://mirrors.aliyuncs.com/centos/7/os/x86_64/Packages/libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm: [Errno 12] Timeout on http://mirrors.aliyuncs.com/centos/7/os/x86_64/Packages/libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm: (28, 'Connection timed out after 30001 milliseconds')
Trying other mirror.
(10/10): libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm | 18 kB 00:00:00
-------------------------------------------------------------------------------------------------------
Total 1.2 MB/s | 63 MB 00:00:52
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : libnetfilter_cthelper-1.0.0-11.el7.x86_64 1/10
Installing : socat-1.7.3.2-2.el7.x86_64 2/10
Installing : libnetfilter_cttimeout-1.0.0-7.el7.x86_64 3/10
Installing : kubectl-1.18.6-0.x86_64 4/10
Installing : libnetfilter_queue-1.0.2-2.el7_2.x86_64 5/10
Installing : conntrack-tools-1.4.4-7.el7.x86_64 6/10
Installing : kubelet-1.18.6-0.x86_64 7/10
Installing : kubernetes-cni-1.1.1-0.x86_64 8/10
Installing : cri-tools-1.25.0-0.x86_64 9/10
Installing : kubeadm-1.18.6-0.x86_64 10/10
Verifying : cri-tools-1.25.0-0.x86_64 1/10
Verifying : kubernetes-cni-1.1.1-0.x86_64 2/10
Verifying : libnetfilter_queue-1.0.2-2.el7_2.x86_64 3/10
Verifying : kubectl-1.18.6-0.x86_64 4/10
Verifying : kubeadm-1.18.6-0.x86_64 5/10
Verifying : kubelet-1.18.6-0.x86_64 6/10
Verifying : libnetfilter_cttimeout-1.0.0-7.el7.x86_64 7/10
Verifying : socat-1.7.3.2-2.el7.x86_64 8/10
Verifying : libnetfilter_cthelper-1.0.0-11.el7.x86_64 9/10
Verifying : conntrack-tools-1.4.4-7.el7.x86_64 10/10
Installed:
kubeadm.x86_64 0:1.18.6-0 kubectl.x86_64 0:1.18.6-0 kubelet.x86_64 0:1.18.6-0
Dependency Installed:
conntrack-tools.x86_64 0:1.4.4-7.el7 cri-tools.x86_64 0:1.25.0-0
kubernetes-cni.x86_64 0:1.1.1-0 libnetfilter_cthelper.x86_64 0:1.0.0-11.el7
libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7 libnetfilter_queue.x86_64 0:1.0.2-2.el7_2
socat.x86_64 0:1.7.3.2-2.el7
Complete!
启动kubelet
[root@k8s-master ~]# systemctl enable kubelet --now
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
管理节点
master节点初始化
[root@k8s-master ~]# kubeadm init \
> --apiserver-advertise-address=192.168.18.244 \
> --image-repository registry.aliyuncs.com/google_containers \
> --kubernetes-version v1.18.6 \
> --service-cidr=172.16.0.0/16 \
> --pod-network-cidr=172.18.0.0/16 \
> --ignore-preflight-errors=all
W0112 21:17:33.096068 68515 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.6
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [172.16.0.1 192.168.18.244]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.18.244 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.18.244 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0112 21:18:07.170632 68515 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0112 21:18:07.171184 68515 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 21.003335 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 6sulfy.7w50csxcygf8hlq2
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.18.244:6443 --token 6sulfy.7w50csxcygf8hlq2 \
--discovery-token-ca-cert-hash sha256:8f0eadc6abf6b9fb90b9063b967f4777978a1fdefed15ae180ec50f0d4b539da
关联主机
#设置配置文件
[root@k8s-master ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
#配置文件生效
[root@k8s-master ~]# source ~/.bash_profile
安装网络组件calico
#下载calico服务的yaml文件
[root@k8s-master ~]# wget https://docs.projectcalico.org/v3.15/manifests/calico.yaml
--2023-01-12 21:19:39-- https://docs.projectcalico.org/v3.15/manifests/calico.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 34.124.149.177, 34.124.186.36, 2406:da18:880:3801::c8, ...
Connecting to docs.projectcalico.org (docs.projectcalico.org)|34.124.149.177|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 181004 (177K) [text/yaml]
Saving to: ‘calico.yaml’
100%[=============================================================>] 181,004 316KB/s in 0.6s
2023-01-12 21:19:40 (316 KB/s) - ‘calico.yaml’ saved [181004/181004]
#启动calico服务
[root@k8s-master ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
生成集群token
[root@k8s-master ~]# kubeadm token create --print-join-command
W0112 21:31:47.499543 90015 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join 192.168.18.244:6443 --token 1rblcs.zuez0528ukoso8bf --discovery-token-ca-cert-hash sha256:8f0eadc6abf6b9fb90b9063b967f4777978a1fdefed15ae180ec50f0d4b539da
工作节点
启动kubelet
#此操作在工作节点上执行(原文提示符误写为k8s-master;master节点的kubelet前文已启用)
[root@k8s-node01 ~]# systemctl enable kubelet --now
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
token加入集群
[root@k8s-node01 ~]# kubeadm join 192.168.18.244:6443 --token 1rblcs.zuez0528ukoso8bf --discovery-token-ca-cert-hash sha256:8f0eadc6abf6b9fb90b9063b967f4777978a1fdefed15ae180ec50f0d4b539da
W0112 21:32:54.503261 69434 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[WARNING Hostname]: hostname "k8s-node01" could not be reached
[WARNING Hostname]: hostname "k8s-node01": lookup k8s-node01 on 192.168.18.1:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
设置节点角色
#为已加入集群的工作节点打上worker角色标签(示例对k8s-node02操作;k8s-node01同理:kubectl label node k8s-node01 node-role.kubernetes.io/worker=worker)
[root@k8s-master ~]# kubectl label node k8s-node02 node-role.kubernetes.io/worker=worker
node/k8s-node02 labeled
标签:kubernetes,kubeadm,部署,集群,docker,k8s,x86_64,el7
From: https://www.cnblogs.com/yingxin20000303/p/17047091.html