
K8S Binary Installation


Cluster plan:

192.168.111.20 master01 (etcd node 1)

192.168.111.21 master02

192.168.111.22 node01 (etcd node 2)

192.168.111.23 node02 (etcd node 3)

192.168.111.24 load balancer 1

192.168.111.25 load balancer 2

Operating system initialization

[root@20 ~]# systemctl stop firewalld

[root@20 ~]# systemctl disable firewalld

[root@20 ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X

[root@20 ~]# setenforce 0

[root@20 ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config

[root@20 ~]# swapoff -a

[root@20 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab

[root@20 ~]# hostnamectl set-hostname master01

[root@master01 ~]# cat >> /etc/hosts << EOF

> 192.168.111.20 master01

> 192.168.111.22 node01

> 192.168.111.23 node02

> EOF

[root@master01 ~]# cat > /etc/sysctl.d/k8s.conf << EOF

> net.bridge.bridge-nf-call-ip6tables = 1

> net.bridge.bridge-nf-call-iptables = 1

> net.ipv6.conf.all.disable_ipv6=1

> net.ipv4.ip_forward=1

> EOF

[root@master01 ~]# sysctl --system

* Applying /usr/lib/sysctl.d/00-system.conf ...

* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
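The net.bridge.bridge-nf-call-* keys only exist once the br_netfilter kernel module is loaded; the original log does not show that step, so the following is an assumed sketch for the case where sysctl --system reports those keys as missing:

# Load br_netfilter so the net.bridge.bridge-nf-call-* sysctls exist (assumed step, not in the original log)
modprobe br_netfilter
# Load it automatically on boot
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# Re-apply the sysctl settings
sysctl --system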

[root@master01 ~]# ntpdate time.windows.com

8 Feb 16:37:36 ntpdate[50630]: adjust time server 52.231.114.183 offset 0.021157 sec

[root@master01 ~]#

[root@master01 ~]# ntpdate ntp.aliyun.com

8 Feb 16:39:02 ntpdate[50647]: adjust time server 203.107.6.88 offset -0.037662 sec

[root@master01 ~]#

[root@master01 ~]# crontab -e

no crontab for root - using an empty one

crontab: installing new crontab

[root@master01 ~]# crontab -l

*/30 * * * * /usr/sbin/ntpdate ntp.aliyun.com

[root@master01 bin]# cd /opt/k8s

[root@master01 k8s]# ls

[root@master01 k8s]# chmod +x etcd-cert.sh etcd.sh

[root@master01 k8s]# mkdir /opt/k8s/etcd-cert

[root@master01 k8s]# mv etcd-cert.sh etcd-cert/

[root@master01 k8s]# cd /opt/k8s/etcd-cert/

[root@master01 etcd-cert]# ./etcd-cert.sh

[root@master01 etcd-cert]# vim etcd-cert.sh

[root@master01 etcd-cert]# ./etcd-cert.sh

2023/02/08 17:40:15 [INFO] generating a new CA key and certificate from CSR

2023/02/08 17:40:15 [INFO] generate received request

2023/02/08 17:40:15 [INFO] received CSR

2023/02/08 17:40:15 [INFO] generating key: rsa-2048

2023/02/08 17:40:15 [INFO] encoded CSR

2023/02/08 17:40:15 [INFO] signed certificate with serial number 486791340110854064490858271330117688015685769408

2023/02/08 17:40:15 [INFO] generate received request

2023/02/08 17:40:15 [INFO] received CSR

2023/02/08 17:40:15 [INFO] generating key: rsa-2048

2023/02/08 17:40:15 [INFO] encoded CSR

2023/02/08 17:40:15 [INFO] signed certificate with serial number 112281900160138437735824673256266218854565683307

2023/02/08 17:40:15 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for

websites. For more information see the Baseline Requirements for the Issuance and Management

of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);

specifically, section 10.2.3 ("Information Requirements").

[root@master01 etcd-cert]# ls

ca-config.json ca-csr.json ca.pem server.csr server-key.pem

ca.csr ca-key.pem etcd-cert.sh server-csr.json server.pem
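etcd-cert.sh itself is not reproduced in this post. Judging by the files it leaves behind (ca*.pem and server*.pem), a minimal sketch of such a script looks roughly like the following; it assumes cfssl and cfssljson are installed and that ca-config.json defines a profile named www, which is a guess rather than something shown in the original:

# Minimal sketch of an etcd-cert.sh style script (assumed; the real script is not shown)
cd /opt/k8s/etcd-cert
# Generate the self-signed CA from ca-config.json / ca-csr.json
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# Issue the etcd server certificate; server-csr.json must list the IPs of all three etcd nodes
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
  -profile=www server-csr.json | cfssljson -bare server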

[root@master01 etcd-cert]# cd /opt/k8s/

[root@master01 k8s]# tar zxvf etcd-v3.4.9-linux-amd64.tar.gz

[root@master01 k8s]# ls etcd-v3.4.9-linux-amd64

[root@master01 k8s]# mkdir -p /opt/etcd/{cfg,bin,ssl}

[root@master01 k8s]# cd /opt/k8s/etcd-v3.4.9-linux-amd64/

[root@master01 etcd-v3.4.9-linux-amd64]# mv etcd etcdctl /opt/etcd/bin/

[root@master01 etcd-v3.4.9-linux-amd64]# cp /opt/k8s/etcd-cert/*.pem /opt/etcd/ssl/

[root@master01 etcd-v3.4.9-linux-amd64]# cd /opt/k8s/
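The log jumps from here straight to the process check below. Judging from the ps output, the first etcd member was launched with etcd.sh roughly as follows; the command line is reconstructed from that output, so treat it as an assumption:

# Start the first etcd member; etcd.sh writes /opt/etcd/cfg/etcd and the systemd unit,
# then waits for the other members to join (reconstructed from the ps output below)
cd /opt/k8s
./etcd.sh etcd01 192.168.111.20 etcd02=https://192.168.111.22:2380,etcd03=https://192.168.111.23:2380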

[root@master01 ~]# ps -ef | grep etcd

root 51989 50329 0 18:06 pts/1 00:00:00 /bin/bash ./etcd.sh etcd01 192.168.111.20 etcd02=https://192.168.111.22:2380,etcd03=https://192.168.111.23:2380

root 52034 51989 0 18:06 pts/1 00:00:00 systemctl restart etcd

root 52041 1 3 18:06 ? 00:00:01 /opt/etcd/bin/etcd --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem --logger=zap --enable-v2

root 52069 51506 0 18:07 pts/2 00:00:00 grep --color=auto etcd

[root@master01 ~]# scp -r /opt/etcd/ root@192.168.111.22:/opt/

The authenticity of host '192.168.111.22 (192.168.111.22)' can't be established.

ECDSA key fingerprint is SHA256:vqyU3FwioF/w0Y3AUg9EitcqWX3Vt+HCiqX9579jBvY.

ECDSA key fingerprint is MD5:43:2f:eb:de:64:93:82:5c:f5:fa:37:0c:02:ae:e1:17.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added '192.168.111.22' (ECDSA) to the list of known hosts.

root@192.168.111.22's password:

etcd 100% 516 439.2KB/s 00:00

etcd 100% 23MB 91.3MB/s 00:00

etcdctl 100% 17MB 109.5MB/s 00:00

ca-key.pem 100% 1679 2.8MB/s 00:00

ca.pem 100% 1257 2.5MB/s 00:00

server-key.pem 100% 1679 3.8MB/s 00:00

server.pem 100% 1346 3.1MB/s 00:00

[root@master01 ~]# scp -r /opt/etcd/ root@192.168.111.23:/opt/

The authenticity of host '192.168.111.23 (192.168.111.23)' can't be established.

ECDSA key fingerprint is SHA256:5Kq4jP4lwc+9mzHeZLdCPfIjQUK6G9lIDDaW8CC6VWc.

ECDSA key fingerprint is MD5:cd:f1:a4:b5:16:14:74:91:4d:24:d8:1d:67:bd:55:2f.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added '192.168.111.23' (ECDSA) to the list of known hosts.

root@192.168.111.23's password:

etcd 100% 516 849.3KB/s 00:00

etcd 100% 23MB 78.2MB/s 00:00

etcdctl 100% 17MB 87.8MB/s 00:00

ca-key.pem 100% 1679 405.8KB/s 00:00

ca.pem 100% 1257 2.4MB/s 00:00

server-key.pem 100% 1679 2.1MB/s 00:00

server.pem 100% 1346 2.8MB/s 00:00

[root@master01 ~]# scp /usr/lib/systemd/system/etcd.service root@192.168.111.22:/usr/lib/systemd/system/

root@192.168.111.22's password:

etcd.service 100% 542 533.5KB/s 00:00

[root@master01 ~]# scp /usr/lib/systemd/system/etcd.service root@192.168.111.23:/usr/lib/systemd/system/

root@192.168.111.23's password:

etcd.service 100% 542 403.7KB/s 00:00

Install Docker

The same steps are performed on 192.168.111.22 and 192.168.111.23; the transcript below is from node02 (192.168.111.23).

[root@23 ~]# systemctl stop firewalld

[root@23 ~]# systemctl disable firewalld

[root@23 ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X

[root@23 ~]# setenforce 0

[root@23 ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config

[root@23 ~]# swapoff -a

[root@23 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab

[root@23 ~]# hostnamectl set-hostname node02

[root@23 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2

Loaded plugins: fastestmirror, langpacks

[root@23 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Loaded plugins: fastestmirror, langpacks

adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo

repo saved to /etc/yum.repos.d/docker-ce.repo

[root@23 ~]# yum install -y docker-ce docker-ce-cli containerd.io

Loaded plugins: fastestmirror, langpacks

docker-ce-stable | 3.5 kB 00:00
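The transcript stops at the package installation. Starting Docker and enabling it on boot is the natural next step on both worker nodes; a short sketch (not part of the original log):

# Start Docker and enable it on boot (assumed follow-up, not shown in the original log)
systemctl start docker
systemctl enable docker
docker version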

[root@23 ~]# vim /opt/etcd/cfg/etcd
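The exact changes made in this edit are not shown. The /opt/etcd/cfg/etcd file copied from master01 still describes etcd01, so the member name and local IPs have to be adjusted for this node. A sketch of the relevant fields on node02, with the addresses taken from the cluster plan above and the file layout assumed:

# /opt/etcd/cfg/etcd on node02: only the member name and local IPs differ from master01 (assumed layout)
ETCD_NAME="etcd03"
ETCD_LISTEN_PEER_URLS="https://192.168.111.23:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.111.23:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.111.23:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.111.23:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.111.20:2380,etcd02=https://192.168.111.22:2380,etcd03=https://192.168.111.23:2380"
ETCD_INITIAL_CLUSTER_STATE="new"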

[root@23 ~]# systemctl start etcd

[root@23 ~]# systemctl enable etcd

Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.

[root@23 ~]# systemctl status etcd

● etcd.service - Etcd Server

Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)

Active: active (running) since Wed 2023-02-08 19:11:22 CST; 16s ago

Main PID: 16422 (etcd)

CGroup: /system.slice/etcd.service

└─16422 /opt/etcd/bin/etcd --cert-file=/opt/etcd/ssl/server.pem --...

Feb 08 19:11:22 node02 etcd[16422]: {"level":"info","ts":"2023-02-08T19:11..."}

Feb 08 19:11:22 node02 etcd[16422]: {"level":"info","ts":"2023-02-08T19:11:2...

Feb 08 19:11:22 node02 etcd[16422]: {"level":"info","ts":"2023-02-08T19:11..."}
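With all three members started, the cluster health can be verified from any node; a sketch using the v3 API and the certificates generated earlier (this check is not part of the original log):

# Check etcd cluster health from master01 (assumed verification step)
ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
  --cacert=/opt/etcd/ssl/ca.pem \
  --cert=/opt/etcd/ssl/server.pem \
  --key=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://192.168.111.20:2379,https://192.168.111.22:2379,https://192.168.111.23:2379" \
  endpoint health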

[root@master01 k8s]# cd /opt/k8s/kubernetes/server/bin

[root@master01 bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/

[root@master01 bin]# ln -s /opt/kubernetes/bin/* /usr/local/bin/

[root@master01 bin]# cd /opt/k8s/

[root@master01 k8s]# vim token.sh

[root@master01 k8s]#

[root@master01 k8s]# chmod +x token.sh

[root@master01 k8s]# ./token.sh

[root@master01 k8s]# cat /opt/kubernetes/cfg/token.csv

059b9280f2899b0e0674e54e1430503c,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
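token.sh is not reproduced either; given the token.csv it produces, a minimal sketch of such a script would be:

#!/bin/bash
# Minimal sketch of a token.sh style script (assumed; the real script is not shown).
# Generate a random 16-byte token and register it for the kubelet bootstrap user.
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF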

[root@master01 k8s]# cd /opt/k8s/

[root@master01 k8s]# ls

etcd-cert kubernetes

etcd.sh kubernetes-server-linux-amd64.tar.gz

etcd-v3.4.9-linux-amd64 master

etcd-v3.4.9-linux-amd64.tar.gz master.zip

k8s-cert token.sh

[root@master01 k8s]# ls

admin.sh etcd-v3.4.9-linux-amd64 master

apiserver.sh etcd-v3.4.9-linux-amd64.tar.gz master.zip

controller-manager.sh k8s-cert scheduler.sh

etcd-cert kubernetes token.sh

etcd.sh kubernetes-server-linux-amd64.tar.gz

[root@master01 k8s]# vim apiserver.sh

[root@master01 k8s]# ./apiserver.sh 192.168.111.20 https://192.168.111.20:2379,https://192.168.111.21:2379,https://192.168.111.23:2379

-bash: ./apiserver.sh: Permission denied

[root@master01 k8s]# chmod +x apiserver.sh

[root@master01 k8s]# ./apiserver.sh 192.168.111.20 https://192.168.111.20:2379,https://192.168.111.21:2379,https://192.168.111.23:2379

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.

[root@master01 k8s]# ps aux | grep kube-apiserver

root 16611 49.6 4.4 1098376 356516 ? Ssl 19:51 0:04 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.111.20:2379,https://192.168.111.21:2379,https://192.168.111.23:2379 --bind-address=192.168.111.20 --secure-port=6443 --advertise-address=192.168.111.20 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --kubelet-client-certificate=/opt/kubernetes/ssl/apiserver.pem --kubelet-client-key=/opt/kubernetes/ssl/apiserver-key.pem --tls-cert-file=/opt/kubernetes/ssl/apiserver.pem --tls-private-key-file=/opt/kubernetes/ssl/apiserver-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --service-account-issuer=api --service-account-signing-key-file=/opt/kubernetes/ssl/apiserver-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem --proxy-client-cert-file=/opt/kubernetes/ssl/apiserver.pem --proxy-client-key-file=/opt/kubernetes/ssl/apiserver-key.pem --requestheader-allowed-names=kubernetes --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --enable-aggregator-routing=true --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log

root 16626 0.0 0.0 112676 984 pts/3 S+ 19:51 0:00 grep --color=auto kube-apiserver

[root@master01 k8s]# netstat -natp | grep 6443

tcp 0 0 192.168.111.20:6443 0.0.0.0:* LISTEN 16611/kube-apiserve

tcp 0 0 192.168.111.20:6443 192.168.111.20:36952 ESTABLISHED 16611/kube-apiserve

tcp 0 0 192.168.111.20:36978 192.168.111.20:6443 TIME_WAIT -

tcp 0 0 192.168.111.20:36952 192.168.111.20:6443 ESTABLISHED 16611/kube-apiserve

[root@master01 k8s]# cd /opt/k8s/

[root@master01 k8s]# ./scheduler.sh

-bash: ./scheduler.sh: Permission denied

[root@master01 k8s]# chmod +x scheduler.sh

[root@master01 k8s]# vim scheduler.sh

[root@master01 k8s]# chmod +x scheduler.sh

[root@master01 k8s]# ./scheduler.sh

2023/02/08 19:52:59 [INFO] generate received request

2023/02/08 19:52:59 [INFO] received CSR

2023/02/08 19:52:59 [INFO] generating key: rsa-2048

2023/02/08 19:52:59 [INFO] encoded CSR

2023/02/08 19:52:59 [INFO] signed certificate with serial number 328425565151026905623003009717880918215995981478

2023/02/08 19:52:59 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for

websites. For more information see the Baseline Requirements for the Issuance and Management

of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);

specifically, section 10.2.3 ("Information Requirements").

Cluster "kubernetes" set.

User "kube-scheduler" set.

Context "default" created.

Switched to context "default".

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.

[root@master01 k8s]# ps aux | grep kube-scheduler

root 16749 10.1 0.4 749492 36332 ? Ssl 19:52 0:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig --bind-address=127.0.0.1

root 16771 0.0 0.0 112676 980 pts/3 S+ 19:53 0:00 grep --color=auto kube-scheduler

[root@master01 k8s]# vim controller-manager.sh

[root@master01 k8s]# ./controller-manager.sh

-bash: ./controller-manager.sh: Permission denied

[root@master01 k8s]# chmod +x controller-manager.sh

[root@master01 k8s]# ./controller-manager.sh

2023/02/08 19:54:03 [INFO] generate received request

2023/02/08 19:54:03 [INFO] received CSR

2023/02/08 19:54:03 [INFO] generating key: rsa-2048

2023/02/08 19:54:03 [INFO] encoded CSR

2023/02/08 19:54:03 [INFO] signed certificate with serial number 720419635979968297497046157735188394894973113764

2023/02/08 19:54:03 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for

websites. For more information see the Baseline Requirements for the Issuance and Management

of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);

specifically, section 10.2.3 ("Information Requirements").

Cluster "kubernetes" set.

User "kube-controller-manager" set.

Context "default" created.

Switched to context "default".

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.

[root@master01 k8s]# ps aux | grep kube-controller-manager

root 50876 10.8 0.6 816984 52260 ? Ssl 19:54 0:00 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig --bind-address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --cluster-signing-duration=87600h0m0s

root 50888 0.0 0.0 112676 984 pts/3 S+ 19:54 0:00 grep --color=auto kube-controller-manager

[root@master01 k8s]# ./admin.sh

-bash: ./admin.sh: Permission denied

[root@master01 k8s]# vim admin.sh

[root@master01 k8s]# ./admin.sh

-bash: ./admin.sh: Permission denied

[root@master01 k8s]# chmod +x a

admin.sh apiserver.sh

[root@master01 k8s]# chmod +x admin.sh

[root@master01 k8s]# ./admin.sh

Cluster "kubernetes" set.

User "cluster-admin" set.

Context "default" created.

Switched to context "default".
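admin.sh is not shown in the post; from its output it evidently builds an admin kubeconfig with kubectl config. A sketch of what such a script does, with the certificate paths assumed rather than taken from the original:

# Sketch of an admin.sh style script, inferred from its output (assumed; the real script is not shown)
KUBE_APISERVER="https://192.168.111.20:6443"
mkdir -p /root/.kube
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/root/.kube/config
kubectl config set-credentials cluster-admin \
  --client-certificate=/opt/k8s/k8s-cert/admin.pem \
  --client-key=/opt/k8s/k8s-cert/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=/root/.kube/config
kubectl config set-context default \
  --cluster=kubernetes \
  --user=cluster-admin \
  --kubeconfig=/root/.kube/config
kubectl config use-context default --kubeconfig=/root/.kube/config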

[root@master01 k8s]# kubectl get cs

Warning: v1 ComponentStatus is deprecated in v1.19+

NAME STATUS MESSAGE ERROR

scheduler Healthy ok

controller-manager Healthy ok

etcd-1 Healthy {"health":"true"}

etcd-2 Healthy {"health":"true"}

etcd-0 Healthy {"health":"true"}

[root@master01 k8s]#

[root@master01 ~]# cd /opt/k8s/

[root@master01 k8s]# unzip master.zip

Archive: master.zip

creating: master/

inflating: master/admin.sh

inflating: master/apiserver.sh

inflating: master/controller-manager.sh

inflating: master/scheduler.sh

[root@master01 k8s]# chmod +x *.sh

[root@master01 k8s]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}

[root@master01 k8s]# mkdir /opt/k8s/k8s-cert

[root@master01 k8s]# mv /opt/k8s/k8s-cert.sh /opt/k8s/k8s-cert

[root@master01 k8s]# cd /opt/k8s/k8s-cert/

[root@master01 k8s-cert]# ls

k8s-cert.sh

[root@master01 k8s-cert]# vim k8s-cert.sh

[root@master01 k8s-cert]# ./k8s-cert.sh

2023/02/08 19:46:15 [INFO] generating a new CA key and cert
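The edit made at vim k8s-cert.sh above typically fills in the certificate hosts list for the apiserver. The script itself is not reproduced; a sketch of the CSR block it would write, where the hosts entries come from the cluster plan at the top (both masters, both load balancers, the loopback address and the first service IP of 10.0.0.0/24) and everything else (file names, the names block) is assumed:

# Sketch of the apiserver CSR written inside a k8s-cert.sh style script (assumed; the real script is not shown)
cat > apiserver-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.111.20",
    "192.168.111.21",
    "192.168.111.24",
    "192.168.111.25",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [{ "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" }]
}
EOF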

