k8s Installation 2


[root@master01 ~]# cd /opt/k8s/kubernetes/server/bin

[root@master01 bin]# scp kubelet kube-proxy [email protected]:/opt/kubernetes/

[root@master01 bin]# cd /opt/k8s/kubeconfig

[root@master01 kubeconfig]# chmod +x kubeconfig.sh

[root@master01 kubeconfig]# vim kubeconfig.sh

[root@master01 kubeconfig]# ./kubeconfig.sh 192.168.111.20 /opt/k8s/k8s-cert/

Cluster "kubernetes" set.

User "kubelet-bootstrap" set.

Context "default" created.

Switched to context "default".

Cluster "kubernetes" set.

User "kube-proxy" set.

Context "default" created.

Switched to context "default".
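The two blocks of "Cluster ... set / User ... set / Context ... created" output come from kubeconfig.sh generating bootstrap.kubeconfig (used by kubelet for TLS bootstrapping) and kube-proxy.kubeconfig against the apiserver at 192.168.111.20. A minimal sketch of such a script is shown below; the 6443 port, token file location and certificate file names are assumptions, not the exact script used in this transcript.

#!/bin/bash
# Sketch of kubeconfig.sh (assumed layout -- adjust paths and token source to your environment)
APISERVER=$1                                  # e.g. 192.168.111.20
SSL_DIR=$2                                    # e.g. /opt/k8s/k8s-cert/
KUBE_APISERVER="https://${APISERVER}:6443"
BOOTSTRAP_TOKEN=$(awk -F, '{print $1}' /opt/kubernetes/cfg/token.csv)   # assumed token file

# bootstrap.kubeconfig: the kubelet authenticates with the bootstrap token
kubectl config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# kube-proxy.kubeconfig: kube-proxy authenticates with its client certificate
kubectl config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=${SSL_DIR}/kube-proxy.pem --client-key=${SSL_DIR}/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig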

[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig [email protected]:/opt/kubernetes/cfg/

[email protected]'s password:

bootstrap.kubeconfig 100% 2168 2.8MB/s 00:00

kube-proxy.kubeconfig 100% 6270 7.5MB/s 00:00

[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig [email protected]:/opt/kubernetes/cfg/

[email protected]'s password:

bootstrap.kubeconfig 100% 2168 3.1MB/s 00:00

kube-proxy.kubeconfig 100% 6270 9.9MB/s 00:00

[root@master01 kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
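This binding grants the kubelet-bootstrap user the system:node-bootstrapper role, which is what allows the nodes' bootstrap tokens to create the certificate signing requests listed next. A quick check (an added verification example, not part of the original run):

kubectl get clusterrolebinding kubelet-bootstrap -o wide
kubectl describe clusterrolebinding kubelet-bootstrap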

[root@master01 kubeconfig]# kubectl get csr

NAME AGE SIGNERNAME REQUESTOR CONDITION

node-csr-bgmn7KV6TyEcKGbfStZcNXSOkH8uxCMgQtBt9fs1_Ig 62s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
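The Pending entry is the TLS bootstrap request submitted by the kubelet on 192.168.111.21 using the bootstrap token; the kubernetes.io/kube-apiserver-client-kubelet signer issues the kubelet's client certificate once the request is approved. The request can be inspected before approval, for example:

kubectl describe csr node-csr-bgmn7KV6TyEcKGbfStZcNXSOkH8uxCMgQtBt9fs1_Ig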

[root@master01 kubeconfig]# kubectl certificate approve node-csr-bgmn7KV6TyEcKGbfStZcNXSOkH8uxCMgQtBt9fs1_Ig

certificatesigningrequest.certificates.k8s.io/node-csr-bgmn7KV6TyEcKGbfStZcNXSOkH8uxCMgQtBt9fs1_Ig approved

[root@master01 kubeconfig]# kubectl get csr

NAME AGE SIGNERNAME REQUESTOR CONDITION

[root@master01 kubeconfig]# kubectl get node

NAME STATUS ROLES AGE VERSION

192.168.111.21 NotReady <none> 64s v1.20.11
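The node registers but stays NotReady because no CNI network plugin is running yet; the flannel DaemonSet applied next provides it. The exact reason is visible in the node's Ready condition, for example:

kubectl describe node 192.168.111.21      # the Ready condition message points at the missing network plugin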

[root@master01 kubeconfig]# cd /opt/k8s

[root@master01 k8s]# kubectl apply -f kube-flannel.yml

podsecuritypolicy.policy/psp.flannel.unprivileged created

clusterrole.rbac.authorization.k8s.io/flannel created

clusterrolebinding.rbac.authorization.k8s.io/flannel created

serviceaccount/flannel created

configmap/kube-flannel-cfg created

daemonset.apps/kube-flannel-ds created
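The DaemonSet schedules one kube-flannel pod per registered node. The rollout can be followed until the pod reaches Running, for example:

kubectl get pods -n kube-system -o wide -w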

[root@master01 k8s]# kubectl get pods -n kube-system

NAME READY STATUS RESTARTS AGE

kube-flannel-ds-5t8xq 0/1 Init:0/1 0 11s

[root@master01 k8s]# kubectl get nodes

NAME STATUS ROLES AGE VERSION

192.168.111.21 NotReady <none> 7m37s v1.20.11

[root@master01 k8s]# vim calico.yaml

[root@master01 k8s]# kubectl get pods -n kube-system

NAME READY STATUS RESTARTS AGE

kube-flannel-ds-5t8xq 1/1 Running 0 7m12s

[root@master01 k8s]# vim calico.yaml

[root@master01 k8s]# kubectl get pods -n kube-system

NAME READY STATUS RESTARTS AGE

kube-flannel-ds-5t8xq 1/1 Running 0 13m

[root@master01 k8s]# kubectl get csr

NAME AGE SIGNERNAME REQUESTOR CONDITION

node-csr-bgmn7KV6TyEcKGbfStZcNXSOkH8uxCMgQtBt9fs1_Ig 31m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued

node-csr-owWHRCQCfhR0mIU01uMb4sqPBl2FsORPr83Khy9OFi0 27s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending

[root@master01 k8s]# kubectl certificate approve node-csr-owWHRCQCfhR0mIU01uMb4sqPBl2FsORPr83Khy9OFi0

certificatesigningrequest.certificates.k8s.io/node-csr-owWHRCQCfhR0mIU01uMb4sqPBl2FsORPr83Khy9OFi0 approved

[root@master01 k8s]# kubectl get csr

NAME AGE SIGNERNAME REQUESTOR CONDITION

node-csr-bgmn7KV6TyEcKGbfStZcNXSOkH8uxCMgQtBt9fs1_Ig 32m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued

node-csr-owWHRCQCfhR0mIU01uMb4sqPBl2FsORPr83Khy9OFi0 78s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued

[root@master01 k8s]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done

ip_vs_dh

ip_vs_ftp

ip_vs

ip_vs_lblc

ip_vs_lblcr

ip_vs_lc

ip_vs_nq

ip_vs_pe_sip

ip_vs_rr

ip_vs_sed

ip_vs_sh

ip_vs_wlc

ip_vs_wrr
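The loop loads every ip_vs module shipped with the running kernel so kube-proxy can run in IPVS mode. To keep the modules loaded after a reboot, one common approach (an assumption added here, not shown in the original) is a modules-load.d entry; the module list below is the minimal set usually needed and can be extended:

cat > /etc/modules-load.d/ipvs.conf <<'EOF'
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
systemctl restart systemd-modules-load.service
lsmod | grep ip_vs          # confirm the modules are loaded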

[root@master01 k8s]# cd /opt/

[root@master01 opt]# ls

etcd k8s kubernetes rh

[root@master01 opt]# ls

etcd k8s kubernetes rh

[root@master01 opt]# ls

etcd k8s kubernetes rh

[root@master01 opt]# vim proxy.sh

[root@master01 opt]# cd k8s

[root@master01 k8s]# ls

admin.sh kubeconfig

apiserver.sh kube-flannel.yml

calico.yaml kubernetes

controller-manager.sh kubernetes-server-linux-amd64.tar.gz

etcd-cert master

etcd.sh master.zip

etcd-v3.4.9-linux-amd64 proxy.sh

etcd-v3.4.9-linux-amd64.tar.gz scheduler.sh

k8s-cert token.sh

[root@master01 k8s]# vim proxy.sh

[root@master01 k8s]# ./proxy.sh 192.168.111.23

-bash: ./proxy.sh: Permission denied

[root@master01 k8s]# chmod +x proxy.sh

[root@master01 k8s]# ./proxy.sh 192.168.111.23

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
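The symlink message only means the kube-proxy unit was installed and enabled; whether the service is actually running is worth confirming on the host where proxy.sh was executed, for example:

systemctl status kube-proxy --no-pager
journalctl -u kube-proxy --no-pager | tail -n 20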

[root@master01 k8s]# kubectl get nodes

NAME STATUS ROLES AGE VERSION

192.168.111.21 Ready <none> 40m v1.20.11

192.168.111.23 Ready <none> 11m v1.20.11

[root@master01 k8s]# kubectl apply -f coredns.yaml

serviceaccount/coredns created

clusterrole.rbac.authorization.k8s.io/system:coredns created

clusterrolebinding.rbac.authorization.k8s.io/system:coredns created

configmap/coredns created

deployment.apps/coredns created

service/kube-dns created
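service/kube-dns is the cluster DNS Service that the kubelet hands to pods (10.0.0.2 in this setup, as the nslookup test further down shows). Its ClusterIP and backing CoreDNS endpoints can be checked with:

kubectl get svc -n kube-system kube-dns
kubectl get endpoints -n kube-system kube-dns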

[root@master01 k8s]#

[root@master01 k8s]#

[root@master01 k8s]# kubectl get pods -n kube-system

NAME READY STATUS RESTARTS AGE

coredns-6954c77b9b-swgt4 1/1 Running 0 13s

kube-flannel-ds-5t8xq 1/1 Running 0 37m

kube-flannel-ds-xs652 0/1 CrashLoopBackOff 7 17m
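kube-flannel-ds-xs652, the flannel pod for the second node, is crash-looping here. Its logs and events are the first place to look, for example:

kubectl -n kube-system logs kube-flannel-ds-xs652 --previous
kubectl -n kube-system describe pod kube-flannel-ds-xs652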

[root@master01 k8s]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous

clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created

[root@master01 k8s]# kubectl run -it --rm dns-test --image=busybox:1.28.4 sh

If you don't see a command prompt, try pressing enter.

/ # kubectl get pods -n kube-system

sh: kubectl: not found

/ # nslookup kubernetes

Server: 10.0.0.2

Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local

Name: kubernetes

Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local

/ #
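The answers above (server 10.0.0.2, kubernetes resolving to 10.0.0.1) confirm that CoreDNS serves cluster-internal names. Fully qualified lookups from the same busybox pod work as an extra check:

nslookup kubernetes.default.svc.cluster.local
nslookup kube-dns.kube-system.svc.cluster.local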

[root@node01 opt]# chmod +x kubelet.sh proxy.sh

[root@node01 opt]# vim kubelet.sh

[root@node01 opt]# vim proxy.sh

[root@node01 opt]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}

[root@node01 opt]# ./kubelet.sh 192.168.111.21

Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
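kubelet.sh takes the node IP and, in scripts of this pattern, writes the kubelet flags and config files under /opt/kubernetes/cfg plus a systemd unit before starting the service (the exact contents are an assumption about this tutorial's script). The result can be checked with:

systemctl status kubelet --no-pager
journalctl -u kubelet --no-pager | tail -n 20     # look for TLS bootstrap / node registration messages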

[root@node01 opt]#

[root@node01 opt]#

[root@node01 opt]# ps aux | grep kubelet

root 54571 1.5 0.6 1181088 54892 ? Ssl 16:13 0:00 /opt/kubernetes/bin/kubelet --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --hostnam

[root@node01 opt]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done

ip_vs_dh

ip_vs_ftp

ip_vs

[root@node01 opt]# cd /opt/

[root@node01 opt]# ./proxy.sh 192.168.111.21

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.

[root@node01 opt]# ps aux | grep kube-proxy

root 55482 3.4 0.4 744836 33564 ? S

[root@node01 opt]# docker load -i flannel.tar

777b2c648970: Loading layer 5.848MB/5.848MB

815dff9e0b57: Loading layer 11.42MB/11.42MB

2e16188127c8: Loading layer 2.267MB/2.267MB

eb738177d102: Loading layer 49.34MB/49.34MB

b613d890216c: Loading layer 5.12kB/5.12kB
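flannel.tar is loaded into the local Docker image cache because this setup pulls nothing from a registry on the nodes; the loaded image can be confirmed with:

docker images | grep flannel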

[root@node01 opt]# kubectl get csr

bash: kubectl: command not found...

[root@node01 opt]# docker load -i coredns.tar

225df95e717c: Loading layer 336.4kB/336.4kB

96d17b0b58a7: Loading layer 45.02MB/45.02MB

Loaded image: k8s.gcr.io/coredns:1.7.0

[root@node01 opt]#

[root@node01 opt]# cd /opt/k8s

-bash: cd: /opt/k8s: No such file or directory

[root@node01 opt]# mkdir k8s

[root@node01 opt]# ls

cni coredns.tar k8s node.zip

cni-plugins-linux-amd64-v0.8.6.tgz etcd kubelet.sh proxy.sh

containerd flannel.tar kubernetes rh

[root@node01 opt]# cd k8s

[root@node01 k8s]# kubectl apply -f coredns.yaml

bash: kubectl: command not found...

[root@node01 k8s]# cd /opt

[root@node01 opt]# ls

cni coredns.tar kubelet.sh proxy.sh

cni-plugins-linux-amd64-v0.8.6.tgz etcd kubernetes rh

containerd flannel.tar node.zip

[root@node01 opt]#

[root@node02 ~]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}

[root@node02 ~]# cd /opt

[root@node02 opt]# ls

etcd kubelet.sh kubernetes node.zip proxy.sh rh

[root@node02 opt]# chmod +x kubelet.sh proxy.sh

[root@node02 opt]# cd

[root@node02 ~]# systemctl status firewalld.service

● firewalld.service - firewalld - dynamic firewall daemon

Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)

Active: inactive (dead)

Docs: man:firewalld(1)

Dec 25 04:06:29 localhost.localdomain systemd[1]: Starting firewalld - dyna...

Dec 25 04:06:30 localhost.localdomain systemd[1]: Started firewalld - dynam...

Dec 25 04:06:30 localhost.localdomain firewalld[899]: WARNING: ICMP type 'b...

Dec 25 04:06:30 localhost.localdomain firewalld[899]: WARNING: beyond-scope...

Dec 25 04:06:30 localhost.localdomain firewalld[899]: WARNING: ICMP type 'f...

Dec 25 04:06:30 localhost.localdomain firewalld[899]: WARNING: failed-polic...

Dec 25 04:06:30 localhost.localdomain firewalld[899]: WARNING: ICMP type 'r...

Dec 25 04:06:30 localhost.localdomain firewalld[899]: WARNING: reject-route...

Dec 25 04:08:18 localhost.localdomain systemd[1]: Stopping firewalld - dyna...

Dec 25 04:08:18 localhost.localdomain systemd[1]: Stopped firewalld - dynam...

Hint: Some lines were ellipsized, use -l to show in full.

[root@node02 ~]# cd /opt/

[root@node02 opt]# ls

cni etcd kubelet.sh kubernetes node.zip proxy.sh rh

[root@node02 opt]# vim kubelet.sh

[root@node02 opt]# chmod +x kubelet.sh

[root@node02 opt]# ./kubelet.sh 192.168.111.23

Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
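From here node02 mirrors node01: its kubelet submits a CSR (the node-csr-owWHRCQ... request approved earlier on master01), kube-proxy is started via ./proxy.sh 192.168.111.23 as shown earlier in the transcript, and once the CNI images are in place 192.168.111.23 appears as Ready in the master01 output above. The state can be re-checked from master01 with:

kubectl get csr
kubectl get nodes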

From: https://www.cnblogs.com/123456789SI/p/17137794.html
