
Quickly set up a k8s cluster for practice



Environment overview

Hostname                 IP                 CPU/Mem/Disk  OS             Role
master1.lab.example.com  172.16.186.111/24  2C/4G/60G     Ubuntu22.04.1  Master/etcd
master2.lab.example.com  172.16.186.112/24  2C/4G/60G     Ubuntu22.04.1  Master/etcd
master3.lab.example.com  172.16.186.113/24  2C/4G/60G     Ubuntu22.04.1  Master/etcd
node1.lab.example.com    172.16.186.114/24  2C/4G/60G     Ubuntu22.04.1  node
node2.lab.example.com    172.16.186.115/24  2C/4G/60G     Ubuntu22.04.1  node
node3.lab.example.com    172.16.186.116/24  2C/4G/60G     Ubuntu22.04.1  node
ha1.lab.example.com      172.16.186.117/24  1C/2G/60G     Ubuntu22.04.1  keepalived/haproxy
ha2.lab.example.com      172.16.186.118/24  1C/2G/60G     Ubuntu22.04.1  keepalived/haproxy
VIP                      172.16.186.119/24

Resetting a forgotten root password

# Reset the sudo/root password on Ubuntu 22.04 / 20.04 LTS
During boot, press SHIFT + ESC to enter the GRUB boot-loader menu
Several entries appear, including the advanced options and a memory test
Pick the first entry, "Advanced options for Ubuntu", then select "Ubuntu, with Linux 5.15.0-52-generic" (note: NOT the entry labeled "recovery mode") and press e to edit it
At the end of the line beginning with "linux", delete the string $vt_handoff and append: ro quiet splash systemd.unit=rescue.target 
After the edit, press Ctrl + X or F10 to boot into rescue mode
When the "(or press Control-D to continue):" prompt appears, just press Enter
sudo passwd             # the password must combine upper- and lowercase letters, digits, and special characters
Afterwards run systemctl reboot to restart

Initial setup

# Configure the IP address
sudo vim /etc/netplan/00-installer-config.yaml
network:
  ethernets:
    ens33:
      dhcp4: false
      dhcp6: false
      addresses:
        - 172.16.186.111/24
      routes:
        - to: default
          via: 172.16.186.2
      nameservers:
        addresses: [172.16.186.2]
  version: 2

# Apply the configuration
sudo netplan apply
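
# A quick sanity check that the settings took effect (a sketch; adjust the
# interface name if yours is not ens33):
ip -br addr show ens33        # should list 172.16.186.111/24
ip route show default         # should point at 172.16.186.2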


# Set the hostname
hostnamectl set-hostname master1.lab.example.com


# Allow root to log in over SSH
sudo vim /etc/ssh/sshd_config
PermitRootLogin yes

# Restart the sshd service
sudo systemctl restart sshd


# Install ansible first; master1 is the control node and all other nodes are managed (using the distribution's own repos)
root@master1:~# 
ssh-keygen -t rsa -P ''
for i in {1..8};do ssh-copy-id root@172.16.186.11$i;done
for i in {1..8};do ssh-copy-id rambo@172.16.186.11$i;done
echo "rambo ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/rambo
for i in {2..8};do scp /etc/sudoers.d/rambo root@172.16.186.11$i:/etc/sudoers.d/;done
apt install -y ansible
mkdir /etc/ansible

# Create this file
root@master1:~# vim /etc/ansible/ansible.cfg
[defaults]
inventory = ./inventory
host_key_checking = False
remote_user = rambo
ask_pass = False

[privilege_escalation]
become=true
become_method=sudo
become_user=root
become_ask_pass=False


root@master1:~# vim /etc/ansible/inventory
[master]
172.16.186.11[1:3]
# the "master1" group holds the other masters, i.e. every master except the control node itself
[master1]
172.16.186.11[2:3]
[node]
172.16.186.11[4:6]
[lb]
172.16.186.11[7:8]


root@master1:~# ansible all -m ping


# Configure /etc/hosts
root@master1:~# vim /etc/hosts
172.16.186.111  master1.lab.example.com   master1
172.16.186.112  master2.lab.example.com   master2
172.16.186.113  master3.lab.example.com   master3
172.16.186.114  node1.lab.example.com     node1
172.16.186.115  node2.lab.example.com     node2
172.16.186.116  node3.lab.example.com     node3
172.16.186.117  ha1.lab.example.com       ha1
172.16.186.118  ha2.lab.example.com       ha2

root@master1:~# ansible master1,node,lb -m copy -a "src=/etc/hosts dest=/etc/  backup=yes"


# Disable swap
root@master1:~# ansible all -m shell -a "sed -ri '/swap/s/^/#/' /etc/fstab && swapoff -a && systemctl mask swap.target"
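
# Optional check that swap really is off on every node (the Swap line should read 0B):
root@master1:~# ansible all -m shell -a "free -h | grep -i swap"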

# Install commonly used packages
root@master1:~# 
ansible all -m apt -a "name=net-tools,vim,bash-completion  state=present"
# ansible all -m shell -a "source /usr/share/bash-completion/bash_completion"


# Configure time synchronization
root@master1:~# 
ansible all -m apt -a "name=chrony state=present"
cp /etc/chrony/chrony.conf .        # keep a copy in /root to edit for the client nodes

# master1 acts as the local time server and syncs from Aliyun
root@master1:~# vim /etc/chrony/chrony.conf
#pool ntp.ubuntu.com        iburst maxsources 4
#pool 0.ubuntu.pool.ntp.org iburst maxsources 1
#pool 1.ubuntu.pool.ntp.org iburst maxsources 1
#pool 2.ubuntu.pool.ntp.org iburst maxsources 2
pool ntp.aliyun.com iburst


# the copy is for all other nodes, which sync from master1
root@master1:~# vim chrony.conf 
#pool ntp.ubuntu.com        iburst maxsources 4
#pool 0.ubuntu.pool.ntp.org iburst maxsources 1
#pool 1.ubuntu.pool.ntp.org iburst maxsources 1
#pool 2.ubuntu.pool.ntp.org iburst maxsources 2
pool  172.16.186.111  iburst

root@master1:~# 
ansible master1,node,lb -m copy -a "src=/root/chrony.conf dest=/etc/chrony/ backup=yes"
ansible all -m service -a "name=chronyd state=restarted"
ansible all -m shell -a "chronyc sources|tail -n1"

High-availability setup

Install haproxy

# Tune a kernel parameter (run on every haproxy node): ip_nonlocal_bind lets
# haproxy bind the VIP even while this node does not currently hold it
root@ha*:~# cat >> /etc/sysctl.conf <<-EOF
net.ipv4.ip_nonlocal_bind = 1
EOF

sysctl -p
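
# Optional sanity check; the value should now report 1 on both ha nodes:
sysctl net.ipv4.ip_nonlocal_bind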

# Install and configure haproxy
root@master1:~# ansible lb -m apt -a "name=haproxy state=present"

# Append the following on both lb hosts; do not overwrite the file
root@lb1:~# cat >> /etc/haproxy/haproxy.cfg <<-EOF
listen stats
  mode http
  bind 0.0.0.0:8888
  stats enable
  log global
  stats uri /status
  stats auth admin:123456

listen kubernetes-api-6443
  mode tcp
  bind 172.16.186.119:6443
  server  master1.lab.example.com  172.16.186.111:6443 check inter 3s fall 3 rise 3
  server  master2.lab.example.com  172.16.186.112:6443 check inter 3s fall 3 rise 3
  server  master3.lab.example.com  172.16.186.113:6443 check inter 3s fall 3 rise 3
EOF

# Start haproxy
root@master1:~# 
ansible lb -m service -a "name=haproxy state=restarted enabled=yes"
ansible lb -m shell -a "systemctl status haproxy | grep Active"
ansible all -m apt -a "name=net-tools state=present"


root@master1:~# ansible lb -m shell -a "ss -tlun | egrep '(6443|8888)'"
172.16.186.118 | CHANGED | rc=0 >>
tcp   LISTEN 0      4096          0.0.0.0:8888      0.0.0.0:*        # haproxy stats page
tcp   LISTEN 0      4096   172.16.186.119:6443      0.0.0.0:*        # VIP
172.16.186.117 | CHANGED | rc=0 >>
tcp   LISTEN 0      4096   172.16.186.119:6443      0.0.0.0:*          
tcp   LISTEN 0      4096          0.0.0.0:8888      0.0.0.0:* 

Install keepalived

root@master1:~# ansible lb -m apt -a "name=keepalived state=present"
# Configure the keepalived MASTER node on lb1
root@master1:~# cat > keepalived-lb1.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id lb1.lab.example.com
}
vrrp_script chk_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    garp_master_delay 10
    smtp_alert
    virtual_router_id 66
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.186.119/24 dev ens33 label ens33:0
    }
    track_script {
        chk_haproxy
    }
}
EOF

root@master1:~# ansible 172.16.186.117 -m copy -a "src=/root/keepalived-lb1.conf  dest=/etc/keepalived/keepalived.conf backup=yes"

# Configure the keepalived BACKUP node on lb2
root@master1:~# cat > keepalived-lb2.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id lb2.lab.example.com
}
vrrp_script chk_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    garp_master_delay 10
    smtp_alert
    virtual_router_id 66
    priority 80
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.186.119/24 dev ens33 label ens33:0
    }
    track_script {
        chk_haproxy
    }
}
EOF

root@master1:~# ansible 172.16.186.118 -m copy -a "src=/root/keepalived-lb2.conf  dest=/etc/keepalived/keepalived.conf backup=yes"



# The health-check script (for both lb hosts)
root@master1:~# cat > check_haproxy.sh <<-EOF
#!/bin/bash
# killall -0 only probes whether a haproxy process exists; if none, restart it
/usr/bin/killall -0 haproxy || systemctl restart haproxy
EOF

root@master1:~# ansible lb -m copy -a "src=/root/check_haproxy.sh  dest=/etc/keepalived/ mode=0755"

# Start the services
root@master1:~# 
ansible lb -m systemd -a "daemon-reload=yes"
ansible lb -m service -a "name=haproxy state=restarted enabled=yes"
ansible lb -m service -a "name=keepalived state=restarted enabled=yes"
ansible lb -m shell -a "systemctl status haproxy | grep Active"
ansible lb -m shell -a "systemctl status keepalived | grep Active"

root@master1:~# ansible lb -m shell -a "hostname -I"
172.16.186.118 | CHANGED | rc=0 >>
172.16.186.118 fd15:4ba5:5a2b:1008:20c:29ff:fe09:8be2 
172.16.186.117 | CHANGED | rc=0 >>
172.16.186.117 172.16.186.119 fd15:4ba5:5a2b:1008:20c:29ff:feaf:d5c1        # the VIP currently sits on .117

k8s setup (Docker-based)

Install Docker on all nodes

root@master1:~# ansible master,node -m apt -a "name=docker.io state=present"
root@master1:~# cat > /etc/docker/daemon.json <<-EOF
{
  "registry-mirrors":[
     "https://docker.mirrors.usts.edu.cn",
     "https://hub-mirror.c.163.com",
     "https://reg-mirror.qiniu.com",
     "https://registry.docker-cn.com"
    ],
	"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

root@master1:~# ansible master1,node -m copy -a "src=/etc/docker/daemon.json dest=/etc/docker/"
root@master1:~# ansible master,node -m service -a "name=docker state=restarted"
root@master1:~# ansible master,node -m shell -a "systemctl status docker | grep Active"
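
# Optional check that Docker picked up the systemd cgroup driver (the kubelet expects it):
root@master1:~# ansible master,node -m shell -a "docker info 2>/dev/null | grep -i 'cgroup driver'"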

Install kubeadm, kubelet, and kubectl on all nodes

kubeadm: the tool for bootstrapping the cluster
kubelet: the per-node agent that actually runs and supervises containers on a host
kubectl: the command-line management tool


# Add the Aliyun Kubernetes apt repository on every node
root@master1:~# echo '
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -

cat >> /etc/apt/sources.list.d/kubernetes.list <<-EOF
deb https://mirrors.aliyun.com/kubernetes/apt/  kubernetes-xenial  main
EOF' > sources.sh

root@master1:~# ansible master,node -m copy -a "src=/root/sources.sh dest=/root/ mode='0755'"
root@master1:~# ansible master,node -m shell -a "bash /root/sources.sh"
root@master1:~# ansible master,node -m shell -a "apt update"

# List the installable versions
root@master1:~# apt-cache madison kubeadm | head
root@master1:~# apt-cache madison {kubeadm,kubelet,kubectl} | head

# Install a specific version (this guide installs 1.25.5-00 this way)
root@master1:~# ansible master,node -m shell -a "apt install -y kubeadm=1.25.5-00 kubelet=1.25.5-00 kubectl=1.25.5-00"

# Or install the latest version
root@master1:~# ansible master,node -m apt -a "name=kubeadm,kubelet,kubectl state=present"
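
# Optionally pin the packages so a later apt upgrade cannot move the cluster off
# 1.25.5, then confirm the version on every node (standard apt-mark/kubeadm commands):
root@master1:~# ansible master,node -m shell -a "apt-mark hold kubeadm kubelet kubectl"
root@master1:~# ansible master,node -m shell -a "kubeadm version -o short"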

Install cri-dockerd on all hosts

Kubernetes removed dockershim support in v1.24, and Docker Engine does not natively support the CRI spec, so the two can no longer integrate directly. Mirantis and Docker therefore jointly created the cri-dockerd project, a shim that gives Docker Engine a CRI-compliant endpoint and thus lets Kubernetes control Docker through the CRI.

# Method 1 (if you can reach GitHub)
root@master1:~# curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd_0.2.5.3-0.ubuntu-focal_amd64.deb
root@master1:~# ansible master1,node -m copy -a "src=/root/cri-dockerd_0.2.5.3-0.ubuntu-focal_amd64.deb dest=/root/"
root@master1:~# ansible master,node -m shell -a "dpkg -i /root/cri-dockerd_0.2.5.3-0.ubuntu-focal_amd64.deb"
Note: the corresponding cri-docker.service unit starts automatically after installation


# Configure cri-dockerd on all hosts
Note: cri-dockerd pulls its pause image from a registry outside China by default, so switch it to a domestic address
root@master1:~# vim /lib/systemd/system/cri-docker.service
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://  --pod-infra-container-image  registry.aliyuncs.com/google_containers/pause:3.7
Note: the change is appending the --pod-infra-container-image  registry.aliyuncs.com/google_containers/pause:3.7 argument

# Push the modified unit file to the other master and node hosts
root@master1:~# ansible master1,node -m copy -a "src=/lib/systemd/system/cri-docker.service dest=/lib/systemd/system/"

# Restart the service on all nodes
root@master1:~# 
ansible master,node -m systemd -a "daemon-reload=yes"
ansible master,node -m service -a "name=cri-docker state=restarted"
ansible master,node -m shell -a "systemctl status cri-docker | grep Active"
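
# Optional check: the CRI socket should now exist on every host:
root@master1:~# ansible master,node -m shell -a "ls -l /run/cri-dockerd.sock"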

# Pre-pull the images needed for cluster initialization
# List them from the domestic mirror
root@master1:~# kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
I1228 04:11:28.768956   17697 version.go:256] remote version is much newer: v1.26.0; falling back to: stable-1.25
registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.5
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.5
registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.5
registry.aliyuncs.com/google_containers/kube-proxy:v1.25.5
registry.aliyuncs.com/google_containers/pause:3.8
registry.aliyuncs.com/google_containers/etcd:3.5.6-0
registry.aliyuncs.com/google_containers/coredns:v1.9.3


# Pull the images from the domestic mirror; from v1.24 on you must also specify the --cri-socket path
kubeadm config images pull --kubernetes-version=v1.25.5 --image-repository registry.aliyuncs.com/google_containers --cri-socket unix:///run/cri-dockerd.sock

# The pull may fail here; if it does, pull one host at a time, and retry a few times when a single host keeps failing
root@master1:~# ansible master1,node -m shell -a "kubeadm config images pull --kubernetes-version=v1.25.5 --image-repository registry.aliyuncs.com/google_containers --cri-socket unix:///run/cri-dockerd.sock"


# Export the images (optional)
root@master1:~# docker image save `docker image ls --format "{{.Repository}}:{{.Tag}}"` -o k8s-images-v1.25.5.tar
root@master1:~# ll -h k8s-images-v1.25.5.tar 
-rw------- 1 root root 667M Dec 28 05:21 k8s-images-v1.25.5.tar
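
# The matching import, if you ship the archive to another host (standard docker command):
root@node1:~# docker image load -i k8s-images-v1.25.5.tar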

Initialize the k8s cluster

# Enable kubectl command completion
kubectl completion bash > /etc/profile.d/kubectl_completion.sh
source  /etc/profile.d/kubectl_completion.sh



--kubernetes-version: the version of the Kubernetes components; it must match the version of the installed kubelet package
--control-plane-endpoint: required for multi-master clusters; a fixed access address for the control plane (an IP or DNS name) that becomes the API Server address in the kubeconfig files used by administrators and cluster components. Leave it out for single-master deployments. Note: kubeadm cannot convert a single control-plane cluster created without --control-plane-endpoint into a highly available cluster
--pod-network-cidr: the Pod network range in CIDR notation; the Flannel plugin defaults to 10.244.0.0/16, Calico to 192.168.0.0/16
--service-cidr: the Service network range in CIDR notation, default 10.96.0.0/12; usually only plugins like Flannel require setting it explicitly
--service-dns-domain <string>: the cluster domain name, default cluster.local, resolved automatically by the cluster DNS service
--apiserver-advertise-address: the IP address the API server advertises it is listening on; if unset, the default network interface is used. This is the address the apiserver announces to the other components and should normally be the master's address on the cluster-internal network; 0.0.0.0 means every available address on the node. Optional
--image-repository <string>: the image registry, default k8s.gcr.io; since that may be unreachable from mainland China, point it at a domestic mirror
--token-ttl: lifetime of the bootstrap token, default 24h; 0 means it never expires. To keep a leaked or lingering token from compromising the cluster, set an expiry. If the token has already expired and you want to join more nodes, recreate it and print the join command with:
Command: kubeadm token create --print-join-command 
--ignore-preflight-errors=Swap: if swap has not been disabled on the nodes, add this so kubeadm ignores the resulting error
--upload-certs: upload the control-plane certificates to the kubeadm-certs Secret 
--cri-socket: required since v1.24; the path of the CRI socket to connect to, which differs per runtime:
# containerd:           --cri-socket unix:///run/containerd/containerd.sock
# Docker (cri-dockerd): --cri-socket unix:///var/run/cri-dockerd.sock
# CRI-O:                --cri-socket unix:///var/run/crio/crio.sock
Note: CRI-O and containerd manage containers differently, so their image files are not interchangeable


# Initialize the cluster
root@master1:~# kubeadm init --control-plane-endpoint="172.16.186.119" --kubernetes-version=v1.25.5 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --token-ttl=0 --cri-socket unix:///run/cri-dockerd.sock --image-repository registry.aliyuncs.com/google_containers  --upload-certs 
Note: \ line continuations are not supported here; the output is shown below

....
	....
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 172.16.186.119:6443 --token 9uizwb.m1a1jvjw9fcf9zmr \
	--discovery-token-ca-cert-hash sha256:f388a7588aa0030e8cd12e9e243b10c6854718f3df7f3adfbed9f6486d7c7c9e \
	--control-plane --certificate-key 4d6278111bc612b5267afbc2eb0a613fd662a6694d81a71931b95b3d536381e8

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.186.119:6443 --token 9uizwb.m1a1jvjw9fcf9zmr \
	--discovery-token-ca-cert-hash sha256:f388a7588aa0030e8cd12e9e243b10c6854718f3df7f3adfbed9f6486d7c7c9e 


# Generate the kubectl credential file on the first master node
root@master1:~# 
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


# Run the following on each remaining master node to join it to the cluster (one master at a time; joining several simultaneously can fail)
kubeadm join 172.16.186.119:6443 --token 9uizwb.m1a1jvjw9fcf9zmr \
	--discovery-token-ca-cert-hash sha256:f388a7588aa0030e8cd12e9e243b10c6854718f3df7f3adfbed9f6486d7c7c9e \
	--control-plane --certificate-key 4d6278111bc612b5267afbc2eb0a613fd662a6694d81a71931b95b3d536381e8 \
	--cri-socket unix:///run/cri-dockerd.sock

Note 1: --cri-socket unix:///run/cri-dockerd.sock was added by hand and is mandatory
Note 2: as the output instructs, create the .kube directory and fix its ownership:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Note 3:
Error: missing optional cgroups: blkio
Fix: run the join again to surface the error, kill whatever holds the reported port and delete the reported files, then run the join once more




# Join the worker nodes to the cluster (run on every node, one at a time; joining several simultaneously can fail)
kubeadm join 172.16.186.119:6443 --token 9uizwb.m1a1jvjw9fcf9zmr \
	--discovery-token-ca-cert-hash sha256:f388a7588aa0030e8cd12e9e243b10c6854718f3df7f3adfbed9f6486d7c7c9e \
	--cri-socket unix:///run/cri-dockerd.sock
Note:
Error: missing optional cgroups: blkio
Fix: run the join again to surface the error, kill whatever holds the reported port and delete the reported files, then run the join once more
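
# If a join dies half-way, an alternative to the manual port/file cleanup above is to
# wipe the partial state with kubeadm's own reset subcommand, then retry the join:
kubeadm reset -f --cri-socket unix:///run/cri-dockerd.sock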


# View the nodes from a master node
root@master1:~# kubectl get nodes
NAME                      STATUS     ROLES           AGE     VERSION
master1.lab.example.com   NotReady   control-plane   3h23m   v1.25.5
master2.lab.example.com   NotReady   control-plane   3h1m    v1.25.5
master3.lab.example.com   NotReady   control-plane   3h      v1.25.5
node1.lab.example.com     NotReady   <none>          9m58s   v1.25.5
node2.lab.example.com     NotReady   <none>          155m    v1.25.5
node3.lab.example.com     NotReady   <none>          148m    v1.25.5



# Deploy the network add-on on the first master node
root@master1:~# wget https://files.cnblogs.com/files/smlile-you-me/kube-flannel.yml.sh
root@master1:~# mv kube-flannel.yml.sh  kube-flannel.yml
root@master1:~# kubectl apply -f kube-flannel.yml
Note: wait about 10 minutes here; once docker images | grep flan shows the flannel image, the nodes will switch to Ready
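
# While waiting, you can watch the flannel pods come up (the namespace differs by
# manifest version, so search across all namespaces):
root@master1:~# kubectl get pods -A | grep -i flannel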

root@master1:~# kubectl get nodes
NAME                      STATUS   ROLES           AGE     VERSION
master1.lab.example.com   Ready    control-plane   3h54m   v1.25.5
master2.lab.example.com   Ready    control-plane   3h32m   v1.25.5
master3.lab.example.com   Ready    control-plane   3h31m   v1.25.5
node1.lab.example.com     Ready    <none>          41m     v1.25.5
node2.lab.example.com     Ready    <none>          3h6m    v1.25.5
node3.lab.example.com     Ready    <none>          3h      v1.25.5


# Test workload orchestration and service access
root@master1:~# kubectl create deployment testapp --image=ikubernetes/demoapp:v1.0 --replicas=3
root@master1:~# kubectl get pod -o wide
NAME                      READY   STATUS    RESTARTS   AGE     IP           NODE                    NOMINATED NODE   READINESS GATES
testapp-c88bfb9f7-6vxcb   1/1     Running   0          3m13s   10.244.4.4   node2.lab.example.com   <none>           <none>
testapp-c88bfb9f7-kffjr   1/1     Running   0          3m13s   10.244.3.2   node1.lab.example.com   <none>           <none>
testapp-c88bfb9f7-vl785   1/1     Running   0          3m13s   10.244.5.2   node3.lab.example.com   <none>           <none>


# Create a Service in front of the pods (service port:pod port)
root@master1:~# kubectl create service nodeport testapp --tcp=80:80
# Find the Service address
root@master1:~# kubectl get  svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        103m       
testapp      NodePort    10.102.225.131  <none>        80:30616/TCP   19m          # the Service created above
Explanation:
Requests to the Service address 10.102.225.131 are balanced across the testapp pods on node1/2/3
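
# Since the Service is of type NodePort, the app is also reachable on any node's own IP
# at the mapped port (30616 in the output above; the port is random per cluster):
root@master1:~# curl http://172.16.186.114:30616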


# Test the round-robin effect
root@master1:~# for((i=1;i<5;i++));do curl 10.102.225.131;sleep 3;done
iKubernetes demoapp v1.0 !! ClientIP: 10.244.0.0, ServerName: testapp-c88bfb9f7-6vxcb, ServerIP: 10.244.4.4!
iKubernetes demoapp v1.0 !! ClientIP: 10.244.0.0, ServerName: testapp-c88bfb9f7-vl785, ServerIP: 10.244.5.2!
iKubernetes demoapp v1.0 !! ClientIP: 10.244.0.0, ServerName: testapp-c88bfb9f7-6vxcb, ServerIP: 10.244.4.4!
iKubernetes demoapp v1.0 !! ClientIP: 10.244.0.0, ServerName: testapp-c88bfb9f7-kffjr, ServerIP: 10.244.3.2!



# Scale out
root@master1:~# kubectl scale deployment testapp --replicas=5
# Scale in
root@master1:~# kubectl scale deployment testapp --replicas=2
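
# Watch the replica count converge after scaling; kubectl create deployment labels the
# pods app=testapp, so they can be selected by that label:
root@master1:~# kubectl get deployment testapp
root@master1:~# kubectl get pods -l app=testapp -o wide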

From: https://www.cnblogs.com/smlile-you-me/p/17012204.html
