
Deploying a Kubernetes 1.21.0 Cluster with kubeasz


System version:
root@harbor:~# cat /etc/issue
Ubuntu 20.04.2 LTS \n \l
IP allocation and cluster machine specs:
192.168.10.110 k8s-deploy-harbor   2c2g
192.168.10.111 k8s-master1-etcd1-haproxy1 2c4g
192.168.10.112 k8s-master2-etcd2-haproxy2 2c4g
192.168.10.113 k8s-master3-etcd3-haproxy3 2c4g
192.168.10.114 k8s-node1 2c3g
192.168.10.115 k8s-node2 2c3g
192.168.10.116 k8s-node3 2c3g
VIP:
192.168.10.118

 

I. Initial Environment Optimization

#1. On all hosts: configure the Tsinghua apt mirror
cat > /etc/apt/sources.list <<'EOF'
# Source mirrors are commented out by default to speed up apt update; uncomment them if needed
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse
EOF
apt update

# Reload systemd and restart docker (this only applies once docker is installed; ezdown installs it later)
systemctl daemon-reload
systemctl restart docker

# Environment initialization, run on all hosts
#2. Install base packages
apt install iproute2  ntpdate  tcpdump telnet traceroute nfs-kernel-server nfs-common  lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip  apt-transport-https ca-certificates curl software-properties-common vim-common inetutils-ping iptables net-tools   -y

#3. Time synchronization
apt install cron -y
systemctl status cron.service  
echo "*/5 * * * * /usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w" >> /var/spool/cron/crontabs/root
/usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w
rm -rf /etc/localtime
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cat >> /etc/default/locale << 'EOF'
LANG=en_US.UTF-8
LC_TIME=en_DK.UTF-8
EOF
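
To confirm the timezone and time sync took effect, a quick check (timedatectl ships with systemd on Ubuntu 20.04):
timedatectl | grep -E 'Local time|Time zone'   # expect Asia/Shanghai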
  
#4. Kernel parameter tuning
cat >/etc/sysctl.conf <<EOF
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
   
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
   
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
   
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
   
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
   
# Disable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
   
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
   
# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536
   
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
   
# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296
   
# TCP kernel paramater
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096        87380   4194304
net.ipv4.tcp_wmem = 4096        16384   4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
   
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
   
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
   
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
   
   
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1
   
# keepalive conn
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.ip_local_port_range = 10001    65000
   
# swap
vm.overcommit_memory = 0
vm.swappiness = 10
   
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
EOF
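
The net.bridge.* keys require the br_netfilter kernel module; a minimal way to load it and apply the file immediately:
modprobe br_netfilter   # needed for the net.bridge.bridge-nf-call-* keys
sysctl -p               # apply /etc/sysctl.conf now; an error usually means a missing module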
  
#5. Raise resource limits
  
cat >> /etc/security/limits.conf <<EOF
root                soft    core            unlimited
root                hard    core            unlimited
root                soft    nproc           1000000
root                hard    nproc           1000000
root                soft    nofile          1000000
root                hard    nofile          1000000
root                soft    memlock         32000
root                hard    memlock         32000
root                soft    msgqueue        8192000
root                hard    msgqueue        8192000
   
  
*                soft    core            unlimited
*                hard    core            unlimited
*                soft    nproc           1000000
*                hard    nproc           1000000
*                soft    nofile          1000000
*                hard    nofile          1000000
*                soft    memlock         32000
*                hard    memlock         32000
*                soft    msgqueue        8192000
*                hard    msgqueue        8192000
EOF
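
The limits only apply to new sessions; after logging in again, spot-check them:
ulimit -n   # expect 1000000 (nofile)
ulimit -u   # expect 1000000 (nproc)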
  
#6. /etc/hosts entries
cat >> /etc/hosts <<'EOF'

192.168.10.110 k8s-deploy-harbor
192.168.10.111 k8s-master1-etcd1-haproxy1
192.168.10.112 k8s-master2-etcd2-haproxy2
192.168.10.113 k8s-master3-etcd3-haproxy3
192.168.10.114 k8s-node1
192.168.10.115 k8s-node2
192.168.10.116 k8s-node3
192.168.10.110 harbor.wbiao.cn

EOF
  
#7. Disable swap
swapoff -a
  
vim /etc/fstab   # comment out the swap entry so swap stays off after reboot
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point>   <type>  <options>       <dump>  <pass>
# / was on /dev/sda2 during curtin installation
/dev/disk/by-uuid/d70a7e92-2d0d-4014-a9a1-4cd95db5e242 / xfs defaults 0 0
#/swap.img      none    swap    sw      0       0
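
Instead of editing by hand, the swap entry can be commented out non-interactively; a sketch using GNU sed:
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab   # comment out any uncommented swap entries
swapon --show                                      # empty output means swap is fully off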

#8. Set the hostname (run only the line matching each host; assumes the original hostname is ubuntu20-04)
sed -i "s#ubuntu20-04#k8s-harbor-deploy#g" /etc/hostname && hostname k8s-harbor-deploy
sed -i "s#ubuntu20-04#k8s-master1-etcd1-haproxy1#g" /etc/hostname && hostname k8s-master1-etcd1-haproxy1
sed -i "s#ubuntu20-04#k8s-master2-etcd2-haproxy2#g" /etc/hostname && hostname k8s-master2-etcd2-haproxy2
sed -i "s#ubuntu20-04#k8s-master3-etcd3-haproxy3#g" /etc/hostname && hostname k8s-master3-etcd3-haproxy3
sed -i "s#ubuntu20-04#k8s-node1#g" /etc/hostname && hostname k8s-node1
sed -i "s#ubuntu20-04#k8s-node2#g" /etc/hostname && hostname k8s-node2
sed -i "s#ubuntu20-04#k8s-node3#g" /etc/hostname && hostname k8s-node3
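
Equivalently, hostnamectl sets both the runtime and the persistent hostname in one step, regardless of the original name:
hostnamectl set-hostname k8s-node1   # run with the matching name on each host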

 

 

II. Download the ezdown Installer Script and Deploy Harbor

1. # Download the ezdown tool script
export release=3.3.1
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown

2. # Edit the default version settings as follows
vim ezdown
# default settings, can be overridden by cmd line options, see usage
DOCKER_VER=19.03.15  # docker version to install
KUBEASZ_VER=3.1.0    # kubeasz version
K8S_BIN_VER=v1.21.0  # k8s version, change as needed
EXT_BIN_VER=0.9.4
SYS_PKG_VER=0.4.1
HARBOR_VER=v2.1.3 
REGISTRY_MIRROR=CN
3. # Download kubeasz (defaults to /etc/kubeasz); running bash ./ezdown -D also installs docker automatically
bash ./ezdown -D

# After the download, check the apiserver version
/etc/kubeasz/bin/kube-apiserver --version

# Download directory (screenshot omitted)

 

4. # Pre-pull the required images
# Batch pull locally
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-kubeasz:3.1.0 
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-kubeasz-ext-bin:0.9.4
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-kubeasz-k8s-bin:v1.21.0
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-nfs-subdir-external-provisioner:v4.0.1
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-kubernetesui-dashboard:v2.2.0
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-k8s-dns-node-cache:1.17.0
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-pause-amd64:3.4.1
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-coredns-coredns:1.8.0
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-kubernetesui-metrics-scraper:v1.0.6
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-flannel:v0.13.0-amd64
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-node:v3.15.3
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-pod2daemon-flexvol:v3.15.3
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-cni:v3.15.3
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-kube-controllers:v3.15.3
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-mirrorgooglecontainers-metrics-server-amd64:v0.3.6
docker pull registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-quay.io-coreos-flannel:v0.14.0
# Retag the pulled images locally
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-kubeasz:3.1.0  easzlab/kubeasz:3.1.0
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-kubeasz-ext-bin:0.9.4 easzlab/kubeasz-ext-bin:0.9.4
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-kubeasz-k8s-bin:v1.21.0 easzlab/kubeasz-k8s-bin:v1.21.0 
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-nfs-subdir-external-provisioner:v4.0.1 easzlab/nfs-subdir-external-provisioner:v4.0.1
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-kubernetesui-dashboard:v2.2.0 kubernetesui/dashboard:v2.2.0
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-k8s-dns-node-cache:1.17.0 easzlab/k8s-dns-node-cache:1.17.0
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-pause-amd64:3.4.1 easzlab/pause-amd64:3.4.1
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-coredns-coredns:1.8.0 coredns/coredns:1.8.0
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-kubernetesui-metrics-scraper:v1.0.6 kubernetesui/metrics-scraper:v1.0.6
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-easzlab-flannel:v0.13.0-amd64 easzlab/flannel:v0.13.0-amd64
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-node:v3.15.3 calico/node:v3.15.3
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-pod2daemon-flexvol:v3.15.3 calico/pod2daemon-flexvol:v3.15.3
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-cni:v3.15.3 calico/cni:v3.15.3
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-calico-kube-controllers:v3.15.3 calico/kube-controllers:v3.15.3
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-mirrorgooglecontainers-metrics-server-amd64:v0.3.6 mirrorgooglecontainers/metrics-server-amd64:v0.3.6
docker tag registry.cn-guangzhou.aliyuncs.com/centos-http/k8s-v1.21.0-quay.io-coreos-flannel:v0.14.0  quay.io/coreos/flannel:v0.14.0
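
Since the pull/tag pairs above are mechanical, a short loop can drive them as well; a sketch assuming the same Aliyun mirror prefix (only two pairs shown, the rest follow the same pattern):
#!/bin/bash
# pull images from the Aliyun mirror and retag them with their upstream names
MIRROR=registry.cn-guangzhou.aliyuncs.com/centos-http
declare -A IMAGES=(
  [k8s-v1.21.0-easzlab-kubeasz:3.1.0]="easzlab/kubeasz:3.1.0"
  [k8s-v1.21.0-calico-node:v3.15.3]="calico/node:v3.15.3"
  # ...add the remaining pairs from the list above
)
for src in "${!IMAGES[@]}"; do
  docker pull "${MIRROR}/${src}"
  docker tag  "${MIRROR}/${src}" "${IMAGES[$src]}"
done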

5. # Deploy harbor
harbor is deployed on 192.168.10.110
Domain: harbor.wbiao.cn
# Download harbor-offline-installer-v2.3.2.tgz and extract it
mkdir /apps && cd /apps

# Upload the installer package and extract it
tar -xf harbor-offline-installer-v2.3.2.tgz

# Create the image data directory
mkdir -p /apps/harbor-images-data

# Generate a self-signed certificate
cd /apps/harbor/
mkdir -p /apps/harbor/certs && cd certs
openssl genrsa -out harbor-ca.key 2048
openssl req -x509 -new -nodes -key harbor-ca.key -subj "/CN=harbor.wbiao.cn" -days 3650 -out harbor-ca.crt

# Point harbor.yml at the certificate files:
/apps/harbor/certs/harbor-ca.crt
/apps/harbor/certs/harbor-ca.key

# Edit the configuration file
cp -ar /apps/harbor/harbor.yml.tmpl /apps/harbor/harbor.yml
vim /apps/harbor/harbor.yml
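
A sketch of the fields that typically change in harbor.yml, matching the paths created above (remaining fields keep their template defaults; the admin password shown is Harbor's default and is an assumption):
hostname: harbor.wbiao.cn
https:
  port: 443
  certificate: /apps/harbor/certs/harbor-ca.crt
  private_key: /apps/harbor/certs/harbor-ca.key
harbor_admin_password: Harbor12345        # default; change it in production
data_volume: /apps/harbor-images-data     # image storage directory created above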


# Install docker-compose
curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version

# Run the installer with the Trivy scanner enabled
cd /apps/harbor
./install.sh --with-trivy

# Configuration screenshot (omitted)

On the harbor server 192.168.10.110, create the docker client cert directory; the final path component must be the harbor domain, otherwise docker login fails. The certificate files go in this directory:
mkdir /etc/docker/certs.d/harbor.wbiao.cn -p
cp -ar /apps/harbor/certs/harbor-ca.crt /etc/docker/certs.d/harbor.wbiao.cn
cp -ar /apps/harbor/certs/harbor-ca.key /etc/docker/certs.d/harbor.wbiao.cn
cp -ar /apps/harbor/certs/harbor-ca.crt /etc/docker/certs.d/harbor.wbiao.cn/harbor-ca.cert
# After the install, with /etc/hosts configured, log in directly
docker login harbor.wbiao.cn

# In the web UI, create an image project, then push an image to test

Test pushing an image
# tag it
docker pull nginx:latest
docker tag nginx:latest harbor.wbiao.cn/baseimages/nginx:latest
# push the image
docker push harbor.wbiao.cn/baseimages/nginx:latest

# (screenshot omitted: the images needed for the installation are now in harbor)

 

III. Deploy keepalived and haproxy on k8s-master1-etcd1-haproxy1, k8s-master2-etcd2-haproxy2, and k8s-master3-etcd3-haproxy3 for highly available VIP failover

1. # Install keepalived and haproxy
apt install -y keepalived haproxy
  
2. Configure keepalived.conf
# keepalived config for k8s-master1-etcd1-haproxy1
cat >/etc/keepalived/keepalived.conf<<'EOF'
! Configuration File for keepalived
  
global_defs {
   notification_email {
     acassen
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
  
vrrp_instance VI_1 {
    interface ens160
    virtual_router_id 50
    nopreempt
    priority 100
    advert_int 1
    virtual_ipaddress {
        192.168.10.118 dev ens160 label ens160:0
        192.168.10.119 dev ens160 label ens160:1
        192.168.10.120 dev ens160 label ens160:2
        192.168.10.121 dev ens160 label ens160:3
    }
}
EOF


# keepalived config for k8s-master2-etcd2-haproxy2
cat >/etc/keepalived/keepalived.conf<<'EOF'
! Configuration File for keepalived
  
global_defs {
   notification_email {
     acassen
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
  
vrrp_instance VI_1 {
    interface ens160
    virtual_router_id 50
    nopreempt
    priority 80
    advert_int 1
    virtual_ipaddress {
        192.168.10.118 dev ens160 label ens160:0
        192.168.10.119 dev ens160 label ens160:1
        192.168.10.120 dev ens160 label ens160:2
        192.168.10.121 dev ens160 label ens160:3
    }
}
EOF


# keepalived config for k8s-master3-etcd3-haproxy3
cat >/etc/keepalived/keepalived.conf<<'EOF'
! Configuration File for keepalived
  
global_defs {
   notification_email {
     acassen
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
  
vrrp_instance VI_1 {
    interface ens160
    virtual_router_id 50
    nopreempt
    priority 50
    advert_int 1
    virtual_ipaddress {
        192.168.10.118 dev ens160 label ens160:0
        192.168.10.119 dev ens160 label ens160:1
        192.168.10.120 dev ens160 label ens160:2
        192.168.10.121 dev ens160 label ens160:3
    }
}
EOF

# Start keepalived and enable it at boot
systemctl restart keepalived
systemctl enable keepalived




# Test failover by stopping keepalived on k8s-master1-etcd1-haproxy1, k8s-master2-etcd2-haproxy2, and k8s-master3-etcd3-haproxy3 in turn

# VIP on k8s-master2-etcd2-haproxy2 (screenshot omitted)

# VIP on k8s-master3-etcd3-haproxy3 (screenshot omitted)
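
A minimal failover drill, assuming k8s-master1 currently holds the VIPs:
# on k8s-master1: leave the VRRP group
systemctl stop keepalived
# on k8s-master2: the VIPs should arrive within a few advert intervals
ip addr show ens160 | grep 192.168.10.118
# on k8s-master1: rejoin (nopreempt means the VIPs stay on master2)
systemctl start keepalived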

 

3. # Configure haproxy.cfg
# haproxy01, haproxy02, and haproxy03 use identical configuration
cat >/etc/haproxy/haproxy.cfg<<'EOF'
global
        log /dev/log    local0
        log /dev/log    local1 notice
        chroot /var/lib/haproxy
        stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
        stats timeout 30s
        user haproxy
        group haproxy
        daemon

        # Default SSL material locations
        ca-base /etc/ssl/certs
        crt-base /etc/ssl/private

        # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
        ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
        ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
        ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets

defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        timeout connect 5000
        timeout client  50000
        timeout server  50000
        errorfile 400 /etc/haproxy/errors/400.http
        errorfile 403 /etc/haproxy/errors/403.http
        errorfile 408 /etc/haproxy/errors/408.http
        errorfile 500 /etc/haproxy/errors/500.http
        errorfile 502 /etc/haproxy/errors/502.http
        errorfile 503 /etc/haproxy/errors/503.http
        errorfile 504 /etc/haproxy/errors/504.http
#listen stats
#        mode http
#        bind 0.0.0.0:9999
#        stats enable
#        log global
#        stats uri /haproxy-status
#        stats auth haadmin:123456

listen k8s-8443
        bind 192.168.10.118:8443
        mode tcp
        server k8s-master1-etcd1-haproxy1 192.168.10.111:6443 check inter 3000 fall 3 rise 5
        server k8s-master2-etcd2-haproxy2 192.168.10.112:6443 check inter 3000 fall 3 rise 5
        server k8s-master3-etcd3-haproxy3 192.168.10.113:6443 check inter 3000 fall 3 rise 5
EOF

# Check that the kernel parameter allowing non-local port binding is set
grep 'net.ipv4.ip_nonlocal_bind' /etc/sysctl.conf
# If it is missing, add net.ipv4.ip_nonlocal_bind = 1 to /etc/sysctl.conf
# Apply the setting immediately
sysctl -p
  
# Start haproxy
systemctl restart haproxy
systemctl enable haproxy
# Verify
systemctl status haproxy && ss -tnlp | grep -E "8443"
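
Even before any apiserver exists, the VIP listener should accept TCP connections; a quick probe (assumes netcat is installed):
nc -zv 192.168.10.118 8443   # expect "succeeded" while haproxy holds the VIP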

 

IV. Deploy Kubernetes with kubeasz

1. # Configure passwordless ssh authentication
# Install sshpass
apt install -y sshpass
  
# Generate an ssh key
ssh-keygen
  
# Key distribution script
cat >/tools/ssh-scp.sh<<'EOF'
#!/bin/bash
# target host list
IP="
192.168.10.110
192.168.10.111
192.168.10.112
192.168.10.113
192.168.10.114
192.168.10.115
192.168.10.116
"
for node in ${IP};do
    sshpass -p "qwe123" ssh-copy-id -i /root/.ssh/id_rsa.pub -o StrictHostKeyChecking=no root@${node} &> /dev/null
   if [ $? -eq 0 ];then
     echo "${node} ssh key copied"
   else
     echo "${node} ssh key copy failed"
   fi
done
EOF
  
# Distribute the keys
bash ssh-scp.sh
# After distribution, test with ssh root@<node-ip> to confirm passwordless login works

2. # Deploy ansible
# kubeasz is driven by ansible; install it with pip (or directly via apt)
apt install python3-pip git -y
# Install ansible via the Aliyun pypi mirror
pip3 install ansible -i https://mirrors.aliyun.com/pypi/simple/
root@k8s-harbor-deploy:/tools# ansible --version
ansible 2.10.8

# On Ubuntu 22.04, install ansible like this:
curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
pip3.10 install ansible

3. # Distribute the harbor certificate and the harbor login file
cat >/tools/ca.sh<<'EOF'
#!/bin/bash
# target host list
IP="
192.168.10.111
192.168.10.112
192.168.10.113
192.168.10.114
192.168.10.115
192.168.10.116
"
for node in ${IP};do
  sshpass -p "qwe123" ssh-copy-id -o StrictHostKeyChecking=no root@${node} &> /dev/null
  if [ $? -eq 0 ];then
    echo "${node} ssh key copied"
    echo "${node} ssh key copied, preparing environment init....."
    ssh root@${node} "mkdir -p /etc/docker/certs.d/harbor.wbiao.cn && mkdir -p /root/.docker"
    echo "Harbor cert directory created"
    scp /etc/docker/certs.d/harbor.wbiao.cn/* root@${node}:/etc/docker/certs.d/harbor.wbiao.cn/
    echo "Harbor cert copied"
    ssh root@${node} "echo '192.168.10.110 harbor.wbiao.cn' >>/etc/hosts"
    echo "Harbor login file copied"
    scp /root/.docker/config.json root@${node}:/root/.docker/
  else
    echo "${node} ssh key copy failed"
  fi
done
EOF
sh -x ca.sh
After distribution, run docker login harbor.wbiao.cn once on every machine.
4. # Create a new k8s cluster definition
./ezctl new <your-cluster-name>-01
./ezctl new k8s-01

# Running ./ezctl new k8s-01 generates /etc/kubeasz/clusters/k8s-01/hosts and /etc/kubeasz/clusters/k8s-01/config.yml; these two files, plus /etc/kubeasz/playbooks/01.prepare.yml, are edited below
##################### Edit /etc/kubeasz/clusters/k8s-01/hosts ##########################
cat >/etc/kubeasz/clusters/k8s-01/hosts<<'EOF'
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
192.168.10.111
192.168.10.112
192.168.10.113

# master node(s)
[kube_master]
192.168.10.111
192.168.10.112

# work node(s)
[kube_node]
192.168.10.114
192.168.10.115

# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.1.8 NEW_INSTALL=false

# [optional] loadbalance for accessing k8s from outside
[ex_lb]
192.168.10.111 LB_ROLE=master EX_APISERVER_VIP=192.168.10.118 EX_APISERVER_PORT=8443
192.168.10.112 LB_ROLE=backup EX_APISERVER_VIP=192.168.10.118 EX_APISERVER_PORT=8443
192.168.10.113 LB_ROLE=backup EX_APISERVER_VIP=192.168.10.118 EX_APISERVER_PORT=8443

# [optional] ntp server for the cluster
[chrony]
#192.168.1.1

[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"

# Cluster container-runtime supported: docker, containerd
CONTAINER_RUNTIME="docker"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.100.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="10.200.0.0/16"

# NodePort Range
NODE_PORT_RANGE="30000-65000"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="wbiao.local"

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/local/bin"

# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"

# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/k8s-01"

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
EOF
##################### End of /etc/kubeasz/clusters/k8s-01/hosts ##########################

##################### Edit /etc/kubeasz/clusters/k8s-01/config.yml (changes only) ##########################
# [containerd] sandbox (pause) image
SANDBOX_IMAGE: "harbor.wbiao.cn/baseimages/pause-amd64:3.4.1"

# [docker] trusted insecure registries
INSECURE_REG: '["127.0.0.1/8","192.168.10.110"]'

# Add the VIP and domain of the master load balancer; 192.168.10.118 is the LB VIP configured earlier (optional)
############################
# role:kube-master
############################
# SANs for the k8s master certificates; multiple IPs and domains can be added (e.g. a public IP and domain)
MASTER_CERT_HOSTS:
  - "192.168.10.118"
  - "k8s.test.io"
  #- "www.test.com"

# maximum number of pods per node
MAX_PODS: 500

############################
# role:cluster-addon
############################
# coredns auto-install
dns_install: "no"
corednsVer: "1.8.0"
ENABLE_LOCAL_DNS_CACHE: false
dnsNodeCacheVer: "1.17.0"
# local dns cache address
LOCAL_DNS_CACHE: "169.254.20.10"

# metrics-server auto-install
metricsserver_install: "no"
metricsVer: "v0.3.6"

# dashboard auto-install
dashboard_install: "no"
dashboardVer: "v2.2.0"
dashboardMetricsScraperVer: "v1.0.6"
##################### End of /etc/kubeasz/clusters/k8s-01/config.yml ##########################

# Pull this image and push it to harbor for later use; run it on a server already logged in to harbor
docker pull easzlab/pause-amd64:3.4.1
docker tag easzlab/pause-amd64:3.4.1 harbor.wbiao.cn/baseimages/pause-amd64:3.4.1
docker push harbor.wbiao.cn/baseimages/pause-amd64:3.4.1
# Stop the playbook from auto-installing the load balancer and time service during init
cat >/etc/kubeasz/playbooks/01.prepare.yml<<'EOF'
# [optional] to synchronize system time of nodes with 'chrony'
- hosts:
  - kube_master
  - kube_node
  - etcd
  roles:
  - { role: os-harden, when: "OS_HARDEN|bool" }
  - { role: chrony, when: "groups['chrony']|length > 0" }

# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
- hosts: localhost
  roles:
  - deploy

# prepare tasks for all nodes
- hosts:
  - kube_master
  - kube_node
  - etcd
  roles:
  - prepare
EOF

 

5. # Run step 01, environment initialization: ./ezctl setup k8s-01 01

./ezctl setup k8s-01 01

# Installation screenshot (omitted)

 

6. # Run step 02 to install etcd
./ezctl setup k8s-01 02

# Installation screenshot (omitted)

# Check etcd endpoint health
export NODE_IPS="192.168.10.111 192.168.10.112 192.168.10.113" && for ip in ${NODE_IPS};do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
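
For more detail (leader, raft term, DB size), the same loop works with endpoint status; a sketch reusing NODE_IPS from above:
for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem \
    --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status -w table
done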


# Health-check result (screenshot omitted)

 

7. # Run step 03 to install docker

./ezctl setup k8s-01 03

# Installation screenshot (omitted)

 

8. # Run step 04 to install the masters

./ezctl setup k8s-01 04

# Installation screenshot (omitted)

# Verify that kubectl get node works; masters returning Ready,SchedulingDisabled indicates a normal install
kubectl get node

 

9. # Install the node(s)
# Edit /etc/kubeasz/roles/kube-node/templates/kube-proxy-config.yaml.j2 to change the IPVS scheduler from the default rr (round-robin) to wrr (weighted round-robin)
root@k8s-master1:/etc/kubeasz# cat /etc/kubeasz/roles/kube-node/templates/kube-proxy-config.yaml.j2
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: {{ inventory_hostname }}
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
clusterCIDR: "{{ CLUSTER_CIDR }}"
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: {{ inventory_hostname }}:10256
hostnameOverride: "{{ inventory_hostname }}"
metricsBindAddress: {{ inventory_hostname }}:10249
mode: "{{ PROXY_MODE }}"
# add the following to switch from the default rr to wrr weighted round-robin
ipvs:
  scheduler: wrr

# Run the install step
./ezctl setup k8s-01 05
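
Once the nodes are up, the scheduler change can be spot-checked on any node (assumes the ipvsadm tool is present):
ipvsadm -Ln | head -20   # service entries should show "wrr" instead of the default "rr"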

 

# Installation screenshots (omitted)

Nodes reporting Ready are in the normal state
kubectl get node

 

10. # Run step 06 to install the network plugin
# Edit /etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2 so its images point at your own harbor; pull, tag, and push the images first
# calico/cni:v3.15.3 image
docker pull calico/cni:v3.15.3
docker tag calico/cni:v3.15.3 harbor.wbiao.cn/baseimages/calico-cni:v3.15.3
docker push  harbor.wbiao.cn/baseimages/calico-cni:v3.15.3

# calico/pod2daemon-flexvol:v3.15.3 image
docker pull  calico/pod2daemon-flexvol:v3.15.3
docker tag calico/pod2daemon-flexvol:v3.15.3 harbor.wbiao.cn/baseimages/calico-pod2daemon-flexvol:v3.15.3
docker push  harbor.wbiao.cn/baseimages/calico-pod2daemon-flexvol:v3.15.3

# calico/node:v3.15.3 image
docker pull calico/node:v3.15.3
docker tag  calico/node:v3.15.3 harbor.wbiao.cn/baseimages/calico-node:v3.15.3 
docker push   harbor.wbiao.cn/baseimages/calico-node:v3.15.3 

#calico/kube-controllers:v3.15.3
docker pull calico/kube-controllers:v3.15.3
docker tag  calico/kube-controllers:v3.15.3 harbor.wbiao.cn/baseimages/calico-kube-controllers:v3.15.3
docker push harbor.wbiao.cn/baseimages/calico-kube-controllers:v3.15.3

# Rewrite the image addresses in the template
sed -i "s#calico/cni:v3.15.3#harbor.wbiao.cn/baseimages/calico-cni:v3.15.3#g" /etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2
sed -i "s#calico/pod2daemon-flexvol:v3.15.3#harbor.wbiao.cn/baseimages/calico-pod2daemon-flexvol:v3.15.3#g"  /etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2
sed -i "s#calico/node:v3.15.3#harbor.wbiao.cn/baseimages/calico-node:v3.15.3#g"  /etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2
sed -i "s#calico/kube-controllers:v3.15.3#harbor.wbiao.cn/baseimages/calico-kube-controllers:v3.15.3#g"  /etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2

# Run the install
./ezctl setup k8s-01 06

# Installation screenshot (omitted)

# Calico route state during the install (screenshot omitted)

# Route state after the install (screenshot omitted)

# Check that the calico pods pulled and started successfully
kubectl get pods -n kube-system

# Check the calico node mesh status
calicoctl node status

11. # Create three test containers
kubectl run net-test1 --image=harbor.wbiao.cn/baseimages/alpine:latest sleep 300000
kubectl run net-test2 --image=harbor.wbiao.cn/baseimages/alpine:latest sleep 300000
kubectl run net-test3 --image=harbor.wbiao.cn/baseimages/alpine:latest sleep 300000
kubectl get pod -A -o wide

# Exec into a container to test
kubectl exec -it net-test2 -- sh

# ping another container to verify pod-to-pod networking (screenshot omitted)

# ping an external IP (screenshot omitted)

# Without coredns, domain names such as baidu.com cannot be resolved (screenshot omitted)

 

V. Install CoreDNS

1. Upload the required packages

2. # Extract the packages
tar -xf kubernetes-client-linux-amd64.tar.gz
tar -xf kubernetes-node-linux-amd64.tar.gz
tar -xf kubernetes-server-linux-amd64.tar.gz
tar -xf kubernetes.tar.gz

3. # Edit the template file and install coredns
##############################################################################
cat >/tools/coredns.yaml<<'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors  # errors are logged to stdout
        health {  # CoreDNS health is reported at http://localhost:8080/health
          lameduck 5s
        }
        bind 0.0.0.0
        ready  # once coredns has finished starting, /ready (port 8181) returns 200; otherwise it reports an error
        # the domain must match CLUSTER_DNS_DOMAIN in /etc/kubeasz/clusters/k8s-01/hosts (wbiao.local)
        kubernetes wbiao.local in-addr.arpa ip6.arpa {  # queries for the service domain are resolved via the Kubernetes apiserver
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153  # metrics for Prometheus at http://<coredns-svc>:9153/metrics

        forward . 223.5.5.5 {  # queries outside the cluster domain are forwarded upstream; use /etc/resolv.conf, a corporate DNS server, or another resolver such as 223.5.5.5
          max_concurrent 1000  # maximum concurrent queries; set higher in production
        }
        cache 30        # enable a 30-second CoreDNS cache
        loop
        reload          # reload the Corefile automatically; ConfigMap edits take effect after about two minutes
        loadbalance     # round-robin the answers when a name resolves to multiple records
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        # image pulled via our harbor; change to your own registry address
        image: harbor.wbiao.cn/baseimages/coredns:v1.8.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            # memory limit: use 3-4Gi in production (or run more replicas); 256Mi for this test environment
            memory: 256Mi
          requests:
            # cpu request: use 3-4 cores in production, or run multiple replicas
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe: # liveness probe against the health endpoint
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  type: NodePort    # NodePort so the metrics port is reachable from outside the cluster
  selector:
    k8s-app: kube-dns
  clusterIP: 10.100.0.2  # the second IP of SERVICE_CIDR (10.100.0.0/16)
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153  # the port coredns listens on inside the container
    nodePort: 30009   # metrics port exposed for Prometheus; access via node-ip:port
EOF
##############################################################################

4. # Pull the coredns image, then tag and push it to harbor (same pattern as the pause and calico images)
docker pull k8s.gcr.io/coredns/coredns:v1.8.3
docker tag k8s.gcr.io/coredns/coredns:v1.8.3 harbor.wbiao.cn/baseimages/coredns:v1.8.3
docker push harbor.wbiao.cn/baseimages/coredns:v1.8.3

5. # Find the cluster DNS service address; it is normally the second IP of the service CIDR
root@k8s-harbor-deploy:~# cat /etc/kubeasz/clusters/k8s-01/hosts | grep 'SERVICE_CIDR'
SERVICE_CIDR="10.100.0.0/16"   # so here it is 10.100.0.2

# Check from inside a container (screenshot omitted)

 

6. # Create coredns
kubectl apply -f /tools/coredns.yaml

 

7. # Verify coredns was created successfully
kubectl get pod -A -o wide

# Exec into a pod again; baidu.com can now be pinged (screenshot omitted)

# Pinging 'kubernetes' resolves through the configured domain suffix wbiao.local (screenshot omitted)
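
An explicit resolution check from one of the test pods (a sketch; the alpine image's busybox provides nslookup and ping):
kubectl exec -it net-test1 -- nslookup kubernetes.default.svc.wbiao.local 10.100.0.2
kubectl exec -it net-test1 -- ping -c 2 www.baidu.com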

 

# View the metrics exposed for Prometheus via any node IP

192.168.10.114:30009/metrics

