1. K8s Deployment: Initial Preparation
1.1 System Installation
- Address plan; adjust to match your environment
Hostname | IP | OS
---|---|---
master | 10.0.0.10 | Ubuntu 22.04
worker01 | 10.0.0.11 | Ubuntu 22.04
worker02 | 10.0.0.12 | Ubuntu 22.04
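The hostnames in the table must actually be set on each machine, since /etc/hosts (section 1.7) and the join commands refer to them; a quick sketch, run once per node with its own name:
hostnamectl set-hostname master    # use worker01 / worker02 on the other nodes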
1.2 Network and SSH Configuration
- Per the installation settings above, the username is ubuntu and the password is 000000
- Switch to the root user
sudo su -
# enter the password: 000000
- Configure a static IP
- Why? The cluster relies on these fixed addresses; with DHCP, a lease renewal could hand out a different IP and break the cluster.
vim /etc/netplan/00-installer-config.yaml   # the subiquity default; the filename may differ
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses:
        - 10.0.0.10/24
      routes:
        - to: default
          via: 10.0.0.2
      nameservers:
        addresses: [223.6.6.6]
  version: 2
- Apply the network configuration
netplan apply
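A quick sanity check that the static address, default route, and DNS took effect (interface name ens33 as above):
ip addr show ens33
ip route show
ping -c 3 223.6.6.6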
- Configure SSH and set a root password
vim /etc/ssh/sshd_config
# line 32: allow root login over SSH
PermitRootLogin yes
- Set the root password
passwd
# enter the new password twice
- Restart the SSH service for the change to take effect
systemctl restart sshd
- At this point you can connect with a remote terminal tool
1.3 Note the software packages prepared in the apps folder
1.4 Configure the Offline APT Source
tar xvf k8s.tar.gz -C /opt/
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/k8s/debs/
EOF
- Load the source
apt clean; apt update
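To confirm apt is now resolving packages from the offline tree, one quick check (any package known to be in the bundle works; kubeadm here is an assumption about its contents):
apt-cache policy kubeadm
# the candidate version should come from the file:/ source under /opt/k8s/debs/
</apt-cache>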
1.5 System Resource Limit Tuning
- Append the following at the end of the file
vim /etc/security/limits.conf
# resource limits for the root account
root soft core unlimited
root hard core unlimited
root soft nproc 1000000
root hard nproc 1000000
root soft nofile 1000000
root hard nofile 1000000
root soft memlock 32000
root hard memlock 32000
root soft msgqueue 8192000
root hard msgqueue 8192000
# resource limits for all other accounts
* soft core unlimited
* hard core unlimited
* soft nproc 1000000 # max number of processes
* hard nproc 1000000
* soft nofile 1000000 # max number of open files
* hard nofile 1000000
* soft memlock 32000 # max locked-in-memory address space (KB)
* hard memlock 32000
* soft msgqueue 8192000 # max message queue size (bytes)
* hard msgqueue 8192000
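These limits only apply to new sessions; after logging out and back in, a quick verification:
ulimit -n   # expect 1000000 (max open files)
ulimit -u   # expect 1000000 (max processes)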
1.6 Kernel Parameter Tuning
- Append the following at the end of the file
vim /etc/sysctl.conf
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
# Controls whether core dumps will append the PID to the core filename
# Useful for debugging multi-threaded application
kernel.core_uses_pid = 1
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
# Disable netfilter on bridges
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
# Controls the maximum size of a single message, in bytes
kernel.msgmax = 65536
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
# Controls the maximum total shared memory, in pages
kernel.shmall = 4294967296
# TCP kernel parameters
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
# tcp_tw_recycle was removed in kernel 4.12+; leave it commented out on Ubuntu 22.04
# net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.ip_local_port_range = 10001 65000
# swap
vm.overcommit_memory = 0
vm.swappiness = 10
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_filter = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
- Reboot the server for the changes to take effect
reboot
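If a reboot is inconvenient right now, the sysctl changes can be loaded immediately (the limits.conf changes still require a fresh login session):
sysctl -p /etc/sysctl.conf
# note: the net.bridge.* keys will error until br_netfilter is loaded; section 1.7 handles that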
1.7 Basic Kubernetes Initialization
- Turn off the swap partition (or disable the swap file)
swapoff -a
# strip the swap line(s) from /etc/fstab so swap stays off after a reboot
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap > /etc/fstab
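Confirm swap is really off before initializing Kubernetes:
swapon --show   # should print nothing
free -h         # the Swap line should read 0B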
- Load the br_netfilter module and enable bridge netfilter
sed -i "s/net.bridge.bridge-nf-call-ip6tables = 0/net.bridge.bridge-nf-call-ip6tables = 1/g" /etc/sysctl.conf
sed -i "s/net.bridge.bridge-nf-call-iptables = 0/net.bridge.bridge-nf-call-iptables = 1/g" /etc/sysctl.conf
modprobe br_netfilter
sysctl -p
- Configure the IPVS modules to load at boot
# /etc/modules takes bare module names, one per line (not modprobe commands);
# on Ubuntu 22.04 kernels, nf_conntrack replaces the old nf_conntrack_ipv4
cat >> /etc/modules << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
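The list above only takes effect at boot; to load and verify the modules immediately:
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do modprobe $m; done
lsmod | grep -e ip_vs -e nf_conntrack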
- Configure /etc/hosts
cat >> /etc/hosts << EOF
10.0.0.10 master
10.0.0.11 worker01
10.0.0.12 worker02
EOF
2. Install the Kubernetes Services
apt install -y kubectl kubelet kubeadm
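It is common practice to pin these packages so a later apt upgrade cannot move the cluster off the tested version:
apt-mark hold kubelet kubeadm kubectl
kubeadm version   # should match the version passed to kubeadm init in section 4 (v1.20.15)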
3. Install Docker
- Install the Docker service
apt install -y docker-ce
- Configure Docker registry mirrors (note: this only helps when the node has Internet access)
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://deckaaa2.mirror.aliyuncs.com","https://registry.docker-cn.com","https://reg-mirror.qiniu.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker
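One addition not in the original steps: the upstream kubeadm documentation recommends running Docker with the systemd cgroup driver so it matches the kubelet. If kubelet reports cgroup-driver errors later, this daemon.json variant (merging the extra key with the mirrors above) is the usual fix:
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://deckaaa2.mirror.aliyuncs.com","https://registry.docker-cn.com","https://reg-mirror.qiniu.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker
docker info | grep -i cgroup   # should show: Cgroup Driver: systemd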
4. Initialize Kubernetes
- On the master node
- List the images kubeadm needs
kubeadm config images list
- Unpack the prepared offline images
tar xvf kubeimage_1_20.tar.gz
- Enter the image directory and load every image with a for loop
cd kubeimage_1_20/
for i in `ls`;do docker load -i $i;done
- Initialize the cluster
kubeadm init --apiserver-advertise-address=10.0.0.10 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.20.15 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=Swap
5. Configure the Cluster and Join the Worker Nodes
- On the master node
- Set up the admin kubeconfig, which kubectl uses to authenticate to the cluster
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
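kubectl should now authenticate. The master will report NotReady until the flannel plugin from section 6 is applied:
kubectl get nodes
kubectl get pods -n kube-system   # coredns stays Pending until the network plugin is up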
- On all worker nodes
- Load the offline images, then join the cluster
tar xvf kubeimage_1_20.tar.gz
cd kubeimage_1_20/
for i in `ls`;do docker load -i $i;done
- Join the cluster
- The join command below can be regenerated on the master at any time with:
- kubeadm token create --print-join-command
kubeadm join 10.0.0.10:6443 --token vzjl4b.0w3fmrjuxfc2zps2 --discovery-token-ca-cert-hash sha256:38e19b57fa4e070e57ae1ec5096f9f5b63947dbe887641fcadd5a77837992e09
6. Install the Network Plugin
- On all worker nodes
cd kubeimage_1_20/
tar xf flannel.tar.gz
cd flannel/
for i in `ls`;do docker load -i $i;done
- On the master node
cd kubeimage_1_20/
tar xf flannel.tar.gz
cd flannel/
for i in `ls`;do docker load -i $i;done
kubectl apply -f kube-flannel.yml
- Check the cluster status
kubectl get nodes
7. Rook-Ceph Environment Preparation (note: do not use this approach; it is included only to introduce the technique; use section 8 instead)
- Skip this in production; see section 8
- On the master node
- Unpack the source tree
unzip rook-1.8.0.zip
- On all nodes
- Import the images
tar zxvf rookimage.tar.gz
cd rook/
for i in `ls`;do docker load -i $i;done
7.1 Deploy the Rook Cluster
- On the master node
cd rook-1.8.0/deploy/examples/
# deploy the prerequisites (CRDs, common resources, operator)
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
# deploy the cluster itself
kubectl create -f cluster.yaml
# remove the master taint so Ceph daemons can schedule there (node name per the plan in 1.1)
kubectl taint node master node-role.kubernetes.io/master-
# deploy the Rook Ceph toolbox
kubectl create -f toolbox.yaml
# deploy the Ceph Dashboard
kubectl apply -f dashboard-external-https.yaml
# fetch the dashboard admin password
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 -d
# access the dashboard (the NodePort may differ; check with: kubectl -n rook-ceph get svc)
https://10.0.0.10:32604/
- Notes on cluster.yaml (the left-hand numbers are line numbers in the file)
53   mgr:
54     # When higher availability of the mgr is needed, increase the count to 2.
55     # In that case, one mgr will be active and one in standby. When Ceph updates which
56     # mgr is active, Rook will update the mgr services to match the active mgr.
     # raise this count for more mgr daemons
57     count: 1
210  storage: # cluster level storage configuration and selection
     # consume all nodes
211    useAllNodes: true
     # consume all devices on those nodes
212    useAllDevices: true
     # or pin specific nodes, devices, and types:
223  #  nodes:
224  #    - name: "172.17.4.201"
225  #      devices: # specific devices to use for storage can be specified for each node
226  #        - name: "sdb"
227  #        - name: "nvme01" # multiple osds can be created on high performance devices
228  #          config:
229  #            osdsPerDevice: "5"
230  #        - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
231  #      config: # configuration can be specified at the node level which overrides the cluster level config
232  #    - name: "172.17.4.301"
233  #      deviceFilter: "^sd."
234  # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
7.2 Verify Rook-Ceph
- On the master node
# check the Ceph cluster state from the ceph-tools pod (the pod name suffix will differ in your cluster)
kubectl -n rook-ceph exec -it rook-ceph-tools-6cccf8dc7b-mhb2m -- bash
[rook@rook-ceph-tools-6cccf8dc7b-mhb2m /]$ ceph -s
  cluster:
    id:     1c2edb0e-cf06-4caf-ad0d-c8b2c0e7f10d
    health: HEALTH_OK
  services:
    mon: 3 daemons, quorum a,b,c (age 49m)
    mgr: a(active, since 48m)
    osd: 3 osds: 3 up (since 48m), 3 in (since 48m)
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   15 MiB used, 150 GiB / 150 GiB avail
    pgs:     1 active+clean
8. Distributed Storage Deployment (cephadm)
- Build the offline source (all nodes)
tar zxvf ceph_quincy.tar.gz -C /opt/
cp /etc/apt/sources.list{,.bak}
cat >> /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph_quincy/debs/
EOF
apt-get clean
apt-get update
- Configure time synchronization (all nodes)
# enable NTP
timedatectl set-ntp true
# set the Shanghai timezone
timedatectl set-timezone Asia/Shanghai
# sync the hardware clock to the system clock
hwclock --systohc
- Install cephadm (master node)
apt install -y cephadm
- Import the Ceph images (all nodes)
docker load -i cephadm_images_v17.tar
- Build a local image registry (master node)
# import the registry image
docker load -i registry.tar
# start the registry
docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef
- Point Docker at the local registry (master node; note this overwrites the daemon.json from section 3, which is fine offline)
cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["10.0.0.10:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker
# tag the image with the local registry address
docker tag 0912465dcea5 10.0.0.10:5000/ceph:v17
# push it into the registry
docker push 10.0.0.10:5000/ceph:v17
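A quick check that the push landed, using the registry's standard HTTP API:
curl http://10.0.0.10:5000/v2/_catalog
# should list the ceph repository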
- Configure the private registry (all worker nodes)
cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["10.0.0.10:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker
- Bootstrap the Ceph cluster (master node)
mkdir -p /etc/ceph
cephadm --image 10.0.0.10:5000/ceph:v17 bootstrap --mon-ip 10.0.0.10 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull
- Install the ceph-common tools (master node)
apt install -y ceph-common
- Add the hosts to the cluster
ssh-copy-id -f -i /etc/ceph/ceph.pub worker01
ssh-copy-id -f -i /etc/ceph/ceph.pub worker02
# let the cluster discover the machines
ceph orch host add worker01
ceph orch host add worker02
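Verify that both workers joined before deploying OSDs:
ceph orch host ls   # should list master, worker01, worker02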
- Deploy the OSDs (master node)
# list the available disk devices
ceph orch device ls
# add them to the cluster: OSDs are created automatically on every unused device
ceph orch apply osd --all-available-devices
Note:
# alternatively, create OSDs from specific devices on specific hosts (generic example):
ceph orch daemon add osd node1:/dev/sdb
ceph orch daemon add osd node2:/dev/sdb
ceph orch daemon add osd node3:/dev/sdb
# inspect the OSDs and capacity
ceph -s
ceph df
9. Image Build Notes
- These steps apply only to this project; the image has already been built, so skip ahead to section 10
- Create the runtime directory (the MySQL image does not ship with it)
mkdir /var/run/mysqld
chown -R mysql.mysql /var/run/mysqld/
# start mysqld (runs in the foreground and blocks the terminal)
mysqld_safe
# data directory:
ll /var/lib/mysql/
10. Writing the Kubernetes YAML
- Load the vishow image into the k8s cluster nodes
docker load -i vishowv4.tar
- Create a namespace
- Namespaces keep the application's resources clearly separated
kubectl create ns vishow
# confirm it was created
kubectl get ns
- Create a directory for the YAML files
mkdir /k8syaml/vishowapp -pv
cd /k8syaml/vishowapp/
- Create the data directory
mkdir /data/
- Write the YAML files
- Create the Ceph resources
# pools for the vishow video and image data
ceph osd pool create vishow-data
ceph osd pool create vishow-metadata
- Create the CephFS filesystem
ceph fs new vishowfs vishow-metadata vishow-data
- Create the MDS daemons for the filesystem (the service name matches the filesystem)
ceph orch apply mds vishowfs --placement="3 master worker01 worker02"
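Before creating the client user, confirm the filesystem exists and the MDS daemons are running:
ceph fs ls
ceph orch ps --daemon-type mds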
- Create the client user for the pools and export its keyring
cd /etc/ceph/
# keyring granting access to the vishow-data pool
ceph auth get-or-create client.vishowfsclient mon 'allow r' mds 'allow rw' osd 'allow rwx pool=vishow-data' -o ceph.client.vishowfsclient.keyring
- Export the client's key to a file
cd /etc/ceph/
ceph auth print-key client.vishowfsclient > vishowfsclient.key
- Mount the CephFS on the physical servers
mount -t ceph worker01:6789,worker02:6789:/ /data/ -o name=vishowfsclient,secretfile=/etc/ceph/vishowfsclient.key
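Verify the mount and, if it should survive reboots, add a matching fstab entry (a sketch assuming the same mon hosts and key file as above):
df -h /data
echo 'worker01:6789,worker02:6789:/ /data ceph name=vishowfsclient,secretfile=/etc/ceph/vishowfsclient.key,_netdev 0 0' >> /etc/fstab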
- Create the local MySQL mount directory (needed on all nodes)
- Data persistence preparation
mkdir /data/mysql -pv
# create a matching mysql user (UID/GID 101 must match the user inside the image)
vim /etc/passwd
mysql:x:101:101::/home/mysql:/bin/sh
tar xvf mysql.tar.gz -C /data/mysql
tar xvf vikan.tar.gz -C /data/
# give the mysql directory the ownership it needs
chown -R 101.101 /data/mysql/
- Install and configure NFS (master node)
apt-get install nfs-kernel-server -y
# edit the server-side configuration file
vim /etc/exports
# append the directories to share at the end of the file
/data/mysql/ *(rw,sync,no_root_squash)
/data/vikan/ *(rw,sync,no_root_squash)
# start the service
systemctl restart nfs-server.service
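Confirm the exports are live before pointing pods at them:
exportfs -v
showmount -e localhost   # should list /data/mysql and /data/vikan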
- Install the NFS client (all worker nodes)
apt-get install nfs-common -y
- Have the pod use the persistent volumes (master node)
cd /k8syaml/vishowapp
vim vishow.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app-vishow
  namespace: vishow
spec:
  replicas: 1
  selector:
    matchLabels:
      app: viapp
  template:
    metadata:
      labels:
        app: viapp
    spec:
      containers:
      - name: vishow
        image: vishow:v1.4
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8098
        - containerPort: 2198
        volumeMounts:
        - name: ceph-mysql
          mountPath: "/var/lib/mysql"
          readOnly: false
        - name: vikan-home
          mountPath: "/home/vikan"
          readOnly: false
      volumes:
      - name: ceph-mysql
        nfs:
          server: 10.0.0.10
          path: "/data/mysql"
      - name: vikan-home
        nfs:
          server: 10.0.0.10
          path: "/data/vikan"
---
apiVersion: v1
kind: Service
metadata:
  name: app-vishow
  namespace: vishow
spec:
  type: NodePort
  selector:
    app: viapp
  ports:
  - name: port1
    protocol: TCP
    port: 2198
    targetPort: 2198
    nodePort: 30000
  - name: port2
    protocol: TCP
    port: 8098
    targetPort: 8098
    nodePort: 30001
# apply the manifest
kubectl apply -f vishow.yaml
# check the pod status
kubectl get pod -n vishow -owide
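Once the pod is Running, a quick reachability check from any node; this assumes the vishow app answers HTTP on port 8098 (NodePort 30001), so adjust if it speaks another protocol:
curl -I http://10.0.0.10:30001/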