
Ceph cluster setup


#In production it is best to use two separate IP ranges

#OS
Ubuntu 20.04.3
#192.168.10.20x: cluster network, used for internal cluster communication
#192.168.10.10x: public network, used by clients and applications
192.168.10.200/192.168.10.100 ceph-deploy
192.168.10.201/192.168.10.101 ceph-mgr1-ceph-rgw1
192.168.10.202/192.168.10.102 ceph-mgr2-ceph-rgw2
192.168.10.203/192.168.10.103 ceph-mon1-ceph-mds1-node1
192.168.10.204/192.168.10.104 ceph-mon2-ceph-mds2-node2
192.168.10.205/192.168.10.105 ceph-mon3-ceph-mds3-node3

Basic system tuning

#Allow remote root login
vim /etc/ssh/sshd_config
PermitRootLogin yes
UseDNS no
#Restart sshd
systemctl restart sshd.service

#Configure /etc/hosts on all nodes
cat > /etc/hosts <<'EOF'
127.0.0.1 localhost
127.0.1.1 t

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

192.168.10.200 ceph-deploy
192.168.10.201 ceph-mgr1-ceph-rgw1
192.168.10.202 ceph-mgr2-ceph-rgw2
192.168.10.203 ceph-mon1-ceph-mds1-node1
192.168.10.204 ceph-mon2-ceph-mds2-node2
192.168.10.205 ceph-mon3-ceph-mds3-node3
EOF

#Configure the apt sources; the USTC mirror is used here
cat >/etc/apt/sources.list<<'EOF'
deb http://mirrors.ustc.edu.cn/ubuntu/ focal main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal main restricted universe multiverse

deb http://mirrors.ustc.edu.cn/ubuntu/ focal-security main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-security main restricted universe multiverse

deb http://mirrors.ustc.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-updates main restricted universe multiverse

deb http://mirrors.ustc.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-backports main restricted universe multiverse

## Not recommended
# deb http://mirrors.ustc.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse
# deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse
EOF
#Add the Ceph release key and the Pacific package source, then refresh the package index
wget -q -O- 'http://mirrors.ustc.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
echo deb http://mirrors.ustc.edu.cn/ceph/debian-pacific/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
apt update


#Time synchronization
apt install cron -y
systemctl status cron.service  
echo "*/5 * * * * /usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w" >> /var/spool/cron/crontabs/root
/usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w
rm -rf /etc/localtime
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cat >> /etc/default/locale << 'EOF'
LANG=en_US.UTF-8
LC_TIME=en_DK.UTF-8
EOF

#Install commonly used system tools:
apt install iproute2  ntpdate  tcpdump telnet traceroute nfs-kernel-server nfs-common  lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip  apt-transport-https ca-certificates curl software-properties-common vim-common inetutils-ping -y



#Kernel parameters for all nodes
cat > /etc/sysctl.conf <<'EOF'
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
   
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
   
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
   
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
   
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
   
# Disable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
   
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
   
# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536
   
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
   
# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296
   
# TCP kernel parameters
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096        87380   4194304
net.ipv4.tcp_wmem = 4096        16384   4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
   
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
   
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
   
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
   
   
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1
   
# keepalive conn
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.ip_local_port_range = 10001    65000
   
# swap
vm.overcommit_memory = 0
vm.swappiness = 10
   
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
EOF
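#Apply the kernel parameters (a quick sanity step; the net.bridge.* keys need the br_netfilter module loaded, so errors on them can be ignored on hosts without bridging):
sysctl -p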

#Resource limits for all nodes
cat > /etc/security/limits.conf <<'EOF'
root                soft    core            unlimited
root                hard    core            unlimited
root                soft    nproc           1000000
root                hard    nproc           1000000
root                soft    nofile          1000000
root                hard    nofile          1000000
root                soft    memlock         32000
root                hard    memlock         32000
root                soft    msgqueue        8192000
root                hard    msgqueue        8192000
   
  
*                soft    core            unlimited
*                hard    core            unlimited
*                soft    nproc           1000000
*                hard    nproc           1000000
*                soft    nofile          1000000
*                hard    nofile          1000000
*                soft    memlock         32000
*                hard    memlock         32000
*                soft    msgqueue        8192000
*                hard    msgqueue        8192000
EOF
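#The new limits apply to new login sessions; after re-logging in, a quick check:
ulimit -n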

#Install python2.7 on all nodes
apt install python2.7 -y && ln -sv /usr/bin/python2.7 /usr/bin/python2


#Create a ceph user on all nodes and allow it to run privileged commands:
#It is recommended to deploy and run the ceph cluster as a dedicated ordinary user; the user only needs to be able to run privileged commands non-interactively. Newer versions of ceph-deploy accept any user that can run commands, including root,
#but an ordinary user such as ceph, cephuser or cephadmin is still recommended for managing the cluster.
#Here the cephadmin user is used
groupadd -r -g 2022 cephadmin && useradd -r -m -s /bin/bash -u 2022 -g 2022 cephadmin && echo cephadmin:qwe123 | chpasswd

#Allow the user to run commands with root privileges
echo "cephadmin ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

Disable swap
swapoff -a
  
vim /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point>   <type>  <options>       <dump>  <pass>
# / was on /dev/sda2 during curtin installation
/dev/disk/by-uuid/d70a7e92-2d0d-4014-a9a1-4cd95db5e242 / xfs defaults 0 0
#/swap.img      none    swap    sw      0       0
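
#Verify swap is disabled (the Swap row should read 0):
free -m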

#Quickly set the hostname (run the matching line on each node)
hostname ceph-deploy && sed -i "s#t#ceph-deploy#g" /etc/hostname
hostname ceph-mgr1-ceph-rgw1 && sed -i "s#t#ceph-mgr1-ceph-rgw1#g" /etc/hostname
hostname ceph-mgr2-ceph-rgw2 && sed -i "s#t#ceph-mgr2-ceph-rgw2#g" /etc/hostname
hostname ceph-mon1-ceph-mds1-node1 && sed -i "s#t#ceph-mon1-ceph-mds1-node1#g" /etc/hostname
hostname ceph-mon2-ceph-mds2-node2 && sed -i "s#t#ceph-mon2-ceph-mds2-node2#g" /etc/hostname
hostname ceph-mon3-ceph-mds3-node3 && sed -i "s#t#ceph-mon3-ceph-mds3-node3#g" /etc/hostname

#Quickly configure the IPs (run the matching line on each node; the template starts from the .200/.100 addresses)
sed -i "s#200#201#g" /etc/netplan/00-installer-config.yaml && sed -i "s#100#101#g" /etc/netplan/00-installer-config.yaml && netplan apply && ip a
sed -i "s#200#202#g" /etc/netplan/00-installer-config.yaml && sed -i "s#100#102#g" /etc/netplan/00-installer-config.yaml && netplan apply && ip a
sed -i "s#200#203#g" /etc/netplan/00-installer-config.yaml && sed -i "s#100#103#g" /etc/netplan/00-installer-config.yaml && netplan apply && ip a
sed -i "s#200#204#g" /etc/netplan/00-installer-config.yaml && sed -i "s#100#104#g" /etc/netplan/00-installer-config.yaml && netplan apply && ip a
sed -i "s#200#205#g" /etc/netplan/00-installer-config.yaml && sed -i "s#100#105#g" /etc/netplan/00-installer-config.yaml && netplan apply && ip a

Installing the cluster

#On ceph-deploy, switch to the cephadmin user and generate an SSH key
cephadmin@ceph-deploy:~$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/cephadmin/.ssh/id_rsa): 
Created directory '/home/cephadmin/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/cephadmin/.ssh/id_rsa
Your public key has been saved in /home/cephadmin/.ssh/id_rsa.pub
The key fingerprint is:
SHA256:kkB54FFimEU/mCw0DxOm6SFwCjjRMQVq+/b3zDOdH5I cephadmin@ceph-deploy
The key's randomart image is:
+---[RSA 3072]----+
|=o%XX+.          |
|=O+@.*.          |
|*+. B.o          |
|+ o. . o         |
| o    o S        |
|  .    .    .    |
|   o      .E..   |
|  . .  .oo o. .  |
|     .. .+o ..   |
+----[SHA256]-----+



#Install sshpass
cephadmin@ceph-deploy:~$ sudo apt install sshpass


#Key-distribution script, run as the cephadmin user on the ceph-deploy node
cat >ssh-scp.sh<<'EOF'
#!/bin/bash
#Target host list
IP="
192.168.10.201
192.168.10.202
192.168.10.203
192.168.10.204
192.168.10.205
"
for node in ${IP};do
    sshpass -p "qwe123" ssh-copy-id -i /home/cephadmin/.ssh/id_rsa.pub -o StrictHostKeyChecking=no cephadmin@${node} &> /dev/null
    if [ $? -eq 0 ];then
      echo "${node} key copied successfully"
    else
      echo "${node} key copy failed"
    fi
done
EOF
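
#Run the script as cephadmin; each host should report that the key copy succeeded:
bash ssh-scp.sh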
  
#Install the ceph-deploy tool on the ceph-deploy node
#List the installable versions
sudo apt-cache madison ceph-deploy

#Run the install command
sudo apt install ceph-deploy=2.0.1-0ubuntu1.1
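
#Verify the installed version (the exact output may vary):
ceph-deploy --version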


#Initialize the mon node
#Create a directory to hold the cluster's initial configuration
mkdir ceph-cluster  && cd ceph-cluster/

#Initialize the cluster
#--cluster-network: the internal network the ceph servers use for cluster management and data replication (the 192.168.10.20x range)
#--public-network: the network used by clients and applications (the 192.168.10.10x range)
#Run this once, against the initial mon node only; the other mons are added later with `ceph-deploy mon add`
ceph-deploy new --cluster-network 192.168.10.0/24 --public-network 192.168.10.0/24  ceph-mon1-ceph-mds1-node1

#Initialize the ceph node servers; this installs ceph, ceph-osd, ceph-mds, ceph-mon and radosgw
#--no-adjust-repos: do not push the repo configuration
#--nogpgcheck: skip package signature verification
ceph-deploy install --no-adjust-repos --nogpgcheck ceph-mon1-ceph-mds1-node1 ceph-mon2-ceph-mds2-node2 ceph-mon3-ceph-mds3-node3

#Add the remaining mon nodes (run this after `ceph-deploy mon create-initial` below, which creates ceph-mon1)
ceph-deploy mon add ceph-mon2-ceph-mds2-node2
ceph-deploy mon add ceph-mon3-ceph-mds3-node3
#Verify the mons
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     511a9f2f-c2eb-4461-9472-a2074f266bba
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1-ceph-mds1-node1,ceph-mon2-ceph-mds2-node2,ceph-mon3-ceph-mds3-node3 (age 9s)
    mgr: ceph-mgr1-ceph-rgw1(active, since 19m), standbys: ceph-mgr2-ceph-rgw2
    osd: 12 osds: 12 up (since 3m), 12 in (since 3m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   74 MiB used, 480 GiB / 480 GiB avail
    pgs:     1 active+clean
 



#Install the mon package on the mon nodes; this configures them and generates the sync keys
apt install -y ceph-mon

#Initialize the mon node from the ceph-deploy node
ceph-deploy mon create-initial
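
#create-initial also gathers the bootstrap keyrings into the working directory (a quick check):
ls -l /home/cephadmin/ceph-cluster/*.keyring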


#Verify the mon node; per the generated configuration, only one mon server has been added at this point
root@ceph-mon1-ceph-mds1-node1:~# ps aux | grep ceph-mon
ceph       73614  0.5  2.1 416112 43156 ?        Ssl  13:52   0:00 /usr/bin/ceph-mon -f --cluster ceph --id ceph-mon1-ceph-mds1-node1 --setuser ceph --setgroup ceph
root       74197  0.0  0.0   8160   656 pts/1    S+   13:53   0:00 grep --color=auto ceph-mon
#Check the mon port. This step matters: CephFS and RBD clients connect to this port later, and a wrong listen address will cause timeouts
root@ceph-mon1-ceph-mds1-node1:~# ss -lnt|grep 6789
LISTEN  0       512        192.168.10.103:6789           0.0.0.0:*
root@ceph-mon2-ceph-mds2-node2:~# ss -lnt|grep 6789
LISTEN  0       512        192.168.10.104:6789           0.0.0.0:* 
root@ceph-mon3-ceph-mds3-node3:~#  ss -lnt|grep 6789
LISTEN  0       512        192.168.10.105:6789           0.0.0.0:* 


#Install ceph-common on the node servers
apt install ceph-common -y

#Copy the admin keyring to node1, node2 and node3
ceph-deploy admin ceph-mon1-ceph-mds1-node1 ceph-mon2-ceph-mds2-node2 ceph-mon3-ceph-mds3-node3

#Grant access to the keyring on the ceph nodes
#For safety the keyring's owner and group default to root; to let the ceph user run ceph commands, grant that user access
apt-get install acl
setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring
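
#Verify the ACL took effect (a quick check):
getfacl /etc/ceph/ceph.client.admin.keyring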


Install the ceph-mgr package on ceph-mgr1 and ceph-mgr2
apt install -y ceph-mgr

#Initialize the mgr nodes
ceph-deploy mgr create ceph-mgr1-ceph-rgw1 ceph-mgr2-ceph-rgw2

Verify on the ceph-mgr1-ceph-rgw1 and ceph-mgr2-ceph-rgw2 nodes
root@ceph-mgr1-ceph-rgw1:~# ps aux | grep ceph-mgr


Configure ceph-deploy to manage the cluster
#Environment setup on the ceph-deploy node
sudo apt install -y ceph-common
ceph-deploy admin ceph-deploy
sudo setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring

#Cluster status as seen from the ceph-deploy node
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     511a9f2f-c2eb-4461-9472-a2074f266bba
    health: HEALTH_WARN
            mon is allowing insecure global_id reclaim
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-mon1-ceph-mds1-node1 (age 18m)
    mgr: ceph-mgr1-ceph-rgw1(active, since 3m), standbys: ceph-mgr2-ceph-rgw2
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs
	
	
#Disable insecure global_id reclaim
ceph config set mon auth_allow_insecure_global_id_reclaim false
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     511a9f2f-c2eb-4461-9472-a2074f266bba
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-mon1-ceph-mds1-node1 (age 19m)
    mgr: ceph-mgr1-ceph-rgw1(active, since 4m), standbys: ceph-mgr2-ceph-rgw2
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:  

#Versions of the individual cluster components
cephadmin@ceph-deploy:~/ceph-cluster$ ceph version
ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)
cephadmin@ceph-deploy:~/ceph-cluster$ ceph versions
{
    "mon": {
        "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 1
    },
    "mgr": {
        "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 2
    },
    "osd": {},
    "mds": {},
    "overall": {
        "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 3
    }
}


#Prepare the osd nodes
#The node initialization was already done above, so the following step can be skipped
#Before wiping the disks, install the ceph runtime on the node servers from the deploy node.
#Run on ceph-deploy
ceph-deploy install --release pacific ceph-mon1-ceph-mds1-node1 ceph-mon2-ceph-mds2-node2 ceph-mon3-ceph-mds3-node3

#List the disks on the ceph node servers
ceph-deploy disk list ceph-mon1-ceph-mds1-node1 
ceph-deploy disk list ceph-mon2-ceph-mds2-node2
ceph-deploy disk list ceph-mon3-ceph-mds3-node3

#Wipe each ceph node's data disks with ceph-deploy disk zap
#Run on ceph-deploy
ceph-deploy  disk zap ceph-mon1-ceph-mds1-node1  /dev/sdb
ceph-deploy  disk zap ceph-mon1-ceph-mds1-node1  /dev/sdc
ceph-deploy  disk zap ceph-mon1-ceph-mds1-node1  /dev/sdd
ceph-deploy  disk zap ceph-mon1-ceph-mds1-node1  /dev/sde
 
ceph-deploy  disk zap ceph-mon2-ceph-mds2-node2  /dev/sdb
ceph-deploy  disk zap ceph-mon2-ceph-mds2-node2  /dev/sdc
ceph-deploy  disk zap ceph-mon2-ceph-mds2-node2  /dev/sdd
ceph-deploy  disk zap ceph-mon2-ceph-mds2-node2  /dev/sde
  
ceph-deploy  disk zap ceph-mon3-ceph-mds3-node3  /dev/sdb
ceph-deploy  disk zap ceph-mon3-ceph-mds3-node3  /dev/sdc
ceph-deploy  disk zap ceph-mon3-ceph-mds3-node3  /dev/sdd
ceph-deploy  disk zap ceph-mon3-ceph-mds3-node3  /dev/sde


Add the OSDs
#Run on ceph-deploy
ceph-deploy osd create ceph-mon1-ceph-mds1-node1 --data /dev/sdb
ceph-deploy osd create ceph-mon1-ceph-mds1-node1 --data /dev/sdc
ceph-deploy osd create ceph-mon1-ceph-mds1-node1 --data /dev/sdd
ceph-deploy osd create ceph-mon1-ceph-mds1-node1 --data /dev/sde
 
ceph-deploy osd create ceph-mon2-ceph-mds2-node2 --data /dev/sdb
ceph-deploy osd create ceph-mon2-ceph-mds2-node2 --data /dev/sdc
ceph-deploy osd create ceph-mon2-ceph-mds2-node2 --data /dev/sdd
ceph-deploy osd create ceph-mon2-ceph-mds2-node2 --data /dev/sde
 
ceph-deploy osd create ceph-mon3-ceph-mds3-node3 --data /dev/sdb
ceph-deploy osd create ceph-mon3-ceph-mds3-node3 --data /dev/sdc
ceph-deploy osd create ceph-mon3-ceph-mds3-node3 --data /dev/sdd
ceph-deploy osd create ceph-mon3-ceph-mds3-node3 --data /dev/sde


Enable the OSDs to start on boot (run each line on the node hosting those OSD ids)
systemctl enable ceph-osd@0 ceph-osd@1 ceph-osd@2 ceph-osd@3
systemctl enable ceph-osd@4 ceph-osd@5 ceph-osd@6 ceph-osd@7
systemctl enable ceph-osd@8 ceph-osd@9 ceph-osd@10 ceph-osd@11
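
#Verify all 12 OSDs are up and in from the deploy node (ids, weights and hostnames will vary):
ceph osd tree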


#Deploy CephFS: install the ceph-mds package
apt install -y ceph-mds

#Add the mds services to the ceph cluster
#Run on ceph-deploy
ceph-deploy mds create ceph-mon1-ceph-mds1-node1
ceph-deploy mds create ceph-mon2-ceph-mds2-node2
ceph-deploy mds create ceph-mon3-ceph-mds3-node3
ceph-deploy mds create ceph-mgr2-ceph-rgw2
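
#With no filesystem created yet, every mds should report as standby (a quick check):
ceph mds stat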



Create the CephFS metadata and data pools
cephfs-metadata: the pool that stores the metadata (names, sizes, creation and modification times). Even with several TB of data the metadata is only around 1-2 GB, so the pool does not need to be large; 32 PGs is enough in production
cephfs-data: the pool that stores the actual data
# Create cephfs-metadata
root@ceph-mon1-ceph-mds1:~# ceph osd pool create cephfs-metadata 32 32
pool 'cephfs-metadata' created
# Create cephfs-data
root@ceph-mon1-ceph-mds1:~# ceph osd pool create cephfs-data 64 64
pool 'cephfs-data' created
# Create a CephFS named mycephfs
root@ceph-mon1-ceph-mds1:~# ceph fs new mycephfs cephfs-metadata cephfs-data
new fs with metadata pool 3 and data pool 4

Verify CephFS
root@ceph-mon1-ceph-mds1:~# ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
root@ceph-mon1-ceph-mds1:~# ceph fs status mycephfs
mycephfs - 0 clients
========
RANK  STATE           MDS             ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mon1-ceph-mds1  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata  96.0k  50.6G  
  cephfs-data      data       0   50.6G  
MDS version: ceph version 16.2.10 (45fa1a083152e41a408d15505f594ec5f1b4fe17) pacific (stable)


#MDS high-availability tuning
cephadmin@ceph-deploy:~/ceph-cluster$ cat ceph.conf 
[global]
fsid = 511a9f2f-c2eb-4461-9472-a2074f266bba
public_network = 192.168.10.0/24
cluster_network = 192.168.10.0/24
mon_initial_members = ceph-mon1-ceph-mds1-node1
mon_host = 192.168.10.103
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

[mds.ceph-mon2-ceph-mds2-node2]
mds_standby_for_name = ceph-mon1-ceph-mds1-node1
mds_standby_replay = true
[mds.ceph-mon3-ceph-mds3-node3]
mds_standby_for_name = ceph-mgr2-ceph-rgw2
mds_standby_replay = true


#On ceph-deploy, copy the cluster files to /etc/ceph
sudo cp -ar /home/cephadmin/ceph-cluster/* /etc/ceph/

#Push the configuration file and restart the mds services:
#Pushing the configuration ensures each mds restart picks up the changes
ceph-deploy --overwrite-conf config push ceph-mon1-ceph-mds1-node1
ceph-deploy --overwrite-conf config push ceph-mon2-ceph-mds2-node2
ceph-deploy --overwrite-conf config push ceph-mon3-ceph-mds3-node3
ceph-deploy --overwrite-conf config push ceph-mgr1-ceph-rgw1
ceph-deploy --overwrite-conf config push ceph-mgr2-ceph-rgw2
 
#Restart the mds daemons on their nodes: first the active ones on ceph-mds1 and ceph-mgr2, then the standby ones on ceph-mds2 and ceph-mds3 (the mds instance name is the local hostname)
systemctl restart ceph-mds@ceph-mon1-ceph-mds1-node1.service
systemctl restart ceph-mds@ceph-mgr2-ceph-rgw2.service
systemctl restart ceph-mds@ceph-mon2-ceph-mds2-node2.service
systemctl restart ceph-mds@ceph-mon3-ceph-mds3-node3.service
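
#Afterwards one mds should be active and the rest standby (a quick check; which daemon is active depends on the election):
ceph mds stat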


#Install ceph-radosgw on ceph-mgr1 and ceph-mgr2
root@ceph-mgr1:~# apt install radosgw -y
root@ceph-mgr2:~# apt install radosgw -y


#From the ceph-deploy server, initialize ceph-mgr1 and ceph-mgr2 as radosGW services
#Note: if ceph.conf was changed earlier, copy the updated ceph.conf to ceph-rgw1 and ceph-rgw2 first
ceph-deploy --overwrite-conf rgw create ceph-mgr1-ceph-rgw1
ceph-deploy --overwrite-conf rgw create ceph-mgr2-ceph-rgw2
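
#radosgw listens on port 7480 by default; a quick check, assuming the default frontend port was not changed:
curl http://192.168.10.101:7480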


#Check the cluster's radosgw status from ceph-deploy
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     511a9f2f-c2eb-4461-9472-a2074f266bba
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1-ceph-mds1-node1,ceph-mon2-ceph-mds2-node2,ceph-mon3-ceph-mds3-node3 (age 16m)
    mgr: ceph-mgr1-ceph-rgw1(active, since 36m), standbys: ceph-mgr2-ceph-rgw2
    mds: 1/1 daemons up, 3 standby
    osd: 12 osds: 12 up (since 19m), 12 in (since 19m)
    rgw: 2 daemons active (2 hosts, 1 zones)
 
  data:
    volumes: 1/1 healthy
    pools:   7 pools, 225 pgs
    objects: 211 objects, 7.2 KiB
    usage:   90 MiB used, 480 GiB / 480 GiB avail
    pgs:     225 active+clean

Configure the clock drift tolerance
#Clock drift settings
mon clock drift allowed = 0.5     # allowed clock drift between mons; the default is 50 ms, 0.5 means 0.5 seconds
mon clock drift warn backoff = 3  # how many drift occurrences are tolerated before a warning is raised

Add the configuration (note: the fsid and mon_initial_members below come from a different run; keep the values from your own ceph.conf and only append the clock drift lines)
cat >/home/cephadmin/ceph-cluster/ceph.conf<<'EOF'
[global]
fsid = a6f25391-9efa-4a89-a7c6-663258be6321
public_network = 192.168.10.0/24
cluster_network = 192.168.10.0/24
mon_initial_members = ceph-mon1-ceph-mds1
mon_host = 192.168.10.103
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

#Clock drift settings
mon clock drift allowed = 0.5
mon clock drift warn backoff = 3
EOF
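
#As an alternative to the scp below, the file can be pushed with ceph-deploy, reusing the push commands from the mds section above:
ceph-deploy --overwrite-conf config push ceph-mon1-ceph-mds1-node1 ceph-mon2-ceph-mds2-node2 ceph-mon3-ceph-mds3-node3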


Then sync it to the mon nodes
scp ceph.conf root@192.168.10.103:/etc/ceph/
scp ceph.conf root@192.168.10.104:/etc/ceph/
scp ceph.conf root@192.168.10.105:/etc/ceph/

Check on the mon servers; alternatively, the settings can be copied straight into each mon server's config file, followed by a mon restart
===================================================
cephadmin@ceph-deploy:~/ceph-cluster$ cat ceph.conf
[global]
fsid = a6f25391-9efa-4a89-a7c6-663258be6321
public_network = 192.168.10.0/24
cluster_network = 192.168.10.0/24
mon_initial_members = ceph-mon1-ceph-mds1
mon_host = 192.168.10.103
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

#Clock drift settings
mon clock drift allowed = 0.5
mon clock drift warn backoff = 3
===================================================
root@ceph-mon2-ceph-mds2:~# cat /etc/ceph/ceph.conf
mon clock drift allowed = 0.5
mon clock drift warn backoff = 3
===================================================
root@ceph-mon3-ceph-mds3:~# cat /etc/ceph/ceph.conf
mon clock drift allowed = 0.5
mon clock drift warn backoff = 3
===================================================

Restart the mon nodes
root@ceph-mon1-ceph-mds1:~# reboot
root@ceph-mon2-ceph-mds2:~# reboot
root@ceph-mon3-ceph-mds3:~# reboot

Check
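
A final status check from the deploy node (ages and counters will differ):
ceph -s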
