首页 > 其他分享 >ceph16版本部署

ceph16版本部署

时间:2023-07-17 23:35:15浏览次数:35  
标签:部署 storage01 tcp ceph -- ipv4 版本 ceph16 net

1.初始化配置

IP 主机名
10.0.0.10 storage01
10.0.0.11 storage02
10.0.0.12 storage03

1.1 配置离线源(所有节点)

tar zxvf ceph16pkg.tar.gz -C /opt/

cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph16pkg/debs/
EOF

apt clean all
apt update

1.2 安装基本工具(所有节点)

apt install -y bash-completion vim net-tools

1.3 修改主机名

  • storage01
hostnamectl set-hostname storage01
  • storage02
hostnamectl set-hostname storage02
  • storage03
hostnamectl set-hostname storage03

1.4 服务器优化(所有节点)

vim /etc/security/limits.conf
# root账户的资源限制
root soft core unlimited
root hard core unlimited
root soft nproc 1000000
root hard nproc 1000000
root soft nofile 1000000
root hard nofile 1000000
root soft memlock 32000
root hard memlock 32000
root soft msgqueue 8192000
root hard msgqueue 8192000
# 其它账户的资源限制
* soft core unlimited
* hard core unlimited
* soft nproc 1000000 # 系统最大进程数
* hard nproc 1000000
* soft nofile 1000000 # 系统最大打开文件个数
* hard nofile 1000000
* soft memlock 32000 # 最大锁定内存地址空间(KB)
* hard memlock 32000
* soft msgqueue 8192000 # 消息队列长度
* hard msgqueue 8192000
vim /etc/sysctl.conf
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
# Controls whether core dumps will append the PID to the core filename
# Useful for debugging multi-threaded application
kernel.core_uses_pid = 1
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
# Disable netfilter on bridges
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 0
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
# Controls the maximum size of a single message, in bytes
kernel.msgmax = 65536
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
# Controls the maximum total shared memory, in pages
kernel.shmall = 4294967296
# TCP kernel paramater
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.ip_local_port_range = 10001 65000
# swap
vm.overcommit_memory = 0
vm.swappiness = 10
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_filter = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
  • 重启服务器生效
reboot

1.5 配置hosts解析(所有节点)

cat >> /etc/hosts <<EOF
10.0.0.10 storage01
10.0.0.11 storage02
10.0.0.12 storage03
EOF

1.6 配置时间同步

  • 安装服务(所有节点)
apt install -y chrony
  • storage01节点配置服务端
vim /etc/chrony/chrony.conf
'''
pool storage01 iburst
allow all
local stratum 10
'''

# 重启生效
systemctl restart chronyd
  • storage02/03节点配置客户端
vim /etc/chrony/chrony.conf
pool storage01 iburst

# 重启生效
systemctl restart chronyd

# 验证
chronyc sources -v

2.安装基本服务(所有节点)

apt install -y docker-ce

3.初始化集群

3.1 安装初始化工具

  • storage01节点
apt install -y cephadm

3.2 导入镜像(所有节点)

tar zxvf ceph16image.tar.gz

cd ceph16image/

for i in `ls`;do docker load -i $i; done

3.3 搭建本地仓库

  • storage01节点操作
docker run -d --name registry -p 5000:5000 --restart always 4bb5ea59f8e0

3.4 配置仓库地址(所有节点)

cat > /etc/docker/daemon.json << EOF
{
"insecure-registries":["10.0.0.10:5000"]
}
EOF

systemctl daemon-reload
systemctl restart docker

3.5 推入关键镜像到仓库

  • storage01节点
# 打tag标签
docker tag 327f301eff51 10.0.0.10:5000/ceph:v16

# 推入到私有仓库
docker push 10.0.0.10:5000/ceph:v16

3.6 初始化集群

  • storage01节点操作
# 安装ceph工具
mkdir /etc/ceph

# 初始化集群
cephadm --image 10.0.0.10:5000/ceph:v16 bootstrap --mon-ip 10.0.0.10 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull

=========================================================================
# 如果需要定义集群存储网络,换成如下:
cephadm --image 172.21.48.10:5000/ceph:v16 bootstrap --mon-ip 172.21.48.10 --initial-dashboard-user admin --initial-dashboard-password 000000 --cluster-network 10.0.0.0/24 --skip-pull

4.配置加入集群(storage01节点)

  • 传输密钥
ssh-copy-id -f -i /etc/ceph/ceph.pub storage02

ssh-copy-id -f -i /etc/ceph/ceph.pub storage03
  • 加入集群
cephadm shell

ceph orch host add storage02

ceph orch host add storage03

5.添加osd硬盘(storage01节点)

# 可查看哪些硬盘设备可用
ceph orch device ls

# 将所有硬盘添加到ceph集群中
ceph orch apply osd --all-available-devices


======================如果特殊情况需要添加指定的硬盘=====================
ceph orch daemon add osd storage02:/dev/sdb

6.部署MDS

  • CephFS 需要两个 Pools,cephfs-data 和 cephfs-metadata,分别存储文件数据和文件元数据
ceph osd pool create cephfs-metadata 16 16

ceph osd pool create cephfs-data 32 32

ceph fs new cephfs cephfs-metadata cephfs-data

ceph orch apply mds cephfs --placement="3 storage01 storage02 storage03"

# 查看mds有三个,两个预备状态
ceph -s


mount -t ceph storage01:6789,storage02:6789,storage03:6789:/ /cloudssd/ -o name=fsclient,secretfile=/etc/ceph/fsclient.key

7.部署RGW

  • 存储对象存储
ceph orch apply rgw myorg cn-east-1 --placement="3 storage01 storage02 storage03"

# 最后一个,需要等一会
ceph orch ls

标签:部署,storage01,tcp,ceph,--,ipv4,版本,ceph16,net
From: https://www.cnblogs.com/wsxier/p/17561612.html

相关文章

  • 使用docker安装的tomcat部署activiti-app.war、activiti-admin.war失败(ClassNotFound
    背景一直以来习惯用docker配置一些本地学习环境,许多教程配置activiti的方式都是通过复制activiti的war包部署在tomcat中,我尝试了一下通过docker的方式遇到了一些不易察觉的错误。使用方式描述使用docker安装tomcat9.0dockerrun-d-p8080:8080--nametomcattomcat:9.0复......
  • 第九节:Centos8.x版本切换yum/dnf下载源为阿里的地址
    一.        二.        三.         !作       者:Yaopengfei(姚鹏飞)博客地址:http://www.cnblogs.com/yaopengfei/声     明1:如有错误,欢迎讨论,请勿谩骂^_^。声     明2:原创博客请在转载......
  • phpstudy部署简易的网站
     网站的起源 web1.0 纯静态网站 展示的数据就是网站源码中的数据 不能够具备利用攻击手段获得数据,但是可以拒绝式服务攻击 web2.0 动态网站 有数据库,网站数据放置在数据库中 具备攻击 获取数据 网站使用的协议 协议 http 超文本传输协议 ......
  • mac jdk多版本+maven配置
    单版本配置 JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_311.jdk/Contents/HomeM2_HOME=/Users/lilun/Documents/apache-maven-3.8.6PATH=$JAVA_HOME/bin:$PATH:.PATH=$M2_HOME/bin:$PATHexportM2_HOMEexportPATHexportCLASSPATHaliasjdk8="......
  • linux 中查看glibc版本
     001、查看软链接[root@PC1software]#ls-l/lib64/libc.so.6lrwxrwxrwx.1rootroot12Jul1717:22/lib64/libc.so.6->libc-2.17.so 002、执行   /lib64/libc.so.6[root@PC1software]#/lib64/libc.so.6GNUCLibrary(GNUlibc)stablereleaseversio......
  • GoFrame v2.5 版本发布,企业级 Golang 开发框架
    大家好啊,GoFrame 框架今天发布了 v2.5.0 正式版本啦!......
  • docker-compose 部署zookeeper
    docker-compose.yamlversion:'3.1'services:zoo1:image:zookeeperrestart:alwayshostname:zoo1ports:-2181:2181environment:ZOO_MY_ID:1ZOO_SERVERS:server.1=zoo1:2888:3888;2181server.2=zoo2:2888......
  • 百度飞桨PP-YOLOE ONNX 在LabVIEW中的部署推理(含源码)
    目录前言一、什么是PP-YOLO二、环境搭建1、部署本项目时所用环境2、LabVIEW工具包下载及安装三、模型的获取与转化1、安装paddle2、安装依赖的库3、安装pycocotools4、导出onnx模型(1)导出推理模型(2)安装paddle2onnx(3)转换成onnx格式四、在LabVIEW实现PP-YOLOE的部署推理1、LabVIEW......
  • 手机直播app源码部署搭建:带货潮流,商城功能!
     随着互联网时代的迅猛发展,手机直播app源码平台早已成为了人们获取资讯、娱乐放松等方式的主要载体,手机直播app源码平台的日益火爆,也让商人们有了一个新兴的想法出现:直播app平台如此火爆,平台的用户也如此庞大,可不可以将自己家的产品在直播app平台直播进行宣传?所以很多商人陆续开......
  • Scrapyd、scrapyd-client部署爬虫项目
    命令参考:https://github.com/scrapy/scrapyd-clienthttps://scrapyd.readthedocs.io安装组件pipinstallscrapydpipinstallscrapyd-clientScrapyd服务器端服务器端是可以有多个的,你爱多少个就搞多少个服务器端。部署scrapy时,可以手动指定部署到那个目标服务器端中。scr......