GlusterFS是一个开源的分布式文件系统,具有强大的横向扩展能力,通过扩展能够支持数PB存储容量和处理数千客户端。集成来自多台服务器上的磁盘存储资源到单一全局命名空间,以提供共享文件存储。
GlusterFS 具有高扩展性、高可用性、高性能、可横向扩展等特点,并且其没有元数据服务器的设计,让整个服务没有单点故障的隐患。
- GlusterFS 安装环境要求
- 三个节点(k8s-master03 k8s-node01 k8s-node02)
- 每个节点必须至少连接一个原始块设备(如空的本地磁盘)供 heketi 使用。这些设备上不得有任何数据,因为它们将由 heketi 格式化和分区。简单意思就是需要一个没有数据的空的本地硬盘。
部署GlusterFS
- k8s-master03、k8s-node01、k8s-node02 各添加一块 50G 的硬盘(共三块)。此时三台机器均已添加了一块硬盘 /dev/sdd
[root@k8s-master03 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sdd 8:48 0 50G 0 disk
- 三台节点安装
yum -y install centos-release-gluster7.noarch
yum -y install glusterfs-server glusterfs-cli glusterfs-geo-replication
systemctl enable glusterd && systemctl start glusterd && systemctl status glusterd
- 在master01节点安装heketi
modprobe dm_snapshot && modprobe dm_mirror && modprobe dm_thin_pool
yum install -y centos-release-gluster
yum install -y heketi heketi-client
- 配置heketi.json
[root@k8s-master01 heketi]# cat heketi.json
{
"_port_comment": "Heketi Server Port Number",
"port": "8080",
"_use_auth": "Enable JWT authorization. Please enable for deployment",
"use_auth": false,
"_jwt": "Private keys for access",
"jwt": {
"_admin": "Admin has access to all APIs",
"admin": {
"key": "admin@com"
},
"_user": "User only has access to volumes endpoint",
"user": {
"key": "user@admin"
}
},
"_glusterfs_comment": "GlusterFS Configuration",
"glusterfs": {
"_executor_comment": [
"Execute plugin. Possible choices: mock, ssh",
"mock: This setting is used for testing and development.",
" It will not send commands to any node.",
"ssh: This setting will notify Heketi to ssh to the nodes.",
" It will need the values in sshexec to be configured.",
"kubernetes: Communicate with GlusterFS containers over",
" Kubernetes exec api."
],
"executor": "ssh",
"_sshexec_comment": "SSH username and private key file information",
"sshexec": {
"keyfile": "/etc/heketi/heketi_key",
"user": "root",
"port": "22",
"fstab": "/etc/fstab"
},
"_kubeexec_comment": "Kubernetes configuration",
"kubeexec": {
"host" :"https://kubernetes.host:8443",
"cert" : "/path/to/crt.file",
"insecure": false,
"user": "kubernetes username",
"password": "password for kubernetes user",
"namespace": "OpenShift project or Kubernetes namespace",
"fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
},
"_db_comment": "Database file name",
"db": "/var/lib/heketi/heketi.db",
"_loglevel_comment": [
"Set log level. Choices are:",
" none, critical, error, warning, info, debug",
"Default is warning"
],
"loglevel" : "debug"
}
}
[root@k8s-master01 heketi]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""
[root@k8s-master01 heketi]# chown heketi:heketi /etc/heketi/heketi_key
[root@k8s-master01 heketi]# ssh-copy-id -i /etc/heketi/heketi_key.pub [email protected]
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/heketi/heketi_key.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Number of key(s) added: 1
Now try logging into the machine, with: "ssh '[email protected]'"
and check to make sure that only the key(s) you wanted were added.
[root@k8s-master01 heketi]# ssh-copy-id -i /etc/heketi/heketi_key.pub [email protected]
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/heketi/heketi_key.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Number of key(s) added: 1
Now try logging into the machine, with: "ssh '[email protected]'"
and check to make sure that only the key(s) you wanted were added.
[root@k8s-master01 heketi]# ssh-copy-id -i /etc/heketi/heketi_key.pub [email protected]
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/heketi/heketi_key.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Number of key(s) added: 1
Now try logging into the machine, with: "ssh '[email protected]'"
and check to make sure that only the key(s) you wanted were added.
[root@k8s-master01 heketi]# systemctl enable heketi
[root@k8s-master01 heketi]# systemctl start heketi
[root@k8s-master01 heketi]# systemctl status heketi
[root@k8s-master01 heketi]# curl http://127.0.0.1:8080/hello
Hello from Heketi
- 设置gfs集群
创建topology.json文件
# vim topology.json
{
"clusters": [
{
"nodes": [
{
"node": {
"hostnames": {
"manage": [
"192.168.102.73"
],
"storage": [
"192.168.102.73"
]
},
"zone": 1
},
"devices": [
"/dev/sdd"
]
},
{
"node": {
"hostnames": {
"manage": [
"192.168.102.74"
],
"storage": [
"192.168.102.74"
]
},
"zone": 1
},
"devices": [
"/dev/sdd"
]
},
{
"node": {
"hostnames": {
"manage": [
"192.168.102.75"
],
"storage": [
"192.168.102.75"
]
},
"zone": 1
},
"devices": [
"/dev/sdd"
]
}
]
}
]
}
- 初始化heketi
[root@k8s-master01 ~]# heketi-cli --server http://127.0.0.1:8080 --user admin --secret admin@com topology load --json=/root/topology.json
Creating cluster ... ID: 449e2bf90d0035fb773adc14e8fa6c58
Allowing file volumes on cluster.
Allowing block volumes on cluster.
Creating node 192.168.102.73 ... ID: 4a49fa88df44baab4459e81cc99cb9da
Adding device /dev/sdd ... OK
Creating node 192.168.102.74 ... ID: 0aa3789e12d2894284051035e675f0b5
Adding device /dev/sdd ... OK
Creating node 192.168.102.75 ... ID: 2c609ca994b4b5a1ee925b7184138eb4
Adding device /dev/sdd ... OK
[root@k8s-master01 ~]# heketi-cli --user admin --secret admin@com topology info --server http://localhost:8080
Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
File: true
Block: true
Volumes:
Nodes:
Node Id: 0aa3789e12d2894284051035e675f0b5
State: online
Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
Zone: 1
Management Hostnames: 192.168.102.74
Storage Hostnames: 192.168.102.74
Devices:
Id:d399d7d2446c2ed0237011387c2f7391 Name:/dev/sdd State:online Size (GiB):49 Used (GiB):0 Free (GiB):49
Bricks:
Node Id: 2c609ca994b4b5a1ee925b7184138eb4
State: online
Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
Zone: 1
Management Hostnames: 192.168.102.75
Storage Hostnames: 192.168.102.75
Devices:
Id:51b4e5810f2ac199404377132c805666 Name:/dev/sdd State:online Size (GiB):49 Used (GiB):0 Free (GiB):49
Bricks:
Node Id: 4a49fa88df44baab4459e81cc99cb9da
State: online
Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
Zone: 1
Management Hostnames: 192.168.102.73
Storage Hostnames: 192.168.102.73
Devices:
Id:cc6ab4e8472db465cf59dff58a626015 Name:/dev/sdd State:online Size (GiB):49 Used (GiB):0 Free (GiB):49
Bricks:
[root@k8s-master01 ~]#
- 创建secret和storageclass
#生成secret资源,其中”key”值需要转换为base64编码格式标签:存储,name,kubernetes,mongodb,GlusterFS,StorageClass,io,MONGODB,heketi From: https://blog.51cto.com/u_14966640/5959947
[root@k8s-master01 ~]# echo -n "admin@com"|base64
YWRtaW5AY29t
[root@k8s-master01 ~]# vim heketi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: heketi-secret
namespace: default
data:
# base64 encoded password. E.g.: echo -n "mypassword" | base64
key: YWRtaW5AY29t
type: kubernetes.io/glusterfs
[root@k8s-master01 ~]# kubectl create -f heketi-secret.yaml
secret/heketi-secret created
#配置storageclass
[root@k8s-master01 ~]# vim gluster-heketi-storageclass.yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: gluster-heketi-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
parameters:
resturl: "http://192.168.102.71:8080"
restauthenabled: "true"
restuser: "admin"
secretNamespace: "default"
secretName: "heketi-secret"
volumetype: "replicate:3"
[root@k8s-master01 ~]# kubectl create -f gluster-heketi-storageclass.yaml
[root@k8s-master01 ~]# kubectl describe storageclass gluster-heketi-storageclass
Name: gluster-heketi-storageclass
IsDefaultClass: No
Annotations: <none>
Provisioner: kubernetes.io/glusterfs
Parameters: restauthenabled=true,resturl=http://192.168.102.71:8080,restuser=admin,secretName=heketi-secret,secretNamespace=default,volumetype=replicate:3
AllowVolumeExpansion: <unset>
MountOptions: <none>
ReclaimPolicy: Delete
VolumeBindingMode: Immediate
Events: <none>