How a kubernetes StorageClass dynamically provisions a PV:
First create the StorageClass --> a PVC requests the already-created SC, and the SC automatically creates a matching PV --> in this way a PV is generated dynamically through the StorageClass.
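For reference, a minimal sketch of that flow using the in-tree kubernetes.io/rbd provisioner. The object names (ceph-rbd-sc, dynamic-pvc) and the two secret names are assumptions, not objects from this cluster, and the in-tree provisioner additionally requires both secrets to be of type kubernetes.io/rbd:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd-sc                    # hypothetical name
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.1.167:6789
  adminId: admin
  adminSecretName: ceph-admin-secret   # hypothetical secret holding client.admin's key
  adminSecretNamespace: default
  pool: k8spool
  userId: admin
  userSecretName: ceph-user-secret     # hypothetical per-namespace user secret
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dynamic-pvc
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: ceph-rbd-sc        # requesting this SC triggers PV creation
  resources:
    requests:
      storage: 1Gi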
Test: a pod mounting a Ceph RBD image directly
# Run on master1-admin
scp /etc/yum.repos.d/ceph.repo test-k8s-master1:/etc/yum.repos.d/
scp /etc/yum.repos.d/ceph.repo test-k8s-master2:/etc/yum.repos.d/
scp /etc/yum.repos.d/ceph.repo test-k8s-master3:/etc/yum.repos.d/
# Run on every k8s node
yum -y install ceph-common
# Run on master1-admin
scp -r /etc/ceph test-k8s-master1:/etc/
scp -r /etc/ceph test-k8s-master2:/etc/
scp -r /etc/ceph test-k8s-master3:/etc/
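Once the repo, the ceph-common package, and /etc/ceph (ceph.conf plus the admin keyring) are in place, each k8s node should be able to reach the cluster; a quick sanity check:

# On any k8s node: should print the cluster status rather than a connection error
ceph -s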
# Test a pod mounting a Ceph volume directly; run on master1-admin
ceph osd pool create k8spool 256
rbd create rbda -s 1024 -p k8spool
rbd feature disable k8spool/rbda object-map fast-diff deep-flatten
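object-map, fast-diff, and deep-flatten are disabled because the kernel RBD client that kubelet uses typically cannot map images with those features enabled. The result can be checked with:

# Should list only the remaining features (e.g. layering, exclusive-lock)
rbd info k8spool/rbda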
# Test a pod mounting the newly created rbda image directly
[root@test-k8s-master1 scripts]# cat test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: testrbd
spec:
  containers:
  - image: nginx
    name: nginx
    volumeMounts:
    - name: testrbd
      mountPath: /mnt
  volumes:
  - name: testrbd
    rbd:
      monitors:
      - '192.168.1.167:6789'
      pool: k8spool
      image: rbda
      fsType: xfs
      readOnly: false
      user: admin
      keyring: /etc/ceph/ceph.client.admin.keyring
[root@test-k8s-master1 scripts]# kubectl apply -f test.yaml
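A quick check that the image actually mapped and mounted (names taken from test.yaml above):

kubectl get pod testrbd                 # should reach Running
kubectl exec testrbd -- df -h /mnt      # /mnt should show the 1GiB xfs filesystem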
Creating a PV and PVC backed by Ceph RBD
# Create the ceph-secret Secret object, which the k8s volume plugin uses to access the Ceph cluster
# Get client.admin's keyring value and base64-encode it; run on master1-admin
[root@master1-admin firstrbd]# ceph auth get-key client.admin | base64
QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==
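Since the data field of a Secret must hold base64, the manual encoding step can also be skipped by letting kubectl do it; a sketch, assuming the node can reach both the Ceph cluster and the apiserver:

kubectl create secret generic ceph-secret \
  --from-literal=key="$(ceph auth get-key client.admin)"

--from-literal takes the raw key and kubectl base64-encodes it, which is equivalent to the YAML below.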
# Create the Ceph secret
[root@test-k8s-master1 scripts]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  key: QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==
[root@test-k8s-master1 scripts]# kubectl apply -f ceph-secret.yaml
secret/ceph-secret created
# Create the pool; run on master1-admin
ceph osd pool create k8spool2 256
rbd create rbda -s 1024 -p k8spool2
rbd feature disable k8spool2/rbda object-map fast-diff deep-flatten
# Create the PV
[root@test-k8s-master1 scripts]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  rbd:
    monitors:
    - 192.168.1.167:6789
    pool: k8spool2
    image: rbda
    user: admin
    secretRef:
      name: ceph-secret
    fsType: xfs
    readOnly: false
  # Recycle is only supported by the NFS and hostPath plugins; for RBD use Retain (or Delete)
  persistentVolumeReclaimPolicy: Retain
[root@test-k8s-master1 scripts]# kubectl apply -f pv.yaml
persistentvolume/ceph-pv created
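Before the claim is created, the new PV should show up as Available:

kubectl get pv ceph-pv   # STATUS should be Available until a PVC binds it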
[root@test-k8s-master1 scripts]# cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@test-k8s-master1 scripts]# kubectl apply -f pvc.yaml
persistentvolumeclaim/ceph-pvc created
[root@test-k8s-master1 scripts]# kubectl get pvc
NAME       STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-pvc   Bound    ceph-pv   1Gi        RWO
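With the claim bound, a pod can now consume the RBD image through the PVC instead of an inline rbd volume; a minimal sketch (the pod name ceph-pvc-pod is an assumption):

apiVersion: v1
kind: Pod
metadata:
  name: ceph-pvc-pod        # hypothetical name
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: data
      mountPath: /mnt
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: ceph-pvc   # the claim created above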
From: https://www.cnblogs.com/xone/p/16829539.html