Cluster environment:
root@master:~# kubectl version
Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.10", GitCommit:"7e54d50d3012cf3389e43b096ba35300f36e0817", GitTreeState:"clean", BuildDate:"2022-08-17T18:32:54Z", GoVersion:"go1.17.13", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.10", GitCommit:"7e54d50d3012cf3389e43b096ba35300f36e0817", GitTreeState:"clean", BuildDate:"2022-08-17T18:26:59Z", GoVersion:"go1.17.13", Compiler:"gc", Platform:"linux/amd64"}
OS version:
root@master:~# lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 20.04.6 LTS
Release: 20.04
Codename: focal
Use case
When a StorageClass's reclaim policy is set to "Retain", a PV whose PVC has been deleted moves to the "Released" state: the volume sits idle with its data preserved, but it will not accept a new PVC binding as-is. In that situation, how do you reuse the data stored on the PV?
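The obstacle is the PV's spec.claimRef field: after the PVC is deleted it still records the old claim's UID, so the PV stays "Released" instead of returning to "Available". A quick way to inspect it (a sketch; <pv-name> is a placeholder for your own PV name):
kubectl get pv <pv-name> -o jsonpath='{.spec.claimRef}'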
Procedure
1. Check whether the StorageClass reclaim policy is "Retain"
root@master:~/qwx/ceph/ceph-rbd-csi# kubectl get sc
NAME         PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
csi-rbd-sc   rbd.csi.ceph.com   Retain          Immediate           true                   141m
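If a PV was already provisioned under a StorageClass whose policy is "Delete", the policy on that individual PV can still be flipped to Retain without touching the StorageClass (a sketch; <pv-name> is a placeholder):
kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'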
2. Create a PVC; the dynamic StorageClass provisions a PV and binds the PVC to it automatically
root@master:~# cat raw-filesystem-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem  # mount as a filesystem (directory), as opposed to a raw block device
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-rbd-sc
root@master:~# kubectl apply -f raw-filesystem-pvc.yaml -n ceph
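Because the StorageClass uses Immediate volume binding, the PV is provisioned and bound as soon as the PVC is created; a quick sanity check (a sketch), which should print Bound:
kubectl get pvc/data -n ceph -o jsonpath='{.status.phase}'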
3. Create a Pod that mounts the PVC
root@master:~# cat ceph-rbd-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ceph-rbd
spec:
  replicas: 1
  selector:
    matchLabels:  # rs or deployment
      app: testing-rbd
  template:
    metadata:
      labels:
        app: testing-rbd
    spec:
      containers:
      - name: testing
        image: registry.cn-shanghai.aliyuncs.com/qwx_images/test-tools:v4
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: data
          mountPath: /mnt
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: data
root@master:~# kubectl apply -f ceph-rbd-demo.yaml -n ceph
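Rather than polling kubectl get pod, you can block until the Deployment reports ready (a sketch; the timeout value is arbitrary):
kubectl wait --for=condition=Available deployment/ceph-rbd -n ceph --timeout=120s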
4. Check that the Pod is Running
root@master:~/qwx/ceph/ceph-rbd-csi/test# kubectl get pod -n ceph
NAME                        READY   STATUS    RESTARTS   AGE
ceph-rbd-85d8f49b44-48f2j   1/1     Running   0          6m51s
5. Write data to the PV from inside the Pod
root@master:~# kubectl exec -it ceph-rbd-85d8f49b44-48f2j -n ceph -- sh -c 'echo "123" > /mnt/123.txt && ls /mnt/ && cat /mnt/123.txt'
123.txt
123
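To make the consistency check in step 10 stronger than eyeballing the file, you could also record a checksum now (a sketch; assumes md5sum is available in the test-tools image):
kubectl exec ceph-rbd-85d8f49b44-48f2j -n ceph -- md5sum /mnt/123.txt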
6. Delete the PVC and Pod, which leaves the PV in the "Released" state
root@master:~# kubectl delete -f ceph-rbd-demo.yaml -f raw-filesystem-pvc.yaml -n ceph
deployment.apps "ceph-rbd" deleted
persistentvolumeclaim "data" deleted
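Note that only the PVC and Deployment are deleted; with the Retain policy the PV object and its backing RBD image survive. You can watch it flip to "Released" (a sketch):
kubectl get pv -w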
7. Make the Released PV Available again
Check the PV status:
root@master:~# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS     CLAIM       STORAGECLASS   REASON   AGE
pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f   10Gi       RWO            Retain           Released   ceph/data   csi-rbd-sc              154m
Edit the PV and delete the claimRef section:
root@master:~# kubectl edit pv/pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f
claimRef:
  apiVersion: v1
  kind: PersistentVolumeClaim
  name: data
  namespace: ceph
  resourceVersion: "8168616"
  uid: 8c800a94-bacc-4ed8-9152-6d97507e646e
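Deleting the claimRef block in the editor works; for scripting, the same change can be made non-interactively (a sketch using a merge patch that nulls the field):
kubectl patch pv pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f -p '{"spec":{"claimRef":null}}'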
Verify the PV status:
root@master:~# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f   10Gi       RWO            Retain           Available           csi-rbd-sc              157m
8. Rebind the PV to a new PVC:
root@master:~# cat raw-static-filesystem-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f  # bind to the existing PV by name
  volumeMode: Filesystem  # mount as a filesystem (directory), as opposed to a raw block device
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-rbd-sc
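The spec.volumeName field pins this claim to the existing PV instead of triggering dynamic provisioning; the requested storage and storageClassName must still be compatible with that PV. Apply it as in step 2 (a sketch, since the original transcript omits the command):
kubectl apply -f raw-static-filesystem-pvc.yaml -n ceph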
9. Verify the binding succeeded:
root@master:~/qwx/ceph/ceph-rbd-csi/test# kubectl get pvc -n ceph
NAME   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data   Bound    pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f   10Gi       RWO            csi-rbd-sc     6s
root@master:~/qwx/ceph/ceph-rbd-csi/test# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM       STORAGECLASS   REASON   AGE
pvc-e416b2f8-5b1d-4dfe-8ddf-3fae847eac0f   10Gi       RWO            Retain           Bound    ceph/data   csi-rbd-sc              163m
10. Verify the data is intact
The ceph-rbd-demo.yaml manifest is unchanged from step 3, so re-apply it:
root@master:~# kubectl apply -f ceph-rbd-demo.yaml -n ceph
deployment.apps/ceph-rbd created
root@master:~# kubectl get pod -n ceph
NAME                        READY   STATUS    RESTARTS   AGE
ceph-rbd-85d8f49b44-mm74q   1/1     Running   0          4s
root@master:~# kubectl exec -it ceph-rbd-85d8f49b44-mm74q -n ceph -- sh -c 'ls /mnt/ && cat /mnt/123.txt'
123.txt
123
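If you recorded a checksum in step 5, comparing it now confirms the content byte-for-byte (a sketch; assumes md5sum exists in the image):
kubectl exec ceph-rbd-85d8f49b44-mm74q -n ceph -- md5sum /mnt/123.txt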