一、动态供给存储介绍
二、部署步骤
# nfs-client-provisioner-deploy.yaml
# Deploys the NFS dynamic-provisioning controller that watches PVCs and
# creates PVs backed by subdirectories of the NFS export below.
kind: Deployment
# NOTE(review): original used extensions/v1beta1, which was removed in
# Kubernetes 1.16. apps/v1 additionally requires spec.selector.
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate          # only one provisioner instance at a time
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must match the "provisioner:" field of the StorageClass.
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            # Values starting with '*' must be quoted in YAML
            # (bare '*' introduces an alias). Redacted NAS hostname.
            - name: NFS_SERVER
              value: "*-*.cn-beijing.nas.aliyuncs.com"
            - name: NFS_PATH
              value: /pods-volumes
      volumes:
        - name: nfs-client-root
          nfs:
            # Must be the same server/path as NFS_SERVER / NFS_PATH above
            # (original had an inconsistent hostname here).
            server: "*-*.cn-beijing.nas.aliyuncs.com"
            path: /pods-volumes
# kubectl apply -f nfs-client-provisioner-deploy.yaml
2、创建SA以及RBAC授权
# vim nfs-client-rbac.yaml
# ServiceAccount plus the cluster- and namespace-scoped RBAC rules the
# nfs-client-provisioner needs: manage PVs, watch PVCs/StorageClasses,
# emit Events, and take a leader-election lock on Endpoints.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
# kubectl apply -f nfs-client-rbac.yaml
3、创建存储类
# vim nfs-storage-class.yaml
# StorageClass served by the nfs-client-provisioner deployed above.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: yiruike-nfs-storage
# allowVolumeExpansion: true   # would enable expansion, but NFS does not support it
mountOptions:
  - vers=4
  - minorversion=0
  - noresvport
# Must match the PROVISIONER_NAME env var of the provisioner Deployment.
provisioner: fuseim.pri/ifs
parameters:
  # "false": delete the backing directory when the PVC is deleted;
  # "true" would archive it as archived-<pv-name> instead.
  archiveOnDelete: "false"
设置yiruike-nfs-storage sc为后端默认存储类:
# kubectl patch storageclass yiruike-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
[root@master-92 pv-pvc]# kubectl get sc
NAME PROVISIONER AGE
yiruike-nfs-storage(default) fuseim.pri/ifs 48s
三、验证部署结果
1、创建测试PVC文件
# vim test-claim.yaml
# Test PVC: requests a dynamically-provisioned volume from the
# yiruike-nfs-storage class via the legacy storage-class annotation.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "yiruike-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  # persistentVolumeReclaimPolicy: Retain  — NOTE(review): this is a PV
  # field, not valid on a PVC; kept commented out as in the original.
  resources:
    requests:
      storage: 2Gi
# kubectl apply -f test-claim.yaml
# kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/pvc-* 2Gi RWX Delete Bound default/test-claim yiruike-nfs-storage 1s NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE persistentvolumeclaim/test-claim Bound pvc-2fc935df-62f2-11ea-9e5a-00163e0a8e3e 2Gi RWX yiruike-nfs-storage 5s
2、创建测试POD
启动一个pod测试在test-claim的PV里touch一个SUCCESS文件
# vim test-pod.yaml
# One-shot verification pod: mounts the test-claim PVC at /mnt and
# touches /mnt/SUCCESS, exiting 0 on success and 1 on failure.
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: busybox:1.24
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"     # run once; do not restart after completion
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
# kubectl apply -f test-pod.yaml
# df -Th | grep aliyun
*-*.cn-beijing.nas.aliyuncs.com:/pods-volumes nfs4 10P 0 10P 0% /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root
# ls /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root
default-test-claim-pvc-0b1ce53d-62f4-11ea-9e5a-00163e0a8e3e
# ls /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root/default-test-claim-pvc-0b1ce53d-62f4-11ea-9e5a-00163e0a8e3e
SUCCESS
由此可见,部署正常,并且可以动态分配NFS的共享卷
3、数据持久化验证
现在我们可以将 test-pod 这个pod删掉,测试数据卷里面的文件会不会消失。
# kubectl delete pod/test-pod
经过查看可以得知,删掉这个pod以后,数据不会丢失,这样我们也就达到了动态的数据持久化