1. NFS PV-PVC（NFS 服务跑在 192.168.56.12 上面）
[root@k8s-master pv]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /opt/data
    server: 192.168.56.12
[root@k8s-master pv]#
[root@k8s-master pv]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc001
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@k8s-master pv]#
[root@k8s-master pv]# cat pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: nginx
      image: nginx:latest
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: wwwroot
  volumes:
    - name: wwwroot
      persistentVolumeClaim:
        claimName: pvc001
[root@k8s-master pv]# kubectl get pod -o wide
mypod   1/1   Running   0   22m   172.17.42.9   192.168.56.11
[root@k8s-node1 data]# curl 172.17.42.9
nfs-pvc
[root@k8s-node1 data]#
查看 NFS 服务端 /opt/data 下面的文件：
[root@k8s-node2 data]# pwd
/opt/data
[root@k8s-node2 data]# cat index.html
nfs-pvc
[root@k8s-node2 data]#
2. GlusterFS PV-PVC（GlusterFS 集群跑在 192.168.56.11 和 192.168.56.12 上面）
[root@k8s-master gl]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs-cluster"   # 需要提前创建，16 章节已经创建
    path: "nginx-volume"             # 此处是 gluster volume 的名称
    readOnly: false
[root@k8s-master gl]#
[root@k8s-master gl]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc001
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@k8s-master gl]#
[root@k8s-master gl]# cat pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: nginx
      image: nginx:latest
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: wwwroot
  volumes:
    - name: wwwroot
      persistentVolumeClaim:
        claimName: pvc001
[root@k8s-master gl]#
[root@k8s-node1 data]# curl 172.17.81.2
gluster-pvc
[root@k8s-node1 data]#
[root@k8s-node2 data]# cat index.html
gluster-pvc
[root@k8s-node2 data]#