| nfs-server | nfs-client |
|---|---|
| k8s-master | k8s-node1, k8s-node2 |
```bash
# On the NFS server (k8s-master)
[root@k8s-master ~]# yum install -y nfs-common nfs-utils
[root@k8s-master ~]# mkdir /nfsdata
[root@k8s-master ~]# chmod 666 /nfsdata
[root@k8s-master ~]# vim /etc/exports
[root@k8s-master ~]# cat /etc/exports
/nfsdata *(rw,no_root_squash,no_all_squash,sync)
[root@k8s-master ~]# systemctl start rpcbind
[root@k8s-master ~]# systemctl start nfs

# On a client node, mount the export to verify that it works
[root@k8s-node2 ~]# mkdir /test
[root@k8s-node2 ~]# mount -t nfs 192.168.22.139:/nfsdata /test/    # IP of the nfs-server
[root@k8s-node2 ~]# df -Th | grep "/test"
192.168.22.139:/nfsdata nfs4  19G  9.9G  9.0G  53% /test
[root@k8s-node2 ~]# touch /test/ip.txt
[root@k8s-node2 ~]# ls /test/
ip.txt
[root@k8s-master ~]# ls /nfsdata/
ip.txt
[root@k8s-node2 ~]# umount /test    # unmount once the test is done
```
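If the mount does not work, the export can be checked from both sides before digging further; a minimal sketch using standard nfs-utils tools (server IP as above):

```bash
# On the NFS server: show what is currently exported
exportfs -v

# On the client: ask the server which directories it exports
showmount -e 192.168.22.139
```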
```bash
[root@k8s-master ~]# vim nfs-pv1.yaml
[root@k8s-master ~]# cat nfs-pv1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv1
spec:
  capacity:                    # the PV's capacity
    storage: 1Gi
  accessModes:                 # access modes
    - ReadWriteOnce            # the PV can be mounted read-write by a single node
  persistentVolumeReclaimPolicy: Recycle   # reclaim policy Recycle: scrub the data held in the PV
  storageClassName: nfs        # the PV's class; effectively a label used to match PVCs
  nfs:
    path: /nfsdata
    server: 192.168.22.139     # address of the machine hosting the NFS export
```
PS:
1) `accessModes` is set to `ReadWriteOnce` here; the supported access modes are:
- ReadWriteOnce (RWO): the volume can be mounted read-write by a single node
- ReadOnlyMany (ROX): the volume can be mounted read-only by many nodes
- ReadWriteMany (RWX): the volume can be mounted read-write by many nodes
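These values can also be looked up from the cluster's own API documentation instead of being memorized; a quick check with `kubectl explain` (output wording depends on the cluster version):

```bash
# Field-level documentation for a PersistentVolume's access modes and reclaim policy
kubectl explain pv.spec.accessModes
kubectl explain pv.spec.persistentVolumeReclaimPolicy
```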
```bash
[root@k8s-master ~]# kubectl apply -f nfs-pv1.yaml
persistentvolume/mypv1 created
[root@k8s-master ~]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
mypv1   1Gi        RWO            Recycle          Available           nfs                     8s
# STATUS is Available: mypv1 is ready and can be claimed by a PVC
```
```bash
[root@k8s-master ~]# vim nfs-pvc1.yaml
[root@k8s-master ~]# cat nfs-pvc1.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc1
spec:
  accessModes:            # requested access mode
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi        # requested capacity
  storageClassName: nfs   # class of the PV to claim
```
```bash
[root@k8s-master ~]# kubectl apply -f nfs-pvc1.yaml
persistentvolumeclaim/mypvc1 created
[root@k8s-master ~]# kubectl get pvc
NAME     STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mypvc1   Bound    mypv1    1Gi        RWO            nfs            6s
[root@k8s-master ~]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM            STORAGECLASS   REASON   AGE
mypv1   1Gi        RWO            Recycle          Bound    default/mypvc1   nfs                     12m

# The pv/pvc output shows that mypvc1 is now Bound to mypv1: the claim succeeded
```
```bash
[root@k8s-master ~]# vim pod1.yaml
[root@k8s-master ~]# cat pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs-pod-nginx
  labels:
    app: nginx
spec:
  containers:
    - name: mypod1
      image: daocloud.io/library/nginx
      ports:
        - containerPort: 80
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: mydata
  volumes:
    - name: mydata
      persistentVolumeClaim:
        claimName: mypvc1
```
```bash
[root@k8s-master ~]# kubectl apply -f pod1.yaml
pod/nfs-pod-nginx created
[root@k8s-master ~]# kubectl exec -it nfs-pod-nginx /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nfs-pod-nginx:/# ls /usr/share/nginx/html/
ip.txt          # the file created earlier under /nfsdata
root@nfs-pod-nginx:/# echo "hello!" > /usr/share/nginx/html/index.html
root@nfs-pod-nginx:/# exit
exit
command terminated with exit code 130
[root@k8s-master ~]# ls /nfsdata/    # the same files show up in the NFS export, so the volume is shared correctly
index.html  ip.txt
[root@k8s-master ~]# cat /nfsdata/index.html
hello!
[root@k8s-master ~]# kubectl delete pod nfs-pod-nginx
pod "nfs-pod-nginx" deleted
[root@k8s-master ~]# kubectl delete pvc mypvc1
persistentvolumeclaim "mypvc1" deleted
[root@k8s-master ~]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS     CLAIM            STORAGECLASS   REASON   AGE
mypv1   1Gi        RWO            Retain           Released   default/mypvc1   nfs                     98m
# The data in mypv1 is preserved, but the PV stays in the Released state and cannot be claimed by another PVC.
# To reuse the storage, delete and recreate mypv1; deleting only removes the PV object, the data on the backing storage is not touched.
[root@k8s-master ~]# kubectl delete pv mypv1
persistentvolume "mypv1" deleted
[root@k8s-master ~]# ls /nfsdata/index.html
/nfsdata/index.html
[root@k8s-master ~]# cat /nfsdata/index.html
hello!
```
```bash
[root@k8s-master mysqlpv]# vim mysql-pv.yaml
[root@k8s-master mysqlpv]# cat mysql-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfsdata/mysql-pv    # remember to create this directory on the NFS server
    server: 192.168.22.139
[root@k8s-master mysqlpv]# kubectl apply -f mysql-pv.yaml
persistentvolume/mysql-pv created
```
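The directory referenced by `path` has to exist on the NFS server before the volume is used; a minimal sketch, run on the nfs-server with the path assumed above:

```bash
# Create the backing directory for mysql-pv inside the exported share
mkdir -p /nfsdata/mysql-pv
```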
```bash
[root@k8s-master mysqlpv]# vim mysql-pvc.yaml
[root@k8s-master mysqlpv]# cat mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs
[root@k8s-master mysqlpv]# kubectl apply -f mysql-pvc.yaml
persistentvolumeclaim/mysql-pvc created
```
Write the mysql-pod.yaml file:
```bash
[root@k8s-master mysqlpv]# vim mysql-pod.yaml
[root@k8s-master mysqlpv]# cat mysql-pod.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - image: daocloud.io/library/mysql:5.7.5-m15
          name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: qinxue@123
          ports:
            - containerPort: 3306
              name: mysql
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql
      volumes:
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pvc
```
```bash
[root@k8s-master mysqlpv]# kubectl apply -f mysql-pod.yaml
service/mysql created
deployment.apps/mysql created
[root@k8s-master mysqlpv]# kubectl get pod -o wide | grep mysql
mysql                   1/1   Running   6 (3h27m ago)   13d   10.244.1.45   k8s-node1   <none>   <none>
mysql-55c4f546d-4nkt9   1/1   Running   0               43s   10.244.2.52   k8s-node2   <none>   <none>
[root@k8s-master mysqlpv]# kubectl exec -it mysql-bd87b4f8f-l6tdx /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@mysql-bd87b4f8f-l6tdx:/# mysql -uroot -p'qinxue@123'
mysql> create database db1;
Query OK, 1 row affected (0.00 sec)
```
1) Delete the deployment, PVC and PV, then recreate the PV, PVC and deployment: the data stored in MySQL is still there once the volume is mounted again, as shown in the sketch below.
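A minimal sketch of that check, reusing the manifests from this walkthrough (the Retain policy keeps the data under /nfsdata/mysql-pv on the NFS server):

```bash
# Tear everything down; only the Kubernetes objects are removed, not the data on the NFS share
kubectl delete -f mysql-pod.yaml
kubectl delete -f mysql-pvc.yaml
kubectl delete -f mysql-pv.yaml

# Recreate in the reverse order
kubectl apply -f mysql-pv.yaml
kubectl apply -f mysql-pvc.yaml
kubectl apply -f mysql-pod.yaml

# db1, created earlier, should still be listed
POD=$(kubectl get pod -l app=mysql -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$POD" -- mysql -uroot -p'qinxue@123' -e 'show databases;'
```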
```bash
[root@k8s-master pv-pvc]# vim storageclass-nfs.yaml
[root@k8s-master pv-pvc]# cat storageclass-nfs.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs
[root@k8s-master pv-pvc]# kubectl apply -f storageclass-nfs.yaml
storageclass.storage.k8s.io/managed-nfs-storage created
[root@k8s-master pv-pvc]# kubectl get sc
NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  20s
```
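Once the provisioner deployed below is running, any PVC that names this StorageClass gets a PV created for it automatically; a minimal sketch of such a claim (the claim name is only for illustration):

```bash
# A PVC that names the dynamic class; no PV has to be created by hand for it
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: managed-nfs-storage
  resources:
    requests:
      storage: 1Gi
EOF
```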
1) Because the StorageClass creates PVs automatically through kube-apiserver, the provisioner has to be authorized first.
2) Create a ServiceAccount; create a ClusterRole granting the permissions it needs (get/list/watch/create/delete and similar verbs on the basic API resources it touches); then create a ClusterRoleBinding that ties the ServiceAccount to the ClusterRole, so the ServiceAccount has the required permissions. The provisioner pod runs under this ServiceAccount and is therefore able to create PVs automatically.
```bash
[root@k8s-master pv-pvc]# vim rabc.yaml
[root@k8s-master pv-pvc]# cat rabc.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
```
```bash
[root@k8s-master pv-pvc]# kubectl apply -f rabc.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
[root@k8s-master pv-pvc]# kubectl get sa
NAME                     SECRETS   AGE
default                  0         14d
nfs-client-provisioner   0         14s
[root@k8s-master pv-pvc]# kubectl get cr | grep nfs
error: the server doesn't have a resource type "cr"
[root@k8s-master pv-pvc]# kubectl get clusterrole | grep nfs
nfs-client-provisioner-runner   2024-08-06T04:52:23Z
[root@k8s-master pv-pvc]# kubectl get clusterrolebinding | grep nfs
run-nfs-client-provisioner   ClusterRole/nfs-client-provisioner-runner   3m58s
```
```bash
[root@k8s-master pv-pvc]# vim deployment-nfs.yaml
[root@k8s-master pv-pvc]# cat deployment-nfs.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-client-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      nodeName: k8s-node2
      serviceAccount: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.22.139
            - name: NFS_PATH
              value: /opt/container_data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.22.139
            path: /opt/container_data
```
```bash
[root@k8s-master pv-pvc]# kubectl apply -f deployment-nfs.yaml
deployment.apps/nfs-client-provisioner created
# nfs-client-provisioner runs as a pod inside the cluster
[root@k8s-master pv-pvc]# kubectl get pod | grep nfs
nfs-client-provisioner-6c745f9d9-msrtp   1/1   Running   0   6s
```
Reference for the deployment YAML: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/
An nginx service is deployed here.
```bash
[root@k8s-master pv-pvc]# cat nginx.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: daocloud.io/library/nginx:1.13.0-alpine
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "managed-nfs-storage"
        resources:
          requests:
            storage: 1Gi
```
```bash
[root@k8s-master pv-pvc]# kubectl apply -f nginx.yaml
service/nginx created
statefulset.apps/web created
[root@k8s-master pv-pvc]# kubectl get pod
NAME                                     READY   STATUS    RESTARTS      AGE
configmap-pod                            1/1     Running   5 (52m ago)   12d
configmap-test-pod                       1/1     Running   5 (52m ago)   12d
mypod                                    1/1     Running   7 (52m ago)   13d
mysql                                    1/1     Running   7 (52m ago)   13d
nfs-client-provisioner-6c745f9d9-msrtp   1/1     Running   0             19m
tomcat                                   1/1     Running   7 (52m ago)   14d
web-0                                    1/1     Running   0             42s
web-1                                    1/1     Running   0             16s
# web-1 is only created after web-0 has been created successfully
```
2) To verify, exec into a container and create a file under /usr/share/nginx/html; after deleting a pod, the data is still there and is not lost — see the sketch below.
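A minimal sketch of that verification (the file name is only for illustration; the StatefulSet controller recreates web-0 with the same PVC, www-web-0):

```bash
# Write a test file through web-0's mounted volume
kubectl exec web-0 -- sh -c 'echo "hello from web-0" > /usr/share/nginx/html/test.html'

# Delete the pod; the StatefulSet recreates it with the same name and the same PVC
kubectl delete pod web-0
kubectl get pod web-0 -w    # wait until it is Running again, then Ctrl-C

# The file written before the deletion is still served from the same PV
kubectl exec web-0 -- cat /usr/share/nginx/html/test.html
```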