2019-05-17
$ helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
$ helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
    --set nfs.server=x.x.x.x \
    --set nfs.path=/nfs/data
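After the chart is installed, you can check that the provisioner pod is running (a quick check; the label below assumes the chart's default labels):

$ kubectl get pods -l app=nfs-subdir-external-provisioner

The manifests that follow deploy the same provisioner manually, without Helm. First create the RBAC resources it needs: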
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
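Save the RBAC manifest above (for example as rbac.yaml, an example filename) and apply it:

$ kubectl apply -f rbac.yaml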
Next, deploy the NFS-Subdir-External-Provisioner Deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: ccr.ccs.tencentyun.com/niewx-k8s/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner  # provisioner name; the StorageClass created later must reference this value
            - name: NFS_SERVER
              value: 10.0.8.6        # NFS server address; must match the volumes section below
            - name: NFS_PATH
              value: /nfs/data       # NFS data directory; must match the volumes section below
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.0.8.6         # NFS server address
            path: /nfs/data          # NFS data directory
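Apply the Deployment (deployment.yaml is an example filename) and wait for the provisioner pod to reach the Running state:

$ kubectl apply -f deployment.yaml
$ kubectl get pods -l app=nfs-client-provisioner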
Next, create the StorageClass:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"  # whether to make this the default StorageClass
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner  # must match the deployment's PROVISIONER_NAME env value
parameters:
  archiveOnDelete: "false"  # "false": data is removed when the PVC is deleted; "true": data is kept (archived)
mountOptions:
  - hard         # use a hard mount
  - nfsvers=4    # NFS version; set it according to your NFS server version (check with nfsstat -m)
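Apply the StorageClass (storageclass.yaml is an example filename) and confirm it is registered:

$ kubectl apply -f storageclass.yaml
$ kubectl get storageclass nfs-client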
Now verify that dynamic provisioning works. First, create a PVC:
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
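Apply the claim and check that it is bound; with the provisioner working, a PV is created automatically and the PVC's status becomes Bound (test-claim.yaml is an example filename):

$ kubectl apply -f test-claim.yaml
$ kubectl get pvc test-claim
$ kubectl get pv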
Then create a Deployment that mounts this PVC:
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: nfs-volume-test
    qcloud-app: nfs-volume-test
  name: nfs-volume-test
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: nfs-volume-test
      qcloud-app: nfs-volume-test
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: nfs-volume-test
        qcloud-app: nfs-volume-test
    spec:
      containers:
        - image: nginx:latest
          imagePullPolicy: Always
          name: nfs-volume-test
          resources: {}
          securityContext:
            privileged: false
          volumeMounts:
            - mountPath: /tmp
              name: vol
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - name: vol
          persistentVolumeClaim:
            claimName: test-claim
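Apply the Deployment and, once the pod is running, write a file under the mounted /tmp path; it should then appear in the backing directory on the NFS server (nfs-volume-test.yaml and the pod name below are placeholders):

$ kubectl apply -f nfs-volume-test.yaml
$ kubectl exec -it <nfs-volume-test-pod> -- sh -c 'echo hello > /tmp/test.txt'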
Next, create a StatefulSet that mounts storage through automatically generated PVCs (volumeClaimTemplates):
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
  labels:
    app: centos
    qcloud: web
spec:
  serviceName: "web"  # required by the StatefulSet API; the matching headless Service is not shown here
  selector:
    matchLabels:
      app: web
  replicas: 2
  volumeClaimTemplates:
    - metadata:
        name: test
        annotations:
          volume.beta.kubernetes.io/storage-class: "nfs-client"
      spec:
        accessModes: [ "ReadWriteMany" ]
        resources:
          requests:
            storage: 1Mi
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: "/etc/nginx/conf.d"
              name: test
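Apply the StatefulSet (statefulset.yaml is an example filename) and list the claims; PVCs created from a volumeClaimTemplate are named <templateName>-<podName>, so with two replicas you should see test-web-0 and test-web-1:

$ kubectl apply -f statefulset.yaml
$ kubectl get pvc test-web-0 test-web-1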
After the StatefulSet is created, two PVCs are generated automatically, and the corresponding storage directories are created on the NFS server.
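On the NFS server you can list the export directory to see the per-volume subdirectories. The output below is only illustrative; the provisioner typically names each directory after the namespace, PVC, and PV:

$ ls /nfs/data
default-test-claim-pvc-...  default-test-web-0-pvc-...  default-test-web-1-pvc-...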