Starting with v1.4, Kubernetes introduced a new resource object, StorageClass, used to describe the characteristics and performance of storage resources. By v1.6, StorageClass and the dynamic provisioning mechanism had matured, enabling on-demand creation of storage volumes, an important step toward automated management of shared storage.
Through the StorageClass definition, an administrator can group storage resources into classes (Class), much like a storage device's own configuration profile, for example "fast storage", "slow storage", "with data redundancy", "without data redundancy". From the StorageClass description, users can see the characteristics of each kind of storage at a glance and request storage according to their application's needs.
As an abstraction over storage resources, a StorageClass hides the backend details from the PVCs that users create. On one hand it frees users from caring about the details of the underlying storage; on the other hand it frees administrators from managing PVs by hand, since the system creates and binds PVs automatically, i.e. dynamic provisioning. Dynamic provisioning based on StorageClass is gradually becoming the standard storage configuration model on cloud platforms.
A StorageClass definition mainly consists of a name, the backend storage provider (Provisioner), and backend-specific parameters. Once created, a StorageClass cannot be modified; to change it, the original definition must be deleted and recreated.
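A minimal sketch of such a definition (the class name, provisioner and parameter below are illustrative, not taken from this article's setup):
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-storage                # class name that users reference from their PVCs
provisioner: example.com/fast-nfs   # illustrative provisioner; must match a running provisioner
parameters:                         # backend-specific parameters, interpreted by the provisioner
  archiveOnDelete: "false"
reclaimPolicy: Delete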
The PVs and PVCs we created in the previous article were all static. Simply put, static PVs and PVCs have to be created by hand, which often cannot keep up with real needs: for example, one application may need high storage concurrency while another demands high read/write throughput, and this is especially true for StatefulSet workloads. Static PVs are a poor fit there, and that is where StorageClass comes in.
To use a StorageClass we need to install the corresponding automatic provisioning program. Since the backend here is NFS, we use the nfs-client automatic provisioner, also called the Provisioner. It uses the NFS server we have already configured to create persistent volumes automatically, i.e. it creates PVs for us.
# NFS server prepared in advance
Server: 192.168.21.103
Path: /data/nfs_root
First, we write the Deployment manifest for nfs-client:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  namespace: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        image: quay.io/external_storage/nfs-client-provisioner:latest
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: fuseim.pri/ifs
        - name: NFS_SERVER
          value: 192.168.21.103     # NFS server address
        - name: NFS_PATH
          value: /data/nfs_root     # NFS shared directory
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.21.103
          path: /data/nfs_root
Next we also need to create a ServiceAccount, which is bound from nfs-client-provisioner to a ClusterRole named nfs-client-provisioner-runner. That ClusterRole declares the required permissions, including create/delete/get/list/watch on persistentvolumes, which is what allows the ServiceAccount to create PVs automatically.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: test
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
  namespace: test
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
  namespace: test
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: test
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
Create the StorageClass:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  namespace: test
provisioner: fuseim.pri/ifs   # fuseim.pri/ifs must match the PROVISIONER_NAME in the Deployment
Apply the manifests:
[root@master1 value_mount]# kubectl apply -f .
deployment.apps/nfs-client-provisioner created
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
storageclass.storage.k8s.io/managed-nfs-storage created
Next, let's check what was created:
[root@master1 value_mount]# kubectl get pods -n test
NAME                                     READY   STATUS    RESTARTS      AGE
nfs-client-provisioner-6b8d9f996-xmbzt   1/1     Running   0             35s
tomcat-698554b5d9-h9p75                  1/1     Running   2 (21d ago)   56d
[root@master1 value_mount]# kubectl get sc -n test
NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  43s
[root@master1 value_mount]# kubectl get ServiceAccount -n test
NAME                     SECRETS   AGE
default                  1         56d
nfs-client-provisioner   1         72s
Next, we create a PVC that references the StorageClass we just created:
[root@master1 ~]# cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  namespace: test
  annotations:
    volume.beta.kubernetes.io/storage-class: managed-nfs-storage   # StorageClass name
spec:
  accessModes:            # access modes
    - ReadWriteMany
  resources:
    requests:
      storage: 1024Mi
[root@master1 ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/test-claim created
# Checking the new PVC shows that it is stuck in the Pending state and not usable
[root@master1 ~]# kubectl get pvc -n test
NAME         STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS          AGE
test-claim   Pending                                      managed-nfs-storage   15s
Check the provisioner logs to see exactly why the new PVC is stuck in Pending:
[root@master1 ~]# kubectl get pods -n test
NAME                                     READY   STATUS    RESTARTS      AGE
nfs-client-provisioner-6b8d9f996-xmbzt   1/1     Running   0             3m40s
tomcat-698554b5d9-h9p75                  1/1     Running   2 (21d ago)   56d
[root@master1 ~]# kubectl logs nfs-client-provisioner-6b8d9f996-xmbzt -n test
I1217 08:43:18.618926 1 leaderelection.go:185] attempting to acquire leader lease test/fuseim.pri-ifs...
E1217 08:43:36.046744 1 event.go:259] Could not construct reference to: '&v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"fuseim.pri-ifs", GenerateName:"", Namespace:"test", SelfLink:"", UID:"d526eedc-6d03-452a-b246-4bd8b0254af1", ResourceVersion:"84178", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63838397330, loc:(*time.Location)(0x1956800)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string{"control-plane.alpha.kubernetes.io/leader":"{\"holderIdentity\":\"nfs-client-provisioner-6b8d9f996-xmbzt_53a33f75-9cb8-11ee-a730-7a51051133c9\",\"leaseDurationSeconds\":15,\"acquireTime\":\"2023-12-17T08:43:36Z\",\"renewTime\":\"2023-12-17T08:43:36Z\",\"leaderTransitions\":2}"}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Subsets:[]v1.EndpointSubset(nil)}' due to: 'selfLink was empty, can't make reference'. Will not report event: 'Normal' 'LeaderElection' 'nfs-client-provisioner-6b8d9f996-xmbzt_53a33f75-9cb8-11ee-a730-7a51051133c9 became leader'
I1217 08:43:36.046837 1 leaderelection.go:194] successfully acquired lease test/fuseim.pri-ifs
I1217 08:43:36.046902 1 controller.go:631] Starting provisioner controller fuseim.pri/ifs_nfs-client-provisioner-6b8d9f996-xmbzt_53a33f75-9cb8-11ee-a730-7a51051133c9!
I1217 08:43:36.147231 1 controller.go:680] Started provisioner controller fuseim.pri/ifs_nfs-client-provisioner-6b8d9f996-xmbzt_53a33f75-9cb8-11ee-a730-7a51051133c9!
I1217 08:45:16.768115 1 controller.go:987] provision "test/test-claim" class "managed-nfs-storage": started
E1217 08:45:16.776910 1 controller.go:1004] provision "test/test-claim" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference
The logs show the error "managed-nfs-storage: unexpected error getting claim reference: selfLink was empty". Kubernetes 1.20 disabled selfLink, so provisioning fails with this error when the PV is being created.
The fix: recreate the provisioner container with a newer provisioner image that no longer relies on SelfLink.
[root@master1 ~]# kubectl get deployment -n test
NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
nfs-client-provisioner   1/1     1            1           5m53s
tomcat                   1/1     1            1           56d
[root@master1 ~]# kubectl edit deployment nfs-client-provisioner -n test
deployment.apps/nfs-client-provisioner edited
Change the image of the nfs-client-provisioner Pod to one that does not depend on SelfLink: registry.cn-beijing.aliyuncs.com/pylixm/nfs-subdir-external-provisioner:v4.0.0. Once the Deployment has rolled out, recreate the PVC to verify:
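If you prefer not to edit the Deployment interactively, the same image change can be made with kubectl set image (a sketch; the container name matches the Deployment defined earlier):
kubectl -n test set image deployment/nfs-client-provisioner \
  nfs-client-provisioner=registry.cn-beijing.aliyuncs.com/pylixm/nfs-subdir-external-provisioner:v4.0.0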
[root@master1 ~]# kubectl delete -f pvc.yaml
persistentvolumeclaim test-claim deleted
[root@master1 ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/test-claim created
Checking again, the newly created PVC is now in a normal (Bound) state:
[root@master1 ~]# kubectl get pvc -n test
NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
test-claim   Bound    pvc-4ae8644e-021e-4d38-99e4-76f331810575   1Gi        RWX            managed-nfs-storage   29s
# Here we can see that a PV was created automatically
[root@master1 ~]# kubectl get pv -n test
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                  STORAGECLASS          REASON   AGE
pvc-4ae8644e-021e-4d38-99e4-76f331810575   1Gi        RWX            Delete           Bound    test/test-claim        managed-nfs-storage            2m4s
pvc-e7d3d926-4fa4-4044-bf2f-30df06eef4aa   8Gi        RWO            Delete           Bound    app/data-zookeeper-0   nfs-client                     21d
By default, a newly created PVC can only bind if a PV in the Available state already exists. With a StorageClass we no longer have to create PVs by hand: we only create the PVC, and the StorageClass automatically creates the PV and binds it to the claim.
Besides specifying the StorageClass through an annotation as above, we can also mark a StorageClass as the default storage class, either by editing its YAML or with kubectl patch. (A PVC that does not specify any StorageClass will then be provisioned using the default one.)
By modifying the StorageClass manifest:
[root@master1 value_mount]# cat nfs-client-provisioner-StorageClass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  namespace: test
  annotations:                                          # add this annotation
    storageclass.kubernetes.io/is-default-class: "true" # mark this class as the default
provisioner: fuseim.pri/ifs
[root@master1 value_mount]# kubectl apply -f nfs-client-provisioner-StorageClass.yaml
storageclass.storage.k8s.io/managed-nfs-storage created
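Alternatively, the same annotation can be applied with kubectl patch instead of editing the manifest (a sketch, using the StorageClass name from above):
kubectl patch storageclass managed-nfs-storage \
  -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
After either method, kubectl get sc shows a (default) marker next to the class name.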
Above we created the nfs-client provisioner, the StorageClass, and a PVC, so the backend storage is ready. Next, let's create an application to demonstrate using it.
Using a Pod to reference the PVC
[root@master1 pods_demo]# cat test-app.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod              # Pod name
  namespace: test
spec:
  containers:
  - name: test-pod            # container name
    image: busybox
    imagePullPolicy: IfNotPresent
    command:                  # command the container runs
    - /bin/sh
    args:
    - -c
    - touch /mnt/SUCCESS && sleep 3600 && exit 0 || exit 1
    volumeMounts:
    - name: nfs-pvc           # volume mount name
      mountPath: /mnt         # mount path inside the container
  restartPolicy: Never
  volumes:
  - name: nfs-pvc             # must match the volumeMounts name above
    persistentVolumeClaim:    # mount via the PVC
      claimName: test-claim   # PVC name
[root@master1 pods_demo]# kubectl apply -f test-app.yaml
The busybox container creates a SUCCESS file under /mnt, and /mnt is mounted from the PVC.
The corresponding directory then shows up under the NFS export:
[root@k8s-node1 nfs_root]# cd test-test-claim-pvc-4ae8644e-021e-4d38-99e4-76f331810575
[root@k8s-node1 test-test-claim-pvc-4ae8644e-021e-4d38-99e4-76f331810575]# ll
total 0
-rw-r--r-- 1 root root 0 Dec 17 04:07 SUCCESS
[root@k8s-node1 test-test-claim-pvc-4ae8644e-021e-4d38-99e4-76f331810575]#
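For a long-running workload you would normally reference the same PVC from a Deployment rather than a bare Pod. A minimal sketch (the nginx image and mount path are illustrative; the claimName is the PVC created above):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-web
  namespace: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-web
  template:
    metadata:
      labels:
        app: test-web
    spec:
      containers:
      - name: web
        image: nginx                             # illustrative image
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html       # illustrative mount path
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: test-claim                  # the PVC provisioned by managed-nfs-storage
Because the PVC was requested with ReadWriteMany, several replicas of such a Deployment can mount the same NFS-backed volume at the same time.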