feat(attach): avoid creation of volumeattachment object

Kubernetes is very slow in attaching volumes when it has to deal with
a large number of VolumeAttachment objects
(k8s issue: https://github.com/kubernetes/kubernetes/issues/84169).

A VolumeAttachment is a CR created only to tell its watcher, the
csi-attacher sidecar, that it has to call the ControllerPublish and
ControllerUnpublish gRPCs. These calls do whatever is needed to attach
the volume to a node, for example calling the DigitalOcean Block
Storage API to attach a created volume to the specified node. For
ZFSPV the volume is already present locally on the node, so nothing
needs to be done in ControllerPublish/ControllerUnpublish and they can
safely be skipped.

The VolumeAttachment object is therefore not required for ZFSPV. This
change stops creating it and also removes the csi-attacher container,
which is no longer needed since it only acts on VolumeAttachment
objects.
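As a quick sanity check on a test cluster (not part of this change), one can confirm that the driver object advertises `attachRequired: false` and that no VolumeAttachment objects show up for ZFSPV volumes; exact output formats vary by kubectl version:

```sh
# should print "false" once the updated operator yaml is applied
kubectl get csidriver zfs.csi.openebs.io -o jsonpath='{.spec.attachRequired}{"\n"}'

# after provisioning a ZFSPV volume, no attachment objects should reference the driver
kubectl get volumeattachment 2>/dev/null | grep zfs.csi.openebs.io \
  || echo "no ZFSPV volumeattachments (expected)"
```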

The csi-cluster-driver-registrar container is also removed, as it is
deprecated and no longer needed.

The CSIDriver object is created using the beta API
(storage.k8s.io/v1beta1), so the minimum required Kubernetes version
is 1.14.
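A minimal pre-flight check for the version requirement, assuming kubectl access to the target cluster:

```sh
# the beta CSIDriver API must be served by the cluster (Kubernetes 1.14+)
kubectl version --short
kubectl api-versions | grep 'storage.k8s.io/v1beta1'
```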

Signed-off-by: Pawan <pawan@mayadata.io>
Authored by Pawan on 2020-04-17 13:59:28 +05:30; committed by Kiran Mova
parent 95230b5434
commit 6c410553d2
5 changed files with 121 additions and 219 deletions


@@ -1,6 +1,19 @@
---
# Create the CSI Driver object
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: zfs.csi.openebs.io
spec:
# do not require volumeattachment
attachRequired: false
podInfoOnMount: false
volumeLifecycleModes:
- Persistent
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
@@ -463,6 +476,9 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"] - apiGroups: ["*"]
resources: ["zfsvolumes", "zfssnapshots"] resources: ["zfsvolumes", "zfssnapshots"]
verbs: ["*"] verbs: ["*"]
@ -543,7 +559,7 @@ spec:
image: quay.io/k8scsi/snapshot-controller:v2.0.1 image: quay.io/k8scsi/snapshot-controller:v2.0.1
args: args:
- "--v=5" - "--v=5"
- "--leader-election=false" - "--leader-election=true"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
- name: csi-provisioner - name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.5.0 image: quay.io/k8scsi/csi-provisioner:v1.5.0
@ -561,31 +577,6 @@ spec:
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/ mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v2.0.0
imagePullPolicy: IfNotPresent
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-cluster-driver-registrar
image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1
args:
- "--v=5"
- "--driver-requires-attachment=false"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: openebs-zfs-plugin
image: quay.io/openebs/zfs-driver:ci
imagePullPolicy: IfNotPresent
@@ -611,45 +602,6 @@ spec:
emptyDir: {}
---
############################## CSI- Attacher #######################
# Attacher must be able to work with PVs, nodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments", "csinodes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-attacher-binding
subjects:
- kind: ServiceAccount
name: openebs-zfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: openebs-zfs-attacher-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -705,32 +657,6 @@ roleRef:
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-cluster-driver-registrar-role
rules:
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csidrivers"]
verbs: ["create", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-cluster-driver-registrar-binding
subjects:
- kind: ServiceAccount
name: openebs-zfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: openebs-zfs-cluster-driver-registrar-role
apiGroup: rbac.authorization.k8s.io
---
########################################
###########                 ############
###########   Node plugin   ############
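To verify that the attacher and cluster-driver-registrar sidecars are really gone after applying the updated manifest, one can list the containers of the controller pod; the label selector below is an assumption and may need adjusting to the actual pod labels:

```sh
# csi-attacher and csi-cluster-driver-registrar should be absent from the output
kubectl get pods -n kube-system -l app=openebs-zfs-controller \
  -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.containers[*].name}{"\n"}{end}'
```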


@@ -458,6 +458,19 @@ status:
---
# Create the CSI Driver object
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: zfs.csi.openebs.io
spec:
# do not require volumeattachment
attachRequired: false
podInfoOnMount: false
volumeLifecycleModes:
- Persistent
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
@@ -920,6 +933,9 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"] - apiGroups: ["*"]
resources: ["zfsvolumes", "zfssnapshots"] resources: ["zfsvolumes", "zfssnapshots"]
verbs: ["*"] verbs: ["*"]
@ -1000,7 +1016,7 @@ spec:
image: quay.io/k8scsi/snapshot-controller:v2.0.1 image: quay.io/k8scsi/snapshot-controller:v2.0.1
args: args:
- "--v=5" - "--v=5"
- "--leader-election=false" - "--leader-election=true"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
- name: csi-provisioner - name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.5.0 image: quay.io/k8scsi/csi-provisioner:v1.5.0
@ -1018,31 +1034,6 @@ spec:
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/ mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v2.0.0
imagePullPolicy: IfNotPresent
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-cluster-driver-registrar
image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1
args:
- "--v=5"
- "--driver-requires-attachment=false"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: openebs-zfs-plugin
image: quay.io/openebs/zfs-driver:ci
imagePullPolicy: IfNotPresent
@@ -1068,45 +1059,6 @@ spec:
emptyDir: {}
---
############################## CSI- Attacher #######################
# Attacher must be able to work with PVs, nodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments", "csinodes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-attacher-binding
subjects:
- kind: ServiceAccount
name: openebs-zfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: openebs-zfs-attacher-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -1162,32 +1114,6 @@ roleRef:
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-cluster-driver-registrar-role
rules:
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csidrivers"]
verbs: ["create", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-zfs-cluster-driver-registrar-binding
subjects:
- kind: ServiceAccount
name: openebs-zfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: openebs-zfs-cluster-driver-registrar-role
apiGroup: rbac.authorization.k8s.io
---
########################################
###########                 ############
###########   Node plugin   ############


@@ -1,4 +1,4 @@
-From zfs-driver:v0.6 version ZFS-LocalPV related CRs are now grouped together in its own group called `zfs.openebs.io`. Here steps are mentioned for how to upgrade for refactoring the CRDs.
+From zfs-driver:v0.6 version ZFS-LocalPV related CRs are now grouped together in its own group called `zfs.openebs.io`. Here steps are mentioned for how to upgrade for refactoring the CRDs. Please do not provision/deprovision any volume during the upgrade.
steps to upgrade:- steps to upgrade:-
@@ -21,7 +21,9 @@ zfsvolume.zfs.openebs.io/pvc-82368c44-eee8-47ee-85a6-633a8023faa8 created
zfssnapshot.zfs.openebs.io/snapshot-dc61a056-f495-482b-8e6e-e7ddc4c13f47 created
zfssnapshot.zfs.openebs.io/snapshot-f9db91ea-529e-4dac-b2b8-ead045c612da created
```
Please note that if you have modified the OPENEBS_NAMESPACE env in the driver's deployment to some other namespace, then you have to pass that namespace as an argument to the upgrade script: `sh upgrade/upgrade.sh [namespace]`.
3. upgrade the driver to v0.6
```
@@ -56,3 +58,8 @@ zfssnapshot.openebs.io "snapshot-dc61a056-f495-482b-8e6e-e7ddc4c13f47" deleted
zfssnapshot.openebs.io "snapshot-f9db91ea-529e-4dac-b2b8-ead045c612da" deleted
customresourcedefinition.apiextensions.k8s.io "zfssnapshots.openebs.io" deleted
```
Please note that if you have modified the OPENEBS_NAMESPACE env in the driver's deployment to some other namespace, then you have to pass that namespace as an argument to the cleanup script: `sh upgrade/cleanup.sh [namespace]`.
5. restart kube-controller-manager [optional]
kube-controller-manager might still be acting on stale volumeattachment resources and can get flooded with error logs. Restarting kube-controller-manager will fix it.
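For step 5, a possible way to restart kube-controller-manager on a kubeadm-style cluster, assuming it runs as a static pod on the control-plane node (adapt as needed for managed control planes):

```sh
# moving the static pod manifest out and back makes kubelet recreate the pod
sudo mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/
sleep 20
sudo mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/
```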


@@ -2,7 +2,19 @@
set -e
-kubectl get zfsvolumes.openebs.io -n openebs -oyaml > volumes.yaml
+if [ -z $1 ]; then
# default namespace is openebs when all the custom resources are created
ZFSPV_NAMESPACE="openebs"
else
ZFSPV_NAMESPACE=$1
fi
echo "Fetching ZFS Volumes"
numVol=`kubectl get zfsvolumes.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
if [ $numVol -gt 0 ]; then
echo "Cleaning the ZFS Volumes($numVol)"
kubectl get zfsvolumes.openebs.io -n $ZFSPV_NAMESPACE -oyaml > volumes.yaml
# remove the finalizer from the old CR
sed -i "/zfs.openebs.io\/finalizer/d" volumes.yaml
@@ -10,12 +22,25 @@ kubectl apply -f volumes.yaml
# delete the old CR
kubectl delete -f volumes.yaml
fi
-# delete the CRD definition
+# delete the ZFSVolume CRD definition
kubectl delete crd zfsvolumes.openebs.io
numAttach=`kubectl get volumeattachment --no-headers | grep zfs.csi.openebs.io | wc -l`
-kubectl get zfssnapshots.openebs.io -n openebs -oyaml > snapshots.yaml
+if [ $numAttach -gt 0 ]; then
echo "Cleaning the volumeattachment($numAttach)"
# delete the volumeattachment object
kubectl delete volumeattachment --all
fi
echo "Fetching ZFS Snapshots"
numSnap=`kubectl get zfssnapshots.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
if [ $numSnap -gt 0 ]; then
echo "Cleaning the ZFS Snapshot($numSnap)"
kubectl get zfssnapshots.openebs.io -n $ZFSPV_NAMESPACE -oyaml > snapshots.yaml
# remove the finalizer from the old CR
sed -i "/zfs.openebs.io\/finalizer/d" snapshots.yaml
@@ -23,6 +48,7 @@ kubectl apply -f snapshots.yaml
# delete the old CR
kubectl delete -f snapshots.yaml
fi
-# delete the CRD definition
+# delete the ZFSSnapshot CRD definition
kubectl delete crd zfssnapshots.openebs.io
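Note that `kubectl delete volumeattachment --all` removes attachments for every CSI driver in the cluster. On clusters that also run other CSI drivers, a narrower cleanup limited to this driver may be preferable; a possible sketch using a JSONPath filter, not part of the script above:

```sh
# delete only the attachments whose attacher is zfs.csi.openebs.io
kubectl get volumeattachment \
  -o jsonpath='{range .items[?(@.spec.attacher=="zfs.csi.openebs.io")]}{.metadata.name}{"\n"}{end}' \
  | xargs -r kubectl delete volumeattachment
```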


@@ -1,11 +1,23 @@
#!/bin/bash
# do not provision/deprovision anything while running the upgrade script.
set -e
if [ -z $1 ]; then
# default namespace is openebs when all the custom resources are created
ZFSPV_NAMESPACE="openebs"
else
ZFSPV_NAMESPACE=$1
fi
echo "Fetching ZFS Volumes"
numVol=`kubectl get zfsvolumes.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
if [ $numVol -gt 0 ]; then
# ZFSVolumes: create the new CR with apiVersion as zfs.openebs.io and kind as Volume
-kubectl get zfsvolumes.openebs.io -n openebs -oyaml > volumes.yaml
+kubectl get zfsvolumes.openebs.io -n $ZFSPV_NAMESPACE -oyaml > volumes.yaml
# update the group name to zfs.openebs.io
sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" volumes.yaml
@@ -13,11 +25,15 @@ sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" volumes.yaml
kubectl apply -f volumes.yaml
rm volumes.yaml
fi
echo "Fetching ZFS Snapshots"
numSnap=`kubectl get zfssnapshots.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
if [ $numSnap -gt 0 ]; then
# ZFSSnapshots: create the new CR with apiVersion as zfs.openebs.io and kind as Snapshot
-kubectl get zfssnapshots.openebs.io -n openebs -oyaml > snapshots.yaml
+kubectl get zfssnapshots.openebs.io -n $ZFSPV_NAMESPACE -oyaml > snapshots.yaml
# update the group name to zfs.openebs.io
@@ -26,3 +42,4 @@ sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" snapshots.yaml
kubectl apply -f snapshots.yaml
rm snapshots.yaml
fi
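For reference, a possible end-to-end flow when the custom resources live in a non-default namespace; the namespace name here is just an example:

```sh
sh upgrade/upgrade.sh zfspv-ns    # migrate the CRs to the zfs.openebs.io group
# ... apply the zfs-driver:v0.6 operator yaml to upgrade the driver ...
sh upgrade/cleanup.sh zfspv-ns    # remove old CRs, old CRDs and stale volumeattachments
```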