diff --git a/deploy/yamls/zfs-driver.yaml b/deploy/yamls/zfs-driver.yaml
index 6ffc20e..e5ae634 100644
--- a/deploy/yamls/zfs-driver.yaml
+++ b/deploy/yamls/zfs-driver.yaml
@@ -1,6 +1,19 @@
 ---
+# Create the CSI Driver object
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  name: zfs.csi.openebs.io
+spec:
+  # do not require volumeattachment
+  attachRequired: false
+  podInfoOnMount: false
+  volumeLifecycleModes:
+  - Persistent
+---
+
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
@@ -463,6 +476,9 @@ rules:
   - apiGroups: ["coordination.k8s.io"]
     resources: ["leases"]
     verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: ["*"]
     resources: ["zfsvolumes", "zfssnapshots"]
     verbs: ["*"]
@@ -543,7 +559,7 @@ spec:
         image: quay.io/k8scsi/snapshot-controller:v2.0.1
         args:
         - "--v=5"
-        - "--leader-election=false"
+        - "--leader-election=true"
         imagePullPolicy: IfNotPresent
       - name: csi-provisioner
         image: quay.io/k8scsi/csi-provisioner:v1.5.0
@@ -561,31 +577,6 @@
         volumeMounts:
         - name: socket-dir
           mountPath: /var/lib/csi/sockets/pluginproxy/
-      - name: csi-attacher
-        image: quay.io/k8scsi/csi-attacher:v2.0.0
-        imagePullPolicy: IfNotPresent
-        args:
-        - "--v=5"
-        - "--csi-address=$(ADDRESS)"
-        - "--leader-election"
-        env:
-        - name: ADDRESS
-          value: /var/lib/csi/sockets/pluginproxy/csi.sock
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/sockets/pluginproxy/
-      - name: csi-cluster-driver-registrar
-        image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1
-        args:
-        - "--v=5"
-        - "--driver-requires-attachment=false"
-        - "--csi-address=$(ADDRESS)"
-        env:
-        - name: ADDRESS
-          value: /var/lib/csi/sockets/pluginproxy/csi.sock
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/sockets/pluginproxy/
       - name: openebs-zfs-plugin
         image: quay.io/openebs/zfs-driver:ci
         imagePullPolicy: IfNotPresent
@@ -611,45 +602,6 @@
         emptyDir: {}
 
 ---
-############################## CSI- Attacher #######################
-# Attacher must be able to work with PVs, nodes and VolumeAttachments
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-attacher-role
-rules:
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["csi.storage.k8s.io"]
-    resources: ["csinodeinfos"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments", "csinodes"]
-    verbs: ["get", "list", "watch", "update", "patch"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["get", "watch", "list", "delete", "update", "create"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-attacher-binding
-subjects:
-  - kind: ServiceAccount
-    name: openebs-zfs-controller-sa
-    namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: openebs-zfs-attacher-role
-  apiGroup: rbac.authorization.k8s.io
-
----
-
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -705,32 +657,6 @@ roleRef:
 
 ---
 
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-cluster-driver-registrar-role
-rules:
-  - apiGroups: ["csi.storage.k8s.io"]
-    resources: ["csidrivers"]
-    verbs: ["create", "delete"]
-
----
-
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-cluster-driver-registrar-binding
-subjects:
-  - kind: ServiceAccount
-    name: openebs-zfs-controller-sa
-    namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: openebs-zfs-cluster-driver-registrar-role
-  apiGroup: rbac.authorization.k8s.io
-
----
-
 ########################################
 ###########                 ############
 ###########   Node plugin   ############
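Note: with the csi-attacher and csi-cluster-driver-registrar sidecars removed above, the new CSIDriver object (attachRequired: false) is what tells Kubernetes to skip the VolumeAttachment flow for this driver. A quick sanity check after applying the manifest, assuming kubectl access to the cluster (the jsonpath query is illustrative, not part of this patch):

    # confirm the CSIDriver object is registered and attach is disabled
    kubectl get csidriver zfs.csi.openebs.io -o jsonpath='{.spec.attachRequired}'
    # expected output: false

The same change is repeated below for the combined operator manifest.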
diff --git a/deploy/zfs-operator.yaml b/deploy/zfs-operator.yaml
index 58a3c8f..5828f04 100644
--- a/deploy/zfs-operator.yaml
+++ b/deploy/zfs-operator.yaml
@@ -458,6 +458,19 @@
 status:
 
 ---
+# Create the CSI Driver object
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  name: zfs.csi.openebs.io
+spec:
+  # do not require volumeattachment
+  attachRequired: false
+  podInfoOnMount: false
+  volumeLifecycleModes:
+  - Persistent
+---
+
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
@@ -920,6 +933,9 @@ rules:
   - apiGroups: ["coordination.k8s.io"]
     resources: ["leases"]
     verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: ["*"]
     resources: ["zfsvolumes", "zfssnapshots"]
     verbs: ["*"]
@@ -1000,7 +1016,7 @@ spec:
         image: quay.io/k8scsi/snapshot-controller:v2.0.1
         args:
         - "--v=5"
-        - "--leader-election=false"
+        - "--leader-election=true"
         imagePullPolicy: IfNotPresent
      - name: csi-provisioner
         image: quay.io/k8scsi/csi-provisioner:v1.5.0
@@ -1018,31 +1034,6 @@
         volumeMounts:
         - name: socket-dir
           mountPath: /var/lib/csi/sockets/pluginproxy/
-      - name: csi-attacher
-        image: quay.io/k8scsi/csi-attacher:v2.0.0
-        imagePullPolicy: IfNotPresent
-        args:
-        - "--v=5"
-        - "--csi-address=$(ADDRESS)"
-        - "--leader-election"
-        env:
-        - name: ADDRESS
-          value: /var/lib/csi/sockets/pluginproxy/csi.sock
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/sockets/pluginproxy/
-      - name: csi-cluster-driver-registrar
-        image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1
-        args:
-        - "--v=5"
-        - "--driver-requires-attachment=false"
-        - "--csi-address=$(ADDRESS)"
-        env:
-        - name: ADDRESS
-          value: /var/lib/csi/sockets/pluginproxy/csi.sock
-        volumeMounts:
-        - name: socket-dir
-          mountPath: /var/lib/csi/sockets/pluginproxy/
       - name: openebs-zfs-plugin
         image: quay.io/openebs/zfs-driver:ci
         imagePullPolicy: IfNotPresent
@@ -1068,45 +1059,6 @@
         emptyDir: {}
 
 ---
-############################## CSI- Attacher #######################
-# Attacher must be able to work with PVs, nodes and VolumeAttachments
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-attacher-role
-rules:
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["csi.storage.k8s.io"]
-    resources: ["csinodeinfos"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments", "csinodes"]
-    verbs: ["get", "list", "watch", "update", "patch"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["get", "watch", "list", "delete", "update", "create"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-attacher-binding
-subjects:
-  - kind: ServiceAccount
-    name: openebs-zfs-controller-sa
-    namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: openebs-zfs-attacher-role
-  apiGroup: rbac.authorization.k8s.io
-
----
-
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -1162,32 +1114,6 @@ roleRef:
 
 ---
 
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-cluster-driver-registrar-role
-rules:
-  - apiGroups: ["csi.storage.k8s.io"]
-    resources: ["csidrivers"]
-    verbs: ["create", "delete"]
-
----
-
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: openebs-zfs-cluster-driver-registrar-binding
-subjects:
-  - kind: ServiceAccount
-    name: openebs-zfs-controller-sa
-    namespace: kube-system
-roleRef:
-  kind: ClusterRole
-  name: openebs-zfs-cluster-driver-registrar-role
-  apiGroup: rbac.authorization.k8s.io
-
----
-
 ########################################
 ###########                 ############
 ###########   Node plugin   ############
diff --git a/upgrade/README.md b/upgrade/README.md
index b8da94d..219270f 100644
--- a/upgrade/README.md
+++ b/upgrade/README.md
@@ -1,4 +1,4 @@
-From zfs-driver:v0.6 version ZFS-LocalPV related CRs are now grouped together in its own group called `zfs.openebs.io`. Here steps are mentioned for how to upgrade for refactoring the CRDs.
+From zfs-driver:v0.6 onwards, ZFS-LocalPV related CRs are grouped together in their own group called `zfs.openebs.io`. The steps below describe how to upgrade to the refactored CRDs. Please do not provision or deprovision any volumes during the upgrade.
 
 steps to upgrade:-
 
@@ -21,7 +21,9 @@ zfsvolume.zfs.openebs.io/pvc-82368c44-eee8-47ee-85a6-633a8023faa8 created
 zfssnapshot.zfs.openebs.io/snapshot-dc61a056-f495-482b-8e6e-e7ddc4c13f47 created
 zfssnapshot.zfs.openebs.io/snapshot-f9db91ea-529e-4dac-b2b8-ead045c612da created
 ```
-`
+Please note: if you have modified the OPENEBS_NAMESPACE env in the driver's deployment to point to a different namespace, you have to pass that namespace as an argument to the upgrade.sh script: `sh upgrade/upgrade.sh [namespace]`.
+
+
 3. upgrade the driver to v0.6
 
 ```
@@ -55,4 +57,9 @@ zfssnapshot.openebs.io/snapshot-f9db91ea-529e-4dac-b2b8-ead045c612da configured
 zfssnapshot.openebs.io "snapshot-dc61a056-f495-482b-8e6e-e7ddc4c13f47" deleted
 zfssnapshot.openebs.io "snapshot-f9db91ea-529e-4dac-b2b8-ead045c612da" deleted
 customresourcedefinition.apiextensions.k8s.io "zfssnapshots.openebs.io" deleted
-```
\ No newline at end of file
+```
+
+Please note: if you have modified the OPENEBS_NAMESPACE env in the driver's deployment to point to a different namespace, you have to pass that namespace as an argument to the cleanup.sh script: `sh upgrade/cleanup.sh [namespace]`.
+
+5. restart kube-controller-manager [optional]
+kube-controller-manager might still hold stale volumeattachment resources and can get flooded with error logs as a result. Restarting kube-controller-manager fixes this.
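For reference, both scripts take the namespace the same way; a minimal sketch of the invocations (the namespace `my-openebs` is hypothetical, for illustration only):

    # default: CRs live in the "openebs" namespace
    sh upgrade/upgrade.sh
    # custom OPENEBS_NAMESPACE: pass it explicitly
    sh upgrade/upgrade.sh my-openebs
    sh upgrade/cleanup.sh my-openebs

For the optional step 5, on kubeadm-based clusters (an assumption about the cluster layout, not part of this patch) kube-controller-manager runs as a static pod, so moving its manifest out of the manifests directory and back restarts it:

    # run on the control-plane node; kubelet stops the pod, then recreates it
    mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/
    sleep 20
    mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/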
diff --git a/upgrade/cleanup.sh b/upgrade/cleanup.sh
index afdb901..3f94a06 100644
--- a/upgrade/cleanup.sh
+++ b/upgrade/cleanup.sh
@@ -2,27 +2,53 @@
 
 set -e
 
-kubectl get zfsvolumes.openebs.io -n openebs -oyaml > volumes.yaml
+if [ -z $1 ]; then
+  # default namespace is openebs when all the custom resources are created
+  ZFSPV_NAMESPACE="openebs"
+else
+  ZFSPV_NAMESPACE=$1
+fi
 
-# remove the finalizer from the old CR
-sed -i "/zfs.openebs.io\/finalizer/d" volumes.yaml
-kubectl apply -f volumes.yaml
+echo "Fetching ZFS Volumes"
+numVol=`kubectl get zfsvolumes.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
 
-# delete the old CR
-kubectl delete -f volumes.yaml
+if [ $numVol -gt 0 ]; then
+  echo "Cleaning the ZFS Volumes($numVol)"
+  kubectl get zfsvolumes.openebs.io -n $ZFSPV_NAMESPACE -oyaml > volumes.yaml
 
-# delete the CRD definition
+  # remove the finalizer from the old CR
+  sed -i "/zfs.openebs.io\/finalizer/d" volumes.yaml
+  kubectl apply -f volumes.yaml
+
+  # delete the old CR
+  kubectl delete -f volumes.yaml
+fi
+
+# delete the ZFSVolume CRD definition
 kubectl delete crd zfsvolumes.openebs.io
 
+numAttach=`kubectl get volumeattachment --no-headers | grep zfs.csi.openebs.io | wc -l`
 
-kubectl get zfssnapshots.openebs.io -n openebs -oyaml > snapshots.yaml
+if [ $numAttach -gt 0 ]; then
+  echo "Cleaning the volumeattachment($numAttach)"
+  # delete the volumeattachment object
+  kubectl delete volumeattachment --all
+fi
 
-# remove the finalizer from the old CR
-sed -i "/zfs.openebs.io\/finalizer/d" snapshots.yaml
-kubectl apply -f snapshots.yaml
+echo "Fetching ZFS Snapshots"
+numSnap=`kubectl get zfssnapshots.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
 
-# delete the old CR
-kubectl delete -f snapshots.yaml
+if [ $numSnap -gt 0 ]; then
+  echo "Cleaning the ZFS Snapshot($numSnap)"
+  kubectl get zfssnapshots.openebs.io -n $ZFSPV_NAMESPACE -oyaml > snapshots.yaml
 
-# delete the CRD definition
+  # remove the finalizer from the old CR
+  sed -i "/zfs.openebs.io\/finalizer/d" snapshots.yaml
+  kubectl apply -f snapshots.yaml
+
+  # delete the old CR
+  kubectl delete -f snapshots.yaml
+fi
+
+# delete the ZFSSnapshot CRD definition
kubectl delete crd zfssnapshots.openebs.io
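One caveat worth knowing before running cleanup.sh: `kubectl delete volumeattachment --all` removes VolumeAttachment objects for every CSI driver in the cluster, not just zfs.csi.openebs.io, even though the preceding count greps for the ZFS driver only. If other CSI drivers are running, it may be safer to inspect first; a read-only sketch:

    # attachments that belong to the ZFS driver
    kubectl get volumeattachment --no-headers | grep zfs.csi.openebs.io
    # all attachments with their attacher, to spot other drivers
    kubectl get volumeattachment -o custom-columns=NAME:.metadata.name,ATTACHER:.spec.attacher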
diff --git a/upgrade/upgrade.sh b/upgrade/upgrade.sh
index 737508d..b6b2e94 100644
--- a/upgrade/upgrade.sh
+++ b/upgrade/upgrade.sh
@@ -1,28 +1,45 @@
 #!/bin/bash
+# do not provision/deprovision anything while running the upgrade script.
+
 set -e
 
-# ZFSVolumes: create the new CR with apiVersion as zfs.openebs.io and kind as Volume
+if [ -z $1 ]; then
+  # default namespace is openebs when all the custom resources are created
+  ZFSPV_NAMESPACE="openebs"
+else
+  ZFSPV_NAMESPACE=$1
+fi
 
-kubectl get zfsvolumes.openebs.io -n openebs -oyaml > volumes.yaml
+echo "Fetching ZFS Volumes"
+numVol=`kubectl get zfsvolumes.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
+
+if [ $numVol -gt 0 ]; then
+  # ZFSVolumes: create the new CR with apiVersion as zfs.openebs.io and kind as Volume
+
+  kubectl get zfsvolumes.openebs.io -n $ZFSPV_NAMESPACE -oyaml > volumes.yaml
+
+  # update the group name to zfs.openebs.io
+  sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" volumes.yaml
+  # create the new CR
+  kubectl apply -f volumes.yaml
+
+  rm volumes.yaml
+fi
+
+echo "Fetching ZFS Snapshots"
+numSnap=`kubectl get zfssnapshots.openebs.io --no-headers -n $ZFSPV_NAMESPACE | wc -l`
+
+if [ $numSnap -gt 0 ]; then
+  # ZFSSnapshots: create the new CR with apiVersion as zfs.openebs.io and kind as Snapshot
+
+  kubectl get zfssnapshots.openebs.io -n $ZFSPV_NAMESPACE -oyaml > snapshots.yaml
 
-# update the group name to zfs.openebs.io
-sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" volumes.yaml
-# create the new CR
-kubectl apply -f volumes.yaml
+  # update the group name to zfs.openebs.io
+  sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" snapshots.yaml
+  # create the new CR
+  kubectl apply -f snapshots.yaml
 
-rm volumes.yaml
-
-
-# ZFSSnapshots: create the new CR with apiVersion as zfs.openebs.io and kind as Snapshot
-
-kubectl get zfssnapshots.openebs.io -n openebs -oyaml > snapshots.yaml
-
-
-# update the group name to zfs.openebs.io
-sed -i "s/apiVersion: openebs.io/apiVersion: zfs.openebs.io/g" snapshots.yaml
-# create the new CR
-kubectl apply -f snapshots.yaml
-
-rm snapshots.yaml
+  rm snapshots.yaml
+fi
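After upgrade.sh and cleanup.sh have both run, one way to verify the migration, assuming the default openebs namespace:

    # migrated CRs should answer under the new group
    kubectl get zfsvolumes.zfs.openebs.io -n openebs
    kubectl get zfssnapshots.zfs.openebs.io -n openebs
    # the old CRDs should be gone
    kubectl get crd zfsvolumes.openebs.io zfssnapshots.openebs.io
    # expected: NotFound errors for both old CRDs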