diff --git a/cmd/controller/controller.go b/cmd/controller/controller.go
index ca839b8..847f58a 100644
--- a/cmd/controller/controller.go
+++ b/cmd/controller/controller.go
@@ -85,7 +85,17 @@ func (c *ZVController) syncZV(zv *apis.ZFSVolume) error {
             zvol.RemoveZvolFinalizer(zv)
         }
     } else {
-        err = zvol.SetZvolProp(zv)
+        // if finalizer is not set then it means we are creating
+        // the volume. And if it is set then volume has already been
+        // created and this event is for property change only.
+        if zv.Finalizers != nil {
+            err = zvol.SetZvolProp(zv)
+        } else {
+            err = zvol.CreateZvol(zv)
+            if err == nil {
+                err = zvol.UpdateZvolInfo(zv)
+            }
+        }
     }
     return err
 }
@@ -101,11 +111,8 @@ func (c *ZVController) addZV(obj interface{}) {
     if zvol.NodeID != zv.Spec.OwnerNodeID {
         return
     }
-    // TODO(pawan) scheduler will schedule the volume
-    // on a node and populate the OwnerNodeID accordingly.
-    // We need to create the zfs volume in that case.
     logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
-    //c.enqueueZV(zv)
+    c.enqueueZV(zv)
 }
 
 // updateZV is the update event handler for CstorVolumeClaim
diff --git a/deploy/sample/fio.yaml b/deploy/sample/fio.yaml
index 49f5a72..c75c257 100644
--- a/deploy/sample/fio.yaml
+++ b/deploy/sample/fio.yaml
@@ -12,8 +12,14 @@ parameters:
   #keyformat: "raw"
   #keylocation: "file:///home/pawan/key"
   poolname: "zfspv-pool"
-provisioner: openebs.io/zfs
+provisioner: zfs-localpv
 volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: kubernetes.io/hostname
+    values:
+      - gke-zfspv-pawan-default-pool-c8929518-cgd4
+      - gke-zfspv-pawan-default-pool-c8929518-dxzc
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -32,15 +38,6 @@ kind: Pod
 metadata:
   name: fio
 spec:
-  affinity:
-    nodeAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        nodeSelectorTerms:
-        - matchExpressions:
-          - key: kubernetes.io/hostname
-            operator: In
-            values:
-            - gke-pawan-zfspv-default-pool-1813a371-6nhl
   restartPolicy: Never
   containers:
   - name: perfrunner
diff --git a/deploy/sample/percona.yaml b/deploy/sample/percona.yaml
index 104b0eb..1d83fcf 100644
--- a/deploy/sample/percona.yaml
+++ b/deploy/sample/percona.yaml
@@ -9,7 +9,14 @@ parameters:
   dedup: "on"
   thinprovision: "yes"
   poolname: "zfspv-pool"
-provisioner: openebs.io/zfs
+provisioner: zfs-localpv
+volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: kubernetes.io/hostname
+    values:
+      - gke-zfspv-pawan-default-pool-c8929518-cgd4
+      - gke-zfspv-pawan-default-pool-c8929518-dxzc
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -57,7 +64,7 @@ data:
     mysql -uroot -pk8sDem0 -e "INSERT INTO Hardware (id, name, owner, description) values (1, "dellserver", "basavaraj", "controller");" $DB_NAME
     mysql -uroot -pk8sDem0 -e "DROP DATABASE $DB_NAME;"
 ---
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: percona
@@ -73,15 +80,6 @@ spec:
       labels:
         name: percona
     spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/hostname
-                operator: In
-                values:
-                - gke-pawan-zfspv-default-pool-26f2b9a9-5fqd
       containers:
       - resources:
         name: percona
diff --git a/deploy/zfs-operator.yaml b/deploy/zfs-operator.yaml
index a86c7cb..8a6d3ac 100644
--- a/deploy/zfs-operator.yaml
+++ b/deploy/zfs-operator.yaml
@@ -86,7 +86,7 @@ roleRef:
 
 ---
 kind: StatefulSet
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 metadata:
   name: openebs-zfs-controller
   namespace: kube-system
@@ -107,13 +107,13 @@ spec:
       serviceAccount: openebs-zfs-controller-sa
       containers:
        - name: csi-provisioner
-          image: quay.io/k8scsi/csi-provisioner:v1.0.1
+          image: quay.io/k8scsi/csi-provisioner:v1.4.0
          imagePullPolicy: IfNotPresent
          args:
-            - "--provisioner=openebs.io/zfs"
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--feature-gates=Topology=true"
+            - "--strict-topology"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -121,7 +121,7 @@ spec:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: csi-attacher
-          image: quay.io/k8scsi/csi-attacher:v1.0.1
+          image: quay.io/k8scsi/csi-attacher:v2.0.0
          imagePullPolicy: IfNotPresent
          args:
            - "--v=5"
@@ -184,7 +184,7 @@ rules:
     verbs: ["get", "list", "watch"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments", "csinodes"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
 
 ---
 kind: ClusterRoleBinding
@@ -324,7 +324,7 @@ roleRef:
 
 ---
 kind: DaemonSet
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 metadata:
   name: openebs-zfs-node
   namespace: kube-system
@@ -343,7 +343,7 @@ spec:
       hostNetwork: true
       containers:
        - name: csi-node-driver-registrar
-          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1
+          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
@@ -427,21 +427,27 @@ spec:
        - name: zfs-bin
          hostPath:
            path: /sbin/zfs
+            type: File
        - name: libzpool
          hostPath:
            path: /lib/libzpool.so.2.0.0
+            type: File
        - name: libzfscore
          hostPath:
            path: /lib/libzfs_core.so.1.0.0
+            type: File
        - name: libzfs
          hostPath:
            path: /lib/libzfs.so.2.0.0
+            type: File
        - name: libuutil
          hostPath:
            path: /lib/libuutil.so.1.0.1
+            type: File
        - name: libnvpair
          hostPath:
            path: /lib/libnvpair.so.1.0.1
+            type: File
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
diff --git a/pkg/builder/build.go b/pkg/builder/build.go
index e47151c..5533069 100644
--- a/pkg/builder/build.go
+++ b/pkg/builder/build.go
@@ -136,6 +136,12 @@ func (b *Builder) WithThinProv(thinprov string) *Builder {
     return b
 }
 
+// WithOwnerNode sets owner node for the ZFSVolume where the volume should be provisioned
+func (b *Builder) WithOwnerNode(host string) *Builder {
+    b.volume.Object.Spec.OwnerNodeID = host
+    return b
+}
+
 // WithBlockSize sets blocksize of ZFSVolume
 func (b *Builder) WithBlockSize(blockSize string) *Builder {
     bs := "4k"
diff --git a/pkg/driver/agent.go b/pkg/driver/agent.go
index 98f2eca..6d35d48 100644
--- a/pkg/driver/agent.go
+++ b/pkg/driver/agent.go
@@ -181,8 +181,12 @@ func (ns *node) NodeGetInfo(
     req *csi.NodeGetInfoRequest,
 ) (*csi.NodeGetInfoResponse, error) {
 
+    topology := map[string]string{zvol.ZFSTopologyKey: ns.driver.config.NodeID}
     return &csi.NodeGetInfoResponse{
         NodeId: ns.driver.config.NodeID,
+        AccessibleTopology: &csi.Topology{
+            Segments: topology,
+        },
     }, nil
 }
 
diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index eaa433b..52bc4fd 100644
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -79,6 +79,9 @@ func (cs *controller) CreateVolume(
     pool := req.GetParameters()["poolname"]
     tp := req.GetParameters()["thinprovision"]
 
+    // setting first in preferred list as the ownernode of this volume
+    OwnerNode := req.AccessibilityRequirements.Preferred[0].Segments[zvol.ZFSTopologyKey]
+
     volObj, err := builder.NewBuilder().
         WithName(volName).
         WithCapacity(strconv.FormatInt(int64(size), 10)).
@@ -89,6 +92,7 @@
         WithKeyFormat(kf).
         WithKeyLocation(kl).
         WithThinProv(tp).
+        WithOwnerNode(OwnerNode).
         WithCompression(compression).Build()
 
     if err != nil {
@@ -100,9 +104,12 @@
         return nil, status.Error(codes.Internal, err.Error())
     }
 
+    topology := map[string]string{zvol.ZFSTopologyKey: OwnerNode}
+
     return csipayload.NewCreateVolumeResponseBuilder().
         WithName(volName).
         WithCapacity(size).
+        WithTopology(topology).
         Build(), nil
 }
 
diff --git a/pkg/response/create.go b/pkg/response/create.go
index 6c23222..422257d 100644
--- a/pkg/response/create.go
+++ b/pkg/response/create.go
@@ -57,6 +57,14 @@ func (b *CreateVolumeResponseBuilder) WithContext(ctx map[string]string) *Create
     return b
 }
 
+// WithTopology sets the topology for the
+// CreateVolumeResponse instance
+func (b *CreateVolumeResponseBuilder) WithTopology(topology map[string]string) *CreateVolumeResponseBuilder {
+    b.response.Volume.AccessibleTopology = make([]*csi.Topology, 1)
+    b.response.Volume.AccessibleTopology[0] = &csi.Topology{Segments: topology}
+    return b
+}
+
 // Build returns the constructed instance
 // of csi CreateVolumeResponse
 func (b *CreateVolumeResponseBuilder) Build() *csi.CreateVolumeResponse {
diff --git a/pkg/zfs/mount.go b/pkg/zfs/mount.go
index 2052269..3f0c1eb 100644
--- a/pkg/zfs/mount.go
+++ b/pkg/zfs/mount.go
@@ -109,14 +109,9 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
         return status.Error(codes.Internal, "volume is owned by different node")
     }
 
-    devicePath, err := createZvol(vol)
+    devicePath, err := GetDevicePath(vol)
     if err != nil {
-        return status.Error(codes.Internal, err.Error())
-    }
-
-    err = UpdateZvolInfo(vol)
-    if err != nil {
-        return status.Error(codes.Internal, err.Error())
+        return status.Error(codes.Internal, "not able to get the device path")
     }
 
     /*
@@ -138,7 +133,7 @@
     }
     err = FormatAndMountZvol(devicePath, mount)
     if err != nil {
-        return status.Error(codes.Internal, err.Error())
+        return status.Error(codes.Internal, "not able to mount the volume")
     }
 
     return err
diff --git a/pkg/zfs/volume.go b/pkg/zfs/volume.go
index 734985a..a0e03c0 100644
--- a/pkg/zfs/volume.go
+++ b/pkg/zfs/volume.go
@@ -31,9 +31,10 @@ const (
     OpenEBSNamespaceKey string = "OPENEBS_NAMESPACE"
     // ZFSFinalizer for the ZfsVolume CR
     ZFSFinalizer string = "zfs.openebs.io/finalizer"
-    // ZFSNodeKey will be used to insert Label
-    // in ZfsVolume CR
+    // ZFSNodeKey will be used to insert Label in ZfsVolume CR
     ZFSNodeKey string = "kubernetes.io/nodename"
+    // ZFSTopologyKey is supported topology key for the zfs driver
+    ZFSTopologyKey string = "kubernetes.io/hostname"
 )
 
 var (
diff --git a/pkg/zfs/zfs_util.go b/pkg/zfs/zfs_util.go
index 12f40e8..afdfaed 100644
--- a/pkg/zfs/zfs_util.go
+++ b/pkg/zfs/zfs_util.go
@@ -119,9 +119,9 @@ func buildVolumeDestroyArgs(vol *apis.ZFSVolume) []string {
     return ZFSVolCmd
 }
 
-// createZvol creates the zvol and returns the corresponding diskPath
+// CreateZvol creates the zvol and returns the corresponding diskPath
 // of the volume which gets created on the node
-func createZvol(vol *apis.ZFSVolume) (string, error) {
+func CreateZvol(vol *apis.ZFSVolume) error {
     zvol := vol.Spec.PoolName + "/" + vol.Name
     devicePath := ZFS_DEVPATH + zvol
 
@@ -135,16 +135,16 @@ func createZvol(vol *apis.ZFSVolume) (string, error) {
             logrus.Errorf(
                 "zfs: could not create zvol %v cmd %v error: %s", zvol, args, string(out),
             )
-            return "", err
+            return err
         }
         logrus.Infof("created zvol %s", zvol)
     } else if err == nil {
         logrus.Infof("using existing zvol %v", zvol)
     } else {
-        return "", err
+        return err
     }
 
-    return devicePath, nil
+    return nil
 }
 
 // SetZvolProp sets the zvol property
@@ -191,3 +191,14 @@ func DestroyZvol(vol *apis.ZFSVolume) error {
 
     return nil
 }
+
+// GetDevicePath returns device path for zvol if it exists
+func GetDevicePath(vol *apis.ZFSVolume) (string, error) {
+    zvol := vol.Spec.PoolName + "/" + vol.Name
+    devicePath := ZFS_DEVPATH + zvol
+
+    if _, err := os.Stat(devicePath); os.IsNotExist(err) {
+        return "", err
+    }
+    return devicePath, nil
+}
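
Note on the topology flow above (not part of the patch): with --feature-gates=Topology=true and --strict-topology set on the external-provisioner, CreateVolume receives an AccessibilityRequirements whose Preferred list starts with the node selected by the scheduler, which is why the patch reads Preferred[0] directly. The sketch below is illustrative only; the helper name pickOwnerNode and its fallback to the Requisite list are assumptions rather than code from this repository, and it uses only the upstream CSI spec types.

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// ZFSTopologyKey mirrors the topology key the node plugin reports in NodeGetInfo.
const ZFSTopologyKey = "kubernetes.io/hostname"

// pickOwnerNode is a hypothetical helper: it returns the hostname segment from the
// first topology entry that carries one, checking the Preferred list before falling
// back to Requisite. The patch itself assumes Preferred[0] is always populated,
// which holds when the external-provisioner runs with --strict-topology.
func pickOwnerNode(req *csi.TopologyRequirement) (string, error) {
	if req == nil {
		return "", fmt.Errorf("no accessibility requirements in CreateVolume request")
	}
	for _, topo := range req.GetPreferred() {
		if node, ok := topo.GetSegments()[ZFSTopologyKey]; ok {
			return node, nil
		}
	}
	for _, topo := range req.GetRequisite() {
		if node, ok := topo.GetSegments()[ZFSTopologyKey]; ok {
			return node, nil
		}
	}
	return "", fmt.Errorf("no %q segment found in topology requirement", ZFSTopologyKey)
}

func main() {
	// A request shaped like what --strict-topology produces for a single node.
	req := &csi.TopologyRequirement{
		Preferred: []*csi.Topology{
			{Segments: map[string]string{ZFSTopologyKey: "gke-zfspv-pawan-default-pool-c8929518-cgd4"}},
		},
	}
	fmt.Println(pickOwnerNode(req))
}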