Mirror of https://github.com/TECHNOFAB11/zfs-localpv.git (synced 2025-12-11 22:10:11 +01:00)
adding topology support for zfspv (#7)
This PR adds support for the CSI driver to pick a node matching the topology specified in the StorageClass. The admin can set allowedTopologies in the StorageClass to list the nodes where the ZFS pools are set up:
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-zfspv
allowVolumeExpansion: true
parameters:
  blocksize: "4k"
  compression: "on"
  dedup: "on"
  thinprovision: "yes"
  poolname: "zfspv-pool"
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: kubernetes.io/hostname
    values:
    - gke-zfspv-pawan-default-pool-c8929518-cgd4
    - gke-zfspv-pawan-default-pool-c8929518-dxzc
```
Note: this PR picks the first node from the list of available nodes.
Signed-off-by: Pawan <pawan@mayadata.io>
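
The node selection itself is a one-liner inside `CreateVolume` (see the hunk further down). Below is a minimal sketch of that logic; the helper name `pickOwnerNode` and the package name are illustrative only, not part of the PR, while the CSI types come from the container-storage-interface Go spec that the driver already uses:

```go
package sketch

import (
	"errors"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// pickOwnerNode illustrates the selection described in the note above: the
// external-provisioner turns the StorageClass allowedTopologies into
// AccessibilityRequirements, and the driver simply takes the first preferred
// entry as the node that will own (and locally provision) the volume.
func pickOwnerNode(req *csi.CreateVolumeRequest) (string, error) {
	reqs := req.GetAccessibilityRequirements()
	if reqs == nil || len(reqs.Preferred) == 0 {
		return "", errors.New("no preferred topology passed by the provisioner")
	}
	// "kubernetes.io/hostname" is the topology key the driver registers
	// (ZFSTopologyKey in the diff below).
	node, ok := reqs.Preferred[0].Segments["kubernetes.io/hostname"]
	if !ok {
		return "", errors.New("hostname segment missing from preferred topology")
	}
	return node, nil
}
```

Together with the `--strict-topology` flag added to the csi-provisioner args in this change, the preferred list reflects the node chosen by the scheduler when delayed (WaitForFirstConsumer) binding is used.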
Parent: 0218dacea0
Commit: d0e97cddb2
11 changed files with 88 additions and 48 deletions

```diff
@@ -85,7 +85,17 @@ func (c *ZVController) syncZV(zv *apis.ZFSVolume) error {
 			zvol.RemoveZvolFinalizer(zv)
 		}
 	} else {
-		err = zvol.SetZvolProp(zv)
+		// if finalizer is not set then it means we are creating
+		// the volume. And if it is set then volume has already been
+		// created and this event is for property change only.
+		if zv.Finalizers != nil {
+			err = zvol.SetZvolProp(zv)
+		} else {
+			err = zvol.CreateZvol(zv)
+			if err == nil {
+				err = zvol.UpdateZvolInfo(zv)
+			}
+		}
 	}
 	return err
 }
@@ -101,11 +111,8 @@ func (c *ZVController) addZV(obj interface{}) {
 	if zvol.NodeID != zv.Spec.OwnerNodeID {
 		return
 	}
-	// TODO(pawan) scheduler will schedule the volume
-	// on a node and populate the OwnerNodeID accordingly.
-	// We need to create the zfs volume in that case.
 	logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
-	//c.enqueueZV(zv)
+	c.enqueueZV(zv)
 }
 
 // updateZV is the update event handler for CstorVolumeClaim
```

```diff
@@ -12,8 +12,14 @@ parameters:
   #keyformat: "raw"
   #keylocation: "file:///home/pawan/key"
   poolname: "zfspv-pool"
-provisioner: openebs.io/zfs
+provisioner: zfs-localpv
 volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: kubernetes.io/hostname
+    values:
+    - gke-zfspv-pawan-default-pool-c8929518-cgd4
+    - gke-zfspv-pawan-default-pool-c8929518-dxzc
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -32,15 +38,6 @@ kind: Pod
 metadata:
   name: fio
 spec:
-  affinity:
-    nodeAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        nodeSelectorTerms:
-        - matchExpressions:
-          - key: kubernetes.io/hostname
-            operator: In
-            values:
-            - gke-pawan-zfspv-default-pool-1813a371-6nhl
   restartPolicy: Never
   containers:
   - name: perfrunner
```

```diff
@@ -9,7 +9,14 @@ parameters:
   dedup: "on"
   thinprovision: "yes"
   poolname: "zfspv-pool"
-provisioner: openebs.io/zfs
+provisioner: zfs-localpv
+volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: kubernetes.io/hostname
+    values:
+    - gke-zfspv-pawan-default-pool-c8929518-cgd4
+    - gke-zfspv-pawan-default-pool-c8929518-dxzc
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -57,7 +64,7 @@ data:
     mysql -uroot -pk8sDem0 -e "INSERT INTO Hardware (id, name, owner, description) values (1, "dellserver", "basavaraj", "controller");" $DB_NAME
     mysql -uroot -pk8sDem0 -e "DROP DATABASE $DB_NAME;"
 ---
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: percona
@@ -73,15 +80,6 @@ spec:
       labels:
         name: percona
     spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/hostname
-                operator: In
-                values:
-                - gke-pawan-zfspv-default-pool-26f2b9a9-5fqd
       containers:
       - resources:
         name: percona
```

```diff
@@ -86,7 +86,7 @@ roleRef:
 
 ---
 kind: StatefulSet
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 metadata:
   name: openebs-zfs-controller
   namespace: kube-system
@@ -107,13 +107,13 @@ spec:
       serviceAccount: openebs-zfs-controller-sa
       containers:
         - name: csi-provisioner
-          image: quay.io/k8scsi/csi-provisioner:v1.0.1
+          image: quay.io/k8scsi/csi-provisioner:v1.4.0
           imagePullPolicy: IfNotPresent
           args:
-            - "--provisioner=openebs.io/zfs"
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
             - "--feature-gates=Topology=true"
+            - "--strict-topology"
           env:
             - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -121,7 +121,7 @@ spec:
             - name: socket-dir
               mountPath: /var/lib/csi/sockets/pluginproxy/
         - name: csi-attacher
-          image: quay.io/k8scsi/csi-attacher:v1.0.1
+          image: quay.io/k8scsi/csi-attacher:v2.0.0
           imagePullPolicy: IfNotPresent
           args:
             - "--v=5"
@@ -184,7 +184,7 @@ rules:
     verbs: ["get", "list", "watch"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments", "csinodes"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
 
 ---
 kind: ClusterRoleBinding
@@ -324,7 +324,7 @@ roleRef:
 ---
 
 kind: DaemonSet
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 metadata:
   name: openebs-zfs-node
   namespace: kube-system
@@ -343,7 +343,7 @@ spec:
      hostNetwork: true
      containers:
        - name: csi-node-driver-registrar
-          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1
+          image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
@@ -427,21 +427,27 @@ spec:
         - name: zfs-bin
           hostPath:
             path: /sbin/zfs
+            type: File
         - name: libzpool
           hostPath:
             path: /lib/libzpool.so.2.0.0
+            type: File
         - name: libzfscore
           hostPath:
             path: /lib/libzfs_core.so.1.0.0
+            type: File
         - name: libzfs
           hostPath:
             path: /lib/libzfs.so.2.0.0
+            type: File
         - name: libuutil
           hostPath:
             path: /lib/libuutil.so.1.0.1
+            type: File
         - name: libnvpair
           hostPath:
             path: /lib/libnvpair.so.1.0.1
+            type: File
         - name: registration-dir
           hostPath:
             path: /var/lib/kubelet/plugins_registry/
```

```diff
@@ -136,6 +136,12 @@ func (b *Builder) WithThinProv(thinprov string) *Builder {
 	return b
 }
 
+// WithOwnerNode sets owner node for the ZFSVolume where the volume should be provisioned
+func (b *Builder) WithOwnerNode(host string) *Builder {
+	b.volume.Object.Spec.OwnerNodeID = host
+	return b
+}
+
 // WithBlockSize sets blocksize of ZFSVolume
 func (b *Builder) WithBlockSize(blockSize string) *Builder {
 	bs := "4k"
```

```diff
@@ -181,8 +181,12 @@ func (ns *node) NodeGetInfo(
 	req *csi.NodeGetInfoRequest,
 ) (*csi.NodeGetInfoResponse, error) {
 
+	topology := map[string]string{zvol.ZFSTopologyKey: ns.driver.config.NodeID}
 	return &csi.NodeGetInfoResponse{
 		NodeId: ns.driver.config.NodeID,
+		AccessibleTopology: &csi.Topology{
+			Segments: topology,
+		},
 	}, nil
 }
 
```

```diff
@@ -79,6 +79,9 @@ func (cs *controller) CreateVolume(
 	pool := req.GetParameters()["poolname"]
 	tp := req.GetParameters()["thinprovision"]
 
+	// setting first in preferred list as the ownernode of this volume
+	OwnerNode := req.AccessibilityRequirements.Preferred[0].Segments[zvol.ZFSTopologyKey]
+
 	volObj, err := builder.NewBuilder().
 		WithName(volName).
 		WithCapacity(strconv.FormatInt(int64(size), 10)).
@@ -89,6 +92,7 @@ func (cs *controller) CreateVolume(
 		WithKeyFormat(kf).
 		WithKeyLocation(kl).
 		WithThinProv(tp).
+		WithOwnerNode(OwnerNode).
 		WithCompression(compression).Build()
 
 	if err != nil {
@@ -100,9 +104,12 @@ func (cs *controller) CreateVolume(
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
+	topology := map[string]string{zvol.ZFSTopologyKey: OwnerNode}
+
 	return csipayload.NewCreateVolumeResponseBuilder().
 		WithName(volName).
 		WithCapacity(size).
+		WithTopology(topology).
 		Build(), nil
 }
 
```

```diff
@@ -57,6 +57,14 @@ func (b *CreateVolumeResponseBuilder) WithContext(ctx map[string]string) *CreateVolumeResponseBuilder {
 	return b
 }
 
+// WithTopology sets the topology for the
+// CreateVolumeResponse instance
+func (b *CreateVolumeResponseBuilder) WithTopology(topology map[string]string) *CreateVolumeResponseBuilder {
+	b.response.Volume.AccessibleTopology = make([]*csi.Topology, 1)
+	b.response.Volume.AccessibleTopology[0] = &csi.Topology{Segments: topology}
+	return b
+}
+
 // Build returns the constructed instance
 // of csi CreateVolumeResponse
 func (b *CreateVolumeResponseBuilder) Build() *csi.CreateVolumeResponse {
```

```diff
@@ -109,14 +109,9 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 		return status.Error(codes.Internal, "volume is owned by different node")
 	}
 
-	devicePath, err := createZvol(vol)
+	devicePath, err := GetDevicePath(vol)
 	if err != nil {
-		return status.Error(codes.Internal, err.Error())
-	}
-
-	err = UpdateZvolInfo(vol)
-	if err != nil {
-		return status.Error(codes.Internal, err.Error())
+		return status.Error(codes.Internal, "not able to get the device path")
 	}
 
 	/*
@@ -138,7 +133,7 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 	}
 	err = FormatAndMountZvol(devicePath, mount)
 	if err != nil {
-		return status.Error(codes.Internal, err.Error())
+		return status.Error(codes.Internal, "not able to mount the volume")
 	}
 
 	return err
```

```diff
@@ -31,9 +31,10 @@ const (
 	OpenEBSNamespaceKey string = "OPENEBS_NAMESPACE"
 	// ZFSFinalizer for the ZfsVolume CR
 	ZFSFinalizer string = "zfs.openebs.io/finalizer"
-	// ZFSNodeKey will be used to insert Label
-	// in ZfsVolume CR
+	// ZFSNodeKey will be used to insert Label in ZfsVolume CR
 	ZFSNodeKey string = "kubernetes.io/nodename"
+	// ZFSTopologyKey is supported topology key for the zfs driver
+	ZFSTopologyKey string = "kubernetes.io/hostname"
 )
 
 var (
@@ -119,9 +119,9 @@ func buildVolumeDestroyArgs(vol *apis.ZFSVolume) []string {
 	return ZFSVolCmd
 }
 
-// createZvol creates the zvol and returns the corresponding diskPath
+// CreateZvol creates the zvol and returns the corresponding diskPath
 // of the volume which gets created on the node
-func createZvol(vol *apis.ZFSVolume) (string, error) {
+func CreateZvol(vol *apis.ZFSVolume) error {
 	zvol := vol.Spec.PoolName + "/" + vol.Name
 	devicePath := ZFS_DEVPATH + zvol
 
@@ -135,16 +135,16 @@ func createZvol(vol *apis.ZFSVolume) (string, error) {
 			logrus.Errorf(
 				"zfs: could not create zvol %v cmd %v error: %s", zvol, args, string(out),
 			)
-			return "", err
+			return err
 		}
 		logrus.Infof("created zvol %s", zvol)
 	} else if err == nil {
 		logrus.Infof("using existing zvol %v", zvol)
 	} else {
-		return "", err
+		return err
 	}
 
-	return devicePath, nil
+	return nil
 }
 
 // SetZvolProp sets the zvol property
@@ -191,3 +191,14 @@ func DestroyZvol(vol *apis.ZFSVolume) error {
 
 	return nil
 }
+
+// GetDevicePath returns device path for zvol if it exists
+func GetDevicePath(vol *apis.ZFSVolume) (string, error) {
+	zvol := vol.Spec.PoolName + "/" + vol.Name
+	devicePath := ZFS_DEVPATH + zvol
+
+	if _, err := os.Stat(devicePath); os.IsNotExist(err) {
+		return "", err
+	}
+	return devicePath, nil
+}
```