adding topology support for zfspv (#7)

This PR adds support for the CSI driver to pick a node that matches the topology specified in the StorageClass. The admin can set allowedTopologies in the StorageClass to list the nodes where the ZFS pools have been set up:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-zfspv
allowVolumeExpansion: true
parameters:
  blocksize: "4k"
  compression: "on"
  dedup: "on"
  thinprovision: "yes"
  poolname: "zfspv-pool"
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: kubernetes.io/hostname
    values:
      - gke-zfspv-pawan-default-pool-c8929518-cgd4
      - gke-zfspv-pawan-default-pool-c8929518-dxzc
```
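
For reference, a minimal PVC consuming this class could look like the sketch below (claim name and size are illustrative). With volumeBindingMode: WaitForFirstConsumer, provisioning is deferred until a pod using the claim is scheduled, and the scheduler only considers nodes allowed by allowedTopologies.

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: csi-zfspv
spec:
  storageClassName: openebs-zfspv
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi
```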

Note: This PR picks the first node from the list of preferred nodes passed in the CSI CreateVolume request.
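
Picking the first entry is safe here because the deploy manifest in this PR runs the csi-provisioner sidecar with --strict-topology: with delayed binding, the external-provisioner passes only the topology of the node selected by the scheduler, so the first preferred segment is the node the workload landed on. The relevant sidecar arguments (from the deploy manifest changed below):

```yaml
args:
  - "--provisioner=openebs.io/zfs"
  - "--csi-address=$(ADDRESS)"
  - "--strict-topology"
```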

Signed-off-by: Pawan <pawan@mayadata.io>
Authored by Pawan Prakash Sharma on 2019-11-01 06:46:04 +05:30; committed by Kiran Mova
parent 0218dacea0
commit d0e97cddb2
11 changed files with 88 additions and 48 deletions


@ -85,7 +85,17 @@ func (c *ZVController) syncZV(zv *apis.ZFSVolume) error {
zvol.RemoveZvolFinalizer(zv)
}
} else {
err = zvol.SetZvolProp(zv)
// if finalizer is not set then it means we are creating
// the volume. And if it is set then volume has already been
// created and this event is for property change only.
if zv.Finalizers != nil {
err = zvol.SetZvolProp(zv)
} else {
err = zvol.CreateZvol(zv)
if err == nil {
err = zvol.UpdateZvolInfo(zv)
}
}
}
return err
}
@ -101,11 +111,8 @@ func (c *ZVController) addZV(obj interface{}) {
if zvol.NodeID != zv.Spec.OwnerNodeID {
return
}
// TODO(pawan) scheduler will schedule the volume
// on a node and populate the OwnerNodeID accordingly.
// We need to create the zfs volume in that case.
logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
//c.enqueueZV(zv)
c.enqueueZV(zv)
}
// updateZV is the update event handler for CstorVolumeClaim


@ -12,8 +12,14 @@ parameters:
#keyformat: "raw"
#keylocation: "file:///home/pawan/key"
poolname: "zfspv-pool"
provisioner: openebs.io/zfs
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
- key: kubernetes.io/hostname
values:
- gke-zfspv-pawan-default-pool-c8929518-cgd4
- gke-zfspv-pawan-default-pool-c8929518-dxzc
---
kind: PersistentVolumeClaim
apiVersion: v1
@ -32,15 +38,6 @@ kind: Pod
metadata:
name: fio
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gke-pawan-zfspv-default-pool-1813a371-6nhl
restartPolicy: Never
containers:
- name: perfrunner


@ -9,7 +9,14 @@ parameters:
dedup: "on"
thinprovision: "yes"
poolname: "zfspv-pool"
provisioner: openebs.io/zfs
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
- key: kubernetes.io/hostname
values:
- gke-zfspv-pawan-default-pool-c8929518-cgd4
- gke-zfspv-pawan-default-pool-c8929518-dxzc
---
kind: PersistentVolumeClaim
apiVersion: v1
@ -57,7 +64,7 @@ data:
mysql -uroot -pk8sDem0 -e "INSERT INTO Hardware (id, name, owner, description) values (1, "dellserver", "basavaraj", "controller");" $DB_NAME
mysql -uroot -pk8sDem0 -e "DROP DATABASE $DB_NAME;"
---
apiVersion: apps/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: percona
@ -73,15 +80,6 @@ spec:
labels:
name: percona
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gke-pawan-zfspv-default-pool-26f2b9a9-5fqd
containers:
- resources:
name: percona


@ -86,7 +86,7 @@ roleRef:
---
kind: StatefulSet
apiVersion: apps/v1beta1
apiVersion: apps/v1
metadata:
name: openebs-zfs-controller
namespace: kube-system
@ -107,13 +107,13 @@ spec:
serviceAccount: openebs-zfs-controller-sa
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.0.1
image: quay.io/k8scsi/csi-provisioner:v1.4.0
imagePullPolicy: IfNotPresent
args:
- "--provisioner=openebs.io/zfs"
- "--csi-address=$(ADDRESS)"
- "--v=5"
- "--feature-gates=Topology=true"
- "--strict-topology"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
@ -121,7 +121,7 @@ spec:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v1.0.1
image: quay.io/k8scsi/csi-attacher:v2.0.0
imagePullPolicy: IfNotPresent
args:
- "--v=5"
@ -184,7 +184,7 @@ rules:
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments", "csinodes"]
verbs: ["get", "list", "watch", "update"]
verbs: ["get", "list", "watch", "update", "patch"]
---
kind: ClusterRoleBinding
@ -324,7 +324,7 @@ roleRef:
---
kind: DaemonSet
apiVersion: apps/v1beta2
apiVersion: apps/v1
metadata:
name: openebs-zfs-node
namespace: kube-system
@ -343,7 +343,7 @@ spec:
hostNetwork: true
containers:
- name: csi-node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -427,21 +427,27 @@ spec:
- name: zfs-bin
hostPath:
path: /sbin/zfs
type: File
- name: libzpool
hostPath:
path: /lib/libzpool.so.2.0.0
type: File
- name: libzfscore
hostPath:
path: /lib/libzfs_core.so.1.0.0
type: File
- name: libzfs
hostPath:
path: /lib/libzfs.so.2.0.0
type: File
- name: libuutil
hostPath:
path: /lib/libuutil.so.1.0.1
type: File
- name: libnvpair
hostPath:
path: /lib/libnvpair.so.1.0.1
type: File
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/


@ -136,6 +136,12 @@ func (b *Builder) WithThinProv(thinprov string) *Builder {
return b
}
// WithOwnerNode sets owner node for the ZFSVolume where the volume should be provisioned
func (b *Builder) WithOwnerNode(host string) *Builder {
b.volume.Object.Spec.OwnerNodeID = host
return b
}
// WithBlockSize sets blocksize of ZFSVolume
func (b *Builder) WithBlockSize(blockSize string) *Builder {
bs := "4k"


@ -181,8 +181,12 @@ func (ns *node) NodeGetInfo(
req *csi.NodeGetInfoRequest,
) (*csi.NodeGetInfoResponse, error) {
topology := map[string]string{zvol.ZFSTopologyKey: ns.driver.config.NodeID}
return &csi.NodeGetInfoResponse{
NodeId: ns.driver.config.NodeID,
AccessibleTopology: &csi.Topology{
Segments: topology,
},
}, nil
}
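
With NodeGetInfo now advertising an accessible topology, the kubelet records the driver's topology key on the node's CSINode object, which is what the external-provisioner consults when building the accessibility requirements for CreateVolume. A rough sketch of the resulting CSINode entry (API version, driver name, and node name are assumptions for illustration, not taken from this PR):

```yaml
apiVersion: storage.k8s.io/v1beta1
kind: CSINode
metadata:
  name: gke-zfspv-pawan-default-pool-c8929518-cgd4
spec:
  drivers:
    - name: openebs.io/zfs   # assumed; must match the registered CSI driver name
      nodeID: gke-zfspv-pawan-default-pool-c8929518-cgd4
      topologyKeys:
        - kubernetes.io/hostname
```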


@ -79,6 +79,9 @@ func (cs *controller) CreateVolume(
pool := req.GetParameters()["poolname"]
tp := req.GetParameters()["thinprovision"]
// setting first in preferred list as the ownernode of this volume
OwnerNode := req.AccessibilityRequirements.Preferred[0].Segments[zvol.ZFSTopologyKey]
volObj, err := builder.NewBuilder().
WithName(volName).
WithCapacity(strconv.FormatInt(int64(size), 10)).
@ -89,6 +92,7 @@ func (cs *controller) CreateVolume(
WithKeyFormat(kf).
WithKeyLocation(kl).
WithThinProv(tp).
WithOwnerNode(OwnerNode).
WithCompression(compression).Build()
if err != nil {
@ -100,9 +104,12 @@ func (cs *controller) CreateVolume(
return nil, status.Error(codes.Internal, err.Error())
}
topology := map[string]string{zvol.ZFSTopologyKey: OwnerNode}
return csipayload.NewCreateVolumeResponseBuilder().
WithName(volName).
WithCapacity(size).
WithTopology(topology).
Build(), nil
}
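
Because the CreateVolumeResponse now carries the owner node in its accessible topology, the external-provisioner turns it into node affinity on the PersistentVolume, which is what pins consuming pods to the node holding the zvol; this is also why the hard-coded pod nodeAffinity blocks could be dropped from the sample manifests above. A sketch of the resulting PV spec fragment (node name reused from the StorageClass example for illustration):

```yaml
nodeAffinity:
  required:
    nodeSelectorTerms:
      - matchExpressions:
          - key: kubernetes.io/hostname
            operator: In
            values:
              - gke-zfspv-pawan-default-pool-c8929518-cgd4
```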


@ -57,6 +57,14 @@ func (b *CreateVolumeResponseBuilder) WithContext(ctx map[string]string) *Create
return b
}
// WithTopology sets the topology for the
// CreateVolumeResponse instance
func (b *CreateVolumeResponseBuilder) WithTopology(topology map[string]string) *CreateVolumeResponseBuilder {
b.response.Volume.AccessibleTopology = make([]*csi.Topology, 1)
b.response.Volume.AccessibleTopology[0] = &csi.Topology{Segments: topology}
return b
}
// Build returns the constructed instance
// of csi CreateVolumeResponse
func (b *CreateVolumeResponseBuilder) Build() *csi.CreateVolumeResponse {


@ -109,14 +109,9 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
return status.Error(codes.Internal, "volume is owned by different node")
}
devicePath, err := createZvol(vol)
devicePath, err := GetDevicePath(vol)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
err = UpdateZvolInfo(vol)
if err != nil {
return status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, "not able to get the device path")
}
/*
@ -138,7 +133,7 @@ func CreateAndMountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
}
err = FormatAndMountZvol(devicePath, mount)
if err != nil {
return status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, "not able to mount the volume")
}
return err


@ -31,9 +31,10 @@ const (
OpenEBSNamespaceKey string = "OPENEBS_NAMESPACE"
// ZFSFinalizer for the ZfsVolume CR
ZFSFinalizer string = "zfs.openebs.io/finalizer"
// ZFSNodeKey will be used to insert Label
// in ZfsVolume CR
// ZFSNodeKey will be used to insert Label in ZfsVolume CR
ZFSNodeKey string = "kubernetes.io/nodename"
// ZFSTopologyKey is supported topology key for the zfs driver
ZFSTopologyKey string = "kubernetes.io/hostname"
)
var (
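
Worth noting: the supported topology key is the well-known kubernetes.io/hostname label that the kubelet already sets on every node, so no extra node labelling is needed and the allowedTopologies entries in the StorageClass can reference hostnames directly. For example, a node is selectable as-is through its standard kubelet-managed label:

```yaml
apiVersion: v1
kind: Node
metadata:
  name: gke-zfspv-pawan-default-pool-c8929518-cgd4
  labels:
    kubernetes.io/hostname: gke-zfspv-pawan-default-pool-c8929518-cgd4
```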


@ -119,9 +119,9 @@ func buildVolumeDestroyArgs(vol *apis.ZFSVolume) []string {
return ZFSVolCmd
}
// createZvol creates the zvol and returns the corresponding diskPath
// CreateZvol creates the zvol and returns the corresponding diskPath
// of the volume which gets created on the node
func createZvol(vol *apis.ZFSVolume) (string, error) {
func CreateZvol(vol *apis.ZFSVolume) error {
zvol := vol.Spec.PoolName + "/" + vol.Name
devicePath := ZFS_DEVPATH + zvol
@ -135,16 +135,16 @@ func createZvol(vol *apis.ZFSVolume) (string, error) {
logrus.Errorf(
"zfs: could not create zvol %v cmd %v error: %s", zvol, args, string(out),
)
return "", err
return err
}
logrus.Infof("created zvol %s", zvol)
} else if err == nil {
logrus.Infof("using existing zvol %v", zvol)
} else {
return "", err
return err
}
return devicePath, nil
return nil
}
// SetZvolProp sets the zvol property
@ -191,3 +191,14 @@ func DestroyZvol(vol *apis.ZFSVolume) error {
return nil
}
// GetDevicePath returns device path for zvol if it exists
func GetDevicePath(vol *apis.ZFSVolume) (string, error) {
zvol := vol.Spec.PoolName + "/" + vol.Name
devicePath := ZFS_DEVPATH + zvol
if _, err := os.Stat(devicePath); os.IsNotExist(err) {
return "", err
}
return devicePath, nil
}