From 90ecfe9c73a58f3cbf09f7e442f7883421b1043a Mon Sep 17 00:00:00 2001 From: Pawan Date: Mon, 28 Dec 2020 19:00:52 +0530 Subject: [PATCH] feat(schd): adding capacity weighted scheduler The ZFS Driver will use the capacity scheduler to pick a node which has less capacity occupied by the volumes. Making this the default scheduler as it is better than the volume count based scheduling. We can use the below storageclass to specify the scheduler ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: openebs-zfspv allowVolumeExpansion: true parameters: scheduler: "CapacityWeighted" poolname: "zfspv-pool" provisioner: zfs.csi.openebs.io ``` Please note that after the upgrade, there will be a change in the behavior. If we are not using the `scheduler` parameter in the storage class then after the upgrade the ZFS Driver will pick the node based on volume capacity weight instead of the count. Signed-off-by: Pawan --- changelogs/unreleased/266-pawanpraka1 | 1 + pkg/driver/schd_helper.go | 44 ++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 4 deletions(-) create mode 100644 changelogs/unreleased/266-pawanpraka1 diff --git a/changelogs/unreleased/266-pawanpraka1 b/changelogs/unreleased/266-pawanpraka1 new file mode 100644 index 0000000..247afa0 --- /dev/null +++ b/changelogs/unreleased/266-pawanpraka1 @@ -0,0 +1 @@ +adding capacity weighted scheduler diff --git a/pkg/driver/schd_helper.go b/pkg/driver/schd_helper.go index e89a86a..88e28a2 100644 --- a/pkg/driver/schd_helper.go +++ b/pkg/driver/schd_helper.go @@ -19,6 +19,7 @@ package driver import ( "github.com/openebs/zfs-localpv/pkg/builder/volbuilder" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strconv" zfs "github.com/openebs/zfs-localpv/pkg/zfs" ) @@ -26,12 +27,15 @@ import ( // scheduling algorithm constants const ( // pick the node where less volumes are provisioned for the given pool - // this will be the default scheduler when none provided VolumeWeighted = "VolumeWeighted" + + // pick the node 
where total provisioned volumes have occupied less capacity from the given pool + // this will be the default scheduler when none provided + CapacityWeighted = "CapacityWeighted" ) // getVolumeWeightedMap goes through all the pools on all the nodes -// and creats the node mapping of the volume for all the nodes. +// and creates the node mapping of the volume for all the nodes. // It returns a map which has nodes as key and volumes present // on the nodes as corresponding value. func getVolumeWeightedMap(pool string) (map[string]int64, error) { @@ -56,12 +60,44 @@ func getVolumeWeightedMap(pool string) (map[string]int64, error) { return nmap, nil } +// getCapacityWeightedMap goes through all the pools on all the nodes +// and creates the node mapping of the capacity for all the nodes. +// It returns a map which has nodes as key and capacity provisioned +// on the nodes as corresponding value. The scheduler will use this map +// and picks the node which is less weighted. +func getCapacityWeightedMap(pool string) (map[string]int64, error) { + nmap := map[string]int64{} + + zvlist, err := volbuilder.NewKubeclient(). + WithNamespace(zfs.OpenEBSNamespace). + List(metav1.ListOptions{}) + + if err != nil { + return nmap, err + } + + // create the map of the volume capacity + // for the given pool + for _, zv := range zvlist.Items { + if zv.Spec.PoolName == pool { + volsize, err := strconv.ParseInt(zv.Spec.Capacity, 10, 64) + if err == nil { + nmap[zv.Spec.OwnerNodeID] += volsize + } + } + } + + return nmap, nil +} + // getNodeMap returns the node mapping for the given scheduling algorithm func getNodeMap(schd string, pool string) (map[string]int64, error) { switch schd { case VolumeWeighted: return getVolumeWeightedMap(pool) + case CapacityWeighted: + return getCapacityWeightedMap(pool) } - // return VolumeWeighted(default) if not specified - return getVolumeWeightedMap(pool) + // return CapacityWeighted(default) if not specified + return getCapacityWeightedMap(pool) }