feat(topology): adding support for custom topology keys (#94)

This commit adds support for users to specify custom labels on the Kubernetes nodes and use them in the allowedTopologies section of the StorageClass.

Few notes:
- This PR depends on the CSI driver's capability to support custom topology keys. 
- Labels should be added to the nodes first, and the driver deployed afterwards, to make it aware of
all the labels that the node has. If labels are added after the ZFS-LocalPV driver
has been deployed, a restart of all the node CSI driver agents is required so that the driver
can pick up the labels and add them as supported topology keys.
- If the StorageClass is using Immediate binding mode and the topology key is not mentioned,
then all the nodes should be labeled using the same key, which means:
  - The same key should be present on all nodes; nodes can have different values for those keys.
  - If nodes are labeled with different keys, i.e. some nodes have different keys than others, then ZFSPV's default scheduler cannot effectively do the volume-count-based scheduling. In this case the CSI provisioner will pick keys from a random node and then prepare the preferred topology list using the nodes which have those keys defined, and the ZFSPV scheduler will schedule the PV among those nodes only.

Signed-off-by: Pawan <pawan@mayadata.io>
This commit is contained in:
Pawan Prakash Sharma 2020-04-30 14:13:29 +05:30 committed by GitHub
parent f65575e447
commit de9b302083
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 184 additions and 13 deletions

View file

@ -22,6 +22,7 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/openebs/zfs-localpv/pkg/builder/volbuilder"
k8sapi "github.com/openebs/zfs-localpv/pkg/client/k8s/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
zfs "github.com/openebs/zfs-localpv/pkg/zfs"
@ -34,10 +35,39 @@ const (
VolumeWeighted = "VolumeWeighted"
)
// GetNodeList returns the names of all cluster nodes that satisfy the
// given topology requirement.
//
// A node qualifies when, for at least one of the preferred topologies,
// every key/value pair in that topology's segments matches the node's
// labels. Each matching node is added at most once. A node that matches
// none of the preferred topologies is filtered out.
//
// Returns an error if listing the nodes from the Kubernetes API fails.
func GetNodeList(topo *csi.TopologyRequirement) ([]string, error) {
	var nodelist []string

	list, err := k8sapi.ListNodes(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	for _, node := range list.Items {
		for _, prf := range topo.Preferred {
			// The node matches this topology only if ALL of the
			// topology's segments are present with equal values in
			// the node's labels.
			matched := true
			for key, value := range prf.Segments {
				if node.Labels[key] != value {
					matched = false
					break
				}
			}
			if matched {
				nodelist = append(nodelist, node.Name)
				// Node already selected; no need to check the
				// remaining preferred topologies for it.
				break
			}
		}
	}
	return nodelist, nil
}
// volumeWeightedScheduler goes through all the pools on the nodes mentioned
// in the topology and picks the node which has less volume on
// the given zfs pool.
func volumeWeightedScheduler(topo *csi.TopologyRequirement, pool string) string {
func volumeWeightedScheduler(nodelist []string, pool string) string {
var selected string
zvlist, err := volbuilder.NewKubeclient().
@ -62,8 +92,7 @@ func volumeWeightedScheduler(topo *csi.TopologyRequirement, pool string) string
// schedule it on the node which has less
// number of volume for the given pool
for _, prf := range topo.Preferred {
node := prf.Segments[zfs.ZFSTopologyKey]
for _, node := range nodelist {
if volmap[node] < numVol {
selected = node
numVol = volmap[node]
@ -78,19 +107,29 @@ func scheduler(topo *csi.TopologyRequirement, schld string, pool string) string
if topo == nil ||
len(topo.Preferred) == 0 {
logrus.Errorf("topology information not provided")
logrus.Errorf("scheduler: topology information not provided")
return ""
}
nodelist, err := GetNodeList(topo)
if err != nil {
logrus.Errorf("scheduler: can not get the nodelist err : %v", err.Error())
return ""
} else if len(nodelist) == 0 {
logrus.Errorf("scheduler: nodelist is empty")
return ""
}
// if there is a single node, schedule it on that
if len(topo.Preferred) == 1 {
return topo.Preferred[0].Segments[zfs.ZFSTopologyKey]
if len(nodelist) == 1 {
return nodelist[0]
}
switch schld {
case VolumeWeighted:
return volumeWeightedScheduler(topo, pool)
return volumeWeightedScheduler(nodelist, pool)
default:
return volumeWeightedScheduler(topo, pool)
return volumeWeightedScheduler(nodelist, pool)
}
return ""