refact(deps): bump k8s and client-go deps to version v0.20.2 (#294)

Signed-off-by: prateekpandey14 <prateek.pandey@mayadata.io>
Prateek Pandey 2021-03-31 16:43:42 +05:30 committed by GitHub
parent 533e17a9aa
commit b1aa6ab51a
2196 changed files with 306727 additions and 251810 deletions


@@ -17,6 +17,7 @@ limitations under the License.
 package k8s
 import (
+"context"
 "strings"
 "github.com/pkg/errors"
@@ -50,5 +51,5 @@ func (c *Configmap) Get(options metav1.GetOptions) (cm *corev1.ConfigMap, err er
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get config map %s %s", c.namespace, c.name)
 }
-return cs.CoreV1().ConfigMaps(c.namespace).Get(c.name, options)
+return cs.CoreV1().ConfigMaps(c.namespace).Get(context.TODO(), c.name, options)
 }
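Note on the pattern above: in client-go v0.20 every typed clientset verb takes a context.Context as its first argument, and this commit fills the new slot with context.TODO(). Callers that already carry a deadline can thread their own context through instead. A minimal sketch of that, assuming an injected clientset; the helper name and timeout are illustrative, not part of this commit:

package k8sexample

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getConfigMapWithDeadline bounds the API call with a caller-owned context
// instead of relying on context.TODO().
func getConfigMapWithDeadline(cs kubernetes.Interface, ns, name string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cm, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("got configmap %s/%s\n", ns, cm.Name)
	return nil
}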


@@ -17,6 +17,8 @@ limitations under the License.
 package k8s
 import (
+"context"
 "github.com/pkg/errors"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,7 +48,7 @@ func (ns *NamespaceStruct) Get(name string, options metav1.GetOptions) (*corev1.
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get namespace: %s", name)
 }
-return cs.CoreV1().Namespaces().Get(name, options)
+return cs.CoreV1().Namespaces().Get(context.TODO(), name, options)
 }
 // List returns a slice of namespaces defined in a Kubernetes cluster
@@ -55,5 +57,5 @@ func (ns *NamespaceStruct) List(options metav1.ListOptions) (*corev1.NamespaceLi
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get namespaces")
 }
-return cs.CoreV1().Namespaces().List(options)
+return cs.CoreV1().Namespaces().List(context.TODO(), options)
 }
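List follows the same migration, and ListOptions still carries selectors as before. A short sketch of listing namespaces by label under v0.20; the selector value is made up for illustration:

package k8sexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// labeledNamespaces lists the names of namespaces matching a (hypothetical) label.
func labeledNamespaces(cs kubernetes.Interface) ([]string, error) {
	nsList, err := cs.CoreV1().Namespaces().List(context.TODO(),
		metav1.ListOptions{LabelSelector: "openebs.io/managed=true"})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(nsList.Items))
	for _, ns := range nsList.Items {
		names = append(names, ns.Name)
	}
	return names, nil
}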


@@ -17,6 +17,8 @@ limitations under the License.
 package k8s
 import (
+"context"
 "github.com/pkg/errors"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,7 +48,7 @@ func (n *NodeStruct) Get(name string, options metav1.GetOptions) (*corev1.Node,
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get node: %s", name)
 }
-return cs.CoreV1().Nodes().Get(name, options)
+return cs.CoreV1().Nodes().Get(context.TODO(), name, options)
 }
 // List returns a slice of Nodes registered in a Kubernetes cluster
@@ -55,7 +57,7 @@ func (n *NodeStruct) List(options metav1.ListOptions) (*corev1.NodeList, error)
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get nodes")
 }
-return cs.CoreV1().Nodes().List(options)
+return cs.CoreV1().Nodes().List(context.TODO(), options)
 }
 // NumberOfNodes returns the number of nodes registered in a Kubernetes cluster
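The hunk above is cut off at NumberOfNodes; under the new API a node count reduces to a List call plus a length check. A sketch of that shape, not necessarily this repo's exact body:

package k8sexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// numberOfNodes counts the nodes registered in the cluster.
func numberOfNodes(cs kubernetes.Interface) (int, error) {
	nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(nodes.Items), nil
}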


@@ -20,6 +20,7 @@ limitations under the License.
 package k8s
 import (
+"context"
 "fmt"
 "strings"
@@ -93,7 +94,7 @@ func (r *ResourceStruct) Create(obj *unstructured.Unstructured, subresources ...
 err = errors.Wrapf(err, "failed to create resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
 return
 }
-u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Create(obj, metav1.CreateOptions{}, subresources...)
+u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Create(context.TODO(), obj, metav1.CreateOptions{}, subresources...)
 if err != nil {
 err = errors.Wrapf(err, "failed to create resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
 return
@@ -110,7 +111,7 @@ func (r *ResourceStruct) Delete(obj *unstructured.Unstructured, subresources ...
 if err != nil {
 return errors.Wrapf(err, "failed to delete resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
 }
-err = dynamic.Resource(r.gvr).Namespace(r.namespace).Delete(obj.GetName(), &metav1.DeleteOptions{})
+err = dynamic.Resource(r.gvr).Namespace(r.namespace).Delete(context.TODO(), obj.GetName(), metav1.DeleteOptions{})
 if err != nil {
 return errors.Wrapf(err, "failed to delete resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
 }
@@ -128,7 +129,7 @@ func (r *ResourceStruct) Get(name string, opts metav1.GetOptions, subresources .
 err = errors.Wrapf(err, "failed to get resource '%s' '%s' at '%s'", r.gvr, name, r.namespace)
 return
 }
-u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Get(name, opts, subresources...)
+u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Get(context.TODO(), name, opts, subresources...)
 if err != nil {
 err = errors.Wrapf(err, "failed to get resource '%s' '%s' at '%s'", r.gvr, name, r.namespace)
 return
@@ -155,7 +156,7 @@ func (r *ResourceStruct) Update(oldobj, newobj *unstructured.Unstructured, subre
 resourceVersion := oldobj.GetResourceVersion()
 newobj.SetResourceVersion(resourceVersion)
-u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Update(newobj, metav1.UpdateOptions{}, subresources...)
+u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Update(context.TODO(), newobj, metav1.UpdateOptions{}, subresources...)
 if err != nil {
 err = errors.Wrapf(err, "failed to update resource '%s' '%s' at '%s'", r.gvr, oldobj.GetName(), r.namespace)
 return
@@ -170,7 +171,7 @@ func (r *ResourceStruct) List(opts metav1.ListOptions) (u *unstructured.Unstruct
 err = errors.Wrapf(err, "failed to list resource '%s' at '%s'", r.gvr, r.namespace)
 return
 }
-u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).List(opts)
+u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).List(context.TODO(), opts)
 if err != nil {
 err = errors.Wrapf(err, "failed to list resource '%s' at '%s'", r.gvr, r.namespace)
 return
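Two things change for the dynamic client here: every verb gains the leading context.Context, and Delete now takes metav1.DeleteOptions by value instead of by pointer. A minimal sketch against dynamic.Interface; the GroupVersionResource is illustrative:

package k8sexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// deletePVC exercises the v0.20 dynamic-client shapes used in the hunks above.
func deletePVC(client dynamic.Interface, ns, name string) error {
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "persistentvolumeclaims"}
	return client.Resource(gvr).Namespace(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
}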


@@ -17,7 +17,10 @@ limitations under the License.
 package helpers
 import (
+"os"
 "strings"
+"github.com/google/uuid"
 )
 // GetCaseInsensitiveMap coerces the map's keys to lower case, which only works
@@ -43,3 +46,29 @@ func GetInsensitiveParameter(dict *map[string]string, key string) string {
 insensitiveDict := GetCaseInsensitiveMap(dict)
 return insensitiveDict[strings.ToLower(key)]
 }
+func exists(path string) (os.FileInfo, bool) {
+info, err := os.Stat(path)
+if os.IsNotExist(err) {
+return nil, false
+}
+return info, true
+}
+// FileExists checks if a file exists and is not a directory
+func FileExists(filepath string) bool {
+info, present := exists(filepath)
+return present && info.Mode().IsRegular()
+}
+// DirExists checks if a directory exists
+func DirExists(path string) bool {
+info, present := exists(path)
+return present && info.IsDir()
+}
+// IsValidUUID validates whether a string is a valid UUID
+func IsValidUUID(u string) bool {
+_, err := uuid.Parse(u)
+return err == nil
+}
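The new helpers are thin wrappers over os.Stat and github.com/google/uuid. A usage sketch; the import path is assumed from this repo's layout and may differ:

package main

import (
	"fmt"

	"github.com/openebs/lib-csi/pkg/common/helpers" // assumed path
)

func main() {
	fmt.Println(helpers.FileExists("/etc/hosts")) // true on a typical Linux host
	fmt.Println(helpers.DirExists("/tmp"))        // true
	fmt.Println(helpers.IsValidUUID("123e4567-e89b-12d3-a456-426614174000")) // true
}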


@@ -17,7 +17,7 @@ limitations under the License.
 package mount
 import (
-"k8s.io/kubernetes/pkg/util/mount"
+"k8s.io/utils/mount"
 )
 // GetMounts gets mountpoints for the specified volume
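The mount helpers moved out of the main Kubernetes repo: k8s.io/utils/mount exposes the same Interface that k8s.io/kubernetes/pkg/util/mount used to, so only the import path changes. A minimal sketch of the replacement package:

package main

import (
	"fmt"

	"k8s.io/utils/mount"
)

func main() {
	mounter := mount.New("") // empty path falls back to the default mount binary
	pts, err := mounter.List()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d mount points\n", len(pts))
}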


@@ -17,7 +17,7 @@ limitations under the License.
 package scheduler
 import (
-"math"
+"sort"
 "github.com/container-storage-interface/spec/lib/go/csi"
 k8sapi "github.com/openebs/lib-csi/pkg/client/k8s"
@@ -25,8 +25,14 @@ import (
 "k8s.io/klog"
 )
+// key value struct for creating the filtered list
+type kv struct {
+Key string
+Value int64
+}
 // getNodeList gets the nodelist which satisfies the topology info
-func getNodeList(topo *csi.TopologyRequirement) ([]string, error) {
+func getNodeList(topo []*csi.Topology) ([]string, error) {
 var nodelist []string
@@ -36,7 +42,7 @@ func getNodeList(topo *csi.TopologyRequirement) ([]string, error) {
 }
 for _, node := range list.Items {
-for _, prf := range topo.Preferred {
+for _, prf := range topo {
 nodeFiltered := false
 for key, value := range prf.Segments {
 if node.Labels[key] != value {
@@ -54,45 +60,70 @@ func getNodeList(topo *csi.TopologyRequirement) ([]string, error) {
 return nodelist, nil
 }
-// runScheduler goes through the node mapping
-// in the topology and picks the node which is less weighted
-func runScheduler(nodelist []string, nmap map[string]int64) string {
-var selected string
+// runScheduler goes through the node mapping in the topology
+// and creates the list of preferred nodes as per their weight
+func runScheduler(nodelist []string, nmap map[string]int64) []string {
+var preferred []string
+var fmap []kv
-var weight int64 = math.MaxInt64
-// schedule it on the node which has less weight
+// go through the filtered nodes and prepare the preferred list
 for _, node := range nodelist {
-if nmap[node] < weight {
-selected = node
-weight = nmap[node]
+if val, ok := nmap[node]; ok {
+// create the filtered node map
+fmap = append(fmap, kv{node, val})
+} else {
+// put the non-occupied nodes at the beginning of the list
+preferred = append(preferred, node)
 }
 }
-return selected
+// sort the filtered node map
+sort.Slice(fmap, func(i, j int) bool {
+return fmap[i].Value < fmap[j].Value
+})
+// put the occupied nodes in sorted order at the end
+for _, kv := range fmap {
+preferred = append(preferred, kv.Key)
+}
+return preferred
 }
 // Scheduler schedules the PV as per topology constraints for
 // the given node weight.
-func Scheduler(req *csi.CreateVolumeRequest, nmap map[string]int64) string {
-topo := req.AccessibilityRequirements
-if topo == nil ||
-len(topo.Preferred) == 0 {
+func Scheduler(req *csi.CreateVolumeRequest, nmap map[string]int64) []string {
+var nodelist []string
+areq := req.AccessibilityRequirements
+if areq == nil {
+klog.Errorf("scheduler: Accessibility Requirements not provided")
+return nodelist
+}
+topo := areq.Preferred
+if len(topo) == 0 {
+// if the preferred list is empty, use the requisite
+topo = areq.Requisite
+}
+if len(topo) == 0 {
 klog.Errorf("scheduler: topology information not provided")
-return ""
+return nodelist
 }
 nodelist, err := getNodeList(topo)
 if err != nil {
 klog.Errorf("scheduler: can not get the nodelist err : %v", err.Error())
-return ""
+return nodelist
 } else if len(nodelist) == 0 {
 klog.Errorf("scheduler: nodelist is empty")
-return ""
+return nodelist
 }
 // if there is a single node, schedule it on that
 if len(nodelist) == 1 {
-return nodelist[0]
+return nodelist
 }
 return runScheduler(nodelist, nmap)
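Behavior change worth pinning down: Scheduler now returns a ranked list of candidate nodes rather than a single name. runScheduler puts nodes that are absent from the volume-count map (unoccupied) first, then occupied nodes in ascending order of weight. A hypothetical in-package test sketching that contract:

package scheduler

import (
	"reflect"
	"testing"
)

// TestRunSchedulerOrdering: unoccupied nodes lead the list, occupied nodes
// follow sorted by ascending weight.
func TestRunSchedulerOrdering(t *testing.T) {
	nodelist := []string{"node-1", "node-2", "node-3"}
	nmap := map[string]int64{"node-1": 5, "node-3": 2} // node-2 is unoccupied
	got := runScheduler(nodelist, nmap)
	want := []string{"node-2", "node-3", "node-1"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("runScheduler() = %v, want %v", got, want)
	}
}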