Mirror of https://github.com/TECHNOFAB11/zfs-localpv.git
Synced 2025-12-12 06:20:11 +01:00
refactor(zfspv): renamed watcher to mgmt package
The package does the management task as well, hence the rename. Also corrected a few logs and renamed zvol to zfs, since we support both zvols and datasets.
Signed-off-by: Pawan <pawan@mayadata.io>
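
For downstream callers, the visible change is the entry point: the volume controller now starts from the mgmt package rather than cmd/controller. A minimal sketch of the new call site, assuming only what this commit shows (mgmt.Start takes a *sync.RWMutex and blocks until shutdown); this is not the actual wiring of the repo's cmd package:

package main

import (
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/openebs/zfs-localpv/pkg/mgmt"
)

func main() {
	// Shared mutex so concurrent controllers serialize their AddToScheme
	// calls (see pkg/mgmt/start.go in this commit).
	var mtx sync.RWMutex
	// Start blocks until the first shutdown signal.
	if err := mgmt.Start(&mtx); err != nil {
		logrus.Fatalf("Failed to start ZFS volume management controller: %s", err.Error())
	}
}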
This commit is contained in:
parent e953af99cf
commit 523e862159
7 changed files with 27 additions and 27 deletions
@@ -200,7 +200,7 @@ func (b *Builder) WithLabels(labels map[string]string) *Builder {
 	if len(labels) == 0 {
 		b.errs = append(
 			b.errs,
-			errors.New("failed to build cstorvolume object: missing labels"),
+			errors.New("failed to build zfs volume object: missing labels"),
 		)
 		return b
 	}

@@ -19,9 +19,9 @@ package driver
 import (
 	"github.com/Sirupsen/logrus"
 	"github.com/container-storage-interface/spec/lib/go/csi"
-	ctrl "github.com/openebs/zfs-localpv/cmd/controller"
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1"
 	"github.com/openebs/zfs-localpv/pkg/builder"
+	"github.com/openebs/zfs-localpv/pkg/mgmt"
 	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"

@@ -42,9 +42,9 @@ func NewNode(d *CSIDriver) csi.NodeServer {
 	var ControllerMutex = sync.RWMutex{}
 	// start the zfsvolume watcher
 	go func() {
-		err := ctrl.Start(&ControllerMutex)
+		err := mgmt.Start(&ControllerMutex)
 		if err != nil {
-			logrus.Errorf("Failed to start cstorvolume claim controller: %s", err.Error())
+			logrus.Fatalf("Failed to start ZFS volume management controller: %s", err.Error())
 		}
 	}()

@@ -24,7 +24,7 @@ import (
 	"github.com/openebs/zfs-localpv/pkg/builder"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	zvol "github.com/openebs/zfs-localpv/pkg/zfs"
+	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 )

 // scheduling algorithm constants

@@ -41,7 +41,7 @@ func volumeWeightedScheduler(topo *csi.TopologyRequirement, pool string) string
 	var selected string

 	zvlist, err := builder.NewKubeclient().
-		WithNamespace(zvol.OpenEBSNamespace).
+		WithNamespace(zfs.OpenEBSNamespace).
 		List(metav1.ListOptions{})

 	if err != nil {

@@ -63,7 +63,7 @@ func volumeWeightedScheduler(topo *csi.TopologyRequirement, pool string) string
 	// schedule it on the node which has less
 	// number of volume for the given pool
 	for _, prf := range topo.Preferred {
-		node := prf.Segments[zvol.ZFSTopologyKey]
+		node := prf.Segments[zfs.ZFSTopologyKey]
 		if volmap[node] < numVol {
 			selected = node
 			numVol = volmap[node]

@@ -83,7 +83,7 @@ func scheduler(topo *csi.TopologyRequirement, schld string, pool string) string
 	}
 	// if there is a single node, schedule it on that
 	if len(topo.Preferred) == 1 {
-		return topo.Preferred[0].Segments[zvol.ZFSTopologyKey]
+		return topo.Preferred[0].Segments[zfs.ZFSTopologyKey]
 	}

 	switch schld {

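The hunks above show the selection loop but elide how volmap is built. A hedged sketch of that counting step: it assumes the list result exposes an Items slice of ZFSVolume objects and that the count is per node for the given pool (field names Spec.PoolName and Spec.OwnerNodeID are taken from pkg/mgmt/mgmt.go below); this is not the committed implementation.

	// Count existing volumes per node for the given pool (sketch).
	volmap := map[string]int{}
	for _, zv := range zvlist.Items {
		if zv.Spec.PoolName == pool {
			volmap[zv.Spec.OwnerNodeID]++
		}
	}

The selection loop then simply keeps the preferred node with the smallest count, so new volumes spread across the nodes that carry the pool.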
pkg/mgmt/builder.go — 136 lines (new file)

@@ -0,0 +1,136 @@
/*
Copyright 2019 The OpenEBS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mgmt

import (
	"github.com/Sirupsen/logrus"

	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
	openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme"
	informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions"
	listers "github.com/openebs/zfs-localpv/pkg/generated/lister/core/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
)

const controllerAgentName = "zfsvolume-controller"

// ZVController is the controller implementation for ZV resources
type ZVController struct {
	// kubeclientset is a standard kubernetes clientset
	kubeclientset kubernetes.Interface

	// clientset is an openebs custom-resource clientset generated for the custom API group.
	clientset clientset.Interface

	zvLister listers.ZFSVolumeLister

	// zvSynced is used for caches sync to get populated
	zvSynced cache.InformerSynced

	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface

	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}

// ZVControllerBuilder is the builder object for controller.
type ZVControllerBuilder struct {
	ZVController *ZVController
}

// NewZVControllerBuilder returns an empty instance of controller builder.
func NewZVControllerBuilder() *ZVControllerBuilder {
	return &ZVControllerBuilder{
		ZVController: &ZVController{},
	}
}

// withKubeClient fills the kube client into the controller object.
func (cb *ZVControllerBuilder) withKubeClient(ks kubernetes.Interface) *ZVControllerBuilder {
	cb.ZVController.kubeclientset = ks
	return cb
}

// withOpenEBSClient fills the openebs client into the controller object.
func (cb *ZVControllerBuilder) withOpenEBSClient(cs clientset.Interface) *ZVControllerBuilder {
	cb.ZVController.clientset = cs
	return cb
}

// withZVLister fills the zv lister into the controller object.
func (cb *ZVControllerBuilder) withZVLister(sl informers.SharedInformerFactory) *ZVControllerBuilder {
	zvInformer := sl.Openebs().V1alpha1().ZFSVolumes()
	cb.ZVController.zvLister = zvInformer.Lister()
	return cb
}

// withZVSynced adds object sync information in cache to the controller object.
func (cb *ZVControllerBuilder) withZVSynced(sl informers.SharedInformerFactory) *ZVControllerBuilder {
	zvInformer := sl.Openebs().V1alpha1().ZFSVolumes()
	cb.ZVController.zvSynced = zvInformer.Informer().HasSynced
	return cb
}

// withWorkqueueRateLimiting adds a rate-limiting workqueue to the controller object.
func (cb *ZVControllerBuilder) withWorkqueueRateLimiting() *ZVControllerBuilder {
	cb.ZVController.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ZV")
	return cb
}

// withRecorder adds a recorder to the controller object.
func (cb *ZVControllerBuilder) withRecorder(ks kubernetes.Interface) *ZVControllerBuilder {
	logrus.Infof("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(logrus.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
	cb.ZVController.recorder = recorder
	return cb
}

// withEventHandler adds event handlers to the controller object.
func (cb *ZVControllerBuilder) withEventHandler(cvcInformerFactory informers.SharedInformerFactory) *ZVControllerBuilder {
	cvcInformer := cvcInformerFactory.Openebs().V1alpha1().ZFSVolumes()
	// Set up an event handler for when ZV resources change
	cvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    cb.ZVController.addZV,
		UpdateFunc: cb.ZVController.updateZV,
		DeleteFunc: cb.ZVController.deleteZV,
	})
	return cb
}

// Build returns a controller instance.
func (cb *ZVControllerBuilder) Build() (*ZVController, error) {
	err := openebsScheme.AddToScheme(scheme.Scheme)
	if err != nil {
		return nil, err
	}
	return cb.ZVController, nil
}
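
For reference, this builder is meant to be consumed as a single chain; pkg/mgmt/start.go later in this commit constructs the controller exactly like this:

	controller, err := NewZVControllerBuilder().
		withKubeClient(kubeClient).
		withOpenEBSClient(openebsClient).
		withZVSynced(zvInformerFactory).
		withZVLister(zvInformerFactory).
		withRecorder(kubeClient).
		withEventHandler(zvInformerFactory).
		withWorkqueueRateLimiting().Build()

Build() is the only step that can fail (it registers the clientset's types with the scheme), which is why the with* setters accumulate no errors.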
pkg/mgmt/mgmt.go — 254 lines (new file)

@@ -0,0 +1,254 @@
/*
Copyright 2019 The OpenEBS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mgmt

import (
	"fmt"
	"time"

	"github.com/Sirupsen/logrus"

	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1"
	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
	k8serror "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
)

// isDeletionCandidate checks if a zfs volume is a deletion candidate.
func (c *ZVController) isDeletionCandidate(zv *apis.ZFSVolume) bool {
	return zv.ObjectMeta.DeletionTimestamp != nil
}

// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the ZFSVolume resource
// with the current status of the resource.
func (c *ZVController) syncHandler(key string) error {
	// Convert the namespace/name string into a distinct namespace and name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
		return nil
	}

	// Get the zv resource with this namespace/name
	zv, err := c.zvLister.ZFSVolumes(namespace).Get(name)
	if k8serror.IsNotFound(err) {
		runtime.HandleError(fmt.Errorf("zfsvolume '%s' has been deleted", key))
		return nil
	}
	if err != nil {
		return err
	}
	zvCopy := zv.DeepCopy()
	err = c.syncZV(zvCopy)
	return err
}

// enqueueZV takes a ZFSVolume resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than ZFSVolume.
func (c *ZVController) enqueueZV(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		runtime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

// syncZV is the function which tries to converge to the desired state for the
// ZFSVolume
func (c *ZVController) syncZV(zv *apis.ZFSVolume) error {
	var err error
	// ZFS volume should be deleted. Check if the deletion timestamp is set
	if c.isDeletionCandidate(zv) {
		err = zfs.DestroyVolume(zv)
		if err == nil {
			zfs.RemoveZvolFinalizer(zv)
		}
	} else {
		// if the finalizer is not set then it means we are creating
		// the volume. And if it is set then the volume has already been
		// created and this event is for a property change only.
		if zv.Finalizers != nil {
			err = zfs.SetZvolProp(zv)
		} else {
			err = zfs.CreateVolume(zv)
			if err == nil {
				err = zfs.UpdateZvolInfo(zv)
			}
		}
	}
	return err
}

// addZV is the add event handler for ZFSVolume
func (c *ZVController) addZV(obj interface{}) {
	zv, ok := obj.(*apis.ZFSVolume)
	if !ok {
		runtime.HandleError(fmt.Errorf("Couldn't get zv object %#v", obj))
		return
	}

	if zfs.NodeID != zv.Spec.OwnerNodeID {
		return
	}
	logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
	c.enqueueZV(zv)
}

// updateZV is the update event handler for ZFSVolume
func (c *ZVController) updateZV(oldObj, newObj interface{}) {
	newZV, ok := newObj.(*apis.ZFSVolume)
	if !ok {
		runtime.HandleError(fmt.Errorf("Couldn't get zv object %#v", newZV))
		return
	}

	if zfs.NodeID != newZV.Spec.OwnerNodeID {
		return
	}

	oldZV, ok := oldObj.(*apis.ZFSVolume)
	if zfs.PropertyChanged(oldZV, newZV) ||
		c.isDeletionCandidate(newZV) {
		logrus.Infof("Got update event for ZV %s/%s", newZV.Spec.PoolName, newZV.Name)
		c.enqueueZV(newZV)
	}
}

// deleteZV is the delete event handler for ZFSVolume
func (c *ZVController) deleteZV(obj interface{}) {
	zv, ok := obj.(*apis.ZFSVolume)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			runtime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		zv, ok = tombstone.Obj.(*apis.ZFSVolume)
		if !ok {
			runtime.HandleError(fmt.Errorf("Tombstone contained object that is not a zfsvolume %#v", obj))
			return
		}
	}

	if zfs.NodeID != zv.Spec.OwnerNodeID {
		return
	}

	logrus.Infof("Got delete event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
	c.enqueueZV(zv)
}

// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *ZVController) Run(threadiness int, stopCh <-chan struct{}) error {
	defer runtime.HandleCrash()
	defer c.workqueue.ShutDown()

	// Start the informer factories to begin populating the informer caches
	logrus.Info("Starting ZV controller")

	// Wait for the k8s caches to be synced before starting workers
	logrus.Info("Waiting for informer caches to sync")
	if ok := cache.WaitForCacheSync(stopCh, c.zvSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	logrus.Info("Starting ZV workers")
	// Launch workers to process ZV resources.
	// Threadiness decides the number of workers launched to process work items from the queue.
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	logrus.Info("Started ZV workers")
	<-stopCh
	logrus.Info("Shutting down ZV workers")

	return nil
}

// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *ZVController) runWorker() {
	for c.processNextWorkItem() {
	}
}

// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *ZVController) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	// We wrap this block in a func so we can defer c.workqueue.Done.
	err := func(obj interface{}) error {
		// We call Done here so the workqueue knows we have finished
		// processing this item. We also must remember to call Forget if we
		// do not want this work item being re-queued. For example, we do
		// not call Forget if a transient error occurs, instead the item is
		// put back on the workqueue and attempted again after a back-off
		// period.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// We expect strings to come off the workqueue. These are of the
		// form namespace/name. We do this as the delayed nature of the
		// workqueue means the items in the informer cache may actually be
		// more up to date than when the item was initially put onto the
		// workqueue.
		if key, ok = obj.(string); !ok {
			// As the item in the workqueue is actually invalid, we call
			// Forget here else we'd go into a loop of attempting to
			// process a work item that is invalid.
			c.workqueue.Forget(obj)
			runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Run the syncHandler, passing it the namespace/name string of the
		// ZV resource to be synced.
		if err := c.syncHandler(key); err != nil {
			// Put the item back on the workqueue to handle any transient errors.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		c.workqueue.Forget(obj)
		logrus.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)

	if err != nil {
		runtime.HandleError(err)
		return true
	}

	return true
}
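
Condensed, the reconcile behavior above comes down to one per-node ownership gate applied by every event handler, plus a three-way branch in syncZV. A sketch of that gate (ownedByThisNode is a hypothetical helper, not part of this commit) with the decision table as comments:

	// Each node runs its own controller instance; events for volumes owned
	// by other nodes are dropped before enqueueing.
	func ownedByThisNode(zv *apis.ZFSVolume) bool {
		return zfs.NodeID == zv.Spec.OwnerNodeID
	}

	// syncZV decision table (from the code above):
	//   DeletionTimestamp set -> zfs.DestroyVolume, then zfs.RemoveZvolFinalizer
	//   finalizers present    -> zfs.SetZvolProp (property change on an existing volume)
	//   finalizers absent     -> zfs.CreateVolume, then zfs.UpdateZvolInfo
	//                            (which presumably sets the finalizer)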
pkg/mgmt/start.go — 110 lines (new file)

@@ -0,0 +1,110 @@
/*
Copyright 2019 The OpenEBS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mgmt

import (
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/pkg/errors"

	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
	informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
)

var (
	masterURL  string
	kubeconfig string
)

// Start starts the zfsvolume controller.
func Start(controllerMtx *sync.RWMutex) error {
	// set up signals so we handle the first shutdown signal gracefully
	stopCh := signals.SetupSignalHandler()

	// Get in-cluster config
	cfg, err := getClusterConfig(kubeconfig)
	if err != nil {
		return errors.Wrap(err, "error building kubeconfig")
	}

	// Building Kubernetes Clientset
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return errors.Wrap(err, "error building kubernetes clientset")
	}

	// Building OpenEBS Clientset
	openebsClient, err := clientset.NewForConfig(cfg)
	if err != nil {
		return errors.Wrap(err, "error building openebs clientset")
	}

	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
	zvInformerFactory := informers.NewSharedInformerFactory(openebsClient, time.Second*30)
	// The Build() fn of every controller calls AddToScheme to add all the types
	// of this clientset into the given scheme.
	// If multiple controllers happen to call AddToScheme at the same time,
	// it causes a panic with an error about concurrent map access.
	// This lock is used to serialize the AddToScheme calls of all controllers.
	controllerMtx.Lock()

	controller, err := NewZVControllerBuilder().
		withKubeClient(kubeClient).
		withOpenEBSClient(openebsClient).
		withZVSynced(zvInformerFactory).
		withZVLister(zvInformerFactory).
		withRecorder(kubeClient).
		withEventHandler(zvInformerFactory).
		withWorkqueueRateLimiting().Build()

	// controller.Run() below is a blocking call, so we can't use defer to release the lock
	controllerMtx.Unlock()

	if err != nil {
		return errors.Wrapf(err, "error building controller instance")
	}

	go kubeInformerFactory.Start(stopCh)
	go zvInformerFactory.Start(stopCh)

	// Threadiness defines the number of workers to be launched in the Run function
	return controller.Run(2, stopCh)
}

// getClusterConfig returns the config for k8s.
func getClusterConfig(kubeconfig string) (*rest.Config, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		logrus.Errorf("Failed to get k8s in-cluster config. %+v", err)
		if kubeconfig == "" {
			return nil, errors.Wrap(err, "kubeconfig is empty")
		}
		cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
		if err != nil {
			return nil, errors.Wrap(err, "error building kubeconfig")
		}
	}
	return cfg, err
}
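
getClusterConfig implements the common in-cluster-first fallback: try the service-account config, and only then fall back to an explicit kubeconfig. The same pattern in isolation, for anyone reusing it (loadConfig is a hypothetical helper, not an API exported by this package):

	// In-cluster first, kubeconfig second (sketch of the pattern above,
	// using the same client-go calls).
	func loadConfig(masterURL, kubeconfig string) (*rest.Config, error) {
		if cfg, err := rest.InClusterConfig(); err == nil {
			return cfg, nil
		}
		if kubeconfig == "" {
			return nil, errors.New("not running in cluster and kubeconfig is empty")
		}
		return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
	}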