feat(zfspv): move to klog (#166)

Signed-off-by: vaniisgh <vanisingh@live.co.uk>
This commit is contained in:
vaniisgh 2020-06-29 12:18:33 +05:30 committed by GitHub
parent 54f2b0b9fd
commit d0d1664d43
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
53 changed files with 124 additions and 2991 deletions

View file

@@ -17,8 +17,6 @@ limitations under the License.
package snapshot
import (
"github.com/Sirupsen/logrus"
clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme"
informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions"
@@ -30,6 +28,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
)
const controllerAgentName = "zfssnap-controller"
@@ -105,9 +104,9 @@ func (cb *SnapControllerBuilder) withWorkqueueRateLimiting() *SnapControllerBuil
// withRecorder adds recorder to controller object.
func (cb *SnapControllerBuilder) withRecorder(ks kubernetes.Interface) *SnapControllerBuilder {
logrus.Infof("Creating event broadcaster")
klog.Infof("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(logrus.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
cb.SnapController.recorder = recorder

View file

@@ -20,14 +20,13 @@ import (
"fmt"
"time"
"github.com/Sirupsen/logrus"
apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
zfs "github.com/openebs/zfs-localpv/pkg/zfs"
k8serror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
// isDeletionCandidate checks if a zfs snapshot is a deletion candidate.
@@ -106,7 +105,7 @@ func (c *SnapController) addSnap(obj interface{}) {
if zfs.NodeID != snap.Spec.OwnerNodeID {
return
}
logrus.Infof("Got add event for Snap %s/%s", snap.Spec.PoolName, snap.Name)
klog.Infof("Got add event for Snap %s/%s", snap.Spec.PoolName, snap.Name)
c.enqueueSnap(snap)
}
@@ -125,7 +124,7 @@ func (c *SnapController) updateSnap(oldObj, newObj interface{}) {
// update on Snapshot CR does not make sense unless it is a deletion candidate
if c.isDeletionCandidate(newSnap) {
logrus.Infof("Got update event for Snap %s/%s@%s", newSnap.Spec.PoolName, newSnap.Labels[zfs.ZFSVolKey], newSnap.Name)
klog.Infof("Got update event for Snap %s/%s@%s", newSnap.Spec.PoolName, newSnap.Labels[zfs.ZFSVolKey], newSnap.Name)
c.enqueueSnap(newSnap)
}
}
@@ -150,7 +149,7 @@ func (c *SnapController) deleteSnap(obj interface{}) {
return
}
logrus.Infof("Got delete event for Snap %s/%s@%s", snap.Spec.PoolName, snap.Labels[zfs.ZFSVolKey], snap.Name)
klog.Infof("Got delete event for Snap %s/%s@%s", snap.Spec.PoolName, snap.Labels[zfs.ZFSVolKey], snap.Name)
c.enqueueSnap(snap)
}
@@ -163,23 +162,23 @@ func (c *SnapController) Run(threadiness int, stopCh <-chan struct{}) error {
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
logrus.Info("Starting Snap controller")
klog.Info("Starting Snap controller")
// Wait for the k8s caches to be synced before starting workers
logrus.Info("Waiting for informer caches to sync")
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.snapSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
logrus.Info("Starting Snap workers")
klog.Info("Starting Snap workers")
// Launch worker to process Snap resources
// Threadiness will decide the number of workers you want to launch to process work items from queue
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
logrus.Info("Started Snap workers")
klog.Info("Started Snap workers")
<-stopCh
logrus.Info("Shutting down Snap workers")
klog.Info("Shutting down Snap workers")
return nil
}
@@ -235,7 +234,7 @@ func (c *SnapController) processNextWorkItem() bool {
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
logrus.Infof("Successfully synced '%s'", key)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)

View file

@@ -19,7 +19,6 @@ package snapshot
import (
"sync"
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"time"
@@ -30,6 +29,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
)
var (
@@ -94,7 +94,7 @@ func Start(controllerMtx *sync.RWMutex, stopCh <-chan struct{}) error {
func getClusterConfig(kubeconfig string) (*rest.Config, error) {
cfg, err := rest.InClusterConfig()
if err != nil {
logrus.Errorf("Failed to get k8s Incluster config. %+v", err)
klog.Errorf("Failed to get k8s Incluster config. %+v", err)
if kubeconfig == "" {
return nil, errors.Wrap(err, "kubeconfig is empty")
}