feat(zfspv): move to klog (#166)

Signed-off-by: vaniisgh <vanisingh@live.co.uk>

parent 54f2b0b9fd
commit d0d1664d43

53 changed files with 124 additions and 2991 deletions
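The change is mechanical: each logrus call becomes the equivalent printf-style klog call, the "github.com/Sirupsen/logrus" import is dropped, and "k8s.io/klog" is added, bringing the driver's logging in line with other Kubernetes components. Below is a minimal sketch of the pattern; the main() wiring and the volName value are illustrative only and are not part of this diff (klog.InitFlags, flag.Parse, and klog.Flush are standard klog setup, but this commit does not show where the driver performs it).

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers its verbosity/output flags (-v, -logtostderr, ...)
	// on a flag set; passing nil uses flag.CommandLine.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush() // klog buffers log I/O; flush before exit

	// A call site swaps one printf-style logger for another, e.g.
	//   logrus.Infof("provisioned volume %s", volName)
	// becomes:
	volName := "pvc-example" // hypothetical value, for illustration only
	klog.Infof("provisioned volume %s", volName)
}

Unlike logrus, klog is configured through flags rather than a programmable formatter, which is why the migration below touches only imports and call sites.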
@@ -17,7 +17,8 @@ limitations under the License.
 package driver
 
 import (
-	"github.com/Sirupsen/logrus"
+	"sync"
+
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
 	"github.com/openebs/zfs-localpv/pkg/builder/volbuilder"

@@ -30,8 +31,8 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
-	"sync"
 )
 
 // node is the server implementation

@@ -52,7 +53,7 @@ func NewNode(d *CSIDriver) csi.NodeServer {
 	go func() {
 		err := volume.Start(&ControllerMutex, stopCh)
 		if err != nil {
-			logrus.Fatalf("Failed to start ZFS volume management controller: %s", err.Error())
+			klog.Fatalf("Failed to start ZFS volume management controller: %s", err.Error())
 		}
 	}()
 

@@ -60,7 +61,7 @@ func NewNode(d *CSIDriver) csi.NodeServer {
 	go func() {
 		err := snapshot.Start(&ControllerMutex, stopCh)
 		if err != nil {
-			logrus.Fatalf("Failed to start ZFS volume snapshot management controller: %s", err.Error())
+			klog.Fatalf("Failed to start ZFS volume snapshot management controller: %s", err.Error())
 		}
 	}()
 

@@ -166,7 +167,7 @@ func (ns *node) NodeUnpublishVolume(
 			"unable to umount the volume %s err : %s",
 			volumeID, err.Error())
 	}
-	logrus.Infof("hostpath: volume %s path: %s has been unmounted.",
+	klog.Infof("hostpath: volume %s path: %s has been unmounted.",
 		volumeID, targetPath)
 
 	return &csi.NodeUnpublishVolumeResponse{}, nil

@@ -182,7 +183,7 @@ func (ns *node) NodeGetInfo(
 
 	node, err := k8sapi.GetNode(ns.driver.config.NodeID)
 	if err != nil {
-		logrus.Errorf("failed to get the node %s", ns.driver.config.NodeID)
+		klog.Errorf("failed to get the node %s", ns.driver.config.NodeID)
 		return nil, err
 	}
 	/*
@@ -22,11 +22,11 @@ import (
 	"strings"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"k8s.io/klog"
 
 	"github.com/openebs/zfs-localpv/pkg/builder/snapbuilder"
 	"github.com/openebs/zfs-localpv/pkg/builder/volbuilder"

@@ -107,7 +107,7 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) {
 		return "", status.Error(codes.Internal, "scheduler failed")
 	}
 
-	logrus.Infof("scheduled the volume %s/%s on node %s", pool, volName, selected)
+	klog.Infof("scheduled the volume %s/%s on node %s", pool, volName, selected)
 
 	volObj, err := volbuilder.NewBuilder().
 		WithName(volName).

@@ -245,7 +245,7 @@ func (cs *controller) DeleteVolume(
 	ctx context.Context,
 	req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
 
-	logrus.Infof("received request to delete volume {%s}", req.VolumeId)
+	klog.Infof("received request to delete volume {%s}", req.VolumeId)
 
 	var (
 		err error

@@ -377,7 +377,7 @@ func (cs *controller) CreateSnapshot(
 	req *csi.CreateSnapshotRequest,
 ) (*csi.CreateSnapshotResponse, error) {
 
-	logrus.Infof("CreateSnapshot volume %s@%s", req.SourceVolumeId, req.Name)
+	klog.Infof("CreateSnapshot volume %s@%s", req.SourceVolumeId, req.Name)
 
 	snapTimeStamp := time.Now().Unix()
 	state, err := zfs.GetZFSSnapshotStatus(req.Name)

@@ -446,7 +446,7 @@ func (cs *controller) DeleteSnapshot(
 	req *csi.DeleteSnapshotRequest,
 ) (*csi.DeleteSnapshotResponse, error) {
 
-	logrus.Infof("DeleteSnapshot request for %s", req.SnapshotId)
+	klog.Infof("DeleteSnapshot request for %s", req.SnapshotId)
 
 	// snapshodID is formed as <volname>@<snapname>
 	// parsing them here
@@ -17,9 +17,9 @@ limitations under the License.
 package driver
 
 import (
-	"github.com/Sirupsen/logrus"
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	config "github.com/openebs/zfs-localpv/pkg/config"
+	"k8s.io/klog"
 )
 
 // volume can only be published once as

@@ -52,7 +52,7 @@ func GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode {
 
 	var vcams []*csi.VolumeCapability_AccessMode
 	for _, vcam := range supported {
-		logrus.Infof("enabling volume access mode: %s", vcam.String())
+		klog.Infof("enabling volume access mode: %s", vcam.String())
 		vcams = append(vcams, newVolumeCapabilityAccessMode(vcam))
 	}
 	return vcams
@@ -17,13 +17,13 @@ limitations under the License.
 package driver
 
 import (
-	"github.com/Sirupsen/logrus"
 	"math"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/openebs/zfs-localpv/pkg/builder/volbuilder"
 	k8sapi "github.com/openebs/zfs-localpv/pkg/client/k8s/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 
 	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 )

@@ -107,16 +107,16 @@ func scheduler(topo *csi.TopologyRequirement, schld string, pool string) string
 
 	if topo == nil ||
 		len(topo.Preferred) == 0 {
-		logrus.Errorf("scheduler: topology information not provided")
+		klog.Errorf("scheduler: topology information not provided")
 		return ""
 	}
 
 	nodelist, err := GetNodeList(topo)
 	if err != nil {
-		logrus.Errorf("scheduler: can not get the nodelist err : %v", err.Error())
+		klog.Errorf("scheduler: can not get the nodelist err : %v", err.Error())
 		return ""
 	} else if len(nodelist) == 0 {
-		logrus.Errorf("scheduler: nodelist is empty")
+		klog.Errorf("scheduler: nodelist is empty")
 		return ""
 	}
 
@@ -17,8 +17,6 @@ limitations under the License.
 package snapshot
 
 import (
-	"github.com/Sirupsen/logrus"
-
 	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
 	openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme"
 	informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions"

@@ -30,6 +28,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
 )
 
 const controllerAgentName = "zfssnap-controller"

@@ -105,9 +104,9 @@ func (cb *SnapControllerBuilder) withWorkqueueRateLimiting() *SnapControllerBuil
 
 // withRecorder adds recorder to controller object.
 func (cb *SnapControllerBuilder) withRecorder(ks kubernetes.Interface) *SnapControllerBuilder {
-	logrus.Infof("Creating event broadcaster")
+	klog.Infof("Creating event broadcaster")
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(logrus.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")})
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
 	cb.SnapController.recorder = recorder
@@ -20,14 +20,13 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/Sirupsen/logrus"
-
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
 	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog"
 )
 
 // isDeletionCandidate checks if a zfs snapshot is a deletion candidate.

@@ -106,7 +105,7 @@ func (c *SnapController) addSnap(obj interface{}) {
 	if zfs.NodeID != snap.Spec.OwnerNodeID {
 		return
 	}
-	logrus.Infof("Got add event for Snap %s/%s", snap.Spec.PoolName, snap.Name)
+	klog.Infof("Got add event for Snap %s/%s", snap.Spec.PoolName, snap.Name)
 	c.enqueueSnap(snap)
 }
 

@@ -125,7 +124,7 @@ func (c *SnapController) updateSnap(oldObj, newObj interface{}) {
 
 	// update on Snapshot CR does not make sense unless it is a deletion candidate
 	if c.isDeletionCandidate(newSnap) {
-		logrus.Infof("Got update event for Snap %s/%s@%s", newSnap.Spec.PoolName, newSnap.Labels[zfs.ZFSVolKey], newSnap.Name)
+		klog.Infof("Got update event for Snap %s/%s@%s", newSnap.Spec.PoolName, newSnap.Labels[zfs.ZFSVolKey], newSnap.Name)
 		c.enqueueSnap(newSnap)
 	}
 }

@@ -150,7 +149,7 @@ func (c *SnapController) deleteSnap(obj interface{}) {
 		return
 	}
 
-	logrus.Infof("Got delete event for Snap %s/%s@%s", snap.Spec.PoolName, snap.Labels[zfs.ZFSVolKey], snap.Name)
+	klog.Infof("Got delete event for Snap %s/%s@%s", snap.Spec.PoolName, snap.Labels[zfs.ZFSVolKey], snap.Name)
 	c.enqueueSnap(snap)
 }
 

@@ -163,23 +162,23 @@ func (c *SnapController) Run(threadiness int, stopCh <-chan struct{}) error {
 	defer c.workqueue.ShutDown()
 
 	// Start the informer factories to begin populating the informer caches
-	logrus.Info("Starting Snap controller")
+	klog.Info("Starting Snap controller")
 
 	// Wait for the k8s caches to be synced before starting workers
-	logrus.Info("Waiting for informer caches to sync")
+	klog.Info("Waiting for informer caches to sync")
 	if ok := cache.WaitForCacheSync(stopCh, c.snapSynced); !ok {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}
-	logrus.Info("Starting Snap workers")
+	klog.Info("Starting Snap workers")
 	// Launch worker to process Snap resources
 	// Threadiness will decide the number of workers you want to launch to process work items from queue
 	for i := 0; i < threadiness; i++ {
 		go wait.Until(c.runWorker, time.Second, stopCh)
 	}
 
-	logrus.Info("Started Snap workers")
+	klog.Info("Started Snap workers")
 	<-stopCh
-	logrus.Info("Shutting down Snap workers")
+	klog.Info("Shutting down Snap workers")
 
 	return nil
 }

@@ -235,7 +234,7 @@ func (c *SnapController) processNextWorkItem() bool {
 		// Finally, if no error occurs we Forget this item so it does not
 		// get queued again until another change happens.
 		c.workqueue.Forget(obj)
-		logrus.Infof("Successfully synced '%s'", key)
+		klog.Infof("Successfully synced '%s'", key)
 		return nil
 	}(obj)
 
@@ -19,7 +19,6 @@ package snapshot
 import (
 	"sync"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/pkg/errors"
 
 	"time"

@@ -30,6 +29,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog"
 )
 
 var (

@@ -94,7 +94,7 @@ func Start(controllerMtx *sync.RWMutex, stopCh <-chan struct{}) error {
 func getClusterConfig(kubeconfig string) (*rest.Config, error) {
 	cfg, err := rest.InClusterConfig()
 	if err != nil {
-		logrus.Errorf("Failed to get k8s Incluster config. %+v", err)
+		klog.Errorf("Failed to get k8s Incluster config. %+v", err)
 		if kubeconfig == "" {
 			return nil, errors.Wrap(err, "kubeconfig is empty")
 		}
@@ -17,8 +17,6 @@ limitations under the License.
 package volume
 
 import (
-	"github.com/Sirupsen/logrus"
-
 	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
 	openebsScheme "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/scheme"
 	informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions"

@@ -30,6 +28,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
 )
 
 const controllerAgentName = "zfsvolume-controller"

@@ -105,9 +104,9 @@ func (cb *ZVControllerBuilder) withWorkqueueRateLimiting() *ZVControllerBuilder
 
 // withRecorder adds recorder to controller object.
 func (cb *ZVControllerBuilder) withRecorder(ks kubernetes.Interface) *ZVControllerBuilder {
-	logrus.Infof("Creating event broadcaster")
+	klog.Infof("Creating event broadcaster")
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(logrus.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")})
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
 	cb.ZVController.recorder = recorder
@@ -19,7 +19,6 @@ package volume
 import (
 	"sync"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/pkg/errors"
 
 	"time"

@@ -30,6 +29,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog"
 )
 
 var (

@@ -93,7 +93,7 @@ func Start(controllerMtx *sync.RWMutex, stopCh <-chan struct{}) error {
 func getClusterConfig(kubeconfig string) (*rest.Config, error) {
 	cfg, err := rest.InClusterConfig()
 	if err != nil {
-		logrus.Errorf("Failed to get k8s Incluster config. %+v", err)
+		klog.Errorf("Failed to get k8s Incluster config. %+v", err)
 		if kubeconfig == "" {
 			return nil, errors.Wrap(err, "kubeconfig is empty")
 		}
@@ -20,14 +20,13 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/Sirupsen/logrus"
-
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
 	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog"
 )
 
 // isDeletionCandidate checks if a zfs volume is a deletion candidate.

@@ -114,7 +113,7 @@ func (c *ZVController) addZV(obj interface{}) {
 	if zfs.NodeID != zv.Spec.OwnerNodeID {
 		return
 	}
-	logrus.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
+	klog.Infof("Got add event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
 	c.enqueueZV(zv)
 }
 

@@ -134,7 +133,7 @@ func (c *ZVController) updateZV(oldObj, newObj interface{}) {
 	oldZV, _ := oldObj.(*apis.ZFSVolume)
 	if zfs.PropertyChanged(oldZV, newZV) ||
 		c.isDeletionCandidate(newZV) {
-		logrus.Infof("Got update event for ZV %s/%s", newZV.Spec.PoolName, newZV.Name)
+		klog.Infof("Got update event for ZV %s/%s", newZV.Spec.PoolName, newZV.Name)
 		c.enqueueZV(newZV)
 	}
 }

@@ -159,7 +158,7 @@ func (c *ZVController) deleteZV(obj interface{}) {
 		return
 	}
 
-	logrus.Infof("Got delete event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
+	klog.Infof("Got delete event for ZV %s/%s", zv.Spec.PoolName, zv.Name)
 	c.enqueueZV(zv)
 }
 

@@ -172,23 +171,23 @@ func (c *ZVController) Run(threadiness int, stopCh <-chan struct{}) error {
 	defer c.workqueue.ShutDown()
 
 	// Start the informer factories to begin populating the informer caches
-	logrus.Info("Starting ZV controller")
+	klog.Info("Starting ZV controller")
 
 	// Wait for the k8s caches to be synced before starting workers
-	logrus.Info("Waiting for informer caches to sync")
+	klog.Info("Waiting for informer caches to sync")
 	if ok := cache.WaitForCacheSync(stopCh, c.zvSynced); !ok {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}
-	logrus.Info("Starting ZV workers")
+	klog.Info("Starting ZV workers")
 	// Launch worker to process ZV resources
 	// Threadiness will decide the number of workers you want to launch to process work items from queue
 	for i := 0; i < threadiness; i++ {
 		go wait.Until(c.runWorker, time.Second, stopCh)
 	}
 
-	logrus.Info("Started ZV workers")
+	klog.Info("Started ZV workers")
 	<-stopCh
-	logrus.Info("Shutting down ZV workers")
+	klog.Info("Shutting down ZV workers")
 
 	return nil
 }

@@ -244,7 +243,7 @@ func (c *ZVController) processNextWorkItem() bool {
 		// Finally, if no error occurs we Forget this item so it does not
 		// get queued again until another change happens.
 		c.workqueue.Forget(obj)
-		logrus.Infof("Successfully synced '%s'", key)
+		klog.Infof("Successfully synced '%s'", key)
 		return nil
 	}(obj)
 
@@ -23,7 +23,7 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
+	"k8s.io/klog"
 )
 
 var (

@@ -64,7 +64,7 @@ func Get() string {
 	path := filepath.Join(os.Getenv("GOPATH") + versionFile)
 	vBytes, err := ioutil.ReadFile(path)
 	if err != nil {
-		logrus.Errorf("failed to get version: %s", err.Error())
+		klog.Errorf("failed to get version: %s", err.Error())
 		return ""
 	}
 

@@ -83,7 +83,7 @@ func GetBuildMeta() string {
 	path := filepath.Join(os.Getenv("GOPATH") + buildMetaFile)
 	vBytes, err := ioutil.ReadFile(path)
 	if err != nil {
-		logrus.Errorf("failed to get build version: %s", err.Error())
+		klog.Errorf("failed to get build version: %s", err.Error())
 		return ""
 	}
 

@@ -101,7 +101,7 @@ func GetGitCommit() string {
 	cmd := exec.Command("git", "rev-parse", "--verify", "HEAD")
 	output, err := cmd.Output()
 	if err != nil {
-		logrus.Errorf("failed to get git commit: %s", err.Error())
+		klog.Errorf("failed to get git commit: %s", err.Error())
 		return ""
 	}
 
@@ -5,10 +5,10 @@ import (
 	"os"
 	"os/exec"
 
-	"github.com/Sirupsen/logrus"
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/util/mount"
 )
 

@@ -18,7 +18,7 @@ func FormatAndMountZvol(devicePath string, mountInfo *apis.MountInfo) error {
 
 	err := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions)
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfspv: failed to mount volume %s [%s] to %s, error %v",
 			devicePath, mountInfo.FSType, mountInfo.MountPath, err,
 		)

@@ -35,7 +35,7 @@ func UmountVolume(vol *apis.ZFSVolume, targetPath string,
 
 	dev, ref, err := mount.GetDeviceNameFromMount(mounter, targetPath)
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfspv umount volume: failed to get device from mnt: %s\nError: %v",
 			targetPath, err,
 		)

@@ -44,7 +44,7 @@ func UmountVolume(vol *apis.ZFSVolume, targetPath string,
 
 	// device has already been un-mounted, return successful
 	if len(dev) == 0 || ref == 0 {
-		logrus.Warningf(
+		klog.Warningf(
 			"Warning: Unmount skipped because volume %s not mounted: %v",
 			vol.Name, targetPath,
 		)

@@ -54,7 +54,7 @@ func UmountVolume(vol *apis.ZFSVolume, targetPath string,
 	if pathExists, pathErr := mount.PathExists(targetPath); pathErr != nil {
 		return fmt.Errorf("Error checking if path exists: %v", pathErr)
 	} else if !pathExists {
-		logrus.Warningf(
+		klog.Warningf(
 			"Warning: Unmount skipped because path does not exist: %v",
 			targetPath,
 		)

@@ -62,7 +62,7 @@ func UmountVolume(vol *apis.ZFSVolume, targetPath string,
 	}
 
 	if err = mounter.Unmount(targetPath); err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfs: failed to unmount %s: path %s err: %v",
 			vol.Name, targetPath, err,
 		)

@@ -72,17 +72,17 @@ func UmountVolume(vol *apis.ZFSVolume, targetPath string,
 	if err = SetDatasetLegacyMount(vol); err != nil {
 		// ignoring the failure as the volume has already
 		// been umounted, now the new pod can mount it
-		logrus.Warningf(
+		klog.Warningf(
 			"zfs: failed to set legacy mountpoint: %s err: %v",
 			vol.Name, err,
 		)
 	}
 
 	if err := os.Remove(targetPath); err != nil {
-		logrus.Errorf("zfspv: failed to remove mount path vol %s err : %v", vol.Name, err)
+		klog.Errorf("zfspv: failed to remove mount path vol %s err : %v", vol.Name, err)
 	}
 
-	logrus.Infof("umount done %s path %v", vol.Name, targetPath)
+	klog.Infof("umount done %s path %v", vol.Name, targetPath)
 
 	return nil
 }

@@ -142,7 +142,7 @@ func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) error {
 
 	devicePath, err := GetVolumeDevPath(vol)
 	if err != nil {
-		logrus.Errorf("can not get device for volume:%s dev %s err: %v",
+		klog.Errorf("can not get device for volume:%s dev %s err: %v",
 			vol.Name, devicePath, err.Error())
 		return status.Errorf(codes.Internal, "verifyMount: GetVolumePath failed %s", err.Error())
 	}

@@ -156,11 +156,11 @@ func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) error {
 	 */
 	currentMounts, err := GetMounts(devicePath)
 	if err != nil {
-		logrus.Errorf("can not get mounts for volume:%s dev %s err: %v",
+		klog.Errorf("can not get mounts for volume:%s dev %s err: %v",
 			vol.Name, devicePath, err.Error())
 		return status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error())
 	} else if len(currentMounts) >= 1 {
-		logrus.Errorf(
+		klog.Errorf(
 			"can not mount, volume:%s already mounted dev %s mounts: %v",
 			vol.Name, devicePath, currentMounts,
 		)

@@ -184,7 +184,7 @@ func MountZvol(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 		return status.Error(codes.Internal, "not able to format and mount the zvol")
 	}
 
-	logrus.Infof("zvol %v mounted %v fs %v", volume, mount.MountPath, mount.FSType)
+	klog.Infof("zvol %v mounted %v fs %v", volume, mount.MountPath, mount.FSType)
 
 	return err
 }

@@ -214,11 +214,11 @@ func MountDataset(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 		cmd := exec.Command("mount", MountVolArg...)
 		out, err := cmd.CombinedOutput()
 		if err != nil {
-			logrus.Errorf("zfs: could not mount the dataset %v cmd %v error: %s",
+			klog.Errorf("zfs: could not mount the dataset %v cmd %v error: %s",
 				volume, MountVolArg, string(out))
 			return status.Errorf(codes.Internal, "dataset: mount failed err : %s", string(out))
 		}
-		logrus.Infof("dataset : legacy mounted %s => %s", volume, mount.MountPath)
+		klog.Infof("dataset : legacy mounted %s => %s", volume, mount.MountPath)
 	} else {
 		/*
 		 * We might have created volumes and then upgraded the node agent before

@@ -229,7 +229,7 @@ func MountDataset(vol *apis.ZFSVolume, mount *apis.MountInfo) error {
 		if err != nil {
 			return status.Errorf(codes.Internal, "zfs: mount failed err : %s", err.Error())
 		}
-		logrus.Infof("dataset : mounted %s => %s", volume, mount.MountPath)
+		klog.Infof("dataset : mounted %s => %s", volume, mount.MountPath)
 	}
 
 	return nil

@@ -267,7 +267,7 @@ func MountBlock(vol *apis.ZFSVolume, mountinfo *apis.MountInfo) error {
 		return status.Errorf(codes.Internal, "mount failed at %v err : %v", target, err)
 	}
 
-	logrus.Infof("NodePublishVolume mounted block device %s at %s", devicePath, target)
+	klog.Infof("NodePublishVolume mounted block device %s at %s", devicePath, target)
 
 	return nil
 }
@@ -17,10 +17,11 @@ limitations under the License.
 package zfs
 
 import (
-	"github.com/Sirupsen/logrus"
-	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
-	"k8s.io/kubernetes/pkg/util/mount"
 	"os/exec"
+
+	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
+	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/util/mount"
 )
 
 // ResizeExtn can be used to run a resize command on the ext2/3/4 filesystem

@@ -29,7 +30,7 @@ func ResizeExtn(devpath string) error {
 	cmd := exec.Command("resize2fs", devpath)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("zfspv: ResizeExtn failed error: %s", string(out))
+		klog.Errorf("zfspv: ResizeExtn failed error: %s", string(out))
 		return err
 	}
 	return nil

@@ -41,7 +42,7 @@ func ResizeXFS(path string) error {
 	cmd := exec.Command("xfs_growfs", path)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("zfspv: ResizeXFS failed error: %s", string(out))
+		klog.Errorf("zfspv: ResizeXFS failed error: %s", string(out))
 		return err
 	}
 	return nil
@@ -15,7 +15,6 @@
 package zfs
 
 import (
-	"github.com/Sirupsen/logrus"
 	"os"
 	"strconv"
 

@@ -24,6 +23,7 @@ import (
 	"github.com/openebs/zfs-localpv/pkg/builder/volbuilder"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 )
 
 const (

@@ -64,11 +64,11 @@ func init() {
 
 	OpenEBSNamespace = os.Getenv(OpenEBSNamespaceKey)
 	if OpenEBSNamespace == "" {
-		logrus.Fatalf("OPENEBS_NAMESPACE environment variable not set")
+		klog.Fatalf("OPENEBS_NAMESPACE environment variable not set")
 	}
 	NodeID = os.Getenv("OPENEBS_NODE_ID")
 	if NodeID == "" && os.Getenv("OPENEBS_NODE_DRIVER") != "" {
-		logrus.Fatalf("NodeID environment variable not set")
+		klog.Fatalf("NodeID environment variable not set")
 	}
 
 	GoogleAnalyticsEnabled = os.Getenv(GoogleAnalyticsKey)

@@ -82,7 +82,7 @@ func ProvisionVolume(
 
 	_, err := volbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Create(vol)
 	if err == nil {
-		logrus.Infof("provisioned volume %s", vol.Name)
+		klog.Infof("provisioned volume %s", vol.Name)
 	}
 
 	return err

@@ -105,7 +105,7 @@ func ProvisionSnapshot(
 
 	_, err := snapbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Create(snap)
 	if err == nil {
-		logrus.Infof("provisioned snapshot %s", snap.Name)
+		klog.Infof("provisioned snapshot %s", snap.Name)
 	}
 
 	return err

@@ -115,7 +115,7 @@ func ProvisionSnapshot(
 func DeleteSnapshot(snapname string) (err error) {
 	err = snapbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Delete(snapname)
 	if err == nil {
-		logrus.Infof("deprovisioned snapshot %s", snapname)
+		klog.Infof("deprovisioned snapshot %s", snapname)
 	}
 
 	return

@@ -132,7 +132,7 @@ func GetVolume(volumeID string) (*apis.ZFSVolume, error) {
 func DeleteVolume(volumeID string) (err error) {
 	err = volbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Delete(volumeID)
 	if err == nil {
-		logrus.Infof("deprovisioned volume %s", volumeID)
+		klog.Infof("deprovisioned volume %s", volumeID)
 	}
 
 	return

@@ -217,7 +217,7 @@ func GetZFSSnapshotStatus(snapID string) (string, error) {
 		WithNamespace(OpenEBSNamespace).Get(snapID, getOptions)
 
 	if err != nil {
-		logrus.Errorf("Get snapshot failed %s err: %s", snap.Name, err.Error())
+		klog.Errorf("Get snapshot failed %s err: %s", snap.Name, err.Error())
 		return "", err
 	}
 

@@ -241,7 +241,7 @@ func UpdateSnapInfo(snap *apis.ZFSSnapshot) error {
 	newSnap.Status.State = ZFSStatusReady
 
 	if err != nil {
-		logrus.Errorf("Update snapshot failed %s err: %s", snap.Name, err.Error())
+		klog.Errorf("Update snapshot failed %s err: %s", snap.Name, err.Error())
 		return err
 	}
 
@@ -20,8 +20,9 @@ import (
 	"os"
 	"os/exec"
 
-	"github.com/Sirupsen/logrus"
 	"strings"
+
+	"k8s.io/klog"
 )
 
 func xfsTempMount(volume string) error {

@@ -32,7 +33,7 @@ func xfsTempMount(volume string) error {
 	tmpdir := "/tmp/" + pvol[1]
 	err := os.Mkdir(tmpdir, 0755)
 	if err != nil {
-		logrus.Errorf("xfs: failed to create tmpdir %s error: %s", tmpdir, err.Error())
+		klog.Errorf("xfs: failed to create tmpdir %s error: %s", tmpdir, err.Error())
 		return err
 	}
 

@@ -40,7 +41,7 @@ func xfsTempMount(volume string) error {
 	cmd := exec.Command("mount", "-o", "nouuid", device, tmpdir)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("xfs: failed to mount volume %s=>%s error: %s", device, tmpdir, string(out))
+		klog.Errorf("xfs: failed to mount volume %s=>%s error: %s", device, tmpdir, string(out))
 		return err
 	}
 

@@ -48,14 +49,14 @@ func xfsTempMount(volume string) error {
 	cmd = exec.Command("umount", tmpdir)
 	out, err = cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("xfs: failed to umount tmpdir %s error: %s", tmpdir, string(out))
+		klog.Errorf("xfs: failed to umount tmpdir %s error: %s", tmpdir, string(out))
 		return err
 	}
 
 	// remove the directory
 	err = os.Remove(tmpdir)
 	if err != nil {
-		logrus.Errorf("xfs: failed to remove tmpdir %s error: %s", tmpdir, err.Error())
+		klog.Errorf("xfs: failed to remove tmpdir %s error: %s", tmpdir, err.Error())
 		return err
 	}
 	return nil

@@ -82,9 +83,9 @@ func xfsGenerateUuid(volume string) error {
 	cmd := exec.Command("xfs_admin", "-U", "generate", device)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("xfs: uuid generate failed %s error: %s", volume, string(out))
+		klog.Errorf("xfs: uuid generate failed %s error: %s", volume, string(out))
 		return err
 	}
-	logrus.Infof("xfs: generated UUID for the cloned volume %s \n %v", volume, string(out))
+	klog.Infof("xfs: generated UUID for the cloned volume %s \n %v", volume, string(out))
 	return nil
 }
@@ -21,8 +21,9 @@ import (
 	"path/filepath"
 
 	"fmt"
-	"github.com/Sirupsen/logrus"
+
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
+	"k8s.io/klog"
 )
 
 // zfs related constants

@@ -328,14 +329,14 @@ func CreateVolume(vol *apis.ZFSVolume) error {
 		out, err := cmd.CombinedOutput()
 
 		if err != nil {
-			logrus.Errorf(
+			klog.Errorf(
 				"zfs: could not create volume %v cmd %v error: %s", volume, args, string(out),
 			)
 			return err
 		}
-		logrus.Infof("created volume %s", volume)
+		klog.Infof("created volume %s", volume)
 	} else if err == nil {
-		logrus.Infof("using existing volume %v", volume)
+		klog.Infof("using existing volume %v", volume)
 	}
 
 	return nil

@@ -353,14 +354,14 @@ func CreateClone(vol *apis.ZFSVolume) error {
 		out, err := cmd.CombinedOutput()
 
 		if err != nil {
-			logrus.Errorf(
+			klog.Errorf(
 				"zfs: could not clone volume %v cmd %v error: %s", volume, args, string(out),
 			)
 			return err
 		}
-		logrus.Infof("created clone %s", volume)
+		klog.Infof("created clone %s", volume)
 	} else if err == nil {
-		logrus.Infof("using existing clone volume %v", volume)
+		klog.Infof("using existing clone volume %v", volume)
 	}
 
 	if vol.Spec.FsType == "xfs" {

@@ -379,7 +380,7 @@ func SetDatasetMountProp(volume string, mountpath string) error {
 	cmd := exec.Command(ZFSVolCmd, ZFSVolArg...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("zfs: could not set mountpoint on dataset %v cmd %v error: %s",
+		klog.Errorf("zfs: could not set mountpoint on dataset %v cmd %v error: %s",
 			volume, ZFSVolArg, string(out))
 		return fmt.Errorf("could not set the mountpoint, %s", string(out))
 	}

@@ -413,7 +414,7 @@ func MountZFSDataset(vol *apis.ZFSVolume, mountpath string) error {
 	cmd := exec.Command(ZFSVolCmd, MountVolArg...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("zfs: could not mount the dataset %v cmd %v error: %s",
+		klog.Errorf("zfs: could not mount the dataset %v cmd %v error: %s",
 			volume, MountVolArg, string(out))
 		return fmt.Errorf("not able to mount, %s", string(out))
 	}

@@ -452,7 +453,7 @@ func GetVolumeProperty(vol *apis.ZFSVolume, prop string) (string, error) {
 	cmd := exec.Command(ZFSVolCmd, ZFSVolArg...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		logrus.Errorf("zfs: could not get %s on dataset %v cmd %v error: %s",
+		klog.Errorf("zfs: could not get %s on dataset %v cmd %v error: %s",
 			prop, volume, ZFSVolArg, string(out))
 		return "", fmt.Errorf("zfs get %s failed, %s", prop, string(out))
 	}

@@ -491,12 +492,12 @@ func SetVolumeProp(vol *apis.ZFSVolume) error {
 	out, err := cmd.CombinedOutput()
 
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfs: could not set property on volume %v cmd %v error: %s", volume, args, string(out),
 		)
 		return err
 	}
-	logrus.Infof("property set on volume %s", volume)
+	klog.Infof("property set on volume %s", volume)
 
 	return err
 }

@@ -506,7 +507,7 @@ func DestroyVolume(vol *apis.ZFSVolume) error {
 	volume := vol.Spec.PoolName + "/" + vol.Name
 
 	if err := getVolume(volume); err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"destroy: volume %v is not present, error: %s", volume, err.Error(),
 		)
 		return nil

@@ -517,12 +518,12 @@ func DestroyVolume(vol *apis.ZFSVolume) error {
 	out, err := cmd.CombinedOutput()
 
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfs: could not destroy volume %v cmd %v error: %s", volume, args, string(out),
 		)
 		return err
 	}
-	logrus.Infof("destroyed volume %s", volume)
+	klog.Infof("destroyed volume %s", volume)
 
 	return nil
 }

@@ -534,7 +535,7 @@ func CreateSnapshot(snap *apis.ZFSSnapshot) error {
 	snapDataset := snap.Spec.PoolName + "/" + volume + "@" + snap.Name
 
 	if err := getVolume(snapDataset); err == nil {
-		logrus.Infof("snapshot already there %s", snapDataset)
+		klog.Infof("snapshot already there %s", snapDataset)
 		// snapshot already there just return
 		return nil
 	}

@@ -544,12 +545,12 @@ func CreateSnapshot(snap *apis.ZFSSnapshot) error {
 	out, err := cmd.CombinedOutput()
 
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfs: could not create snapshot %v@%v cmd %v error: %s", volume, snap.Name, args, string(out),
 		)
 		return err
 	}
-	logrus.Infof("created snapshot %s@%s", volume, snap.Name)
+	klog.Infof("created snapshot %s@%s", volume, snap.Name)
 	return nil
 }
 

@@ -560,7 +561,7 @@ func DestroySnapshot(snap *apis.ZFSSnapshot) error {
 	snapDataset := snap.Spec.PoolName + "/" + volume + "@" + snap.Name
 
 	if err := getVolume(snapDataset); err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"destroy: snapshot %v is not present, error: %s", volume, err.Error(),
 		)
 		return nil

@@ -571,12 +572,12 @@ func DestroySnapshot(snap *apis.ZFSSnapshot) error {
 	out, err := cmd.CombinedOutput()
 
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfs: could not destroy snapshot %v@%v cmd %v error: %s", volume, snap.Name, args, string(out),
 		)
 		return err
 	}
-	logrus.Infof("deleted snapshot %s@%s", volume, snap.Name)
+	klog.Infof("deleted snapshot %s@%s", volume, snap.Name)
 	return nil
 }
 

@@ -607,7 +608,7 @@ func ResizeZFSVolume(vol *apis.ZFSVolume, mountpath string) error {
 	out, err := cmd.CombinedOutput()
 
 	if err != nil {
-		logrus.Errorf(
+		klog.Errorf(
 			"zfs: could not resize the volume %v cmd %v error: %s", volume, args, string(out),
 		)
 		return err