/*
Copyright © 2020 The OpenEBS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package zfs

import (
	"fmt"
	"os"
	"os/exec"

	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
	mnt "github.com/openebs/zfs-localpv/pkg/mount"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
)

// MountInfo contains the volume related info
// for all types of volumes in ZFSVolume
type MountInfo struct {
	// FSType of a volume will specify the
	// filesystem type of the PV - ext4(default) or xfs
	FSType string `json:"fsType"`

	// AccessModes of a volume will hold the
	// access modes of the volume
	AccessModes []string `json:"accessModes"`

	// MountPath of the volume will hold the
	// path on which the volume is mounted
	// on that node
	MountPath string `json:"mountPath"`

	// MountOptions specifies the options with
	// which mount needs to be attempted
	MountOptions []string `json:"mountOptions"`
}
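
// For illustration only (not part of the original source): a minimal sketch of
// how a CSI NodePublishVolume handler might populate a MountInfo before handing
// it to MountFilesystem. All field values here are hypothetical examples.
//
//	info := &MountInfo{
//		FSType:       "ext4",
//		AccessModes:  []string{"ReadWriteOnce"},
//		MountPath:    "/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~csi/<pv-name>/mount",
//		MountOptions: []string{"noatime"},
//	}
//	err := MountFilesystem(vol, info)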

// FormatAndMountZvol formats and mounts the created volume to the desired mount path
func FormatAndMountZvol(devicePath string, mountInfo *MountInfo) error {
	mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()}

	err := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions)
	if err != nil {
		klog.Errorf(
			"zfspv: failed to mount volume %s [%s] to %s, error %v",
			devicePath, mountInfo.FSType, mountInfo.MountPath, err,
		)
		return err
	}

	return nil
}

// UmountVolume unmounts the volume and the corresponding mount path is removed
func UmountVolume(vol *apis.ZFSVolume, targetPath string) error {
	mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()}

	dev, ref, err := mount.GetDeviceNameFromMount(mounter, targetPath)
	if err != nil {
		klog.Errorf(
			"zfspv umount volume: failed to get device from mnt: %s\nError: %v",
			targetPath, err,
		)
		return err
	}

	// device has already been un-mounted, return successful
	if len(dev) == 0 || ref == 0 {
		klog.Warningf(
			"Warning: Unmount skipped because volume %s not mounted: %v",
			vol.Name, targetPath,
		)
		return nil
	}

	if pathExists, pathErr := mount.PathExists(targetPath); pathErr != nil {
		return fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		klog.Warningf(
			"Warning: Unmount skipped because path does not exist: %v",
			targetPath,
		)
		return nil
	}

	if err = mounter.Unmount(targetPath); err != nil {
		klog.Errorf(
			"zfs: failed to unmount %s: path %s err: %v",
			vol.Name, targetPath, err,
		)
		return err
	}

	if err = SetDatasetLegacyMount(vol); err != nil {
		// ignoring the failure as the volume has already
		// been umounted, now the new pod can mount it
		klog.Warningf(
			"zfs: failed to set legacy mountpoint: %s err: %v",
			vol.Name, err,
		)
	}

	if err := os.Remove(targetPath); err != nil {
		klog.Errorf("zfspv: failed to remove mount path vol %s err : %v", vol.Name, err)
	}

	klog.Infof("umount done %s path %v", vol.Name, targetPath)

	return nil
}
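
// Illustrative note (an assumption about the calling context, not code from
// this file): NodeUnpublishVolume is required to be idempotent, and UmountVolume
// supports that by returning nil when the target is already unmounted or the
// path no longer exists, so a CO-driven retry is safe:
//
//	// hypothetical NodeUnpublishVolume handler fragment
//	if err := UmountVolume(vol, targetPath); err != nil {
//		// returning the error lets the CO call NodeUnpublishVolume again;
//		// a repeated call after partial cleanup still succeeds
//		return nil, err
//	}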

// verifyMountRequest validates the mount request and returns whether the
// volume is already mounted at the requested path
func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) (bool, error) {
	if len(mountpath) == 0 {
		return false, status.Error(codes.InvalidArgument, "verifyMount: mount path missing in request")
	}

	if len(vol.Spec.OwnerNodeID) > 0 &&
		vol.Spec.OwnerNodeID != NodeID {
		return false, status.Error(codes.Internal, "verifyMount: volume is owned by different node")
	}

	if vol.Finalizers == nil {
		return false, status.Error(codes.Internal, "verifyMount: volume is not ready to be mounted")
	}

	devicePath, err := GetVolumeDevPath(vol)
	if err != nil {
		klog.Errorf("can not get device for volume:%s dev %s err: %v",
			vol.Name, devicePath, err.Error())
		return false, status.Errorf(codes.Internal, "verifyMount: GetVolumePath failed %s", err.Error())
	}

	// if it is not a shared volume, then make sure it is not mounted to more than one path
	if vol.Spec.Shared != "yes" {
		/*
		 * This check is the famous *Wall Of North*
		 * It will not let the volume be mounted
		 * at more than one place. The volume should
		 * be unmounted before proceeding to the mount
		 * operation.
		 */
		currentMounts, err := mnt.GetMounts(devicePath)
		if err != nil {
			klog.Errorf("can not get mounts for volume:%s dev %s err: %v",
				vol.Name, devicePath, err.Error())
			return false, status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error())
		} else if len(currentMounts) >= 1 {
			if currentMounts[0] == mountpath {
				return true, nil
			}
			klog.Errorf(
				"can not mount, volume:%s already mounted dev %s mounts: %v",
				vol.Name, devicePath, currentMounts,
			)
			return false, status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts)
		}
	}
	return false, nil
}
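
// A sketch of the three possible outcomes of verifyMountRequest for a
// non-shared volume; the mount path below is a hypothetical example:
//
//	mounted, err := verifyMountRequest(vol, "/var/lib/kubelet/<target-path>")
//	switch {
//	case err != nil:
//		// invalid request, volume owned by another node, volume not ready,
//		// or the device is already mounted at a different path
//	case mounted:
//		// already mounted at this exact path, nothing more to do
//	default:
//		// not mounted yet, safe to proceed with the mount
//	}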

// MountZvol mounts the zvol to the specified path
func MountZvol(vol *apis.ZFSVolume, mount *MountInfo) error {
	volume := vol.Spec.PoolName + "/" + vol.Name
	mounted, err := verifyMountRequest(vol, mount.MountPath)
	if err != nil {
		return err
	}

	if mounted {
		klog.Infof("zvol : already mounted %s => %s", volume, mount.MountPath)
		return nil
	}

	devicePath := ZFSDevPath + volume

	err = FormatAndMountZvol(devicePath, mount)
	if err != nil {
		return status.Error(codes.Internal, "not able to format and mount the zvol")
	}

	klog.Infof("zvol %v mounted %v fs %v", volume, mount.MountPath, mount.FSType)

	return err
}

// MountDataset mounts the zfs dataset to the specified path
func MountDataset(vol *apis.ZFSVolume, mount *MountInfo) error {
	volume := vol.Spec.PoolName + "/" + vol.Name
	mounted, err := verifyMountRequest(vol, mount.MountPath)
	if err != nil {
		return err
	}

	if mounted {
		klog.Infof("dataset : already mounted %s => %s", volume, mount.MountPath)
		return nil
	}

	val, err := GetVolumeProperty(vol, "mountpoint")
	if err != nil {
		return err
	}

	if val == "legacy" {
		var MountVolArg []string
		var mntopt string

		for _, option := range mount.MountOptions {
			mntopt += option + ","
		}

		MountVolArg = append(MountVolArg, "-o", mntopt, "-t", "zfs", volume, mount.MountPath)
		cmd := exec.Command("mount", MountVolArg...)
		out, err := cmd.CombinedOutput()
		if err != nil {
			klog.Errorf("zfs: could not mount the dataset %v cmd %v error: %s",
				volume, MountVolArg, string(out))
			return status.Errorf(codes.Internal, "dataset: mount failed err : %s", string(out))
		}
		klog.Infof("dataset : legacy mounted %s => %s", volume, mount.MountPath)
	} else {
		/*
		 * We might have created volumes and then upgraded the node agent before
		 * getting the mount request for that volume. In this case the volume will
		 * not have been created with mountpoint=legacy, so handle the mount the old way.
		 */
		err = MountZFSDataset(vol, mount.MountPath)
		if err != nil {
			return status.Errorf(codes.Internal, "zfs: mount failed err : %s", err.Error())
		}
		klog.Infof("dataset : mounted %s => %s", volume, mount.MountPath)
	}

	return nil
}
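
// For reference, with an assumed pool "zfspv-pool", a volume named "pvc-xxxx"
// and a single "noatime" mount option, the legacy branch above ends up
// executing something like:
//
//	mount -o noatime, -t zfs zfspv-pool/pvc-xxxx <target-path>
//
// (the trailing comma after the options comes from the way the option string
// is joined above)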

// MountFilesystem mounts the volume to the specified path, dispatching
// to MountDataset or MountZvol based on the volume type
func MountFilesystem(vol *apis.ZFSVolume, mount *MountInfo) error {
	if err := os.MkdirAll(mount.MountPath, 0000); err != nil {
		return status.Errorf(codes.Internal, "Could not create dir {%q}, err: %v", mount.MountPath, err)
	}

	// in case the dir already exists, the above call returns nil,
	// so the permission needs to be updated explicitly
	if err := os.Chmod(mount.MountPath, 0000); err != nil {
		return status.Errorf(codes.Internal, "Could not change mode of dir {%q}, err: %v", mount.MountPath, err)
	}

	switch vol.Spec.VolumeType {
	case VolTypeDataset:
		return MountDataset(vol, mount)
	default:
		return MountZvol(vol, mount)
	}
}

// MountBlock bind mounts the zvol device at the specified target path
// so that it can be consumed as a raw block device
func MountBlock(vol *apis.ZFSVolume, mountinfo *MountInfo) error {
	target := mountinfo.MountPath
	devicePath := ZFSDevPath + vol.Spec.PoolName + "/" + vol.Name
	mountopt := []string{"bind"}

	mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()}

	// create the mount point as a file since bind mounting a device node requires it to be a file
	err := mounter.MakeFile(target)
	if err != nil {
		return status.Errorf(codes.Internal, "Could not create target file %q: %v", target, err)
	}

	// do the bind mount of the zvol device at the target path
	if err := mounter.Mount(devicePath, target, "", mountopt); err != nil {
		if removeErr := os.Remove(target); removeErr != nil {
			return status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr)
		}
		return status.Errorf(codes.Internal, "mount failed at %v err : %v", target, err)
	}

	klog.Infof("NodePublishVolume mounted block device %s at %s", devicePath, target)

	return nil
}
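
// Illustrative usage sketch (an assumption about the caller, not code from this
// file): for a PVC with volumeMode: Block, a NodePublishVolume handler would
// call MountBlock so the workload sees the raw zvol at the target path instead
// of a mounted filesystem. The target path below is hypothetical.
//
//	info := &MountInfo{MountPath: "/var/lib/kubelet/<block-publish-path>"}
//	err := MountBlock(vol, info)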