diff --git a/.travis.yml b/.travis.yml index 793bd12..be368fd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -50,7 +50,8 @@ install: before_script: - "./buildscripts/travis-build.sh" script: - - sudo -E env "PATH=$PATH" make ci + - sudo -E env "PATH=$PATH" make ci || travis_terminate 1 + - sudo -E env "PATH=$PATH" make sanity || travis_terminate 1 after_success: - make deploy-images - bash <(curl -s https://codecov.io/bash) diff --git a/Makefile b/Makefile index 21ae98c..e3461d1 100644 --- a/Makefile +++ b/Makefile @@ -235,6 +235,12 @@ zfs-driver-image: zfs-driver ci: @echo "--> Running ci test"; $(PWD)/ci/ci-test.sh + +.PHONY: sanity +sanity: + @echo "--> Running CSI Sanity test"; + $(PWD)/ci/sanity.sh + # Push images deploy-images: @DIMAGE="${IMAGE_ORG}/zfs-driver" ./buildscripts/push diff --git a/changelogs/unreleased/232-pawanpraka1 b/changelogs/unreleased/232-pawanpraka1 new file mode 100644 index 0000000..5519913 --- /dev/null +++ b/changelogs/unreleased/232-pawanpraka1 @@ -0,0 +1 @@ +adding CSI Sanity test for ZFS-LocalPV diff --git a/ci/sanity.sh b/ci/sanity.sh new file mode 100755 index 0000000..1a4978c --- /dev/null +++ b/ci/sanity.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# Copyright 2020 The OpenEBS Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -ex +test_repo="kubernetes-csi" + +dumpAgentLogs() { + NR=$1 + AgentPOD=$(kubectl get pods -l app=openebs-zfs-node -o jsonpath='{.items[0].metadata.name}' -n kube-system) + kubectl describe po "$AgentPOD" -n kube-system + printf "\n\n" + kubectl logs --tail="${NR}" "$AgentPOD" -n kube-system -c openebs-zfs-plugin + printf "\n\n" +} + +dumpControllerLogs() { + NR=$1 + ControllerPOD=$(kubectl get pods -l app=openebs-zfs-controller -o jsonpath='{.items[0].metadata.name}' -n kube-system) + kubectl describe po "$ControllerPOD" -n kube-system + printf "\n\n" + kubectl logs --tail="${NR}" "$ControllerPOD" -n kube-system -c openebs-zfs-plugin + printf "\n\n" +} + +function dumpAllLogs() { + echo "========================= Dump All logs ========================" + dumpControllerLogs 1000 + dumpAgentLogs 1000 +} + +function initializeCSISanitySuite() { + echo "=============== Initialize CSI Sanity test suite ===============" + cat << EOT >> /tmp/parameters.json +{ + "node": "$HOSTNAME", + "poolname": "zfspv-pool", + "wait": "yes", + "thinprovision": "yes" +} +EOT + + sudo rm -rf /tmp/csi.sock + CSI_TEST_REPO="https://github.com/$test_repo/csi-test.git" + CSI_REPO_PATH="$GOPATH/src/github.com/$test_repo/csi-test" + if [ ! 
-d "$CSI_REPO_PATH" ] ; then + git clone -b "v4.0.1" "$CSI_TEST_REPO" "$CSI_REPO_PATH" + else + cd "$CSI_REPO_PATH" + git pull "$CSI_REPO_PATH" + fi + + cd "$CSI_REPO_PATH/cmd/csi-sanity" + make clean + make + + UUID=$(kubectl get pod -n kube-system openebs-zfs-controller-0 -o 'jsonpath={.metadata.uid}') + SOCK_PATH=/var/lib/kubelet/pods/"$UUID"/volumes/kubernetes.io~empty-dir/socket-dir/csi.sock + + sudo chmod -R 777 /var/lib/kubelet + sudo ln -s "$SOCK_PATH" /tmp/csi.sock + sudo chmod -R 777 /tmp/csi.sock +} + +function startTestSuite() { + echo "================== Start csi-sanity test suite =================" + ./csi-sanity --ginkgo.v --csi.controllerendpoint=///tmp/csi.sock --csi.endpoint=/var/lib/kubelet/plugins/zfs-localpv/csi.sock --csi.testvolumeparameters=/tmp/parameters.json + if [ $? -ne 0 ]; + then + dumpAllLogs + exit 1 + fi + exit 0 +} + +initializeCSISanitySuite + +# do not exit in case of error, let us print the logs +set +e + +startTestSuite diff --git a/pkg/driver/agent.go b/pkg/driver/agent.go index e9524c7..5209ebb 100644 --- a/pkg/driver/agent.go +++ b/pkg/driver/agent.go @@ -17,6 +17,7 @@ limitations under the License. package driver import ( + "strings" "sync" "github.com/container-storage-interface/spec/lib/go/csi" @@ -103,10 +104,12 @@ func GetVolAndMountInfo( mountinfo.MountOptions = append(mountinfo.MountOptions, "ro") } + volName := strings.ToLower(req.GetVolumeId()) + getOptions := metav1.GetOptions{} vol, err := volbuilder.NewKubeclient(). WithNamespace(zfs.OpenEBSNamespace). 
- Get(req.GetVolumeId(), getOptions) + Get(volName, getOptions) if err != nil { return nil, nil, err @@ -277,7 +280,7 @@ func (ns *node) NodeStageVolume( req *csi.NodeStageVolumeRequest, ) (*csi.NodeStageVolumeResponse, error) { - return &csi.NodeStageVolumeResponse{}, nil + return nil, status.Error(codes.Unimplemented, "") } // NodeUnstageVolume unmounts the volume from @@ -289,7 +292,7 @@ func (ns *node) NodeUnstageVolume( req *csi.NodeUnstageVolumeRequest, ) (*csi.NodeUnstageVolumeResponse, error) { - return &csi.NodeUnstageVolumeResponse{}, nil + return nil, status.Error(codes.Unimplemented, "") } // TODO @@ -309,11 +312,19 @@ func (ns *node) NodeExpandVolume( ) (*csi.NodeExpandVolumeResponse, error) { volumeID := req.GetVolumeId() + if req.GetVolumePath() == "" || volumeID == "" { + return nil, status.Errorf( + codes.InvalidArgument, + "path not provided for NodeExpandVolume Request %s", + volumeID, + ) + } + vol, err := zfs.GetZFSVolume(volumeID) if err != nil { return nil, status.Errorf( - codes.Internal, + codes.NotFound, "failed to handle NodeExpandVolume Request for %s, {%s}", req.VolumeId, err.Error(), @@ -351,7 +362,7 @@ func (ns *node) NodeGetVolumeStats( } if mount.IsMountPath(path) == false { - return nil, status.Error(codes.InvalidArgument, "path is not a mount path") + return nil, status.Error(codes.NotFound, "path is not a mount path") } var sfs unix.Statfs_t diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 404ba1f..8400a82 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -26,6 +26,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + k8serror "k8s.io/apimachinery/pkg/api/errors" "k8s.io/klog" "github.com/openebs/zfs-localpv/pkg/builder/snapbuilder" @@ -100,9 +101,41 @@ func getRoundedCapacity(size int64) int64 { return ((size + Mi - 1) / Mi) * Mi } +func waitForReadyVolume(volname string) error { + for true { + vol, err := zfs.GetZFSVolume(volname) + 
if err != nil { + return status.Errorf(codes.Internal, + "zfs: wait failed, not able to get the volume %s %s", volname, err.Error()) + } + + switch vol.Status.State { + case zfs.ZFSStatusReady: + return nil + } + time.Sleep(time.Second) + } + return nil +} + +func waitForVolDestroy(volname string) error { + for true { + _, err := zfs.GetZFSVolume(volname) + if err != nil { + if k8serror.IsNotFound(err) { + return nil + } + return status.Errorf(codes.Internal, + "zfs: destroy wait failed, not able to get the volume %s %s", volname, err.Error()) + } + time.Sleep(time.Second) + } + return nil +} + // CreateZFSVolume create new zfs volume from csi volume request func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) { - volName := req.GetName() + volName := strings.ToLower(req.GetName()) size := getRoundedCapacity(req.GetCapacityRange().RequiredBytes) // parameter keys may be mistyped from the CRD specification when declaring @@ -127,6 +160,24 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) { vtype := zfs.GetVolumeType(fstype) + capacity := strconv.FormatInt(int64(size), 10) + + if vol, err := zfs.GetZFSVolume(volName); err == nil { + if vol.DeletionTimestamp != nil { + if _, ok := parameters["wait"]; ok { + if err := waitForVolDestroy(volName); err != nil { + return "", err + } + } + } else { + if vol.Spec.Capacity != capacity { + return "", status.Errorf(codes.AlreadyExists, + "volume %s already present", volName) + } + return vol.Spec.OwnerNodeID, nil + } + } + nmap, err := getNodeMap(schld, pool) if err != nil { return "", status.Errorf(codes.Internal, "get node map failed : %s", err.Error()) @@ -136,14 +187,18 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) { selected := schd.Scheduler(req, nmap) if len(selected) == 0 { - return "", status.Error(codes.Internal, "scheduler failed") + // (hack): CSI Sanity test does not pass topology information + selected = parameters["node"] + if len(selected) == 0 { + return 
"", status.Error(codes.Internal, "scheduler failed, not able to select a node to create the PV") + } } klog.Infof("scheduled the volume %s/%s on node %s", pool, volName, selected) volObj, err := volbuilder.NewBuilder(). WithName(volName). - WithCapacity(strconv.FormatInt(int64(size), 10)). + WithCapacity(capacity). WithRecordSize(rs). WithVolBlockSize(bs). WithPoolName(pool). @@ -169,12 +224,18 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) { "not able to provision the volume %s", err.Error()) } + if _, ok := parameters["wait"]; ok { + if err := waitForReadyVolume(volName); err != nil { + return "", err + } + } + return selected, nil } // CreateVolClone creates the clone from a volume func CreateVolClone(req *csi.CreateVolumeRequest, srcVol string) (string, error) { - volName := req.GetName() + volName := strings.ToLower(req.GetName()) parameters := req.GetParameters() // lower case keys, cf CreateZFSVolume() pool := helpers.GetInsensitiveParameter(¶meters, "poolname") @@ -222,8 +283,7 @@ func CreateVolClone(req *csi.CreateVolumeRequest, srcVol string) (string, error) // CreateSnapClone creates the clone from a snapshot func CreateSnapClone(req *csi.CreateVolumeRequest, snapshot string) (string, error) { - - volName := req.GetName() + volName := strings.ToLower(req.GetName()) parameters := req.GetParameters() // lower case keys, cf CreateZFSVolume() pool := helpers.GetInsensitiveParameter(¶meters, "poolname") @@ -233,7 +293,7 @@ func CreateSnapClone(req *csi.CreateVolumeRequest, snapshot string) (string, err snapshotID := strings.Split(snapshot, "@") if len(snapshotID) != 2 { return "", status.Errorf( - codes.Internal, + codes.NotFound, "snap name is not valid %s, {%s}", snapshot, "invalid snapshot name", @@ -242,7 +302,7 @@ func CreateSnapClone(req *csi.CreateVolumeRequest, snapshot string) (string, err snap, err := zfs.GetZFSSnapshot(snapshotID[1]) if err != nil { - return "", status.Error(codes.Internal, err.Error()) + return "", 
status.Error(codes.NotFound, err.Error()) } if snap.Spec.PoolName != pool { @@ -263,7 +323,7 @@ func CreateSnapClone(req *csi.CreateVolumeRequest, snapshot string) (string, err Build() volObj.Spec = snap.Spec - volObj.Spec.SnapName = snapshot + volObj.Spec.SnapName = strings.ToLower(snapshot) err = zfs.ProvisionVolume(volObj) if err != nil { @@ -283,18 +343,18 @@ func (cs *controller) CreateVolume( var err error var selected string - volName := req.GetName() - parameters := req.GetParameters() - // lower case keys, cf CreateZFSVolume() - pool := helpers.GetInsensitiveParameter(¶meters, "poolname") - size := getRoundedCapacity(req.GetCapacityRange().RequiredBytes) - contentSource := req.GetVolumeContentSource() - pvcName := helpers.GetInsensitiveParameter(¶meters, "csi.storage.k8s.io/pvc/name") - if err = cs.validateVolumeCreateReq(req); err != nil { return nil, err } + volName := strings.ToLower(req.GetName()) + parameters := req.GetParameters() + // lower case keys, cf CreateZFSVolume() + pool := helpers.GetInsensitiveParameter(¶meters, "poolname") + size := getRoundedCapacity(req.GetCapacityRange().GetRequiredBytes()) + contentSource := req.GetVolumeContentSource() + pvcName := helpers.GetInsensitiveParameter(¶meters, "csi.storage.k8s.io/pvc/name") + if contentSource != nil && contentSource.GetSnapshot() != nil { snapshotID := contentSource.GetSnapshot().GetSnapshotId() @@ -307,7 +367,7 @@ func (cs *controller) CreateVolume( } if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + return nil, err } sendEventOrIgnore(pvcName, volName, strconv.FormatInt(int64(size), 10), "zfs-localpv", analytics.VolumeProvision) @@ -339,7 +399,7 @@ func (cs *controller) DeleteVolume( return nil, err } - volumeID := req.GetVolumeId() + volumeID := strings.ToLower(req.GetVolumeId()) // verify if the volume has already been deleted vol, err := zfs.GetVolume(volumeID) @@ -348,6 +408,9 @@ func (cs *controller) DeleteVolume( } if err != nil { + if 
k8serror.IsNotFound(err) { + goto deleteResponse + } return nil, errors.Wrapf( err, "failed to get volume for {%s}", @@ -371,6 +434,25 @@ deleteResponse: return csipayload.NewDeleteVolumeResponseBuilder().Build(), nil } +func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) bool { + hasSupport := func(cap *csi.VolumeCapability) bool { + for _, c := range SupportedVolumeCapabilityAccessModes { + if c.GetMode() == cap.AccessMode.GetMode() { + return true + } + } + return false + } + + foundAll := true + for _, c := range volCaps { + if !hasSupport(c) { + foundAll = false + } + } + return foundAll +} + // TODO Implementation will be taken up later // ValidateVolumeCapabilities validates the capabilities @@ -380,8 +462,26 @@ func (cs *controller) ValidateVolumeCapabilities( ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest, ) (*csi.ValidateVolumeCapabilitiesResponse, error) { + volumeID := strings.ToLower(req.GetVolumeId()) + if len(volumeID) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") + } + volCaps := req.GetVolumeCapabilities() + if len(volCaps) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume capabilities not provided") + } - return nil, status.Error(codes.Unimplemented, "") + if _, err := zfs.GetZFSVolume(volumeID); err != nil { + return nil, status.Errorf(codes.NotFound, "Get volume failed err %s", err.Error()) + } + + var confirmed *csi.ValidateVolumeCapabilitiesResponse_Confirmed + if isValidVolumeCapabilities(volCaps) { + confirmed = &csi.ValidateVolumeCapabilitiesResponse_Confirmed{VolumeCapabilities: volCaps} + } + return &csi.ValidateVolumeCapabilitiesResponse{ + Confirmed: confirmed, + }, nil } // ControllerGetCapabilities fetches controller capabilities @@ -406,16 +506,23 @@ func (cs *controller) ControllerExpandVolume( ctx context.Context, req *csi.ControllerExpandVolumeRequest, ) (*csi.ControllerExpandVolumeResponse, error) { + volumeID := strings.ToLower(req.GetVolumeId()) + 
if volumeID == "" { + return nil, status.Errorf( + codes.InvalidArgument, + "ControllerExpandVolume: no volumeID provided", + ) + } /* round off the new size */ updatedSize := getRoundedCapacity(req.GetCapacityRange().GetRequiredBytes()) - vol, err := zfs.GetZFSVolume(req.VolumeId) + vol, err := zfs.GetZFSVolume(volumeID) if err != nil { return nil, status.Errorf( codes.Internal, "ControllerExpandVolumeRequest: failed to get ZFSVolume in for %s, {%s}", - req.VolumeId, + volumeID, err.Error(), ) } @@ -425,7 +532,7 @@ func (cs *controller) ControllerExpandVolume( return nil, status.Errorf( codes.Internal, "ControllerExpandVolumeRequest: failed to parse volsize in for %s, {%s}", - req.VolumeId, + volumeID, err.Error(), ) } @@ -444,7 +551,7 @@ func (cs *controller) ControllerExpandVolume( return nil, status.Errorf( codes.Internal, "failed to handle ControllerExpandVolumeRequest for %s, {%s}", - req.VolumeId, + volumeID, err.Error(), ) } @@ -454,6 +561,40 @@ func (cs *controller) ControllerExpandVolume( Build(), nil } +func verifySnapshotRequest(req *csi.CreateSnapshotRequest) error { + snapName := strings.ToLower(req.GetName()) + volumeID := strings.ToLower(req.GetSourceVolumeId()) + + if snapName == "" || volumeID == "" { + return status.Errorf( + codes.InvalidArgument, + "CreateSnapshot error invalid request %s: %s", + volumeID, snapName, + ) + } + + snap, err := zfs.GetZFSSnapshot(snapName) + + if err != nil { + if k8serror.IsNotFound(err) { + return nil + } + return status.Errorf( + codes.NotFound, + "CreateSnapshot error snap %s %s get failed : %s", + snapName, volumeID, err.Error(), + ) + } + if snap.Labels[zfs.ZFSVolKey] != volumeID { + return status.Errorf( + codes.AlreadyExists, + "CreateSnapshot error snapshot %s already exist for different source vol %s: %s", + snapName, snap.Labels[zfs.ZFSVolKey], volumeID, + ) + } + return nil +} + // CreateSnapshot creates a snapshot for given volume // // This implements csi.ControllerServer @@ -461,27 +602,34 @@ func 
(cs *controller) CreateSnapshot( ctx context.Context, req *csi.CreateSnapshotRequest, ) (*csi.CreateSnapshotResponse, error) { + snapName := strings.ToLower(req.GetName()) + volumeID := strings.ToLower(req.GetSourceVolumeId()) - klog.Infof("CreateSnapshot volume %s@%s", req.SourceVolumeId, req.Name) + klog.Infof("CreateSnapshot volume %s@%s", volumeID, snapName) + + err := verifySnapshotRequest(req) + if err != nil { + return nil, err + } snapTimeStamp := time.Now().Unix() - state, err := zfs.GetZFSSnapshotStatus(req.Name) + state, err := zfs.GetZFSSnapshotStatus(snapName) if err == nil { return csipayload.NewCreateSnapshotResponseBuilder(). - WithSourceVolumeID(req.SourceVolumeId). - WithSnapshotID(req.SourceVolumeId+"@"+req.Name). + WithSourceVolumeID(volumeID). + WithSnapshotID(volumeID+"@"+snapName). WithCreationTime(snapTimeStamp, 0). WithReadyToUse(state == zfs.ZFSStatusReady). Build(), nil } - vol, err := zfs.GetZFSVolume(req.SourceVolumeId) + vol, err := zfs.GetZFSVolume(volumeID) if err != nil { return nil, status.Errorf( - codes.Internal, + codes.NotFound, "CreateSnapshot not able to get volume %s: %s, {%s}", - req.SourceVolumeId, req.Name, + volumeID, snapName, err.Error(), ) } @@ -489,14 +637,14 @@ func (cs *controller) CreateSnapshot( labels := map[string]string{zfs.ZFSVolKey: vol.Name} snapObj, err := snapbuilder.NewBuilder(). - WithName(req.Name). + WithName(snapName). WithLabels(labels).Build() if err != nil { return nil, status.Errorf( codes.Internal, "failed to create snapshotobject for %s: %s, {%s}", - req.SourceVolumeId, req.Name, + volumeID, snapName, err.Error(), ) } @@ -508,16 +656,16 @@ func (cs *controller) CreateSnapshot( return nil, status.Errorf( codes.Internal, "failed to handle CreateSnapshotRequest for %s: %s, {%s}", - req.SourceVolumeId, req.Name, + volumeID, snapName, err.Error(), ) } - state, _ = zfs.GetZFSSnapshotStatus(req.Name) + state, _ = zfs.GetZFSSnapshotStatus(snapName) return csipayload.NewCreateSnapshotResponseBuilder(). 
- WithSourceVolumeID(req.SourceVolumeId). - WithSnapshotID(req.SourceVolumeId+"@"+req.Name). + WithSourceVolumeID(volumeID). + WithSnapshotID(volumeID+"@"+snapName). WithCreationTime(snapTimeStamp, 0). WithReadyToUse(state == zfs.ZFSStatusReady). Build(), nil @@ -531,18 +679,18 @@ func (cs *controller) DeleteSnapshot( req *csi.DeleteSnapshotRequest, ) (*csi.DeleteSnapshotResponse, error) { + if req.SnapshotId == "" { + return nil, status.Errorf(codes.InvalidArgument, "DeleteSnapshot: empty snapshotID") + } + klog.Infof("DeleteSnapshot request for %s", req.SnapshotId) // snapshodID is formed as @ // parsing them here snapshotID := strings.Split(req.SnapshotId, "@") if len(snapshotID) != 2 { - return nil, status.Errorf( - codes.Internal, - "failed to handle DeleteSnapshot for %s, {%s}", - req.SnapshotId, - "failed to get the snapshot name, Manual intervention required", - ) + // should succeed when an invalid snapshot id is used + return &csi.DeleteSnapshotResponse{}, nil } if err := zfs.DeleteSnapshot(snapshotID[1]); err != nil { return nil, status.Errorf( diff --git a/pkg/zfs/mount.go b/pkg/zfs/mount.go index 9baf33f..9662e57 100644 --- a/pkg/zfs/mount.go +++ b/pkg/zfs/mount.go @@ -124,24 +124,24 @@ func UmountVolume(vol *apis.ZFSVolume, targetPath string, return nil } -func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) error { +func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) (bool, error) { if len(mountpath) == 0 { - return status.Error(codes.InvalidArgument, "verifyMount: mount path missing in request") + return false, status.Error(codes.InvalidArgument, "verifyMount: mount path missing in request") } if len(vol.Spec.OwnerNodeID) > 0 && vol.Spec.OwnerNodeID != NodeID { - return status.Error(codes.Internal, "verifyMount: volume is owned by different node") + return false, status.Error(codes.Internal, "verifyMount: volume is owned by different node") } if vol.Finalizers == nil { - return status.Error(codes.Internal, "verifyMount: volume 
is not ready to be mounted") + return false, status.Error(codes.Internal, "verifyMount: volume is not ready to be mounted") } devicePath, err := GetVolumeDevPath(vol) if err != nil { klog.Errorf("can not get device for volume:%s dev %s err: %v", vol.Name, devicePath, err.Error()) - return status.Errorf(codes.Internal, "verifyMount: GetVolumePath failed %s", err.Error()) + return false, status.Errorf(codes.Internal, "verifyMount: GetVolumePath failed %s", err.Error()) } // if it is not a shared volume, then make sure it is not mounted to more than one path @@ -157,26 +157,34 @@ func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) error { if err != nil { klog.Errorf("can not get mounts for volume:%s dev %s err: %v", vol.Name, devicePath, err.Error()) - return status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error()) + return false, status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error()) } else if len(currentMounts) >= 1 { + if currentMounts[0] == mountpath { + return true, nil + } klog.Errorf( "can not mount, volume:%s already mounted dev %s mounts: %v", vol.Name, devicePath, currentMounts, ) - return status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts) + return false, status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts) } } - return nil + return false, nil } // MountZvol mounts the disk to the specified path func MountZvol(vol *apis.ZFSVolume, mount *MountInfo) error { volume := vol.Spec.PoolName + "/" + vol.Name - err := verifyMountRequest(vol, mount.MountPath) + mounted, err := verifyMountRequest(vol, mount.MountPath) if err != nil { return err } + if mounted { + klog.Infof("zvol : already mounted %s => %s", volume, mount.MountPath) + return nil + } + devicePath := ZFSDevPath + volume err = FormatAndMountZvol(devicePath, mount) @@ -192,11 +200,16 @@ func MountZvol(vol *apis.ZFSVolume, mount *MountInfo) error { // MountDataset mounts the 
zfs dataset to the specified path func MountDataset(vol *apis.ZFSVolume, mount *MountInfo) error { volume := vol.Spec.PoolName + "/" + vol.Name - err := verifyMountRequest(vol, mount.MountPath) + mounted, err := verifyMountRequest(vol, mount.MountPath) if err != nil { return err } + if mounted { + klog.Infof("dataset : already mounted %s => %s", volume, mount.MountPath) + return nil + } + val, err := GetVolumeProperty(vol, "mountpoint") if err != nil { return err @@ -237,6 +250,16 @@ func MountDataset(vol *apis.ZFSVolume, mount *MountInfo) error { // MountFilesystem mounts the disk to the specified path func MountFilesystem(vol *apis.ZFSVolume, mount *MountInfo) error { + if err := os.MkdirAll(mount.MountPath, 0000); err != nil { + return status.Errorf(codes.Internal, "Could not create dir {%q}, err: %v", mount.MountPath, err) + } + + // in case if the dir already exists, above call returns nil + // so permission needs to be updated + if err := os.Chmod(mount.MountPath, 0000); err != nil { + return status.Errorf(codes.Internal, "Could not change mode of dir {%q}, err: %v", mount.MountPath, err) + } + switch vol.Spec.VolumeType { case VolTypeDataset: return MountDataset(vol, mount)