feat(ZFSPV): adding support for applications to create "zfs" filesystem (#15)

Applications can now create a StorageClass to get a ZFS filesystem provisioned:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: openebs-zfspv5
    allowVolumeExpansion: true
    parameters:
      blocksize: "4k"
      fstype: "zfs"
      poolname: "zfspv-pool"
    provisioner: zfs.csi.openebs.io

ZFSPV previously supported only the ext2/3/4 and xfs filesystems, which add one extra filesystem layer on top of the ZFS filesystem. Now we can create a ZFS filesystem for storage directly and write to it without that extra layer, getting optimal performance.

Signed-off-by: Pawan <pawan@mayadata.io>
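For reference, a PersistentVolumeClaim consuming this class might look like the sketch below; the claim name and size are illustrative, only the storageClassName comes from the StorageClass above:

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: csi-zfspv
    spec:
      storageClassName: openebs-zfspv5
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 4Gi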
This commit is contained in:
parent 4ffd857191, commit 68db6d2774
13 changed files with 428 additions and 176 deletions
@@ -22,7 +22,7 @@ import (
 	ctrl "github.com/openebs/zfs-localpv/cmd/controller"
 	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/core/v1alpha1"
 	"github.com/openebs/zfs-localpv/pkg/builder"
-	zvol "github.com/openebs/zfs-localpv/pkg/zfs"
+	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -65,7 +65,7 @@ func GetVolAndMountInfo(
 	getOptions := metav1.GetOptions{}
 	vol, err := builder.NewKubeclient().
-		WithNamespace(zvol.OpenEBSNamespace).
+		WithNamespace(zfs.OpenEBSNamespace).
 		Get(req.GetVolumeId(), getOptions)
 
 	if err != nil {
@@ -96,8 +96,8 @@ func (ns *node) NodePublishVolume(
 	if err != nil {
 		goto PublishVolumeResponse
 	}
-	// Create the zfs volume and attempt mount operation on the requested path
-	if err = zvol.CreateAndMountZvol(vol, mountInfo); err != nil {
+	// attempt mount operation on the requested path
+	if err = zfs.MountVolume(vol, mountInfo); err != nil {
 		goto PublishVolumeResponse
 	}
 
@@ -120,6 +120,7 @@ func (ns *node) NodeUnpublishVolume(
 	var (
 		err           error
 		vol           *apis.ZFSVolume
+		devpath       string
 		currentMounts []string
 	)
 
@@ -130,22 +131,19 @@ func (ns *node) NodeUnpublishVolume(
 	targetPath := req.GetTargetPath()
 	volumeID := req.GetVolumeId()
 
-	getOptions := metav1.GetOptions{}
-	vol, err = builder.NewKubeclient().
-		WithNamespace(zvol.OpenEBSNamespace).
-		Get(volumeID, getOptions)
-
-	if err != nil {
+	if vol, err = zfs.GetZFSVolume(volumeID); err != nil {
 		return nil, err
 	}
 
-	zfsvolume := vol.Spec.PoolName + "/" + vol.Name
-	devpath := zvol.ZFS_DEVPATH + zfsvolume
-	currentMounts, err = zvol.GetMounts(devpath)
+	if devpath, err = zfs.GetVolumeDevPath(vol); err != nil {
+		goto NodeUnpublishResponse
+	}
+
+	currentMounts, err = zfs.GetMounts(devpath)
 	if err != nil {
 		return nil, err
 	} else if len(currentMounts) == 0 {
-		goto NodeUnpublishResponse
+		return nil, status.Error(codes.Internal, "umount request for not mounted volume")
 	} else if len(currentMounts) == 1 {
 		if currentMounts[0] != targetPath {
 			return nil, status.Error(codes.Internal, "device not mounted at right path")
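The inline devpath computation is replaced by zfs.GetVolumeDevPath because the two volume types are addressed differently: a zvol is exposed as a block device under /dev/zvol/<pool>/<name>, while a "zfs" dataset has no device node and is mounted by its <pool>/<name> path alone. A minimal sketch of such a helper under that assumption; the type, field, and function names below are illustrative, not taken from the repository:

    package zfs

    const zfsDevPath = "/dev/zvol/" // prefix the old code kept in ZFS_DEVPATH

    // volume stands in for the fields of apis.ZFSVolume used here.
    type volume struct {
        PoolName string
        Name     string
        VolType  string // "DATASET" or "ZVOL"
    }

    // devPath mirrors what zfs.GetVolumeDevPath appears to return:
    // datasets are addressed by <pool>/<name>, zvols by their device node.
    func devPath(v volume) string {
        name := v.PoolName + "/" + v.Name
        if v.VolType == "DATASET" {
            return name
        }
        return zfsDevPath + name
    }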
@@ -158,15 +156,14 @@ func (ns *node) NodeUnpublishVolume(
 		return nil, status.Error(codes.Internal, "device not mounted at rightpath")
 	}
 
-	if vol, err = zvol.GetZFSVolume(volumeID); (err != nil) || (vol == nil) {
-		goto NodeUnpublishResponse
-	}
-
-	if err = zvol.UmountVolume(vol, req.GetTargetPath()); err != nil {
+	if err = zfs.UmountVolume(vol, req.GetTargetPath()); err != nil {
 		goto NodeUnpublishResponse
 	}
 
 NodeUnpublishResponse:
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 	logrus.Infof("hostpath: volume %s path: %s has been unmounted.",
 		volumeID, targetPath)
@@ -181,7 +178,7 @@ func (ns *node) NodeGetInfo(
 	req *csi.NodeGetInfoRequest,
 ) (*csi.NodeGetInfoResponse, error) {
 
-	topology := map[string]string{zvol.ZFSTopologyKey: ns.driver.config.NodeID}
+	topology := map[string]string{zfs.ZFSTopologyKey: ns.driver.config.NodeID}
 	return &csi.NodeGetInfoResponse{
 		NodeId: ns.driver.config.NodeID,
 		AccessibleTopology: &csi.Topology{

@@ -25,7 +25,7 @@ import (
 	"github.com/openebs/zfs-localpv/pkg/builder"
 	errors "github.com/openebs/zfs-localpv/pkg/common/errors"
 	csipayload "github.com/openebs/zfs-localpv/pkg/response"
-	zvol "github.com/openebs/zfs-localpv/pkg/zfs"
+	zfs "github.com/openebs/zfs-localpv/pkg/zfs"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -69,7 +69,8 @@ func (cs *controller) CreateVolume(
 
 	volName := req.GetName()
 	size := req.GetCapacityRange().RequiredBytes
-	bs := req.GetParameters()["blocksize"]
+	rs := req.GetParameters()["recordsize"]
+	bs := req.GetParameters()["volblocksize"]
 	compression := req.GetParameters()["compression"]
 	dedup := req.GetParameters()["dedup"]
 	encr := req.GetParameters()["encryption"]
@@ -78,6 +79,9 @@ func (cs *controller) CreateVolume(
 	pool := req.GetParameters()["poolname"]
 	tp := req.GetParameters()["thinprovision"]
 	schld := req.GetParameters()["scheduler"]
+	fstype := req.GetParameters()["fstype"]
+
+	vtype := zfs.GetVolumeType(fstype)
 
 	selected := scheduler(req.AccessibilityRequirements, schld, pool)
 
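The new fstype parameter feeds zfs.GetVolumeType. A minimal sketch of the mapping this diff implies — fstype "zfs" provisions a ZFS dataset directly, while any other fstype (ext2/3/4, xfs) goes through a zvol that gets formatted on top — with illustrative constant and function names, not taken from the repository:

    package zfs

    const (
        volTypeDataset = "DATASET" // plain ZFS filesystem, no extra FS layer
        volTypeZvol    = "ZVOL"    // block device formatted with ext*/xfs
    )

    // getVolumeType picks the ZFS object type for the requested fstype.
    func getVolumeType(fstype string) string {
        if fstype == "zfs" {
            return volTypeDataset
        }
        return volTypeZvol
    }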
@@ -90,7 +94,8 @@ func (cs *controller) CreateVolume(
 	volObj, err := builder.NewBuilder().
 		WithName(volName).
 		WithCapacity(strconv.FormatInt(int64(size), 10)).
-		WithBlockSize(bs).
+		WithRecordSize(rs).
+		WithVolBlockSize(bs).
 		WithPoolName(pool).
 		WithDedup(dedup).
 		WithEncryption(encr).
@@ -98,18 +103,20 @@ func (cs *controller) CreateVolume(
 		WithKeyLocation(kl).
 		WithThinProv(tp).
 		WithOwnerNode(selected).
+		WithVolumeType(vtype).
+		WithFsType(fstype).
 		WithCompression(compression).Build()
 
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	err = zvol.ProvisionVolume(size, volObj)
+	err = zfs.ProvisionVolume(size, volObj)
 	if err != nil {
 		return nil, status.Error(codes.Internal, "not able to provision the volume")
 	}
 
-	topology := map[string]string{zvol.ZFSTopologyKey: selected}
+	topology := map[string]string{zfs.ZFSTopologyKey: selected}
 
 	return csipayload.NewCreateVolumeResponseBuilder().
 		WithName(volName).
@@ -136,13 +143,13 @@ func (cs *controller) DeleteVolume(
 	volumeID := req.GetVolumeId()
 
 	// verify if the volume has already been deleted
-	vol, err := zvol.GetVolume(volumeID)
+	vol, err := zfs.GetVolume(volumeID)
 	if vol != nil && vol.DeletionTimestamp != nil {
 		goto deleteResponse
 	}
 
 	// Delete the corresponding ZV CR
-	err = zvol.DeleteVolume(volumeID)
+	err = zfs.DeleteVolume(volumeID)
 	if err != nil {
 		return nil, errors.Wrapf(
 			err,

@@ -76,7 +76,8 @@ func volumeWeightedScheduler(topo *csi.TopologyRequirement, pool string) string
 // the given zfs pool.
 func scheduler(topo *csi.TopologyRequirement, schld string, pool string) string {
 
-	if len(topo.Preferred) == 0 {
+	if topo == nil ||
+		len(topo.Preferred) == 0 {
 		logrus.Errorf("topology information not provided")
 		return ""
 	}