Mirror of https://github.com/TECHNOFAB11/zfs-localpv.git, synced 2025-12-11 22:10:11 +01:00
feat(shared): add shared mount support for ZFSPV volumes
Applications that want to share a volume can use the storageclass below to make their volumes shareable by multiple pods:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-zfspv
parameters:
  shared: "yes"
  fstype: "zfs"
  poolname: "zfspv-pool"
provisioner: zfs.csi.openebs.io
```

A volume provisioned with this storageclass can be used by multiple pods. The pods themselves have to ensure data consistency and provide their own locking mechanism. Note that all the pods will be scheduled to the node where the volume is present, so each pod accesses the volume locally; this way we avoid the NFS overhead and get optimal performance.

Also fixed the log formatting in the GRPC log.

Signed-off-by: Pawan <pawan@mayadata.io>
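For illustration only (these manifests are not part of this commit): a claim against the storageclass above and two pods consuming it might look like the sketch below. The PVC name, pod names, container image, capacity, and access mode are placeholder assumptions.

```yaml
# Hypothetical manifests, not part of this commit: one PVC bound to the
# shared storageclass and two pods mounting the same claim.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: csi-zfspv            # placeholder name
spec:
  storageClassName: openebs-zfspv
  accessModes:
    - ReadWriteOnce          # assumed access mode
  resources:
    requests:
      storage: 4Gi           # assumed capacity
---
apiVersion: v1
kind: Pod
metadata:
  name: shared-app-1         # placeholder name
spec:
  containers:
    - name: app
      image: busybox         # placeholder image
      command: ["sleep", "3600"]
      volumeMounts:
        - name: shared-vol
          mountPath: /data
  volumes:
    - name: shared-vol
      persistentVolumeClaim:
        claimName: csi-zfspv
---
apiVersion: v1
kind: Pod
metadata:
  name: shared-app-2         # placeholder name
spec:
  containers:
    - name: app
      image: busybox         # placeholder image
      command: ["sleep", "3600"]
      volumeMounts:
        - name: shared-vol
          mountPath: /data
  volumes:
    - name: shared-vol
      persistentVolumeClaim:
        claimName: csi-zfspv
```

Because ZFS-LocalPV volumes are node-local, both pods end up on the node that hosts the volume and see the same data under /data; coordinating concurrent access remains the application's responsibility.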
parent ac9d6d5729
commit 27065bf40a
11 changed files with 2774 additions and 1339 deletions
```diff
@@ -193,6 +193,13 @@ type VolumeInfo struct {
 	// FsType can not be modified once volume has been provisioned.
 	// Default Value: ext4.
 	FsType string `json:"fsType,omitempty"`
+
+	// Shared specifies whether the volume can be shared among multiple pods.
+	// If it is not set to "yes", then the ZFS-LocalPV Driver will not allow
+	// the volumes to be mounted by more than one pods.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum=yes;no
+	Shared string `json:"shared,omitempty"`
 }
 
 type VolStatus struct {
```
```diff
@@ -172,6 +172,12 @@ func (b *Builder) WithFsType(fstype string) *Builder {
 	return b
 }
 
+// WithShared sets where filesystem is shared or not
+func (b *Builder) WithShared(shared string) *Builder {
+	b.volume.Object.Spec.Shared = shared
+	return b
+}
+
 // WithSnapshot sets Snapshot name for creating clone volume
 func (b *Builder) WithSnapshot(snap string) *Builder {
 	b.volume.Object.Spec.SnapName = snap
```
```diff
@@ -98,6 +98,7 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) {
 	tp := parameters["thinprovision"]
 	schld := parameters["scheduler"]
 	fstype := parameters["fstype"]
+	shared := parameters["shared"]
 
 	vtype := zfs.GetVolumeType(fstype)
 
```
```diff
@@ -124,6 +125,7 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) {
 		WithVolumeType(vtype).
 		WithVolumeStatus(zfs.ZFSStatusPending).
 		WithFsType(fstype).
+		WithShared(shared).
 		WithCompression(compression).Build()
 
 	if err != nil {
```
```diff
@@ -69,7 +69,7 @@ func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h
 
 	log := isInfotrmativeLog(info.FullMethod)
 	if log == true {
-		klog.Infof("GRPC call: %s\n requests %s", info.FullMethod, protosanitizer.StripSecrets(req))
+		klog.Infof("GRPC call: %s requests %s", info.FullMethod, protosanitizer.StripSecrets(req))
 	}
 
 	resp, err := handler(ctx, req)
```
```diff
@@ -147,24 +147,27 @@ func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) error {
 		return status.Errorf(codes.Internal, "verifyMount: GetVolumePath failed %s", err.Error())
 	}
 
-	/*
-	 * This check is the famous *Wall Of North*
-	 * It will not let the volume to be mounted
-	 * at more than two places. The volume should
-	 * be unmounted before proceeding to the mount
-	 * operation.
-	 */
-	currentMounts, err := GetMounts(devicePath)
-	if err != nil {
-		klog.Errorf("can not get mounts for volume:%s dev %s err: %v",
-			vol.Name, devicePath, err.Error())
-		return status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error())
-	} else if len(currentMounts) >= 1 {
-		klog.Errorf(
-			"can not mount, volume:%s already mounted dev %s mounts: %v",
-			vol.Name, devicePath, currentMounts,
-		)
-		return status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts)
+	// if it is not a shared volume, then make sure it is not mounted to more than one path
+	if vol.Spec.Shared != "yes" {
+		/*
+		 * This check is the famous *Wall Of North*
+		 * It will not let the volume to be mounted
+		 * at more than two places. The volume should
+		 * be unmounted before proceeding to the mount
+		 * operation.
+		 */
+		currentMounts, err := GetMounts(devicePath)
+		if err != nil {
+			klog.Errorf("can not get mounts for volume:%s dev %s err: %v",
+				vol.Name, devicePath, err.Error())
+			return status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error())
+		} else if len(currentMounts) >= 1 {
+			klog.Errorf(
+				"can not mount, volume:%s already mounted dev %s mounts: %v",
+				vol.Name, devicePath, currentMounts,
+			)
+			return status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts)
+		}
 	}
 	return nil
 }
```