/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package zfs

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/openebs/lib-csi/pkg/btrfs"
	"github.com/openebs/lib-csi/pkg/xfs"
	apis "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/klog"
)

// zfs related constants
const (
	ZFSDevPath = "/dev/zvol/"
	FSTypeZFS  = "zfs"
)

// zfs command related constants
const (
	ZFSVolCmd      = "zfs"
	ZFSCreateArg   = "create"
	ZFSCloneArg    = "clone"
	ZFSDestroyArg  = "destroy"
	ZFSSetArg      = "set"
	ZFSGetArg      = "get"
	ZFSListArg     = "list"
	ZFSSnapshotArg = "snapshot"
	ZFSSendArg     = "send"
	ZFSRecvArg     = "recv"
)

// constants to define volume type
const (
	VolTypeDataset = "DATASET"
	VolTypeZVol    = "ZVOL"
)

// PropertyChanged returns whether a volume property has been changed
func PropertyChanged(oldVol *apis.ZFSVolume, newVol *apis.ZFSVolume) bool {
	if oldVol.Spec.VolumeType == VolTypeDataset &&
		newVol.Spec.VolumeType == VolTypeDataset &&
		oldVol.Spec.RecordSize != newVol.Spec.RecordSize {
		return true
	}

	return oldVol.Spec.Compression != newVol.Spec.Compression ||
		oldVol.Spec.Dedup != newVol.Spec.Dedup
}

// GetVolumeType returns the volume type,
// whether it is a zvol or a dataset
func GetVolumeType(fstype string) string {
	/*
	 * if fstype is provided as zfs then a zfs dataset will be created
	 * otherwise a zvol will be created
	 */
	switch fstype {
	case FSTypeZFS:
		return VolTypeDataset
	default:
		return VolTypeZVol
	}
}
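
// For example, GetVolumeType("zfs") returns VolTypeDataset, while any
// other fstype such as "ext4", "xfs" or "btrfs" returns VolTypeZVol.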

// buildZvolCreateArgs returns the zfs create command for a zvol
// along with its attributes as a string array
func buildZvolCreateArgs(vol *apis.ZFSVolume) []string {
	var ZFSVolArg []string

	volume := vol.Spec.PoolName + "/" + vol.Name

	ZFSVolArg = append(ZFSVolArg, ZFSCreateArg)

	if vol.Spec.ThinProvision == "yes" {
		ZFSVolArg = append(ZFSVolArg, "-s")
	}
	if len(vol.Spec.Capacity) != 0 {
		ZFSVolArg = append(ZFSVolArg, "-V", vol.Spec.Capacity)
	}
	if len(vol.Spec.VolBlockSize) != 0 {
		ZFSVolArg = append(ZFSVolArg, "-b", vol.Spec.VolBlockSize)
	}
	if len(vol.Spec.Dedup) != 0 {
		dedupProperty := "dedup=" + vol.Spec.Dedup
		ZFSVolArg = append(ZFSVolArg, "-o", dedupProperty)
	}
	if len(vol.Spec.Compression) != 0 {
		compressionProperty := "compression=" + vol.Spec.Compression
		ZFSVolArg = append(ZFSVolArg, "-o", compressionProperty)
	}
	if len(vol.Spec.Encryption) != 0 {
		encryptionProperty := "encryption=" + vol.Spec.Encryption
		ZFSVolArg = append(ZFSVolArg, "-o", encryptionProperty)
	}
	if len(vol.Spec.KeyLocation) != 0 {
		keyLocation := "keylocation=" + vol.Spec.KeyLocation
		ZFSVolArg = append(ZFSVolArg, "-o", keyLocation)
	}
	if len(vol.Spec.KeyFormat) != 0 {
		keyFormat := "keyformat=" + vol.Spec.KeyFormat
		ZFSVolArg = append(ZFSVolArg, "-o", keyFormat)
	}

	ZFSVolArg = append(ZFSVolArg, volume)

	return ZFSVolArg
}
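
// As an illustration, for a hypothetical thin-provisioned zvol with
// PoolName "zfspv-pool", Name "pvc-123", Capacity "4294967296" and
// Compression "on", the args built above translate to the command:
//
//	zfs create -s -V 4294967296 -o compression=on zfspv-pool/pvc-123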

// buildCloneCreateArgs returns the zfs clone command for a zfs
// volume/dataset along with its attributes as a string array
func buildCloneCreateArgs(vol *apis.ZFSVolume) []string {
	var ZFSVolArg []string

	volume := vol.Spec.PoolName + "/" + vol.Name
	snapshot := vol.Spec.PoolName + "/" + vol.Spec.SnapName

	ZFSVolArg = append(ZFSVolArg, ZFSCloneArg)

	if vol.Spec.VolumeType == VolTypeDataset {
		if len(vol.Spec.Capacity) != 0 {
			quotaProperty := "quota=" + vol.Spec.Capacity
			ZFSVolArg = append(ZFSVolArg, "-o", quotaProperty)
		}
		if len(vol.Spec.RecordSize) != 0 {
			recordsizeProperty := "recordsize=" + vol.Spec.RecordSize
			ZFSVolArg = append(ZFSVolArg, "-o", recordsizeProperty)
		}
		if vol.Spec.ThinProvision == "no" {
			reservationProperty := "reservation=" + vol.Spec.Capacity
			ZFSVolArg = append(ZFSVolArg, "-o", reservationProperty)
		}
		ZFSVolArg = append(ZFSVolArg, "-o", "mountpoint=legacy")
	}

	if len(vol.Spec.Dedup) != 0 {
		dedupProperty := "dedup=" + vol.Spec.Dedup
		ZFSVolArg = append(ZFSVolArg, "-o", dedupProperty)
	}
	if len(vol.Spec.Compression) != 0 {
		compressionProperty := "compression=" + vol.Spec.Compression
		ZFSVolArg = append(ZFSVolArg, "-o", compressionProperty)
	}
	if len(vol.Spec.Encryption) != 0 {
		encryptionProperty := "encryption=" + vol.Spec.Encryption
		ZFSVolArg = append(ZFSVolArg, "-o", encryptionProperty)
	}
	if len(vol.Spec.KeyLocation) != 0 {
		keyLocation := "keylocation=" + vol.Spec.KeyLocation
		ZFSVolArg = append(ZFSVolArg, "-o", keyLocation)
	}
	if len(vol.Spec.KeyFormat) != 0 {
		keyFormat := "keyformat=" + vol.Spec.KeyFormat
		ZFSVolArg = append(ZFSVolArg, "-o", keyFormat)
	}

	ZFSVolArg = append(ZFSVolArg, snapshot, volume)

	return ZFSVolArg
}
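
// As an illustration, cloning a hypothetical dataset snapshot with
// PoolName "zfspv-pool", SnapName "pvc-123@snap-1", clone Name "pvc-456",
// Capacity "4294967296" and ThinProvision "yes" translates to:
//
//	zfs clone -o quota=4294967296 -o mountpoint=legacy zfspv-pool/pvc-123@snap-1 zfspv-pool/pvc-456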

// buildZFSSnapCreateArgs returns the zfs snapshot create command as a string array
// zfs snapshot <poolname>/<volname>@<snapname>
func buildZFSSnapCreateArgs(snap *apis.ZFSSnapshot) []string {
	var ZFSSnapArg []string

	volname := snap.Labels[ZFSVolKey]
	snapDataset := snap.Spec.PoolName + "/" + volname + "@" + snap.Name

	ZFSSnapArg = append(ZFSSnapArg, ZFSSnapshotArg, snapDataset)

	return ZFSSnapArg
}

// buildZFSSnapDestroyArgs returns the zfs destroy command for a zfs snapshot
// zfs destroy <poolname>/<volname>@<snapname>
func buildZFSSnapDestroyArgs(snap *apis.ZFSSnapshot) []string {
	var ZFSSnapArg []string

	volname := snap.Labels[ZFSVolKey]
	snapDataset := snap.Spec.PoolName + "/" + volname + "@" + snap.Name

	ZFSSnapArg = append(ZFSSnapArg, ZFSDestroyArg, snapDataset)

	return ZFSSnapArg
}

// buildDatasetCreateArgs returns the zfs create command for a dataset
// along with its attributes as a string array
func buildDatasetCreateArgs(vol *apis.ZFSVolume) []string {
	var ZFSVolArg []string

	volume := vol.Spec.PoolName + "/" + vol.Name

	ZFSVolArg = append(ZFSVolArg, ZFSCreateArg)

	if len(vol.Spec.Capacity) != 0 {
		quotaProperty := "quota=" + vol.Spec.Capacity
		ZFSVolArg = append(ZFSVolArg, "-o", quotaProperty)
	}
	if len(vol.Spec.RecordSize) != 0 {
		recordsizeProperty := "recordsize=" + vol.Spec.RecordSize
		ZFSVolArg = append(ZFSVolArg, "-o", recordsizeProperty)
	}
	if vol.Spec.ThinProvision == "no" {
		reservationProperty := "reservation=" + vol.Spec.Capacity
		ZFSVolArg = append(ZFSVolArg, "-o", reservationProperty)
	}
	if len(vol.Spec.Dedup) != 0 {
		dedupProperty := "dedup=" + vol.Spec.Dedup
		ZFSVolArg = append(ZFSVolArg, "-o", dedupProperty)
	}
	if len(vol.Spec.Compression) != 0 {
		compressionProperty := "compression=" + vol.Spec.Compression
		ZFSVolArg = append(ZFSVolArg, "-o", compressionProperty)
	}
	if len(vol.Spec.Encryption) != 0 {
		encryptionProperty := "encryption=" + vol.Spec.Encryption
		ZFSVolArg = append(ZFSVolArg, "-o", encryptionProperty)
	}
	if len(vol.Spec.KeyLocation) != 0 {
		keyLocation := "keylocation=" + vol.Spec.KeyLocation
		ZFSVolArg = append(ZFSVolArg, "-o", keyLocation)
	}
	if len(vol.Spec.KeyFormat) != 0 {
		keyFormat := "keyformat=" + vol.Spec.KeyFormat
		ZFSVolArg = append(ZFSVolArg, "-o", keyFormat)
	}

	// set the mountpoint to legacy; by default zfs mounts the dataset
	// at its default dataset path
	ZFSVolArg = append(ZFSVolArg, "-o", "mountpoint=legacy", volume)

	return ZFSVolArg
}
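
// As an illustration, for a hypothetical thick-provisioned dataset with
// PoolName "zfspv-pool", Name "pvc-123", Capacity "4294967296" and
// ThinProvision "no", the args built above translate to the command:
//
//	zfs create -o quota=4294967296 -o reservation=4294967296 -o mountpoint=legacy zfspv-pool/pvc-123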

// buildVolumeSetArgs returns the volume set command along with
// attributes as a string array
// TODO(pawan) need to find a way to identify which property has changed
func buildVolumeSetArgs(vol *apis.ZFSVolume) []string {
	var ZFSVolArg []string

	volume := vol.Spec.PoolName + "/" + vol.Name

	ZFSVolArg = append(ZFSVolArg, ZFSSetArg)

	if vol.Spec.VolumeType == VolTypeDataset &&
		len(vol.Spec.RecordSize) != 0 {
		recordsizeProperty := "recordsize=" + vol.Spec.RecordSize
		ZFSVolArg = append(ZFSVolArg, recordsizeProperty)
	}

	if len(vol.Spec.Dedup) != 0 {
		dedupProperty := "dedup=" + vol.Spec.Dedup
		ZFSVolArg = append(ZFSVolArg, dedupProperty)
	}
	if len(vol.Spec.Compression) != 0 {
		compressionProperty := "compression=" + vol.Spec.Compression
		ZFSVolArg = append(ZFSVolArg, compressionProperty)
	}

	ZFSVolArg = append(ZFSVolArg, volume)

	return ZFSVolArg
}
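
// As an illustration, setting Compression "lz4" and RecordSize "4k" on a
// hypothetical dataset "zfspv-pool/pvc-123" translates to the command:
//
//	zfs set recordsize=4k compression=lz4 zfspv-pool/pvc-123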

// buildVolumeResizeArgs returns the volume set command for resizing the zfs volume
func buildVolumeResizeArgs(vol *apis.ZFSVolume) []string {
	var ZFSVolArg []string

	volume := vol.Spec.PoolName + "/" + vol.Name

	ZFSVolArg = append(ZFSVolArg, ZFSSetArg)

	if vol.Spec.VolumeType == VolTypeDataset {
		quotaProperty := "quota=" + vol.Spec.Capacity
		ZFSVolArg = append(ZFSVolArg, quotaProperty)
	} else {
		volsizeProperty := "volsize=" + vol.Spec.Capacity
		ZFSVolArg = append(ZFSVolArg, volsizeProperty)
	}

	ZFSVolArg = append(ZFSVolArg, volume)

	return ZFSVolArg
}
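
// As an illustration, resizing a hypothetical volume "zfspv-pool/pvc-123"
// to Capacity "8589934592" translates to one of the commands:
//
//	zfs set quota=8589934592 zfspv-pool/pvc-123   (dataset)
//	zfs set volsize=8589934592 zfspv-pool/pvc-123 (zvol)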

// buildVolumeBackupArgs returns the volume send command for sending
// the zfs volume to the backup server
func buildVolumeBackupArgs(bkp *apis.ZFSBackup, vol *apis.ZFSVolume) ([]string, error) {
	var ZFSVolArg []string

	backupDest := bkp.Spec.BackupDest

	bkpAddr := strings.Split(backupDest, ":")
	if len(bkpAddr) != 2 {
		return ZFSVolArg, fmt.Errorf("zfs: invalid backup server address %s", backupDest)
	}

	curSnap := vol.Spec.PoolName + "/" + vol.Name + "@" + bkp.Spec.SnapName

	remote := " | nc -w 3 " + bkpAddr[0] + " " + bkpAddr[1]

	cmd := ZFSVolCmd + " "

	if len(bkp.Spec.PrevSnapName) > 0 {
		prevSnap := vol.Spec.PoolName + "/" + vol.Name + "@" + bkp.Spec.PrevSnapName
		// do incremental send
		cmd += ZFSSendArg + " -i " + prevSnap + " " + curSnap + " " + remote
	} else {
		cmd += ZFSSendArg + " " + curSnap + remote
	}

	ZFSVolArg = append(ZFSVolArg, "-c", cmd)

	return ZFSVolArg, nil
}
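
// As an illustration, a full send of a hypothetical snapshot "snap-1" of
// volume "zfspv-pool/pvc-123" to BackupDest "192.168.1.10:9010" builds a
// "-c" argument, intended for a shell, of the form:
//
//	zfs send zfspv-pool/pvc-123@snap-1 | nc -w 3 192.168.1.10 9010
//
// and, with PrevSnapName "snap-0", an incremental send:
//
//	zfs send -i zfspv-pool/pvc-123@snap-0 zfspv-pool/pvc-123@snap-1 | nc -w 3 192.168.1.10 9010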

// buildVolumeRestoreArgs returns the volume recv command for receiving
// the zfs volume from the restore source
func buildVolumeRestoreArgs(rstr *apis.ZFSRestore) ([]string, error) {
	var ZFSVolArg []string
	var ZFSRecvParam string

	restoreSrc := rstr.Spec.RestoreSrc

	volume := rstr.VolSpec.PoolName + "/" + rstr.Spec.VolumeName

	rstrAddr := strings.Split(restoreSrc, ":")
	if len(rstrAddr) != 2 {
		return ZFSVolArg, fmt.Errorf("zfs: invalid restore server address %s", restoreSrc)
	}

	source := "nc -w 3 " + rstrAddr[0] + " " + rstrAddr[1] + " | "

	if rstr.VolSpec.VolumeType == VolTypeDataset {
		if len(rstr.VolSpec.Capacity) != 0 {
			ZFSRecvParam += " -o quota=" + rstr.VolSpec.Capacity
		}
		if len(rstr.VolSpec.RecordSize) != 0 {
			ZFSRecvParam += " -o recordsize=" + rstr.VolSpec.RecordSize
		}
		if rstr.VolSpec.ThinProvision == "no" {
			ZFSRecvParam += " -o reservation=" + rstr.VolSpec.Capacity
		}
		ZFSRecvParam += " -o mountpoint=legacy"
	}

	if len(rstr.VolSpec.Dedup) != 0 {
		ZFSRecvParam += " -o dedup=" + rstr.VolSpec.Dedup
	}
	if len(rstr.VolSpec.Compression) != 0 {
		ZFSRecvParam += " -o compression=" + rstr.VolSpec.Compression
	}
	if len(rstr.VolSpec.Encryption) != 0 {
		ZFSRecvParam += " -o encryption=" + rstr.VolSpec.Encryption
	}
	if len(rstr.VolSpec.KeyLocation) != 0 {
		ZFSRecvParam += " -o keylocation=" + rstr.VolSpec.KeyLocation
	}
	if len(rstr.VolSpec.KeyFormat) != 0 {
		ZFSRecvParam += " -o keyformat=" + rstr.VolSpec.KeyFormat
	}

	cmd := source + ZFSVolCmd + " " + ZFSRecvArg + ZFSRecvParam + " -F " + volume

	ZFSVolArg = append(ZFSVolArg, "-c", cmd)

	return ZFSVolArg, nil
}
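
// As an illustration, restoring a hypothetical dataset "pvc-123" with
// Capacity "4294967296" into pool "zfspv-pool" from RestoreSrc
// "192.168.1.10:9010" builds a "-c" argument, intended for a shell, of
// the form:
//
//	nc -w 3 192.168.1.10 9010 | zfs recv -o quota=4294967296 -o mountpoint=legacy -F zfspv-pool/pvc-123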

// buildVolumeDestroyArgs returns the volume destroy command along with
// attributes as a string array
func buildVolumeDestroyArgs(vol *apis.ZFSVolume) []string {
	var ZFSVolArg []string

	volume := vol.Spec.PoolName + "/" + vol.Name

	ZFSVolArg = append(ZFSVolArg, ZFSDestroyArg, "-r", volume)

	return ZFSVolArg
}
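
// As an illustration, destroying a hypothetical volume
// "zfspv-pool/pvc-123" translates to the following, where "-r" also
// recursively destroys any snapshots beneath the volume:
//
//	zfs destroy -r zfspv-pool/pvc-123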

// getVolume checks whether the given zvol/dataset is present by
// running the zfs list command; a nil error means it exists
func getVolume(volume string) error {
	var ZFSVolArg []string

	ZFSVolArg = append(ZFSVolArg, ZFSListArg, volume)

	cmd := exec.Command(ZFSVolCmd, ZFSVolArg...)
	_, err := cmd.CombinedOutput()
	return err
}

// CreateVolume creates the zvol/dataset as per
// info provided in ZFSVolume object
func CreateVolume(vol *apis.ZFSVolume) error {
	volume := vol.Spec.PoolName + "/" + vol.Name

	if err := getVolume(volume); err != nil {
		var args []string
		if vol.Spec.VolumeType == VolTypeDataset {
			args = buildDatasetCreateArgs(vol)
		} else {
			args = buildZvolCreateArgs(vol)
		}
		cmd := exec.Command(ZFSVolCmd, args...)
		out, err := cmd.CombinedOutput()

		if err != nil {
			klog.Errorf(
				"zfs: could not create volume %v cmd %v error: %s", volume, args, string(out),
			)
			return err
		}
		klog.Infof("created volume %s", volume)
	} else if err == nil {
		klog.Infof("using existing volume %v", volume)
	}

	return nil
}
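
// A minimal usage sketch, assuming a hypothetical ZFSVolume object:
//
//	vol := &apis.ZFSVolume{}
//	vol.Name = "pvc-123"
//	vol.Spec.PoolName = "zfspv-pool"
//	vol.Spec.Capacity = "4294967296"
//	if err := CreateVolume(vol); err != nil {
//		klog.Errorf("create failed: %v", err)
//	}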

// CreateClone creates clone for the zvol/dataset as per
// info provided in ZFSVolume object
func CreateClone(vol *apis.ZFSVolume) error {
	volume := vol.Spec.PoolName + "/" + vol.Name

	if srcVol, ok := vol.Labels[ZFSSrcVolKey]; ok {
		// datasource is volume, create the snapshot first
		snap := &apis.ZFSSnapshot{}
		snap.Name = vol.Name // use volname as snapname
		snap.Spec = vol.Spec
		// add src vol name
		snap.Labels = map[string]string{ZFSVolKey: srcVol}

		klog.Infof("creating snapshot %s@%s for the clone %s", srcVol, snap.Name, volume)

		err := CreateSnapshot(snap)
		if err != nil {
			klog.Errorf(
				"zfs: could not create snapshot for the clone vol %s snap %s err %v", volume, snap.Name, err,
			)
			return err
		}
	}

	if err := getVolume(volume); err != nil {
		args := buildCloneCreateArgs(vol)
		cmd := exec.Command(ZFSVolCmd, args...)
		out, err := cmd.CombinedOutput()

		if err != nil {
			klog.Errorf(
				"zfs: could not clone volume %v cmd %v error: %s", volume, args, string(out),
			)
			return err
		}
		klog.Infof("created clone %s", volume)
|
|
|
	} else if err == nil {
		klog.Infof("using existing clone volume %v", volume)
	}

	if vol.Spec.FsType == "xfs" {
		device := ZFSDevPath + volume
		return xfs.GenerateUUID(device)
	}

	if vol.Spec.FsType == "btrfs" {
		device := ZFSDevPath + volume
		return btrfs.GenerateUUID(device)
	}
	return nil
}
// SetDatasetMountProp sets mountpoint for the volume
func SetDatasetMountProp(volume string, mountpath string) error {
	var ZFSVolArg []string

	mountProperty := "mountpoint=" + mountpath
	ZFSVolArg = append(ZFSVolArg, ZFSSetArg, mountProperty, volume)

	cmd := exec.Command(ZFSVolCmd, ZFSVolArg...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		klog.Errorf("zfs: could not set mountpoint on dataset %v cmd %v error: %s",
			volume, ZFSVolArg, string(out))
		return fmt.Errorf("could not set the mountpoint, %s", string(out))
	}
	return nil
}
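
// For reference, a minimal sketch of the shell command the args above
// amount to, assuming ZFSVolCmd is the `zfs` binary and ZFSSetArg is
// "set" (the pool, volume, and path below are hypothetical):
//
//	zfs set mountpoint=/var/lib/kubelet/pods/<uid>/volumes/.../mount zfspv-pool/pvc-xxxx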

// MountZFSDataset mounts the dataset to the given mountpoint
func MountZFSDataset(vol *apis.ZFSVolume, mountpath string) error {
	volume := vol.Spec.PoolName + "/" + vol.Name

	/* feat(zfspv): handling unmounted volume
	 *
	 * There can be cases where the openebs namespace has been accidentally
	 * deleted (Optoro case: https://mdap.zendesk.com/agent/tickets/963).
	 * There the driver attempted to destroy the dataset: destroy first
	 * unmounts the dataset and then tries to remove it, and the removal
	 * fails as the volume is busy. The manual recovery steps are:
	 *
	 *   The driver might have attempted to destroy the volume before going
	 *   down, which sets mounted to "no" (strange behavior seen on GKE
	 *   Ubuntu 18.04). We have to mount the dataset: go to each node and
	 *   check if there is any unmounted volume
	 *       zfs get mounted
	 *   if there is any unmounted dataset with this option as "no", do:
	 *       mountpath=$(zfs get -Hp -o value mountpoint <dataset name>)
	 *       zfs set mountpoint=none <dataset name>
	 *       zfs set mountpoint=$mountpath <dataset name>
	 *   this will get the dataset mounted again.
	 *
	 * In this case the volume is unmounted while mountpoint is still set to
	 * the mountpath, so if the application pod is deleted later on, it will
	 * try to mount the zfs dataset. Just setting `mountpoint` is not
	 * sufficient if the zfs dataset has been unmounted (via zfs destroy in
	 * this case); we have to explicitly mount the dataset, otherwise the
	 * application would start running without any persistent storage. The
	 * code below automates those manual steps: if the zfs dataset is not
	 * mounted after setting the mountpoint property, attempt to mount it.
	 * This is not a problem for zvols, as the driver does not attempt to
	 * unmount them.
	 *
	 * Also, NodeUnpublish MUST be idempotent: if this RPC failed, or the
	 * CO does not know whether it failed, it can call NodeUnpublishVolume
	 * again, so that case returns success if the volume is not mounted.
	 */
	// set the mountpoint to the path where this volume should be mounted
	err := SetDatasetMountProp(volume, mountpath)
	if err != nil {
		return err
	}

	/*
	 * see if we should attempt to mount the dataset.
	 * Setting the mountpoint is sufficient to mount the zfs dataset,
	 * but if dataset has been unmounted, then setting the mountpoint
	 * is not sufficient, we have to mount the dataset explicitly
	 */
	mounted, err := GetVolumeProperty(vol, "mounted")
	if err != nil {
		return err
	}

	if mounted == "no" {
		var MountVolArg []string
		MountVolArg = append(MountVolArg, "mount", volume)
		cmd := exec.Command(ZFSVolCmd, MountVolArg...)
		out, err := cmd.CombinedOutput()
		if err != nil {
			klog.Errorf("zfs: could not mount the dataset %v cmd %v error: %s",
				volume, MountVolArg, string(out))
			return fmt.Errorf("not able to mount, %s", string(out))
		}
	}

	return nil
}
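
// For reference, the explicit mount fallback above is equivalent to the
// shell command below (dataset name hypothetical):
//
//	zfs mount zfspv-pool/pvc-xxxx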
// SetDatasetLegacyMount sets the dataset mountpoint to legacy if not set
func SetDatasetLegacyMount(vol *apis.ZFSVolume) error {
	if vol.Spec.VolumeType != VolTypeDataset {
		return nil
	}

	prop, err := GetVolumeProperty(vol, "mountpoint")
	if err != nil {
		return err
	}

	if prop != "legacy" {
		// set the mountpoint to legacy
		volume := vol.Spec.PoolName + "/" + vol.Name
		err = SetDatasetMountProp(volume, "legacy")
	}

	return err
}
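
// With mountpoint=legacy, zfs no longer manages the mount itself and the
// dataset is mounted through mount(8) like any other filesystem, e.g.
// (a sketch, paths hypothetical):
//
//	mount -t zfs zfspv-pool/pvc-xxxx /var/lib/kubelet/pods/<uid>/volumes/.../mount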

// GetVolumeProperty gets zfs properties for the volume
func GetVolumeProperty(vol *apis.ZFSVolume, prop string) (string, error) {
	var ZFSVolArg []string
	volume := vol.Spec.PoolName + "/" + vol.Name

	ZFSVolArg = append(ZFSVolArg, ZFSGetArg, "-pH", "-o", "value", prop, volume)

	cmd := exec.Command(ZFSVolCmd, ZFSVolArg...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		klog.Errorf("zfs: could not get %s on dataset %v cmd %v error: %s",
			prop, volume, ZFSVolArg, string(out))
		return "", fmt.Errorf("zfs get %s failed, %s", prop, string(out))
	}
	// trim the trailing newline from the command output
	val := out[:len(out)-1]
	return string(val), nil
}
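
// exampleIsMounted is an illustrative sketch (not part of the driver)
// showing how GetVolumeProperty can be used; the helper name is
// hypothetical. zfs reports the "mounted" property as "yes" or "no".
func exampleIsMounted(vol *apis.ZFSVolume) (bool, error) {
	mounted, err := GetVolumeProperty(vol, "mounted")
	if err != nil {
		return false, err
	}
	return mounted == "yes", nil
}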

// SetVolumeProp sets the volume property
func SetVolumeProp(vol *apis.ZFSVolume) error {
	var err error
	volume := vol.Spec.PoolName + "/" + vol.Name

	if len(vol.Spec.Compression) == 0 &&
		len(vol.Spec.Dedup) == 0 &&
		(vol.Spec.VolumeType != VolTypeDataset ||
			len(vol.Spec.RecordSize) == 0) {
		// nothing to set, just return
		return nil
	}

	/* Case: Restart =>
	 * In this case we get the add event, but we don't know which
	 * property changed while we were down, so we fire the zfs set
	 * command with all the properties present on the ZFSVolume.
	 *
	 * Case: Property Change =>
	 * TODO(pawan) When we get the update event, we make sure at least
	 * one property has changed before adding it to the event queue for
	 * handling. At this stage, since we haven't stored the old
	 * ZFSVolume object (it would be too heavy), we fire the set
	 * command with all the properties present in the ZFSVolume object,
	 * since it is guaranteed that at least one property has changed.
	 */
	args := buildVolumeSetArgs(vol)
	cmd := exec.Command(ZFSVolCmd, args...)
	out, err := cmd.CombinedOutput()

	if err != nil {
		klog.Errorf(
			"zfs: could not set property on volume %v cmd %v error: %s", volume, args, string(out),
		)
		return err
	}
	klog.Infof("property set on volume %s", volume)

	return err
}
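
// For reference, a sketch of the kind of command the args above produce,
// assuming buildVolumeSetArgs maps the spec fields to the corresponding
// zfs properties (property values and dataset name hypothetical):
//
//	zfs set compression=on dedup=off recordsize=4k zfspv-pool/pvc-xxxx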

// DestroyVolume deletes the zfs volume
func DestroyVolume(vol *apis.ZFSVolume) error {
	volume := vol.Spec.PoolName + "/" + vol.Name

	if err := getVolume(volume); err != nil {
		klog.Errorf(
"destroy: volume %v is not present, error: %s", volume, err.Error(),
|
|
|
|
|
)
|
2019-11-21 19:00:15 +05:30
|
|
|
return nil
|
|
|
|
|
}
|
2019-10-15 22:51:48 +05:30
|
|
|
|
2019-11-21 19:00:15 +05:30
|
|
|
args := buildVolumeDestroyArgs(vol)
|
|
|
|
|
cmd := exec.Command(ZFSVolCmd, args...)
|
|
|
|
|
out, err := cmd.CombinedOutput()
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
2020-06-29 12:18:33 +05:30
|
|
|
klog.Errorf(
|
2019-11-21 19:00:15 +05:30
|
|
|
"zfs: could not destroy volume %v cmd %v error: %s", volume, args, string(out),
|
|
|
|
|
)
|
|
|
|
|
return err
|
2019-09-12 12:32:17 +05:30
|
|
|
}
|
2020-11-11 18:58:25 +05:30
|
|
|
|
|
|
|
|
if srcVol, ok := vol.Labels[ZFSSrcVolKey]; ok {
|
|
|
|
|
// datasource is volume, delete the dependent snapshot
|
|
|
|
|
snap := &apis.ZFSSnapshot{}
|
|
|
|
|
snap.Name = vol.Name // snapname is same as volname
|
|
|
|
|
snap.Spec = vol.Spec
|
|
|
|
|
// add src vol name
|
|
|
|
|
snap.Labels = map[string]string{ZFSVolKey: srcVol}
|
|
|
|
|
|
|
|
|
|
klog.Infof("destroying snapshot %s@%s for the clone %s", srcVol, snap.Name, volume)
|
|
|
|
|
|
|
|
|
|
err := DestroySnapshot(snap)
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
// no need to reconcile as volume has already been deleted
|
|
|
|
|
klog.Errorf(
|
|
|
|
|
"zfs: could not destroy snapshot for the clone vol %s snap %s err %v", volume, snap.Name, err,
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-29 12:18:33 +05:30
|
|
|
klog.Infof("destroyed volume %s", volume)
|
2019-09-12 12:32:17 +05:30
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
}
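
// For reference, the destroy path above amounts to the shell command
// below (dataset name hypothetical). For a clone, the origin snapshot
// <srcvol>@<volname> is destroyed afterwards, since zfs does not allow
// deleting a snapshot that still has a dependent clone:
//
//	zfs destroy zfspv-pool/pvc-xxxx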
// CreateSnapshot creates the zfs volume snapshot
func CreateSnapshot(snap *apis.ZFSSnapshot) error {
	volume := snap.Labels[ZFSVolKey]
	snapDataset := snap.Spec.PoolName + "/" + volume + "@" + snap.Name

	if err := getVolume(snapDataset); err == nil {
		klog.Infof("snapshot already there %s", snapDataset)
		// snapshot already there just return
		return nil
	}

	args := buildZFSSnapCreateArgs(snap)
	cmd := exec.Command(ZFSVolCmd, args...)
	out, err := cmd.CombinedOutput()

	if err != nil {
		klog.Errorf(
"zfs: could not create snapshot %v@%v cmd %v error: %s", volume, snap.Name, args, string(out),
|
|
|
|
|
)
|
|
|
|
|
return err
|
|
|
|
|
}
|
2020-06-29 12:18:33 +05:30
|
|
|
klog.Infof("created snapshot %s@%s", volume, snap.Name)
|
	return nil
}
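
// For reference, the snapshot creation above amounts to (names
// hypothetical):
//
//	zfs snapshot zfspv-pool/pvc-xxxx@snapshot-yyyy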

// DestroySnapshot deletes the zfs volume snapshot
func DestroySnapshot(snap *apis.ZFSSnapshot) error {
	volume := snap.Labels[ZFSVolKey]
	snapDataset := snap.Spec.PoolName + "/" + volume + "@" + snap.Name

	if err := getVolume(snapDataset); err != nil {
		klog.Errorf(
"destroy: snapshot %v is not present, error: %s", volume, err.Error(),
|
|
|
|
|
)
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
args := buildZFSSnapDestroyArgs(snap)
|
|
|
|
|
cmd := exec.Command(ZFSVolCmd, args...)
|
|
|
|
|
out, err := cmd.CombinedOutput()
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
2020-06-29 12:18:33 +05:30
|
|
|
klog.Errorf(
|
"zfs: could not destroy snapshot %v@%v cmd %v error: %s", volume, snap.Name, args, string(out),
|
|
|
|
|
)
|
|
|
|
|
return err
|
|
|
|
|
}
|
2020-06-29 12:18:33 +05:30
|
|
|
klog.Infof("deleted snapshot %s@%s", volume, snap.Name)
|
	return nil
}

// GetVolumeDevPath returns devpath for the given volume
func GetVolumeDevPath(vol *apis.ZFSVolume) (string, error) {
	volume := vol.Spec.PoolName + "/" + vol.Name
	if vol.Spec.VolumeType == VolTypeDataset {
		return volume, nil
	}

	devicePath := ZFSDevPath + volume

	// evaluate the symlink to get the dev path for zvol
	dev, err := filepath.EvalSymlinks(devicePath)
	if err != nil {
		return "", err
	}

	return dev, nil
}
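
// For a zvol, /dev/zvol/<pool>/<volume> is a udev-managed symlink to the
// real block device, which is why the symlink is resolved above, e.g.
// (illustrative):
//
//	/dev/zvol/zfspv-pool/pvc-xxxx -> /dev/zd0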

// ResizeZFSVolume resizes the zfs volume
func ResizeZFSVolume(vol *apis.ZFSVolume, mountpath string, resizefs bool) error {
	volume := vol.Spec.PoolName + "/" + vol.Name
	args := buildVolumeResizeArgs(vol)
	cmd := exec.Command(ZFSVolCmd, args...)
	out, err := cmd.CombinedOutput()

	if err != nil {
		klog.Errorf(
			"zfs: could not resize the volume %v cmd %v error: %s", volume, args, string(out),
		)
		return err
	}

	if resizefs {
		// resize the filesystem so that applications can use the expanded space
		err = handleVolResize(vol, mountpath)
	}

	return err
}
|
2020-09-08 13:44:39 +05:30
|
|
|
|
|
|
|
|
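buildVolumeResizeArgs is defined elsewhere in this file, so the exact arguments are not shown here; as a hedged illustration only, at the CLI level a ZVOL resize amounts to setting the volsize property (for a dataset, capacity is enforced through a quota property instead).

```go
// Hedged sketch only: not the actual buildVolumeResizeArgs output.
// The equivalent CLI call for a ZVOL is `zfs set volsize=<bytes> pool/vol`;
// pool/volume names and the size are hypothetical.
func exampleResizeCLI() error {
	args := []string{"set", "volsize=8589934592", "zfspv-pool/pvc-xyz"}
	cmd := exec.Command(ZFSVolCmd, args...)
	if out, err := cmd.CombinedOutput(); err != nil {
		klog.Errorf("zfs: resize failed: %s", string(out))
		return err
	}
	return nil
}
```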
// CreateBackup creates the backup
func CreateBackup(bkp *apis.ZFSBackup) error {
	vol, err := GetZFSVolume(bkp.Spec.VolumeName)
	if err != nil {
		return err
	}

	volume := vol.Spec.PoolName + "/" + vol.Name

	/* create the snapshot for the backup */
	snap := &apis.ZFSSnapshot{}
	snap.Name = bkp.Spec.SnapName
	snap.Spec.PoolName = vol.Spec.PoolName
	snap.Labels = map[string]string{ZFSVolKey: vol.Name}

	err = CreateSnapshot(snap)

	if err != nil {
		klog.Errorf(
			"zfs: could not create snapshot for the backup vol %s snap %s err %v", volume, snap.Name, err,
		)
		return err
	}

	args, err := buildVolumeBackupArgs(bkp, vol)
	if err != nil {
		return err
	}

	cmd := exec.Command("bash", args...)
	out, err := cmd.CombinedOutput()

	if err != nil {
		klog.Errorf(
			"zfs: could not backup the volume %v cmd %v error: %s", volume, args, string(out),
		)
	}

	return err
}
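Likewise, buildVolumeBackupArgs is defined elsewhere in this file; since the command runs through bash, the args presumably form a shell pipeline. A hedged sketch of the usual shape of such a backup follows; the remote address, snapshot name, and pipeline are hypothetical, not the plugin's actual script.

```go
// Hedged sketch only: the actual script comes from buildVolumeBackupArgs.
// Streaming `zfs send` of the backup snapshot over the network is the
// usual shape of such a backup; address and names are hypothetical.
func exampleBackupCLI() error {
	script := "zfs send zfspv-pool/pvc-xyz@snap-1 | nc -w 3 10.0.0.5 9010"
	cmd := exec.Command("bash", "-c", script)
	if out, err := cmd.CombinedOutput(); err != nil {
		klog.Errorf("zfs: backup stream failed: %s", string(out))
		return err
	}
	return nil
}
```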
// DestoryBackup deletes the snapshot created for the backup
func DestoryBackup(bkp *apis.ZFSBackup) error {
	vol, err := GetZFSVolume(bkp.Spec.VolumeName)
	if err != nil {
		if k8serrors.IsNotFound(err) {
			// Volume has been deleted, return
			return nil
		}
		return err
	}

	volume := vol.Spec.PoolName + "/" + vol.Name

	/* destroy the snapshot created for the backup */
	snap := &apis.ZFSSnapshot{}
	snap.Name = bkp.Spec.SnapName
	snap.Spec.PoolName = vol.Spec.PoolName
	snap.Labels = map[string]string{ZFSVolKey: vol.Name}

	err = DestroySnapshot(snap)

	if err != nil {
		klog.Errorf(
			"zfs: could not destroy snapshot for the backup vol %s snap %s err %v", volume, snap.Name, err,
		)
	}

	return err
}
// getDevice waits for the device to be created and returns the devpath
func getDevice(volume string) (string, error) {
	device := ZFSDevPath + volume

	// device should be created within 5 seconds
	timeout := time.After(5 * time.Second)
	for {
		select {
		case <-timeout:
			return "", fmt.Errorf("zfs: not able to get the device: %s", device)
		default:
			if _, err := os.Stat(device); err == nil {
				return device, nil
			}
		}
		time.Sleep(1 * time.Second)
	}
}
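A small usage sketch with a hypothetical volume name: after a zvol is created or received, udev takes a moment to create the /dev/zvol symlink, so callers poll via getDevice before touching the filesystem.

```go
// Hypothetical volume name; getDevice polls /dev/zvol/<volume> once per
// second and gives up after roughly five seconds.
func exampleWaitForDevice() error {
	device, err := getDevice("zfspv-pool/pvc-xyz")
	if err != nil {
		return err // the device node never appeared within the timeout
	}
	klog.Infof("zfs: device %s is ready", device)
	return nil
}
```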
// CreateRestore creates the restore
func CreateRestore(rstr *apis.ZFSRestore) error {
	if len(rstr.VolSpec.PoolName) == 0 {
		// for backward compatibility: older versions of velero
		// will not add the spec in the ZFSRestore object,
		// so query it here and fill in that information
		vol, err := GetZFSVolume(rstr.Spec.VolumeName)
		if err != nil {
			return err
		}
		rstr.VolSpec = vol.Spec
	}

	args, err := buildVolumeRestoreArgs(rstr)
	if err != nil {
		return err
	}

	volume := rstr.VolSpec.PoolName + "/" + rstr.Spec.VolumeName

	cmd := exec.Command("bash", args...)
	out, err := cmd.CombinedOutput()

	if err != nil {
		klog.Errorf(
			"zfs: could not restore the volume %v cmd %v error: %s", volume, args, string(out),
		)
		return err
	}

	/*
	 * need to generate a new uuid for xfs and btrfs volumes
	 * so that we can mount it.
	 */
	if rstr.VolSpec.FsType == "xfs" {
		device, err := getDevice(volume)
		if err != nil {
			return err
		}
		return xfs.GenerateUUID(device)
	}
	if rstr.VolSpec.FsType == "btrfs" {
		device, err := getDevice(volume)
		if err != nil {
			return err
		}
		return btrfs.GenerateUUID(device)
	}

	return nil
}
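The UUID regeneration step matters because a restored xfs or btrfs filesystem carries the same UUID as its source, and xfs in particular refuses to mount a filesystem whose UUID duplicates one already mounted on the node. A hedged sketch of what the lib-csi helpers amount to at the CLI level follows; the device path is hypothetical and this is not the actual lib-csi implementation.

```go
// Hedged sketch of UUID regeneration at the CLI level; the actual
// implementation lives in github.com/openebs/lib-csi. xfs_admin and
// btrfstune are the standard tools; the device path is hypothetical.
func exampleRegenerateUUID() error {
	device := "/dev/zvol/zfspv-pool/pvc-xyz"
	// xfs: `xfs_admin -U generate <device>` writes a fresh UUID
	cmd := exec.Command("xfs_admin", "-U", "generate", device)
	if out, err := cmd.CombinedOutput(); err != nil {
		klog.Errorf("xfs uuid regeneration failed: %s", string(out))
		return err
	}
	// for btrfs the equivalent is: btrfstune -f -u <device>
	return nil
}
```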
// ListZFSPool invokes `zfs list` to list all the available
// pools in the node.
func ListZFSPool() ([]apis.Pool, error) {
	args := []string{
		ZFSListArg, "-s", "name",
		"-o", "name,guid,available",
		"-H", "-p",
	}
	cmd := exec.Command(ZFSVolCmd, args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		klog.Errorf("zfs: could not list zpool cmd %v: %v", args, err)
		return nil, err
	}
	return decodeListOutput(output)
}

// The `zfs list` command lists all the resources, including pools
// and volumes; since pool names cannot contain "/", the function
// below filters out the pools. Sample output of the command:
// $ zfs list -s name -o name,guid,available -H -p
// zfspv-pool	4734063099997348493	103498467328
// zfspv-pool/pvc-be02d230-3738-4de9-8968-70f5d10d86dd	3380225606535803752	4294942720
func decodeListOutput(raw []byte) ([]apis.Pool, error) {
	scanner := bufio.NewScanner(strings.NewReader(string(raw)))
	pools := []apis.Pool{}
	for scanner.Scan() {
		items := strings.Split(strings.TrimSpace(scanner.Text()), "\t")
		if !strings.Contains(items[0], "/") {
			var pool apis.Pool
			pool.Name = items[0]
			pool.UUID = items[1]
			sizeBytes, err := strconv.ParseInt(items[2], 10, 64)
			if err != nil {
				err = fmt.Errorf("cannot get free size for pool %v: %v", pool.Name, err)
				return pools, err
			}
			pool.Free = *resource.NewQuantity(sizeBytes, resource.BinarySI)
			pools = append(pools, pool)
		}
	}
	return pools, nil
}
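A minimal sketch exercising decodeListOutput with the tab-separated sample shown in the comment above (tabs because of the -H flag):

```go
// Feed decodeListOutput the sample output from the comment above.
// Only "zfspv-pool" lacks a "/", so a single Pool entry comes back,
// with Free parsed from the third column.
func exampleDecodeList() {
	raw := []byte("zfspv-pool\t4734063099997348493\t103498467328\n" +
		"zfspv-pool/pvc-be02d230-3738-4de9-8968-70f5d10d86dd\t3380225606535803752\t4294942720\n")
	pools, err := decodeListOutput(raw)
	if err != nil {
		klog.Errorf("zfs: decode failed: %v", err)
		return
	}
	for _, p := range pools {
		klog.Infof("pool %s (guid %s) has %s free", p.Name, p.UUID, p.Free.String())
	}
}
```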