feat(crd-gen): automate CRD generation with validations for APIs (#75)

- Running `make manifests` generates the CRD specs under the
  deploy/yamls directory
- Added a buildscripts/update-crd.sh script to automate generating the
  CRDs and the validation schema for each API type

Signed-off-by: prateekpandey14 <prateek.pandey@mayadata.io>
Prateek Pandey 2020-04-01 17:54:20 +05:30 committed by GitHub
parent 8a9ac43ab5
commit 6033789c17
26 changed files with 509 additions and 82 deletions


@ -134,6 +134,10 @@ informer:
--output-package $(SRC_PKG)/generated/informer \
--go-header-file ./buildscripts/custom-boilerplate.go.txt
manifests:
@echo "+ Generating zfs localPV crds"
$(PWD)/buildscripts/update-crd.sh
.PHONY: zfs-driver
zfs-driver: format
@echo "--------------------------------"

buildscripts/update-crd.sh (new executable file, 45 lines)

@ -0,0 +1,45 @@
#!/bin/bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#set -o errexit
set -o nounset
set -o pipefail
## find or download controller-gen
CONTROLLER_GEN=$(which controller-gen)
if [ "$CONTROLLER_GEN" = "" ]
then
# install controller-gen via a throwaway module so the repo's go.mod stays untouched
TMP_DIR=$(mktemp -d);
cd "$TMP_DIR";
go mod init tmp;
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.8;
cd - > /dev/null;
rm -rf "$TMP_DIR";
CONTROLLER_GEN=$(which controller-gen)
fi
if [ "$CONTROLLER_GEN" = "" ]
then
echo "ERROR: failed to get controller-gen";
exit 1;
fi
SCRIPT_ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
$CONTROLLER_GEN crd:trivialVersions=true,preserveUnknownFields=false paths=${SCRIPT_ROOT}/pkg/apis/openebs.io/zfs/v1alpha1 output:crd:artifacts:config=deploy/yamls
# To use your own boilerplate text use:
# --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt


@ -0,0 +1,153 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.8
creationTimestamp: null
name: zfssnapshots.zfs.openebs.io
spec:
group: zfs.openebs.io
names:
kind: ZFSSnapshot
listKind: ZFSSnapshotList
plural: zfssnapshots
shortNames:
- zfssnap
singular: zfssnapshot
preserveUnknownFields: false
scope: Namespaced
validation:
openAPIV3Schema:
description: ZFSSnapshot represents a ZFS Snapshot of the zfsvolume
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: VolumeInfo contains the volume related info for all types of
volumes in ZFSVolume
properties:
capacity:
description: Capacity of the volume
minLength: 1
type: string
compression:
description: Controls the compression algorithm used for this dataset.
Compression specifies whether it should be enabled on the zvol. Setting
compression to on indicates that the current default compression algorithm
should be used. The current default compression algorithm is either
lzjb or, if the lz4_compress feature is enabled, lz4. Changing this
property affects only newly-written data.
pattern: ^(on|off|lzjb|gzip|gzip-[1-9]|zle|lz4)$
type: string
dedup:
description: Deduplication is the process for removing redundant data
at the block level, reducing the total amount of data stored. If a
file system has the dedup property enabled, duplicate data blocks
are removed synchronously. The result is that only unique data is
stored and common components are shared among files. Deduplication
can consume significant processing power (CPU) and memory as well
as generate additional disk IO. Before creating a pool with deduplication
enabled, ensure that you have planned your hardware requirements appropriately
and implemented appropriate recovery practices, such as regular backups.
As a less resource-intensive alternative to deduplication, consider
using compression=lz4.
enum:
- "on"
- "off"
type: string
encryption:
description: Enabling the encryption feature allows for the creation
of encrypted filesystems and volumes. ZFS will encrypt file and zvol
data, file attributes, ACLs, permission bits, directory listings,
FUID mappings, and userused / groupused data. ZFS will not encrypt
metadata related to the pool structure, including dataset and snapshot
names, dataset hierarchy, properties, file size, file holes, and deduplication
tables (though the deduplicated data itself is encrypted).
pattern: ^(on|off|aes-128-[cg]cm|aes-192-[cg]cm|aes-256-[cg]cm)$
type: string
fsType:
description: FsType specifies filesystem type for the zfs volume/dataset
type: string
keyformat:
description: KeyFormat specifies format of the encryption key
type: string
keylocation:
description: KeyLocation is the location of key for the encryption
type: string
ownerNodeID:
minLength: 1
type: string
poolName:
description: poolName specifies the name of the pool where this volume
should be created
minLength: 1
type: string
recordsize:
description: RecordSize specifies the record size for the zfs dataset
minLength: 1
type: string
snapname:
description: SnapName specifies the name of the snapshot where this
volume should be cloned
type: string
thinProvision:
description: ThinProvision specifies whether the volume should be thin provisioned
enum:
- "Yes"
- "no"
type: string
volblocksize:
description: VolBlockSize specifies the block size for the zvol
minLength: 1
type: string
volumeType:
description: volumeType determines whether the volume is of type "DATASET"
or "ZVOL". if fsttype provided in the storageclass is "zfs", then
it will create a volume of type "DATASET". If "ext4", "ext3", "ext2"
or "xfs" is mentioned as fstype in the storageclass, it will create
a volume of type "ZVOL" so that it can be further formatted with the
fstype provided in the storageclass.
enum:
- ZVOL
- DATASET
type: string
required:
- capacity
- ownerNodeID
- poolName
- volumeType
type: object
status:
properties:
state:
type: string
type: object
required:
- spec
- status
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []


@ -0,0 +1,181 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.8
creationTimestamp: null
name: zfsvolumes.zfs.openebs.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.poolName
description: ZFS Pool where the volume is created
name: ZPool
type: string
- JSONPath: .spec.ownerNodeID
description: Node where the volume is created
name: Node
type: string
- JSONPath: .spec.capacity
description: Size of the volume
name: Size
type: string
- JSONPath: .spec.volblocksize
description: volblocksize of volume
name: volblocksize
type: string
- JSONPath: .spec.recordsize
description: recordsize of created zfs dataset
name: recordsize
type: string
- JSONPath: .spec.fsType
description: filesystem created on the volume
name: Filesystem
type: string
- JSONPath: .status.creationTime
description: Timestamp when the volume has been created.
name: CreationTime
type: date
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: zfs.openebs.io
names:
kind: ZFSVolume
listKind: ZFSVolumeList
plural: zfsvolumes
shortNames:
- zfsvol
- zv
singular: zfsvolume
preserveUnknownFields: false
scope: Namespaced
subresources: {}
validation:
openAPIV3Schema:
description: ZFSVolume represents a ZFS based volume
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: VolumeInfo contains the volume related info for all types of
volumes in ZFSVolume
properties:
capacity:
description: Capacity of the volume
minLength: 1
type: string
compression:
description: Controls the compression algorithm used for this dataset.
Compression specifies whether it should be enabled on the zvol. Setting
compression to on indicates that the current default compression algorithm
should be used. The current default compression algorithm is either
lzjb or, if the lz4_compress feature is enabled, lz4. Changing this
property affects only newly-written data.
pattern: ^(on|off|lzjb|gzip|gzip-[1-9]|zle|lz4)$
type: string
dedup:
description: Deduplication is the process for removing redundant data
at the block level, reducing the total amount of data stored. If a
file system has the dedup property enabled, duplicate data blocks
are removed synchronously. The result is that only unique data is
stored and common components are shared among files. Deduplication
can consume significant processing power (CPU) and memory as well
as generate additional disk IO. Before creating a pool with deduplication
enabled, ensure that you have planned your hardware requirements appropriately
and implemented appropriate recovery practices, such as regular backups.
As a less resource-intensive alternative to deduplication, consider
using compression=lz4.
enum:
- "on"
- "off"
type: string
encryption:
description: Enabling the encryption feature allows for the creation
of encrypted filesystems and volumes. ZFS will encrypt file and zvol
data, file attributes, ACLs, permission bits, directory listings,
FUID mappings, and userused / groupused data. ZFS will not encrypt
metadata related to the pool structure, including dataset and snapshot
names, dataset hierarchy, properties, file size, file holes, and deduplication
tables (though the deduplicated data itself is encrypted).
pattern: ^(on|off|aes-128-[cg]cm|aes-192-[cg]cm|aes-256-[cg]cm)$
type: string
fsType:
description: FsType specifies filesystem type for the zfs volume/dataset
type: string
keyformat:
description: KeyFormat specifies format of the encryption key
type: string
keylocation:
description: KeyLocation is the location of key for the encryption
type: string
ownerNodeID:
minLength: 1
type: string
poolName:
description: poolName specifies the name of the pool where this volume
should be created
minLength: 1
type: string
recordsize:
description: RecordSize specifies the record size for the zfs dataset
minLength: 1
type: string
snapname:
description: SnapName specifies the name of the snapshot where this
volume should be cloned
type: string
thinProvision:
description: ThinProvision specifies whether the volume should be thin provisioned
enum:
- "Yes"
- "no"
type: string
volblocksize:
description: VolBlockSize specifies the block size for the zvol
minLength: 1
type: string
volumeType:
description: volumeType determines whether the volume is of type "DATASET"
or "ZVOL". if fsttype provided in the storageclass is "zfs", then
it will create a volume of type "DATASET". If "ext4", "ext3", "ext2"
or "xfs" is mentioned as fstype in the storageclass, it will create
a volume of type "ZVOL" so that it can be further formatted with the
fstype provided in the storageclass.
enum:
- ZVOL
- DATASET
type: string
required:
- capacity
- ownerNodeID
- poolName
- volumeType
type: object
required:
- spec
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []


@ -17,5 +17,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package,register
// Package v1alpha1 is the API version
// +groupName=openebs.io
// +groupName=zfs.openebs.io
package v1alpha1


@ -25,6 +25,8 @@ import (
// +resource:path=zfssnapshot
// ZFSSnapshot represents a ZFS Snapshot of the zfsvolume
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Namespaced,shortName=zfssnap
type ZFSSnapshot struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`


@ -25,6 +25,16 @@ import (
// +resource:path=zfsvolume
// ZFSVolume represents a ZFS based volume
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Namespaced,shortName=zfsvol;zv
// +kubebuilder:printcolumn:name="ZPool",type=string,JSONPath=`.spec.poolName`,description="ZFS Pool where the volume is created"
// +kubebuilder:printcolumn:name="Node",type=string,JSONPath=`.spec.ownerNodeID`,description="Node where the volume is created"
// +kubebuilder:printcolumn:name="Size",type=string,JSONPath=`.spec.capacity`,description="Size of the volume"
// +kubebuilder:printcolumn:name="volblocksize",type=string,JSONPath=`.spec.volblocksize`,description="volblocksize of volume"
// +kubebuilder:printcolumn:name="recordsize",type=string,JSONPath=`.spec.recordsize`,description="recordsize of created zfs dataset"
// +kubebuilder:printcolumn:name="Filesystem",type=string,JSONPath=`.spec.fsType`,description="filesystem created on the volume"
// +kubebuilder:printcolumn:name="CreationTime",type=date,JSONPath=`.status.creationTime`,description="Timestamp when the volume has been created."
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
type ZFSVolume struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@ -71,12 +81,18 @@ type ZFSVolumeList struct {
// VolumeInfo contains the volume related info
// for all types of volumes in ZFSVolume
type VolumeInfo struct {
// OwnerNodeID is the Node ID which
// is the owner of this Volume
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:Required
OwnerNodeID string `json:"ownerNodeID"`
// poolName specifies the name of the
// pool where this volume should be created
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
PoolName string `json:"poolName"`
// SnapName specifies the name of the
@ -84,41 +100,67 @@ type VolumeInfo struct {
SnapName string `json:"snapname,omitempty"`
// Capacity of the volume
// +kubebuilder:validation:MinLength=1
Capacity string `json:"capacity"`
// RecordSize specifies the record size
// for the zfs dataset
// +kubebuilder:validation:MinLength=1
RecordSize string `json:"recordsize,omitempty"`
// VolBlockSize specifies the block size for the zvol
// +kubebuilder:validation:MinLength=1
VolBlockSize string `json:"volblocksize,omitempty"`
// Compression specifies if the it should
// enabled on the zvol
// Controls the compression algorithm used for this dataset. Compression
// specifies whether it should be enabled on the zvol. Setting compression to on
// indicates that the current default compression algorithm should be used.
// The current default compression algorithm is either lzjb or, if the lz4_compress
// feature is enabled, lz4.
// Changing this property affects only newly-written data.
// +kubebuilder:validation:Pattern="^(on|off|lzjb|gzip|gzip-[1-9]|zle|lz4)$"
Compression string `json:"compression,omitempty"`
// Dedup specifies the deduplication
// Deduplication is the process for removing redundant data at the block level,
// reducing the total amount of data stored. If a file system has the dedup property
// enabled, duplicate data blocks are removed synchronously.
// The result is that only unique data is stored and common components are shared among files.
// Deduplication can consume significant processing power (CPU) and memory as well as generate additional disk IO.
// Before creating a pool with deduplication enabled, ensure that you have planned your hardware
// requirements appropriately and implemented appropriate recovery practices, such as regular backups.
// As a less resource-intensive alternative to deduplication, consider using compression=lz4.
// +kubebuilder:validation:Enum=on;off
Dedup string `json:"dedup,omitempty"`
// Encryption specifies the encryption
// should be enabled on the zvol
// Enabling the encryption feature allows for the creation of
// encrypted filesystems and volumes. ZFS will encrypt file and zvol data,
// file attributes, ACLs, permission bits, directory listings, FUID mappings,
// and userused / groupused data. ZFS will not encrypt metadata related to the
// pool structure, including dataset and snapshot names, dataset hierarchy,
// properties, file size, file holes, and deduplication tables
// (though the deduplicated data itself is encrypted).
// +kubebuilder:validation:Pattern="^(on|off|aes-128-[cg]cm|aes-192-[cg]cm|aes-256-[cg]cm)$"
Encryption string `json:"encryption,omitempty"`
// KeyLocation is the location of key
// for the encryption
// KeyLocation is the location of key for the encryption
KeyLocation string `json:"keylocation,omitempty"`
// KeyFormat specifies format of the
// encryption key
// KeyFormat specifies format of the encryption key
KeyFormat string `json:"keyformat,omitempty"`
// ThinProvision specifies whether the volume should be thin provisioned
// +kubebuilder:validation:Enum=Yes;no
ThinProvision string `json:"thinProvision,omitempty"`
// VolumeType specifies whether the volume is
// zvol or a dataset
// volumeType determines whether the volume is of type "DATASET" or "ZVOL".
// If the fstype provided in the storageclass is "zfs", then it will create a
// volume of type "DATASET". If "ext4", "ext3", "ext2" or "xfs" is mentioned as fstype
// in the storageclass, it will create a volume of type "ZVOL" so that it can be
// further formatted with the fstype provided in the storageclass.
// +kubebuilder:validation:Required
// +kubebuilder:validation:Enum=ZVOL;DATASET
VolumeType string `json:"volumeType"`
// FsType specifies filesystem type for the


@ -140,7 +140,7 @@ func defaultGet(
name, namespace string,
opts metav1.GetOptions,
) (*apis.ZFSSnapshot, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSSnapshots(namespace).
Get(name, opts)
}
@ -152,7 +152,7 @@ func defaultList(
namespace string,
opts metav1.ListOptions,
) (*apis.ZFSSnapshotList, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSSnapshots(namespace).
List(opts)
}
@ -166,7 +166,7 @@ func defaultDel(
) error {
deletePropagation := metav1.DeletePropagationForeground
opts.PropagationPolicy = &deletePropagation
err := cli.OpenebsV1alpha1().
err := cli.ZfsV1alpha1().
ZFSSnapshots(namespace).
Delete(name, opts)
return err
@ -179,7 +179,7 @@ func defaultCreate(
vol *apis.ZFSSnapshot,
namespace string,
) (*apis.ZFSSnapshot, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSSnapshots(namespace).
Create(vol)
}
@ -191,7 +191,7 @@ func defaultUpdate(
vol *apis.ZFSSnapshot,
namespace string,
) (*apis.ZFSSnapshot, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSSnapshots(namespace).
Update(vol)
}


@ -140,7 +140,7 @@ func defaultGet(
name, namespace string,
opts metav1.GetOptions,
) (*apis.ZFSVolume, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSVolumes(namespace).
Get(name, opts)
}
@ -152,7 +152,7 @@ func defaultList(
namespace string,
opts metav1.ListOptions,
) (*apis.ZFSVolumeList, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSVolumes(namespace).
List(opts)
}
@ -166,7 +166,7 @@ func defaultDel(
) error {
deletePropagation := metav1.DeletePropagationForeground
opts.PropagationPolicy = &deletePropagation
err := cli.OpenebsV1alpha1().
err := cli.ZfsV1alpha1().
ZFSVolumes(namespace).
Delete(name, opts)
return err
@ -179,7 +179,7 @@ func defaultCreate(
vol *apis.ZFSVolume,
namespace string,
) (*apis.ZFSVolume, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSVolumes(namespace).
Create(vol)
}
@ -191,7 +191,7 @@ func defaultUpdate(
vol *apis.ZFSVolume,
namespace string,
) (*apis.ZFSVolume, error) {
return cli.OpenebsV1alpha1().
return cli.ZfsV1alpha1().
ZFSVolumes(namespace).
Update(vol)
}
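
For context on the OpenebsV1alpha1 → ZfsV1alpha1 rename above, here is a minimal sketch of an external caller using the regenerated clientset; the kubeconfig path and the "openebs" namespace are placeholder assumptions, while NewForConfig, ZfsV1alpha1() and the List signature are taken from this diff.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
)

func main() {
	// Placeholder kubeconfig path; an in-cluster rest.Config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// Generated constructor from the clientset changes in this commit.
	cli, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Group accessor is now ZfsV1alpha1() instead of OpenebsV1alpha1().
	vols, err := cli.ZfsV1alpha1().ZFSVolumes("openebs").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, vol := range vols.Items {
		fmt.Println(vol.Name)
	}
}
```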


@ -19,7 +19,7 @@ limitations under the License.
package internalclientset
import (
openebsv1alpha1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1alpha1"
zfsv1alpha1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@ -27,19 +27,19 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
OpenebsV1alpha1() openebsv1alpha1.OpenebsV1alpha1Interface
ZfsV1alpha1() zfsv1alpha1.ZfsV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
openebsV1alpha1 *openebsv1alpha1.OpenebsV1alpha1Client
zfsV1alpha1 *zfsv1alpha1.ZfsV1alpha1Client
}
// OpenebsV1alpha1 retrieves the OpenebsV1alpha1Client
func (c *Clientset) OpenebsV1alpha1() openebsv1alpha1.OpenebsV1alpha1Interface {
return c.openebsV1alpha1
// ZfsV1alpha1 retrieves the ZfsV1alpha1Client
func (c *Clientset) ZfsV1alpha1() zfsv1alpha1.ZfsV1alpha1Interface {
return c.zfsV1alpha1
}
// Discovery retrieves the DiscoveryClient
@ -58,7 +58,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
}
var cs Clientset
var err error
cs.openebsV1alpha1, err = openebsv1alpha1.NewForConfig(&configShallowCopy)
cs.zfsV1alpha1, err = zfsv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
@ -74,7 +74,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.openebsV1alpha1 = openebsv1alpha1.NewForConfigOrDie(c)
cs.zfsV1alpha1 = zfsv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@ -83,7 +83,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.openebsV1alpha1 = openebsv1alpha1.New(c)
cs.zfsV1alpha1 = zfsv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs


@ -20,8 +20,8 @@ package fake
import (
clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
openebsv1alpha1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1alpha1"
fakeopenebsv1alpha1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1alpha1/fake"
zfsv1alpha1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1alpha1"
fakezfsv1alpha1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@ -71,7 +71,7 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
var _ clientset.Interface = &Clientset{}
// OpenebsV1alpha1 retrieves the OpenebsV1alpha1Client
func (c *Clientset) OpenebsV1alpha1() openebsv1alpha1.OpenebsV1alpha1Interface {
return &fakeopenebsv1alpha1.FakeOpenebsV1alpha1{Fake: &c.Fake}
// ZfsV1alpha1 retrieves the ZfsV1alpha1Client
func (c *Clientset) ZfsV1alpha1() zfsv1alpha1.ZfsV1alpha1Interface {
return &fakezfsv1alpha1.FakeZfsV1alpha1{Fake: &c.Fake}
}


@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
openebsv1alpha1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1alpha1"
zfsv1alpha1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@ -31,7 +31,7 @@ var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
openebsv1alpha1.AddToScheme,
zfsv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition


@ -19,7 +19,7 @@ limitations under the License.
package scheme
import (
openebsv1alpha1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1alpha1"
zfsv1alpha1 "github.com/openebs/zfs-localpv/pkg/apis/openebs.io/zfs/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@ -31,7 +31,7 @@ var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
openebsv1alpha1.AddToScheme,
zfsv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition


@ -24,21 +24,21 @@ import (
testing "k8s.io/client-go/testing"
)
type FakeOpenebsV1alpha1 struct {
type FakeZfsV1alpha1 struct {
*testing.Fake
}
func (c *FakeOpenebsV1alpha1) ZFSSnapshots(namespace string) v1alpha1.ZFSSnapshotInterface {
func (c *FakeZfsV1alpha1) ZFSSnapshots(namespace string) v1alpha1.ZFSSnapshotInterface {
return &FakeZFSSnapshots{c, namespace}
}
func (c *FakeOpenebsV1alpha1) ZFSVolumes(namespace string) v1alpha1.ZFSVolumeInterface {
func (c *FakeZfsV1alpha1) ZFSVolumes(namespace string) v1alpha1.ZFSVolumeInterface {
return &FakeZFSVolumes{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeOpenebsV1alpha1) RESTClient() rest.Interface {
func (c *FakeZfsV1alpha1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}


@ -30,13 +30,13 @@ import (
// FakeZFSSnapshots implements ZFSSnapshotInterface
type FakeZFSSnapshots struct {
Fake *FakeOpenebsV1alpha1
Fake *FakeZfsV1alpha1
ns string
}
var zfssnapshotsResource = schema.GroupVersionResource{Group: "openebs.io", Version: "v1alpha1", Resource: "zfssnapshots"}
var zfssnapshotsResource = schema.GroupVersionResource{Group: "zfs.openebs.io", Version: "v1alpha1", Resource: "zfssnapshots"}
var zfssnapshotsKind = schema.GroupVersionKind{Group: "openebs.io", Version: "v1alpha1", Kind: "ZFSSnapshot"}
var zfssnapshotsKind = schema.GroupVersionKind{Group: "zfs.openebs.io", Version: "v1alpha1", Kind: "ZFSSnapshot"}
// Get takes name of the zFSSnapshot, and returns the corresponding zFSSnapshot object, and an error if there is any.
func (c *FakeZFSSnapshots) Get(name string, options v1.GetOptions) (result *v1alpha1.ZFSSnapshot, err error) {


@ -30,13 +30,13 @@ import (
// FakeZFSVolumes implements ZFSVolumeInterface
type FakeZFSVolumes struct {
Fake *FakeOpenebsV1alpha1
Fake *FakeZfsV1alpha1
ns string
}
var zfsvolumesResource = schema.GroupVersionResource{Group: "openebs.io", Version: "v1alpha1", Resource: "zfsvolumes"}
var zfsvolumesResource = schema.GroupVersionResource{Group: "zfs.openebs.io", Version: "v1alpha1", Resource: "zfsvolumes"}
var zfsvolumesKind = schema.GroupVersionKind{Group: "openebs.io", Version: "v1alpha1", Kind: "ZFSVolume"}
var zfsvolumesKind = schema.GroupVersionKind{Group: "zfs.openebs.io", Version: "v1alpha1", Kind: "ZFSVolume"}
// Get takes name of the zFSVolume, and returns the corresponding zFSVolume object, and an error if there is any.
func (c *FakeZFSVolumes) Get(name string, options v1.GetOptions) (result *v1alpha1.ZFSVolume, err error) {


@ -25,27 +25,27 @@ import (
rest "k8s.io/client-go/rest"
)
type OpenebsV1alpha1Interface interface {
type ZfsV1alpha1Interface interface {
RESTClient() rest.Interface
ZFSSnapshotsGetter
ZFSVolumesGetter
}
// OpenebsV1alpha1Client is used to interact with features provided by the openebs.io group.
type OpenebsV1alpha1Client struct {
// ZfsV1alpha1Client is used to interact with features provided by the zfs.openebs.io group.
type ZfsV1alpha1Client struct {
restClient rest.Interface
}
func (c *OpenebsV1alpha1Client) ZFSSnapshots(namespace string) ZFSSnapshotInterface {
func (c *ZfsV1alpha1Client) ZFSSnapshots(namespace string) ZFSSnapshotInterface {
return newZFSSnapshots(c, namespace)
}
func (c *OpenebsV1alpha1Client) ZFSVolumes(namespace string) ZFSVolumeInterface {
func (c *ZfsV1alpha1Client) ZFSVolumes(namespace string) ZFSVolumeInterface {
return newZFSVolumes(c, namespace)
}
// NewForConfig creates a new OpenebsV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*OpenebsV1alpha1Client, error) {
// NewForConfig creates a new ZfsV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*ZfsV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
@ -54,12 +54,12 @@ func NewForConfig(c *rest.Config) (*OpenebsV1alpha1Client, error) {
if err != nil {
return nil, err
}
return &OpenebsV1alpha1Client{client}, nil
return &ZfsV1alpha1Client{client}, nil
}
// NewForConfigOrDie creates a new OpenebsV1alpha1Client for the given config and
// NewForConfigOrDie creates a new ZfsV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *OpenebsV1alpha1Client {
func NewForConfigOrDie(c *rest.Config) *ZfsV1alpha1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
@ -67,9 +67,9 @@ func NewForConfigOrDie(c *rest.Config) *OpenebsV1alpha1Client {
return client
}
// New creates a new OpenebsV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *OpenebsV1alpha1Client {
return &OpenebsV1alpha1Client{c}
// New creates a new ZfsV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *ZfsV1alpha1Client {
return &ZfsV1alpha1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
@ -87,7 +87,7 @@ func setConfigDefaults(config *rest.Config) error {
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *OpenebsV1alpha1Client) RESTClient() rest.Interface {
func (c *ZfsV1alpha1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}


@ -56,7 +56,7 @@ type zFSSnapshots struct {
}
// newZFSSnapshots returns a ZFSSnapshots
func newZFSSnapshots(c *OpenebsV1alpha1Client, namespace string) *zFSSnapshots {
func newZFSSnapshots(c *ZfsV1alpha1Client, namespace string) *zFSSnapshots {
return &zFSSnapshots{
client: c.RESTClient(),
ns: namespace,


@ -55,7 +55,7 @@ type zFSVolumes struct {
}
// newZFSVolumes returns a ZFSVolumes
func newZFSVolumes(c *OpenebsV1alpha1Client, namespace string) *zFSVolumes {
func newZFSVolumes(c *ZfsV1alpha1Client, namespace string) *zFSVolumes {
return &zFSVolumes{
client: c.RESTClient(),
ns: namespace,


@ -172,9 +172,9 @@ type SharedInformerFactory interface {
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Openebs() zfs.Interface
Zfs() zfs.Interface
}
func (f *sharedInformerFactory) Openebs() zfs.Interface {
func (f *sharedInformerFactory) Zfs() zfs.Interface {
return zfs.New(f, f.namespace, f.tweakListOptions)
}


@ -52,11 +52,11 @@ func (f *genericInformer) Lister() cache.GenericLister {
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=openebs.io, Version=v1alpha1
// Group=zfs.openebs.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("zfssnapshots"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Openebs().V1alpha1().ZFSSnapshots().Informer()}, nil
return &genericInformer{resource: resource.GroupResource(), informer: f.Zfs().V1alpha1().ZFSSnapshots().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("zfsvolumes"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Openebs().V1alpha1().ZFSVolumes().Informer()}, nil
return &genericInformer{resource: resource.GroupResource(), informer: f.Zfs().V1alpha1().ZFSVolumes().Informer()}, nil
}
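
The informer factory follows the same rename. A short sketch of driving the volume informer through the new Zfs() accessor is below, assuming the standard NewSharedInformerFactory/Start helpers that informer-gen generates alongside the interfaces changed here; the 30-second resync period is an arbitrary example.

```go
package example

import (
	"time"

	clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
	informers "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions"
)

// runVolumeInformer wires up the ZFSVolume informer via the renamed accessor.
func runVolumeInformer(cli clientset.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(cli, 30*time.Second)

	// Group accessor is now Zfs() instead of Openebs().
	zvInformer := factory.Zfs().V1alpha1().ZFSVolumes()
	_ = zvInformer.Lister() // typed lister, as used by the controller builders below

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}
```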


@ -16,7 +16,7 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
package openebs
package zfs
import (
internalinterfaces "github.com/openebs/zfs-localpv/pkg/generated/informer/externalversions/internalinterfaces"


@ -61,13 +61,13 @@ func NewFilteredZFSSnapshotInformer(client internalclientset.Interface, namespac
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.OpenebsV1alpha1().ZFSSnapshots(namespace).List(options)
return client.ZfsV1alpha1().ZFSSnapshots(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.OpenebsV1alpha1().ZFSSnapshots(namespace).Watch(options)
return client.ZfsV1alpha1().ZFSSnapshots(namespace).Watch(options)
},
},
&zfsv1alpha1.ZFSSnapshot{},


@ -61,13 +61,13 @@ func NewFilteredZFSVolumeInformer(client internalclientset.Interface, namespace
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.OpenebsV1alpha1().ZFSVolumes(namespace).List(options)
return client.ZfsV1alpha1().ZFSVolumes(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.OpenebsV1alpha1().ZFSVolumes(namespace).Watch(options)
return client.ZfsV1alpha1().ZFSVolumes(namespace).Watch(options)
},
},
&zfsv1alpha1.ZFSVolume{},


@ -85,14 +85,14 @@ func (cb *SnapControllerBuilder) withOpenEBSClient(cs clientset.Interface) *Snap
// withSnapLister fills snap lister to controller object.
func (cb *SnapControllerBuilder) withSnapLister(sl informers.SharedInformerFactory) *SnapControllerBuilder {
snapInformer := sl.Openebs().V1alpha1().ZFSSnapshots()
snapInformer := sl.Zfs().V1alpha1().ZFSSnapshots()
cb.SnapController.snapLister = snapInformer.Lister()
return cb
}
// withSnapSynced adds object sync information in cache to controller object.
func (cb *SnapControllerBuilder) withSnapSynced(sl informers.SharedInformerFactory) *SnapControllerBuilder {
snapInformer := sl.Openebs().V1alpha1().ZFSSnapshots()
snapInformer := sl.Zfs().V1alpha1().ZFSSnapshots()
cb.SnapController.snapSynced = snapInformer.Informer().HasSynced
return cb
}
@ -116,7 +116,7 @@ func (cb *SnapControllerBuilder) withRecorder(ks kubernetes.Interface) *SnapCont
// withEventHandler adds event handlers controller object.
func (cb *SnapControllerBuilder) withEventHandler(cvcInformerFactory informers.SharedInformerFactory) *SnapControllerBuilder {
cvcInformer := cvcInformerFactory.Openebs().V1alpha1().ZFSSnapshots()
cvcInformer := cvcInformerFactory.Zfs().V1alpha1().ZFSSnapshots()
// Set up an event handler for when Snap resources change
cvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cb.SnapController.addSnap,


@ -85,14 +85,14 @@ func (cb *ZVControllerBuilder) withOpenEBSClient(cs clientset.Interface) *ZVCont
// withZVLister fills zv lister to controller object.
func (cb *ZVControllerBuilder) withZVLister(sl informers.SharedInformerFactory) *ZVControllerBuilder {
zvInformer := sl.Openebs().V1alpha1().ZFSVolumes()
zvInformer := sl.Zfs().V1alpha1().ZFSVolumes()
cb.ZVController.zvLister = zvInformer.Lister()
return cb
}
// withZVSynced adds object sync information in cache to controller object.
func (cb *ZVControllerBuilder) withZVSynced(sl informers.SharedInformerFactory) *ZVControllerBuilder {
zvInformer := sl.Openebs().V1alpha1().ZFSVolumes()
zvInformer := sl.Zfs().V1alpha1().ZFSVolumes()
cb.ZVController.zvSynced = zvInformer.Informer().HasSynced
return cb
}
@ -116,7 +116,7 @@ func (cb *ZVControllerBuilder) withRecorder(ks kubernetes.Interface) *ZVControll
// withEventHandler adds event handlers controller object.
func (cb *ZVControllerBuilder) withEventHandler(cvcInformerFactory informers.SharedInformerFactory) *ZVControllerBuilder {
cvcInformer := cvcInformerFactory.Openebs().V1alpha1().ZFSVolumes()
cvcInformer := cvcInformerFactory.Zfs().V1alpha1().ZFSVolumes()
// Set up an event handler for when ZV resources change
cvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cb.ZVController.addZV,