mirror of
https://github.com/TECHNOFAB11/zfs-localpv.git
synced 2025-12-11 22:10:11 +01:00
fix(provisioning): register topologyKeys from driver env (#395)
Signed-off-by: shubham <shubham14bajpai@gmail.com>
This commit is contained in:
parent
37a5cb80e2
commit
a6462c5234
6 changed files with 109 additions and 20 deletions
|
|
@ -1053,6 +1053,12 @@ spec:
|
||||||
value: agent
|
value: agent
|
||||||
- name: OPENEBS_NAMESPACE
|
- name: OPENEBS_NAMESPACE
|
||||||
value: openebs
|
value: openebs
|
||||||
|
- name: ALLOWED_TOPOLOGIES
|
||||||
|
# The desired comma separated keys can be added here,
|
||||||
|
# by default all the node label keys are allowed.
|
||||||
|
# For example:
|
||||||
|
# value: "kubernetes.io/hostname,openebs.io/rack"
|
||||||
|
value: "All"
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: plugin-dir
|
- name: plugin-dir
|
||||||
mountPath: /plugin
|
mountPath: /plugin
|
||||||
|
|
|
||||||
|
|
@ -2020,6 +2020,7 @@ globalDefault: false
|
||||||
description: "This priority class should be used for the OpenEBS ZFS localPV CSI driver controller deployment only."
|
description: "This priority class should be used for the OpenEBS ZFS localPV CSI driver controller deployment only."
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
kind: StatefulSet
|
kind: StatefulSet
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
metadata:
|
metadata:
|
||||||
|
|
@ -2346,6 +2347,12 @@ spec:
|
||||||
value: agent
|
value: agent
|
||||||
- name: OPENEBS_NAMESPACE
|
- name: OPENEBS_NAMESPACE
|
||||||
value: openebs
|
value: openebs
|
||||||
|
- name: ALLOWED_TOPOLOGIES
|
||||||
|
# The desired comma separated keys can be added here,
|
||||||
|
# by default all the node label keys are allowed.
|
||||||
|
# For example:
|
||||||
|
# value: "kubernetes.io/hostname,openebs.io/rack"
|
||||||
|
value: "All"
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: plugin-dir
|
- name: plugin-dir
|
||||||
mountPath: /plugin
|
mountPath: /plugin
|
||||||
|
|
|
||||||
36
docs/faq.md
36
docs/faq.md
|
|
@ -91,7 +91,12 @@ spec:
|
||||||
|
|
||||||
### 6. How to add custom topology key
|
### 6. How to add custom topology key
|
||||||
|
|
||||||
To add custom topology key, we can label all the nodes with the required key and value :-
|
To add custom topology key:
|
||||||
|
* Label the nodes with the required key and value.
|
||||||
|
* Set env variables in the ZFS driver daemonset yaml(openebs-zfs-node), if already deployed, you can edit the daemonSet directly. By default the env is set to `All` which will take the node label keys as allowed topologies.
|
||||||
|
* "openebs.io/nodename" and "openebs.io/nodeid" are added as default topology key.
|
||||||
|
* Create storageclass with above specific labels keys.
|
||||||
|
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ kubectl label node pawan-node-1 openebs.io/rack=rack1
|
$ kubectl label node pawan-node-1 openebs.io/rack=rack1
|
||||||
|
|
@ -101,12 +106,25 @@ $ kubectl get nodes pawan-node-1 --show-labels
|
||||||
NAME STATUS ROLES AGE VERSION LABELS
|
NAME STATUS ROLES AGE VERSION LABELS
|
||||||
pawan-node-1 Ready worker 16d v1.17.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=pawan-node-1,kubernetes.io/os=linux,node-role.kubernetes.io/worker=true,openebs.io/rack=rack1
|
pawan-node-1 Ready worker 16d v1.17.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=pawan-node-1,kubernetes.io/os=linux,node-role.kubernetes.io/worker=true,openebs.io/rack=rack1
|
||||||
|
|
||||||
|
$ kubectl get ds -n kube-system openebs-zfs-node -o yaml
|
||||||
|
...
|
||||||
|
env:
|
||||||
|
- name: OPENEBS_NODE_ID
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: spec.nodeName
|
||||||
|
- name: OPENEBS_CSI_ENDPOINT
|
||||||
|
value: unix:///plugin/csi.sock
|
||||||
|
- name: OPENEBS_NODE_DRIVER
|
||||||
|
value: agent
|
||||||
|
- name: OPENEBS_NAMESPACE
|
||||||
|
value: openebs
|
||||||
|
- name: ALLOWED_TOPOLOGIES
|
||||||
|
value: "openebs.io/rack"
|
||||||
```
|
```
|
||||||
It is recommended to label all the nodes with the same key; they can have different values for the given keys, but all keys should be present on all the worker nodes.
|
It is recommended to label all the nodes with the same key; they can have different values for the given keys, but all keys should be present on all the worker nodes.
|
||||||
|
|
||||||
Once we have labeled the node, we can install the zfs driver. The driver will pick the node labels and add that as the supported topology key. If the driver is already installed and you want to add a new topology information, you can label the node with the topology information and then restart of the ZFSPV CSI driver daemon sets (openebs-zfs-node) are required so that the driver can pick the labels and add them as supported topology keys. We should restart the pod in kube-system namespace with the name as openebs-zfs-node-[xxxxx] which is the node agent pod for the ZFS-LocalPV Driver.
|
Once we have labeled the node, we can install the zfs driver. The driver will pick the keys from env "ALLOWED_TOPOLOGIES" and add that as the supported topology key. If the driver is already installed and you want to add a new topology information, you can edit the ZFS-LocalPV CSI driver daemon sets (openebs-zfs-node).
|
||||||
|
|
||||||
Note that restart of ZFSPV CSI driver daemon sets are must in case, if we are going to use WaitForFirstConsumer as volumeBindingMode in storage class. In case of immediate volume binding mode, restart of daemon set is not a must requirement, irrespective of sequence of labeling the node either prior to install zfs driver or after install. However it is recommended to restart the daemon set if we are labeling the nodes after the installation.
|
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ kubectl get pods -n kube-system -l role=openebs-zfs
|
$ kubectl get pods -n kube-system -l role=openebs-zfs
|
||||||
|
|
@ -140,12 +158,8 @@ spec:
|
||||||
- name: zfs.csi.openebs.io
|
- name: zfs.csi.openebs.io
|
||||||
nodeID: pawan-node-1
|
nodeID: pawan-node-1
|
||||||
topologyKeys:
|
topologyKeys:
|
||||||
- beta.kubernetes.io/arch
|
- openebs.io/nodeid
|
||||||
- beta.kubernetes.io/os
|
- openebs.io/nodename
|
||||||
- kubernetes.io/arch
|
|
||||||
- kubernetes.io/hostname
|
|
||||||
- kubernetes.io/os
|
|
||||||
- node-role.kubernetes.io/worker
|
|
||||||
- openebs.io/rack
|
- openebs.io/rack
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
@ -170,7 +184,7 @@ allowedTopologies:
|
||||||
|
|
||||||
The ZFSPV CSI driver will schedule the PV to the nodes where label "openebs.io/rack" is set to "rack1". If there are multiple nodes qualifying this prerequisite, then it will pick the node which has less number of volumes provisioned for the given ZFS Pool.
|
The ZFSPV CSI driver will schedule the PV to the nodes where label "openebs.io/rack" is set to "rack1". If there are multiple nodes qualifying this prerequisite, then it will pick the node which has less number of volumes provisioned for the given ZFS Pool.
|
||||||
|
|
||||||
Note that if storageclass is using Immediate binding mode and topology key is not mentioned then all the nodes should be labeled using same key, that means, same key should be present on all nodes, nodes can have different values for those keys. If nodes are labeled with different keys i.e. some nodes are having different keys, then ZFSPV's default scheduler can not effictively do the volume count based scheduling. Here, in this case the CSI provisioner will pick keys from any random node and then prepare the preferred topology list using the nodes which has those keys defined and ZFSPV scheduler will schedule the PV among those nodes only.
|
Note that if storageclass is using Immediate binding mode and storageclass allowedTopologies is not mentioned then all the nodes should be labeled using "ALLOWED_TOPOLOGIES" keys, that means, "ALLOWED_TOPOLOGIES" keys should be present on all nodes, nodes can have different values for those keys. If some nodes don't have those keys, then ZFSPV's default scheduler can not effectively do the volume capacity based scheduling. Here, in this case the CSI provisioner will pick keys from any random node and then prepare the preferred topology list using the nodes which has those keys defined and ZFSPV scheduler will schedule the PV among those nodes only.
|
||||||
|
|
||||||
### 7. Why the ZFS volume size is different than the requested size in PVC
|
### 7. Why the ZFS volume size is different than the requested size in PVC
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -189,6 +189,57 @@ allowedTopologies:
|
||||||
- node-2
|
- node-2
|
||||||
```
|
```
|
||||||
|
|
||||||
|
At the same time, you must set env variables in the ZFS-LocalPV CSI driver daemon sets (openebs-zfs-node) so that it can pick the node label as the supported topology. It adds "openebs.io/nodename" as the default topology key. If a key doesn't exist in the node labels when the CSI ZFS driver registers, that key will not be added to the topologyKeys. Multiple keys can be set, separated by commas.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
env:
|
||||||
|
- name: OPENEBS_NODE_NAME
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: spec.nodeName
|
||||||
|
- name: OPENEBS_CSI_ENDPOINT
|
||||||
|
value: unix:///plugin/csi.sock
|
||||||
|
- name: OPENEBS_NODE_DRIVER
|
||||||
|
value: agent
|
||||||
|
- name: OPENEBS_NAMESPACE
|
||||||
|
value: openebs
|
||||||
|
- name: ALLOWED_TOPOLOGIES
|
||||||
|
value: "test1,test2"
|
||||||
|
```
|
||||||
|
|
||||||
|
We can verify that the key has been registered successfully with the ZFS LocalPV CSI Driver by checking the CSI node object yaml:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
$ kubectl get csinodes pawan-node-1 -oyaml
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: CSINode
|
||||||
|
metadata:
|
||||||
|
creationTimestamp: "2020-04-13T14:49:59Z"
|
||||||
|
name: k8s-node-1
|
||||||
|
ownerReferences:
|
||||||
|
- apiVersion: v1
|
||||||
|
kind: Node
|
||||||
|
name: k8s-node-1
|
||||||
|
uid: fe268f4b-d9a9-490a-a999-8cde20c4dadb
|
||||||
|
resourceVersion: "4586341"
|
||||||
|
selfLink: /apis/storage.k8s.io/v1/csinodes/k8s-node-1
|
||||||
|
uid: 522c2110-9d75-4bca-9879-098eb8b44e5d
|
||||||
|
spec:
|
||||||
|
drivers:
|
||||||
|
- name: zfs.csi.openebs.io
|
||||||
|
nodeID: k8s-node-1
|
||||||
|
topologyKeys:
|
||||||
|
- openebs.io/nodename
|
||||||
|
- test1
|
||||||
|
- test2
|
||||||
|
```
|
||||||
|
|
||||||
|
If you want to change the topology keys, just set a new env (ALLOWED_TOPOLOGIES). Check [faq](./faq.md#6-how-to-add-custom-topology-key) for more details.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl edit ds -n kube-system openebs-zfs-node
|
||||||
|
```
|
||||||
|
|
||||||
Here we can have a ZFS Pool of name “zfspv-pool” created on the nvme disks and want to use this high performing ZFS Pool for the applications that need higher IOPS. We can use the above StorageClass to create the PVC and deploy the application using that.
|
Here we can have a ZFS Pool of name “zfspv-pool” created on the nvme disks and want to use this high performing ZFS Pool for the applications that need higher IOPS. We can use the above StorageClass to create the PVC and deploy the application using that.
|
||||||
|
|
||||||
The ZFS-LocalPV driver will create the Volume in the Pool “zfspv-pool” present on the node which will be selected based on the scheduler we chose in the storage class. In the above StorageClass, if the total capacity of provisioned volumes on node-1 is less, it will create the volume on node-1 only. Alternatively, we can use `volumeBindingMode: WaitForFirstConsumer` to let the k8s select the node where the volume should be provisioned.
|
The ZFS-LocalPV driver will create the Volume in the Pool “zfspv-pool” present on the node which will be selected based on the scheduler we chose in the storage class. In the above StorageClass, if the total capacity of provisioned volumes on node-1 is less, it will create the volume on node-1 only. Alternatively, we can use `volumeBindingMode: WaitForFirstConsumer` to let the k8s select the node where the volume should be provisioned.
|
||||||
|
|
@ -202,13 +253,7 @@ pawan@pawan-master:~/pawan$ kubectl label node pawan-node-1 openebs.io/zpool=nvm
|
||||||
node/pawan-node-1 labeled
|
node/pawan-node-1 labeled
|
||||||
```
|
```
|
||||||
|
|
||||||
Now, restart the ZFS-LocalPV Driver (if already deployed, otherwise please ignore) so that it can pick the new node label as the supported topology. Check [faq](./faq.md#6-how-to-add-custom-topology-key) for more details.
|
Add "openebs.io/zpool" to the ZFS-LocalPV CSI driver daemon sets env(ALLOWED_TOPOLOGIES). Now, we can create the StorageClass like this:
|
||||||
|
|
||||||
```
|
|
||||||
$ kubectl delete po -n kube-system -l role=openebs-zfs
|
|
||||||
```
|
|
||||||
|
|
||||||
Now, we can create the StorageClass like this:
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: storage.k8s.io/v1
|
apiVersion: storage.k8s.io/v1
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,4 @@
|
||||||
|
//go:build !ignore_autogenerated
|
||||||
// +build !ignore_autogenerated
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -235,8 +235,24 @@ func (ns *node) NodeGetInfo(
|
||||||
* }
|
* }
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// support all the keys that node has
|
topology := map[string]string{}
|
||||||
topology := node.Labels
|
|
||||||
|
// support topologykeys from env ALLOWED_TOPOLOGIES
|
||||||
|
allowedTopologies := strings.Trim(os.Getenv("ALLOWED_TOPOLOGIES"), " ")
|
||||||
|
if strings.ToLower(allowedTopologies) == "all" {
|
||||||
|
topology = node.Labels
|
||||||
|
} else {
|
||||||
|
allowedKeys := strings.Split(allowedTopologies, ",")
|
||||||
|
for _, key := range allowedKeys {
|
||||||
|
if key != "" {
|
||||||
|
if value, ok := node.Labels[key]; ok {
|
||||||
|
topology[key] = value
|
||||||
|
} else {
|
||||||
|
klog.Warningf("failed to get value for topology key: %s", key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// add driver's topology key if not labelled already
|
// add driver's topology key if not labelled already
|
||||||
if _, ok := topology[zfs.ZFSTopologyKey]; !ok {
|
if _, ok := topology[zfs.ZFSTopologyKey]; !ok {
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue