feat(e2e-test): Add e2e-tests for zfs-localpv (#298)

Signed-off-by: w3aman <aman.gupta@mayadata.io>
This commit is contained in:
Aman Gupta 2021-06-09 21:21:39 +05:30 committed by GitHub
parent 53f872fcf1
commit 4e73638b5a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
137 changed files with 8745 additions and 0 deletions

View file

@ -0,0 +1,49 @@
## About this experiment
This experiment verifies runtime modification of zvolume properties which were set via storage-class.
## Supported platforms:
K8s : 1.18+
OS : Ubuntu, CentOS
ZFS : 0.7, 0.8
## Entry-Criteria
- K8s cluster should be in healthy state including all desired nodes in ready state.
- zfs-controller and node-agent daemonset pods should be in running state.
## Steps performed
- Get the zvolume name and then obtain properties like compression, dedup from that zvolume.
- After that update these parameter properties and apply the zvolume yaml.
- Verify update values from zvolume and from node as well where volume was provisioned.
## How to run
- This experiment accepts the parameters in form of kubernetes job environmental variables.
- For running this experiment of run time modification of zv properties, clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then first apply rbac and crds for e2e-framework.
```
kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
```
then update the needed test specific values in run_e2e_test.yml file and create the kubernetes job.
```
kubectl create -f run_e2e_test.yml
```
All the env variables description is provided with the comments in the same file.
After creating the kubernetes job, when the job's pod is instantiated, we can see the logs of that pod which is executing the test-case.
```
kubectl get pods -n e2e
kubectl logs -f <zv-property-runtime-modify-xxxxx-xxxxx> -n e2e
```
To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er ) and check its phase (Running or Completed) and result (Pass or Fail).
```
kubectl get e2er
kubectl get e2er zv-property-runtime-modify -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
kubectl get e2er zv-property-runtime-modify -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
```

View file

@ -0,0 +1,51 @@
---
# Kubernetes Job that runs the zv-property-runtime-modify e2e test.
# Fill in the empty env values below before creating the job
# (kubectl create -f run_e2e_test.yml).
apiVersion: batch/v1
kind: Job
metadata:
  generateName: zv-property-runtime-modify-
  namespace: e2e
spec:
  template:
    metadata:
      labels:
        test: zv-property-runtime-modify
    spec:
      serviceAccountName: e2e
      restartPolicy: Never
      containers:
        - name: ansibletest
          image: openebs/zfs-localpv-e2e:ci
          imagePullPolicy: IfNotPresent
          env:
            # Ansible output plugin used for the playbook run logs
            - name: ANSIBLE_STDOUT_CALLBACK
              value: default
            - name: APP_NAMESPACE  ## Namespace in which application is deployed
              value: ''
            - name: APP_LABEL  ## Application label
              value: ''
            - name: FILE_SYSTEM_TYPE  ## Give the file_system_name (values: zfs, ext4 or xfs)
              value: ''
            - name: ZFS_OPERATOR_NAMESPACE  ## Namespace in which all the resources created by zfs driver will be present
              value: ''  ## for e.g. zfsvolume (zv) will be in this namespace
            - name: APP_PVC  ## PersistentVolumeClaim Name for the application
              value: ''
            ## Give values to modify the zvolume parameters value at runtime
            ## Supported values ("on", "off", "lzjb", "gzip", "gzip-[1-9]", "zle" and "lz4")
            - name: NEW_COMPRESSION_PARAMETER
              value: ''
            ## supported values ("on" and "off")
            - name: NEW_DEDUP_PARAMETER
              value: ''
            ## Provide value of zpool name from which desired dataset/zvolume is provisioned
            - name: ZPOOL_NAME
              value: ''
          command: ["/bin/bash"]
          args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zv-property-runtime-modify/test.yml -i /etc/ansible/hosts -vv; exit 0"]

View file

@ -0,0 +1,188 @@
---
# Verifies runtime modification of zvolume properties (compression, dedup)
# that were originally set via the storage-class: the zv CR is patched with
# new values, then the change is checked both on the zv resource and on the
# node (via a privileged daemonset pod running zfsutils) where the
# dataset/zvolume is provisioned.
- hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    - test_vars.yml

  tasks:
    - block:

          ## Generating the testname for zv property runtime modify test
        - include_tasks: /e2e-tests/hack/create_testname.yml

          ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'SOT'

        - block:

            - name: Update the daemonset spec template with test specific values
              template:
                src: zv_property_ds.j2
                dest: zv_property_ds.yml

            - name: Create a daemonset with privileged access to verify zvol properties at node level
              shell: >
                kubectl create -f ./zv_property_ds.yml
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            # sort | uniq collapses the per-pod phases; a single 'Running'
            # line means every ds pod is running.
            - name: Confirm that the ds pods are running on all nodes
              shell: >
                kubectl get pod -l test=zv-property-modify-{{ fs_type }}
                --no-headers -o custom-columns=:status.phase | sort | uniq
              args:
                executable: /bin/bash
              register: result
              until: "result.stdout == 'Running'"
              delay: 5
              retries: 20

            - name: Get the zvolume name
              shell: >
                kubectl get pvc {{ pvc_name }} -n {{ app_ns }} --no-headers
                -o custom-columns=:.spec.volumeName
              args:
                executable: /bin/bash
              register: zvol_name

            - name: Record the zvolume name
              set_fact:
                zv_name: "{{ zvol_name.stdout }}"

            - name: Get the node name on which volume is provisioned
              shell: >
                kubectl get zv {{ zvol_name.stdout }} -n {{ zfs_operator_ns }} --no-headers
                -o custom-columns=:.spec.ownerNodeID
              args:
                executable: /bin/bash
              register: vol_node_name

            - name: Get the daemonset pod name which is scheduled on the same node as of volume node
              shell: >
                kubectl get pod -l test=zv-property-modify-{{ fs_type }} --no-headers
                -o jsonpath='{.items[?(@.spec.nodeName=="{{ vol_node_name.stdout }}")].metadata.name}'
              args:
                executable: /bin/bash
              register: ds_pod_name

            - name: Record the daemonset pod name scheduled on the same node with application pod
              set_fact:
                ds_pod: "{{ ds_pod_name.stdout }}"

            - name: Get the compression parameter value from the zvolume
              shell: >
                kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers
                -o custom-columns=:.spec.compression
              args:
                executable: /bin/bash
              register: compress_val

            - name: Get the Dedup parameter value from the zvolume
              shell: >
                kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers
                -o custom-columns=:.spec.dedup
              args:
                executable: /bin/bash
              register: dedup_val

            - name: Get the yaml file for zvolume
              shell: >
                kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} -o yaml > zv.yml
              args:
                executable: /bin/bash

            - name: Modify the compression parameter value
              replace:
                path: zv.yml
                regexp: 'compression: "{{ compress_val.stdout }}"'
                replace: 'compression: "{{ new_compress_val }}"'

            - name: Modify the dedup parameter value
              replace:
                path: zv.yml
                regexp: 'dedup: "{{ dedup_val.stdout }}"'
                replace: 'dedup: "{{ new_dedup_val }}"'

            - name: Apply the modified yaml to update the new value of zvolume parameters
              shell: >
                kubectl apply -f zv.yml
              args:
                executable: /bin/bash
              register: result
              failed_when: "result.rc != 0"

            # Note: `until` is already a conditional; compare bare variables
            # instead of nesting "{{ }}" (templating-in-conditional).
            - name: Verify that compression parameter value is modified in zvolume
              shell: >
                kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers
                -o custom-columns=:.spec.compression
              args:
                executable: /bin/bash
              register: modified_compress_val
              until: modified_compress_val.stdout == new_compress_val
              delay: 2
              retries: 20

            - name: Verify that compression parameter value is modified in dataset/zvolume on node
              shell: >
                kubectl exec -ti {{ ds_pod }} -- bash -c 'zfs get all {{ zpool_name }}/{{ zv_name }} | grep compression'
              args:
                executable: /bin/bash
              register: modified_compress_val
              until: "new_compress_val in modified_compress_val.stdout"
              delay: 2
              retries: 20

            - name: Verify that dedup parameter value is modified in zvolume
              shell: >
                kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers
                -o custom-columns=:.spec.dedup
              args:
                executable: /bin/bash
              register: modified_dedup_val
              until: modified_dedup_val.stdout == new_dedup_val
              delay: 2
              retries: 20

            - name: Verify that dedup parameter value is modified in dataset/zvolume on node
              shell: >
                kubectl exec -ti {{ ds_pod }} -- bash -c 'zfs get all {{ zpool_name }}/{{ zv_name }} | grep dedup'
              args:
                executable: /bin/bash
              register: modified_dedup_val
              until: "new_dedup_val in modified_dedup_val.stdout"
              delay: 2
              retries: 20

            - set_fact:
                flag: "Pass"

      rescue:
        - set_fact:
            flag: "Fail"

      always:

        - name: Get the name of daemonset
          shell: >
            kubectl get ds -n e2e -o jsonpath='{.items[?(@.spec.selector.matchLabels.test=="zv-property-modify-{{ fs_type }}")].metadata.name}'
          args:
            executable: /bin/bash
          register: ds_name

        - name: Delete the daemonset with privileged access to verify zvol properties at node level
          shell: >
            kubectl delete ds {{ ds_name.stdout }} -n e2e
          args:
            executable: /bin/bash
          register: status
          failed_when: "status.rc != 0"

          ## Record EOT (end of test) in e2e result e2e-cr (e2e-custom-resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'EOT'

View file

@ -0,0 +1,17 @@
## Variables for the zv-property-runtime-modify e2e test; all values except
## test_name are read from the kubernetes job's environment (run_e2e_test.yml).
test_name: zv-property-runtime-modify
## Namespace in which the application is deployed
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
## Label selector of the application pod
app_label: "{{ lookup('env','APP_LABEL') }}"
## File system type of the volume (zfs, ext4 or xfs)
fs_type: "{{ lookup('env','FILE_SYSTEM_TYPE') }}"
## Namespace where zfs-driver resources (e.g. the zv custom resource) live
zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}"
## PersistentVolumeClaim name of the application
pvc_name: "{{ lookup('env','APP_PVC') }}"
## New compression value to set at runtime ("on", "off", "lzjb", "gzip", "gzip-[1-9]", "zle", "lz4")
new_compress_val: "{{ lookup('env','NEW_COMPRESSION_PARAMETER') }}"
## New dedup value to set at runtime ("on" or "off")
new_dedup_val: "{{ lookup('env','NEW_DEDUP_PARAMETER') }}"
## Name of the zpool backing the dataset/zvolume
zpool_name: "{{ lookup('env','ZPOOL_NAME') }}"

View file

@ -0,0 +1,34 @@
---
# Jinja2 template (rendered by the playbook's `template` task) for a
# privileged daemonset whose pods run zfsutils on every node, so the test
# can `kubectl exec` zfs commands to verify zvolume properties at node level.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  generateName: zv-property-modify-{{ fs_type }}-
spec:
  selector:
    matchLabels:
      test: zv-property-modify-{{ fs_type }}
  template:
    metadata:
      labels:
        test: zv-property-modify-{{ fs_type }}
    spec:
      containers:
        - name: zfsutils
          image: quay.io/w3aman/zfsutils:ci
          imagePullPolicy: IfNotPresent
          # Keep the pod alive long enough for the test to exec into it
          command: ['sh', '-c', 'echo Hello! && sleep 1800']
          volumeMounts:
            - name: udev
              mountPath: /run/udev
            - name: device
              mountPath: /dev
          # privileged is required for zfs commands against host devices
          securityContext:
            privileged: true
          tty: true
      volumes:
        - hostPath:
            path: /run/udev
          name: udev
        - hostPath:
            path: /dev
          name: device