feat(e2e-test): Add e2e-tests for zfs-localpv (#298)
Signed-off-by: w3aman <aman.gupta@mayadata.io>
This commit is contained in: parent 53f872fcf1, commit 4e73638b5a
137 changed files with 8745 additions and 0 deletions
50  e2e-tests/experiments/upgrade-zfs-localpv/README.md  (Normal file)

@@ -0,0 +1,50 @@
## About this experiment

This experiment upgrades the zfs-localpv driver components from any previous version to the latest desired stable version, or to the master-branch CI images.

## Supported platforms:

K8s: 1.18+

OS: Ubuntu, CentOS

ZFS: 0.7, 0.8

## Entry-Criteria

- K8s nodes should be ready.
- Do not provision or deprovision any volumes during the upgrade. If this cannot be guaranteed, scale the openebs-zfs-controller statefulset down to zero replicas, which pauses all provisioning/deprovisioning requests; once the upgrade is done, the upgraded driver will resume processing them (see the sketch after this list).

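A minimal sketch of pausing provisioning this way, assuming the controller statefulset is named openebs-zfs-controller and runs in the kube-system namespace (as in the playbook below); adjust names, namespace, and replica count to your install:

```
# pause provisioning/deprovisioning before starting the upgrade
kubectl scale sts openebs-zfs-controller -n kube-system --replicas=0

# ... perform the upgrade ...

# restore the original replica count afterwards (1 assumed here)
kubectl scale sts openebs-zfs-controller -n kube-system --replicas=1
```
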
## Exit-Criteria

- The zfs-driver should be upgraded to the desired version.
- All the components related to the zfs-localpv driver, including the zfs-controller and the CSI node agents, should be running and upgraded to the desired version as well.
- All the zfs volumes should be healthy, and data written prior to the upgrade should not be impacted (see the check sketched after this list).
- After the upgrade, we should be able to provision volumes and perform other related tasks with no regressions.

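A minimal sketch of spot-checking the last two criteria, assuming the ZFSVolume CRs live in the default `openebs` operator namespace; this only lists resources and is not an exhaustive health check:

```
# ZFSVolume CRs (group zfs.openebs.io) should still be present and healthy after the upgrade
kubectl get zfsvolumes.zfs.openebs.io -n openebs

# the driver components should all be Running at the new version
kubectl get pods -n kube-system -l role=openebs-zfs
```
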
## How to run

- This experiment accepts its parameters as Kubernetes job environment variables.
- To run this upgrade experiment, clone the openebs/zfs-localpv repo (https://github.com/openebs/zfs-localpv) and first apply the RBAC and CRDs for the e2e framework.

```
kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
```

Then update the required test-specific values in the run_e2e_test.yml file and create the Kubernetes job.

```
kubectl create -f run_e2e_test.yml
```

Descriptions of all the env variables are provided as comments in the same file.

After the Kubernetes job is created and its pod is instantiated, we can follow the logs of the pod that is executing the test case.

```
kubectl get pods -n e2e
kubectl logs -f <upgrade-zfs-localpv-xxxxx-xxxxx> -n e2e
```

To get the test-case result, get the corresponding e2e custom resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).

```
kubectl get e2er
kubectl get e2er upgrade-zfs-localpv -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
kubectl get e2er upgrade-zfs-localpv -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
```

41  e2e-tests/experiments/upgrade-zfs-localpv/run_e2e_test.yml  (Normal file)

@@ -0,0 +1,41 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  generateName: upgrade-zfs-localpv-
  namespace: e2e
spec:
  template:
    metadata:
      labels:
        test: zfs-localpv-upgrade
    spec:
      serviceAccountName: e2e
      restartPolicy: Never
      containers:
        - name: ansibletest
          image: openebs/zfs-localpv-e2e:ci
          imagePullPolicy: IfNotPresent
          env:
            - name: ANSIBLE_STDOUT_CALLBACK
              value: default

            ## Give the versioned branch name for the zfs-localpv provisioner from the openebs/zfs-localpv repo
            ## e.g. v1.4.x, v1.5.x, or master
            - name: TO_VERSION_ZFS_BRANCH
              value: ''

            ## Provide the ZFS_DRIVER image to which the upgrade is to be done. To use CI images, use the ci tag.
            ## Give the full image name (e.g. openebs/zfs-driver:<tag>)
            - name: TO_VERSION_ZFS_DRIVER_IMAGE
              value: ''

            # This is the namespace where the zfs driver creates all its resources.
            # By default it is the openebs namespace. If you have been using a different
            # namespace, provide that value. We should never attempt to modify this namespace,
            # as old resources will not be available under the new namespace.
            - name: ZFS_OPERATOR_NAMESPACE
              value: 'openebs'

          command: ["/bin/bash"]
          args: ["-c", "ansible-playbook ./e2e-tests/experiments/upgrade-zfs-localpv/test.yml -i /etc/ansible/hosts -v; exit 0"]

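For illustration, a hedged example of how the two upgrade-target variables might be filled in when upgrading to the master-branch CI build; the values below are placeholders drawn from the comments above, not a recommendation, and any versioned branch with a matching image tag can be used instead.

```
            - name: TO_VERSION_ZFS_BRANCH
              value: 'master'

            - name: TO_VERSION_ZFS_DRIVER_IMAGE
              value: 'openebs/zfs-driver:ci'
```
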
213  e2e-tests/experiments/upgrade-zfs-localpv/test.yml  (Normal file)

@@ -0,0 +1,213 @@
- hosts: localhost
  connection: local
  gather_facts: False

  vars_files:
    - test_vars.yml

  tasks:

    - block:

        ## Generating the test name for the zfs-localpv upgrade test
        - include_tasks: /e2e-tests/hack/create_testname.yml

        ## Record SOT (start of test) in the e2e result e2e-cr (e2e custom resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'SOT'

        - name: Get the list of pods of the zfs-localpv components (zfs-controller and zfs-node agent daemonset)
          shell: >
            kubectl get pods -n kube-system -l role=openebs-zfs
            --no-headers -o custom-columns=:.metadata.name
          args:
            executable: /bin/bash
          register: zfs_localpv_components

        - name: Verify that the zfs-localpv components are in Running state
          shell: >
            kubectl get pods {{ item }} -n kube-system --no-headers -o custom-columns=:status.phase
          args:
            executable: /bin/bash
          register: ZFS_driver_components
          failed_when: "ZFS_driver_components.stdout != 'Running'"
          with_items: "{{ zfs_localpv_components.stdout_lines }}"
          ignore_errors: true

        - name: Get the version tag of the zfs-driver
          shell: >
            kubectl get sts openebs-zfs-controller -n kube-system
            -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-zfs-plugin")].image}' | cut -d ":" -f2
          args:
            executable: /bin/bash
          register: zfs_driver_tag

        - name: Get the replica count of the zfs-controller statefulset
          shell: >
            kubectl get sts openebs-zfs-controller -n kube-system -o jsonpath='{.status.replicas}'
          args:
            executable: /bin/bash
          register: no_of_zfs_ctrl_replicas

        - name: Record the number of zfs-controller replicas
          set_fact:
            zfs_ctrl_replicas: "{{ no_of_zfs_ctrl_replicas.stdout }}"

        - name: Get the list of node-agent pods in the openebs-zfs-node daemonset
          shell: >
            kubectl get po -n kube-system -l app=openebs-zfs-node --no-headers -o custom-columns=:.metadata.name
          args:
            executable: /bin/bash
          register: ds_pods

        - block:

            ## This task creates the new CRDs, as zfs-localpv related CRs are grouped
            ## under `zfs.openebs.io` from the v0.6 release onwards.
            - name: Apply the new CRDs for zfs-localpv
              shell: >
                kubectl apply -f https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/crd.yaml
              args:
                executable: /bin/bash
              register: new_crds
              failed_when: "new_crds.rc != 0"

            ## This task creates new CRs for zfs-volume and zfs-snapshot with the updated
            ## apiVersion `zfs.openebs.io`. Previously this was `openebs.io`.
            - name: Download the upgrade script for creating new CRs with apiVersion `zfs.openebs.io`
              get_url:
                url: https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/upgrade.sh
                dest: ./upgrade.sh
                force: yes
              register: result
              until: "'OK' in result.msg"
              delay: 5
              retries: 3

            - name: Apply the upgrade script
              shell: sh ./upgrade.sh {{ zfs_operator_ns }}
              args:
                executable: /bin/bash

          when:
            - zfs_driver_tag.stdout == "v0.4" or zfs_driver_tag.stdout == "0.4.1" or zfs_driver_tag.stdout == "v0.5"
            - "'v0.4.x' not in to_version_zfs_branch"
            - "'v0.5.x' not in to_version_zfs_branch"

        - name: Download the zfs-operator file
          get_url:
            url: https://raw.githubusercontent.com/openebs/zfs-localpv/{{ to_version_zfs_branch }}/deploy/zfs-operator.yaml
            dest: ./new_zfs_operator.yml
            force: yes
          register: result
          until: "'OK' in result.msg"
          delay: 5
          retries: 3

        - name: Update the openebs zfs-driver image
          replace:
            path: ./new_zfs_operator.yml
            regexp: openebs/zfs-driver:ci
            replace: "{{ lookup('env','TO_VERSION_ZFS_DRIVER_IMAGE') }}"
          when: lookup('env','TO_VERSION_ZFS_DRIVER_IMAGE') | length > 0

        - name: Update the number of zfs-controller statefulset replicas
          replace:
            path: ./new_zfs_operator.yml
            regexp: "replicas: 1"
            replace: "replicas: {{ zfs_ctrl_replicas }}"

        - name: Apply the zfs-operator file to deploy the zfs-driver components at the newer version
          shell:
            kubectl apply -f ./new_zfs_operator.yml
          args:
            executable: /bin/bash

        - name: Wait for some time for the old zfs-driver components to go into Terminating state
          shell: >
            sleep 30

        - name: Verify that the previous zfs-node agent pods are no longer present in the kube-system namespace
          shell: >
            kubectl get pods -n kube-system -l app=openebs-zfs-node --no-headers
          args:
            executable: /bin/bash
          register: new_ds_pods
          until: "'{{ item }}' not in new_ds_pods.stdout"
          delay: 5
          retries: 40
          with_items: "{{ ds_pods.stdout_lines }}"

        - name: Verify that the newer zfs-node agent pods are in Running status
          shell: >
            kubectl get pods -n kube-system -l app=openebs-zfs-node
            --no-headers -o custom-columns=:status.phase | sort | uniq
          args:
            executable: /bin/bash
          register: new_ds_pods
          until: "new_ds_pods.stdout == 'Running'"
          delay: 5
          retries: 30

        - name: Verify that the zfs-node agent daemonset image is upgraded
          shell: >
            kubectl get ds openebs-zfs-node -n kube-system
            -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-zfs-plugin")].image}'
          args:
            executable: /bin/bash
          register: ds_image
          failed_when: ds_image.stdout != to_version_zfs_driver_image

        - name: Check the count of zfs-controller ready replicas
          shell: >
            kubectl get sts openebs-zfs-controller -n kube-system -o jsonpath='{.status.readyReplicas}'
          args:
            executable: /bin/bash
          register: ready_replicas
          until: "ready_replicas.stdout|int == zfs_ctrl_replicas|int"
          delay: 5
          retries: 20

        - name: Verify that the zfs-driver version in the zfs-controller statefulset image is upgraded
          shell: >
            kubectl get sts openebs-zfs-controller -n kube-system
            -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-zfs-plugin")].image}'
          args:
            executable: /bin/bash
          register: zfs_ctrl_image
          failed_when: zfs_ctrl_image.stdout != to_version_zfs_driver_image

        - block:

            - name: Download the cleanup script for removing the resources with old CRs and deleting the old CRDs
              get_url:
                url: https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/cleanup.sh
                dest: ./cleanup.sh
                force: yes
              register: result
              until: "'OK' in result.msg"
              delay: 5
              retries: 3

            - name: Apply the cleanup script
              shell: sh ./cleanup.sh {{ zfs_operator_ns }}
              args:
                executable: /bin/bash

          when:
            - zfs_driver_tag.stdout == "v0.4" or zfs_driver_tag.stdout == "0.4.1" or zfs_driver_tag.stdout == "v0.5"
            - "'v0.4.x' not in to_version_zfs_branch"
            - "'v0.5.x' not in to_version_zfs_branch"

        - set_fact:
            flag: "Pass"

      rescue:
        - set_fact:
            flag: "Fail"

      always:
        ## Record EOT (end of test) in the e2e result e2e-cr (e2e custom resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'EOT'

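For orientation, a hedged, condensed manual equivalent of what this playbook automates, assuming the default `openebs` operator namespace, an upgrade target of the master branch, and a source version old enough (v0.4/v0.5) to need the CR migration and cleanup steps:

```
# 1. install the new CRDs (CRs moved to the zfs.openebs.io group from v0.6)
kubectl apply -f https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/crd.yaml

# 2. migrate existing zfs-volume and zfs-snapshot CRs to the new apiVersion
curl -fsSL https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/upgrade.sh -o upgrade.sh
sh ./upgrade.sh openebs

# 3. roll out the new driver components
kubectl apply -f https://raw.githubusercontent.com/openebs/zfs-localpv/master/deploy/zfs-operator.yaml

# 4. once the new pods are Running with the expected image, remove the old CRs and CRDs
curl -fsSL https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/cleanup.sh -o cleanup.sh
sh ./cleanup.sh openebs
```
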
7  e2e-tests/experiments/upgrade-zfs-localpv/test_vars.yml  (Normal file)

@@ -0,0 +1,7 @@
test_name: upgrade-zfs-localpv

to_version_zfs_branch: "{{ lookup('env','TO_VERSION_ZFS_BRANCH') }}"

to_version_zfs_driver_image: "{{ lookup('env','TO_VERSION_ZFS_DRIVER_IMAGE') }}"

zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}"