feat(e2e-test): Add e2e-tests for zfs-localpv (#298)

Signed-off-by: w3aman <aman.gupta@mayadata.io>
Aman Gupta 2021-06-09 21:21:39 +05:30 committed by GitHub
parent 53f872fcf1
commit 4e73638b5a
137 changed files with 8745 additions and 0 deletions


@@ -0,0 +1,64 @@
## About this experiment
This experiment creates a volume snapshot of a zfs-localpv volume, which can later be used to create a clone. The snapshot is created in the same namespace as the application PVC. Note that this experiment scales the application down before taking the snapshot so that the snapshot is application-consistent; after the snapshot is created, the application is scaled back up.
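For reference, the snapshot object is rendered from the volume_snapshot.j2 template included in this change; with illustrative values filled in (all names below are placeholders, not defaults), it looks roughly like this:
```
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: zfspv-snap            # SNAPSHOT_NAME passed to the test job
  namespace: app-busybox-ns   # namespace of the application PVC
spec:
  volumeSnapshotClassName: zfspv-snapclass   # SNAPSHOT_CLASS
  source:
    persistentVolumeClaimName: busybox-pvc   # APP_PVC
```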
## Supported platforms:
- K8s: 1.18+
- OS: Ubuntu, CentOS
- ZFS: 0.7, 0.8
## Entry-Criteria
- K8s cluster should be in a healthy state, with all desired nodes in Ready state.
- zfs-controller and node-agent daemonset pods should be in Running state.
- Application should be deployed successfully, consuming the zfs-localpv storage.
- A volume snapshot class for the ZFS CSI driver should be present so the snapshot can be created (a quick verification sketch follows this list).
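A quick manual check of these criteria could look like the sketch below; the namespace and pod names of the zfs-localpv driver depend on how it was installed, so adjust them for your setup:
```
kubectl get nodes
# zfs-localpv controller and node-agent pods (namespace varies by install, e.g. kube-system or openebs)
kubectl get pods -A | grep -E 'zfs-controller|zfs-node'
kubectl get volumesnapshotclass
```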
## Steps performed
This experiment covers both provisioning and deprovisioning of the volume snapshot, but performs only one of these per run, based on the ACTION env value (`provision` or `deprovision`).
Provision:
- Check the application pod status; it should be in Running state.
- If the DATA_PERSISTENCE check is enabled, dump some test data into the application pod's mount point.
- Check that the volume snapshot class is present.
- Scale down the application and wait until its pod terminates successfully.
- Create the volume snapshot in the application namespace itself.
- Check the created snapshot resource and make sure its readyToUse field is true (a manual check is sketched below).
- Scale the application up again.
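The readiness check in the steps above mirrors what the playbook does; a minimal manual equivalent, using the SNAPSHOT_NAME, APP_NAMESPACE and ZFS_OPERATOR_NAMESPACE values passed to the job, is:
```
kubectl get volumesnapshot.snapshot <SNAPSHOT_NAME> -n <APP_NAMESPACE> -o jsonpath='{.status.readyToUse}'
# the corresponding zfssnap resource created by the driver should reach the Ready state
kubectl get zfssnap -n <ZFS_OPERATOR_NAMESPACE>
```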
Deprovision:
- Delete the volume snapshot from the application namespace.
- Verify that the volume snapshot content is no longer present (see the verification commands below).
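These deletions can be confirmed manually with checks equivalent to what the playbook performs:
```
# neither the snapshot nor its bound volumesnapshotcontent should be listed anymore
kubectl get volumesnapshot.snapshot -n <APP_NAMESPACE>
kubectl get volumesnapshotcontent
```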
## How to run
- This experiment accepts its parameters in the form of Kubernetes job environment variables.
- To run this zfspv snapshot experiment, clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and first apply the RBAC and CRDs for the e2e framework.
```
kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
```
Then update the required test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
```
kubectl create -f run_e2e_test.yml
```
Descriptions of all the env variables are provided as comments in the same file; an illustrative set of values is shown below.
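For instance, a run against a busybox application might use env values along these lines (all values are illustrative, not defaults):
```
env:
  - name: APP_NAMESPACE
    value: 'app-busybox-ns'
  - name: APP_PVC
    value: 'busybox-pvc'
  - name: APP_LABEL
    value: 'app=busybox'
  - name: ZFS_OPERATOR_NAMESPACE
    value: 'openebs'
  - name: SNAPSHOT_CLASS
    value: 'zfspv-snapclass'
  - name: SNAPSHOT_NAME
    value: 'zfspv-snap'
  - name: ACTION
    value: 'provision'
  - name: DATA_PERSISTENCE
    value: 'busybox'
```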
After the job is created and its pod is instantiated, we can follow the logs of the pod that executes the test case.
```
kubectl get pods -n e2e
kubectl logs -f <zfspv-snapshot-xxxxx-xxxxx> -n e2e
```
To get the test case result, fetch the corresponding e2e custom resource `e2eresult` (short name: `e2er`) and check its phase (Running or Completed) and result (Pass or Fail).
```
kubectl get e2er
kubectl get e2er zfspv-snapshot -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
kubectl get e2er zfspv-snapshot -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
```


@@ -0,0 +1,65 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zfspv-snapshot-clone
  namespace: e2e
data:
  parameters.yml: |

---
apiVersion: batch/v1
kind: Job
metadata:
  generateName: zfspv-snapshot-
  namespace: e2e
spec:
  template:
    metadata:
      labels:
        test: zfspv-snapshot
    spec:
      serviceAccountName: e2e
      restartPolicy: Never
      containers:
        - name: ansibletest
          image: openebs/zfs-localpv-e2e:ci
          imagePullPolicy: IfNotPresent
          env:
            - name: ANSIBLE_STDOUT_CALLBACK
              value: default

            - name: APP_NAMESPACE            ## Namespace in which application is deployed
              value: ''

            - name: APP_PVC                  ## PersistentVolumeClaim Name for the application
              value: ''

            - name: APP_LABEL                ## Label value of the application
              value: ''

            - name: ZFS_OPERATOR_NAMESPACE   ## Namespace in which all the resources created by zfs driver will be present
              value: ''                      ## for e.g. zfsvolume (zv) will be in this namespace

            - name: SNAPSHOT_CLASS           ## Name of zfspv volumesnapshotclass
              value: ''

            - name: SNAPSHOT_NAME            ## Snapshot will be created with this name in application namespace
              value: ''

            - name: ACTION                   ## Use 'deprovision' for snapshot cleanup
              value: 'provision'

            - name: DATA_PERSISTENCE         ## Give values according to the application
              value: ''                      ## For `Busybox` : `busybox` & For `Percona` : `mysql`

          command: ["/bin/bash"]
          args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-snapshot/test.yml -i /etc/ansible/hosts -vv; exit 0"]

          volumeMounts:
            - name: parameters
              mountPath: /mnt/
      volumes:
        - name: parameters
          configMap:
            name: zfspv-snapshot-clone


@@ -0,0 +1,228 @@
- hosts: localhost
  connection: local
  gather_facts: False

  vars_files:
    - test_vars.yml
    - /mnt/parameters.yml

  tasks:
    - block:

        ## Generating the testname for zfspv snapshot e2e-test
        - include_tasks: /e2e-tests/hack/create_testname.yml

        ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'SOT'
        - block:
            - name: Get the name of the application pod
              shell: >
                kubectl get pod -n {{ app_ns }} -l {{ app_label }}
                --no-headers -o custom-columns=:.metadata.name | shuf -n1
              args:
                executable: /bin/bash
              register: app_pod_name

            - name: Check if the application pod is in Running state
              shell: >
                kubectl get pods {{ app_pod_name.stdout }} -n {{ app_ns }}
                --no-headers -o custom-columns=:.status.phase
              args:
                executable: /bin/bash
              register: app_pod_status
              failed_when: "'Running' not in app_pod_status.stdout"

            - block:
                - name: Create some test data into the application
                  include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
                  vars:
                    status: 'LOAD'
                    ns: "{{ app_ns }}"
                    pod_name: "{{ app_pod_name.stdout }}"
              when: data_persistence == 'busybox'

            - block:
                - name: Create some test data into the application
                  include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml"
                  vars:
                    status: 'LOAD'
                    ns: "{{ app_ns }}"
                    pod_name: "{{ app_pod_name.stdout }}"
              when: data_persistence == 'mysql'
            - name: Update the snapshot template with the test-specific variables
              template:
                src: volume_snapshot.j2
                dest: volume_snapshot.yml

            - name: Check if the volume snapshot class is present
              shell: >
                kubectl get volumesnapshotclass
              args:
                executable: /bin/bash
              register: snapshot_class_status
              failed_when: "snapshot_class not in snapshot_class_status.stdout"

            - name: Get the application deployment name
              shell: >
                kubectl get deployment -n {{ app_ns }} -l {{ app_label }} --no-headers
                -o custom-columns=:.metadata.name
              args:
                executable: /bin/bash
              register: app_deployment_name

            - name: Get the replica count of the application deployment
              shell: >
                kubectl get deployment {{ app_deployment_name.stdout }} -n {{ app_ns }} --no-headers
                -o custom-columns=:.spec.replicas
              args:
                executable: /bin/bash
              register: replica_count

            - name: Scale down the application before taking the zfs volume snapshot
              shell: >
                kubectl scale deployment/{{ app_deployment_name.stdout }} -n {{ app_ns }} --replicas=0
              args:
                executable: /bin/bash

            - name: Verify that the modified replica count is zero
              shell: >
                kubectl get deployment {{ app_deployment_name.stdout }} -n {{ app_ns }} --no-headers
                -o custom-columns=:.spec.replicas
              args:
                executable: /bin/bash
              register: modify_replica_count
              until: "modify_replica_count.stdout == \"0\""
              delay: 3
              retries: 60

            - name: Verify that the application pod is not present after scaling down the deployment
              shell: >
                kubectl get pods -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: app_pod_status
              until: "app_pod_name.stdout not in app_pod_status.stdout"
              delay: 3
              retries: 60

            ## The check above only confirms that one pod has terminated successfully;
            ## with shared-mount support other pods may not terminate at the same time,
            ## so wait an extra 30 seconds to avoid that condition.
            - name: Manual wait for some time
              shell: sleep 30

            - name: Create the zfspv volume snapshot
              shell: >
                kubectl create -f volume_snapshot.yml
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"
            - name: Get the uid of the snapshot taken
              shell: >
                kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }}
                --no-headers -o custom-columns=:.metadata.uid
              args:
                executable: /bin/bash
              register: snap_uid

            - set_fact:
                snapshot_uid: "{{ snap_uid.stdout }}"

            - name: Confirm that volumesnapshot {{ snapshot_name }} is ready to use
              shell: >
                kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }}
                -o jsonpath='{.status.readyToUse}'
              args:
                executable: /bin/bash
              register: isSnapshotReady
              until: "isSnapshotReady.stdout == 'true'"
              delay: 3
              retries: 50

            - name: Check the status of the openebs resource for the created snapshot {{ snapshot_name }}
              shell: >
                kubectl get zfssnap -n {{ zfs_operator_ns }}
                -o jsonpath='{.items[?(@.metadata.name=="snapshot-{{ snapshot_uid }}")].status.state}'
              args:
                executable: /bin/bash
              register: zfssnap_status
              until: "zfssnap_status.stdout == 'Ready'"
              delay: 3
              retries: 50

            - name: Scale up the application deployment after taking the zfs volume snapshot
              shell: >
                kubectl scale deployment/{{ app_deployment_name.stdout }} -n {{ app_ns }} --replicas={{ replica_count.stdout }}
              args:
                executable: /bin/bash

            - name: Verify that all the replicas of the application deployment are ready
              shell: >
                kubectl get deployment {{ app_deployment_name.stdout }} -n {{ app_ns }} --no-headers
                -o custom-columns=:.status.readyReplicas
              args:
                executable: /bin/bash
              register: ready_replica_count
              until: ready_replica_count.stdout == replica_count.stdout
              delay: 3
              retries: 50

          when: action == 'provision'
        - block:
            - name: Obtain the SnapshotContent of the snapshot
              shell: >
                kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }}
                --no-headers -o custom-columns=:.status.boundVolumeSnapshotContentName
              args:
                executable: /bin/bash
              register: snapshot_content

            - set_fact:
                snapshotcontent: "{{ snapshot_content.stdout }}"

            - name: Delete the volume snapshot
              shell: >
                kubectl delete volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            - name: Verify that the volume snapshot is deleted successfully
              shell: >
                kubectl get volumesnapshot.snapshot -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: ss_name
              failed_when: "snapshot_name in ss_name.stdout"

            - name: Verify that the volumesnapshotcontent is deleted
              shell: >
                kubectl get volumesnapshotcontent
              args:
                executable: /bin/bash
              register: ss_content
              failed_when: "snapshotcontent in ss_content.stdout"

          when: action == 'deprovision'
        - set_fact:
            flag: "Pass"

      rescue:
        - set_fact:
            flag: "Fail"

      always:
        ## RECORD END-OF-TEST IN e2e RESULT CR
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'EOT'


@@ -0,0 +1,17 @@
test_name: zfspv-snapshot
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
pvc_name: "{{ lookup('env','APP_PVC') }}"
snapshot_class: "{{ lookup('env','SNAPSHOT_CLASS') }}"
snapshot_name: "{{ lookup('env','SNAPSHOT_NAME') }}"
data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}"
zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}"
action: "{{ lookup('env','ACTION') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"


@@ -0,0 +1,9 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: {{ snapshot_name }}
  namespace: {{ app_ns }}
spec:
  volumeSnapshotClassName: {{ snapshot_class }}
  source:
    persistentVolumeClaimName: {{ pvc_name }}