feat(e2e-test): Add e2e-tests for zfs-localpv (#298)

Signed-off-by: w3aman <aman.gupta@mayadata.io>
Authored by Aman Gupta on 2021-06-09 21:21:39 +05:30, committed by GitHub
parent 53f872fcf1
commit 4e73638b5a
GPG key ID: 4AEE18F83AFDEB23
137 changed files with 8745 additions and 0 deletions

@@ -0,0 +1,65 @@
## About the experiment
- This functional test verifies zfs-localpv's shared mount volume support, where a single volume is mounted by multiple pods. Applications that want to share a volume can use a storage class such as the one below.
```
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-zfspv
parameters:
  shared: "yes"
  fstype: "zfs"
  poolname: "< zpool_name >"
provisioner: zfs.csi.openebs.io
```
Note: The storage class above must be present before running this experiment. It is created as part of the zfs-localpv provisioner experiment. If the zfs-localpv components were not deployed using the e2e-test script located at `openebs/zfs-localpv/e2e-tests/experiment/zfs-localpv-provisioiner`, make sure you create the storage class from the YAML above.
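If you need to create the storage class yourself, save the YAML above to a file (the file name `shared-sc.yaml` below is only an example) and apply it:
```
kubectl apply -f shared-sc.yaml
kubectl get sc openebs-zfspv
```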
## Supported platforms
K8s: 1.18+
OS: Ubuntu, CentOS
ZFS: 0.7, 0.8
## Entry-Criteria
- The K8s cluster should be in a healthy state, with all nodes in the Ready state.
- The zfs-controller and node-agent DaemonSet pods should be in the Running state.
- A storage class with `shared: yes` enabled should be present (see the verification commands below).
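The commands below sketch one way to verify these entry criteria; the driver pods may live in a different namespace (for example `kube-system` or `openebs`) depending on how zfs-localpv was installed:
```
# all nodes should report Ready
kubectl get nodes

# zfs-controller and node-agent (openebs-zfs-node) pods should be Running
kubectl get pods -n kube-system | grep zfs

# the storage class should have the shared parameter set to "yes"
kubectl get sc openebs-zfspv -o jsonpath='{.parameters.shared}'
```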
## Steps performed in this experiment:
1. First, deploy the busybox application using the `shared: yes` enabled storage class.
2. Then dump some dummy data into the application pod's mount point.
3. Scale the busybox deployment replicas so that multiple pods (here, replicas = 2) share the volume.
4. After that, verify data consistency from the scaled application pod: the data should be accessible from both pods, and it should remain intact after the application pod restarts. (An equivalent manual `kubectl` flow is sketched after this list.)
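For reference, the same flow can be reproduced manually with `kubectl`. The deployment name, label, and mount path below come from the manifests in this test; the namespace `app-ns`, the pod-name placeholders, and the file `testdata.txt` are illustrative only:
```
# 1. deploy the busybox application and wait for the pod to come up
kubectl apply -f busybox_share.yml
kubectl get pods -n app-ns -l app=shared-mount

# 2. write some dummy data through the first pod's mount point
kubectl exec -n app-ns <pod-1> -- sh -c 'date > /busybox/testdata.txt'

# 3. scale the deployment so that a second pod shares the same volume
kubectl scale deploy/busybox-shared-mount -n app-ns --replicas=2

# 4. read the data back from the second pod to confirm it is shared
kubectl exec -n app-ns <pod-2> -- cat /busybox/testdata.txt
```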
## How to run
- This experiment accepts its parameters in the form of Kubernetes job environment variables.
- To run this zfspv shared mount experiment, first clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then apply the RBAC and CRDs for the e2e framework.
```
git clone https://github.com/openebs/zfs-localpv.git
kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
```
Then update the needed test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
```
kubectl create -f run_e2e_test.yml
```
Descriptions of all the environment variables are provided as comments in the same file.
After the Kubernetes job is created and its pod is instantiated, we can follow the logs of the pod that executes the test case.
```
kubectl get pods -n e2e
kubectl logs -f <zfspv-shared-mount-xxxxx-xxxxx> -n e2e
```
To get the test-case result, fetch the corresponding e2e custom resource `e2eresult` (short name: `e2er`) and check its phase (Running or Completed) and result (Pass or Fail).
```
kubectl get e2er
kubectl get e2er zfspv-shared-mount -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
kubectl get e2er zfspv-shared-mount -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
```

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: busybox-shared-mount
  namespace: "{{ app_ns }}"
  labels:
    app: shared-mount
spec:
  selector:
    matchLabels:
      app: shared-mount
  template:
    metadata:
      labels:
        app: shared-mount
    spec:
      containers:
        - name: app-busybox
          imagePullPolicy: IfNotPresent
          image: gcr.io/google-containers/busybox
          command: ["/bin/sh"]
          args: ["-c", "while true; do sleep 10;done"]
          env:
          volumeMounts:
            - name: data-vol
              mountPath: /busybox
      volumes:
        - name: data-vol
          persistentVolumeClaim:
            claimName: "{{ app_pvc }}"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  namespace: "{{ app_ns }}"
  name: "{{ app_pvc }}"
spec:
  storageClassName: "{{ storage_class }}"
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi

@@ -0,0 +1,59 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zfspv-shared-mount
  namespace: e2e
data:
  parameters.yml: |

---
apiVersion: batch/v1
kind: Job
metadata:
  generateName: zfspv-shared-mount-
  namespace: e2e
spec:
  template:
    metadata:
      labels:
        test: shared-mount-volume
    spec:
      serviceAccountName: e2e
      restartPolicy: Never
      containers:
        - name: ansibletest
          image: openebs/zfs-localpv-e2e:ci
          imagePullPolicy: IfNotPresent

          env:
            - name: ANSIBLE_STDOUT_CALLBACK
              value: default

            - name: APP_NAMESPACE ## Namespace in which application is deployed
              value: ''

            - name: APP_PVC ## PVC name of the application
              value: ''

            - name: STORAGE_CLASS ## Give the storage class supporting shared volume mount
              value: ''

            - name: ZFS_OPERATOR_NAMESPACE ## Namespace in which all the resources created by zfs driver will be present
              value: ''                    ## for e.g. zfsvolume (zv) will be in this namespace

            - name: DATA_PERSISTENCE ## Give values according to the application
              value: ''              ## For `Busybox` : `busybox`

            - name: ACTION ## `provision` OR `deprovision`
              value: ''

          command: ["/bin/bash"]
          args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-shared-mount/test.yml -i /etc/ansible/hosts -vv; exit 0"]

          volumeMounts:
            - name: parameters
              mountPath: /mnt/
      volumes:
        - name: parameters
          configMap:
            name: zfspv-shared-mount

@@ -0,0 +1,243 @@
- hosts: localhost
  connection: local
  gather_facts: False

  vars_files:
    - test_vars.yml
    - /mnt/parameters.yml

  tasks:
    - block:

        ## Generating the testname for zfspv shared mount volume test
        - include_tasks: /e2e-tests/hack/create_testname.yml

        ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'SOT'

        - block:

            - name: Create application namespace
              shell: >
                kubectl create ns {{ app_ns }}
              args:
                executable: /bin/bash

            - name: Update the busybox application template with test specific values
              template:
                src: busybox_share.j2
                dest: busybox_share.yml

            - name: Deploy the busybox application using above storage-class
              shell: >
                kubectl apply -f busybox_share.yml
              args:
                executable: /bin/bash

            - name: Check the pvc status
              shell: >
                kubectl get pvc -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase
              args:
                executable: /bin/bash
              register: pvc_status
              until: pvc_status.stdout == 'Bound'
              delay: 2
              retries: 20

            - name: Get the application deployment name
              shell: >
                kubectl get deploy -n {{ app_ns }} --no-headers -o custom-columns=:.metadata.name
              args:
                executable: /bin/bash
              register: app_deploy_name

            - name: Get the application pod name
              shell: >
                kubectl get pod -n {{ app_ns }} -l app=shared-mount --no-headers -o custom-columns=:.metadata.name
              args:
                executable: /bin/bash
              register: app_pod_name

            - name: Check if the application pod is running
              shell: >
                kubectl get pod {{ app_pod_name.stdout }} -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase
              args:
                executable: /bin/bash
              register: app_pod_status
              until: "app_pod_status.stdout == 'Running'"
              delay: 3
              retries: 30

            - name: Create some test data into the application pod
              include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
              vars:
                status: 'LOAD'
                ns: "{{ app_ns }}"
                pod_name: "{{ app_pod_name.stdout }}"

            - name: Scale the deployment replicas to use shared mount volume by multiple pods
              shell: >
                kubectl scale deploy/{{ app_deploy_name.stdout }} -n {{ app_ns }} --replicas=2
              args:
                executable: /bin/bash

            - name: Check the no of replicas in deployment spec
              shell: >
                kubectl get deploy/{{ app_deploy_name.stdout }} -n {{ app_ns }} --no-headers
                -o custom-columns=:.status.readyReplicas
              args:
                executable: /bin/bash
              register: replica_count
              until: "replica_count.stdout == '2'"
              delay: 2
              retries: 20

            - name: Get the new application pod name after scaling the deployment replicas
              shell: >
                kubectl get pod -n {{ app_ns }} -l app=shared-mount --no-headers
                -o custom-columns=:.metadata.name | grep -v {{ app_pod_name.stdout }}
              args:
                executable: /bin/bash
              register: scaled_app_pod_name

            - name: Check the container status of the new application pod
              shell: >
                kubectl get pod {{ scaled_app_pod_name.stdout }} -n {{ app_ns }} --no-headers
                -o jsonpath='{.status.containerStatuses[].state}' | grep running
              args:
                executable: /bin/bash
              register: containerStatus
              until: "'running' in containerStatus.stdout"
              delay: 2
              retries: 50

            - name: Label the scaled application pod
              shell: >
                kubectl label pod {{ scaled_app_pod_name.stdout }} -n {{ app_ns }} name=share-pod
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            - name: Verify if the shared volume data is accessible from both the application pods
              include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
              vars:
                status: 'VERIFY'
                ns: "{{ app_ns }}"
                label: name=share-pod
                pod_name: "{{ app_pod_name.stdout }}"

            - name: Delete the dumped data files from scaled application pod
              shell: >
                kubectl exec -ti {{ scaled_app_pod_name.stdout }} -n {{ app_ns }} -- sh -c
                'rm -rf /busybox/*'
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            - name: Again dumping some dummy data, this time from scaled application pod
              include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
              vars:
                status: 'LOAD'
                ns: "{{ app_ns }}"
                pod_name: "{{ scaled_app_pod_name.stdout }}"

            - name: Get the application pod name
              shell: >
                kubectl get pod -n {{ app_ns }} --no-headers -o custom-columns=:.metadata.name | grep -v {{ scaled_app_pod_name.stdout }}
              args:
                executable: /bin/bash
              register: app_pod_name

            - name: Label the application pod
              shell: >
                kubectl label pod {{ app_pod_name.stdout }} -n {{ app_ns }} name=previous-pod
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            ## To keep the application pod label and deployment label same we label the deployment as well
            ## This will help in filtering while running volume-snapshot test.
            - name: Label the application deployment
              shell: >
                kubectl label deploy/{{ app_deploy_name.stdout }} -n {{ app_ns }} name=previous-pod
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            - name: Verify the data consistency from the previous pod
              include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
              vars:
                status: 'VERIFY'
                ns: "{{ app_ns }}"
                label: name=previous-pod
                pod_name: "{{ scaled_app_pod_name.stdout }}"

          when: action == 'provision'

        - block:

            - name: Get the zvolume name from the pvc name
              shell: >
                kubectl get pvc {{ app_pvc }} -n {{ app_ns }} -o jsonpath='{.spec.volumeName}'
              args:
                executable: /bin/bash
              register: zvol_name

            - name: Update the busybox deployment template with test specific values
              template:
                src: busybox_share.j2
                dest: busybox_share.yml

            - name: Delete the application deployment
              shell: >
                kubectl delete -f busybox_share.yml
              args:
                executable: /bin/bash
              register: status

            - name: Verify that application pods have been deleted successfully
              shell: >
                kubectl get pods -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: app_pod_status
              failed_when: "'No resources found' in app_pod_status.stdout"

            - name: Verify the successful deletion of pvc in {{ app_ns }} namespaces
              shell: >
                kubectl get pvc -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: pvc_status
              failed_when: "app_pvc in pvc_status.stdout"

            - name: Verify the successful deletion of zvolume
              shell: >
                kubectl get zv -n {{ zfs_operator_ns }}
              args:
                executable: /bin/bash
              register: zv_status
              until: "zvol_name.stdout not in zv_status.stdout"
              delay: 3
              retries: 30

          when: action == 'deprovision'

        - set_fact:
            flag: "Pass"

      rescue:
        - set_fact:
            flag: "Fail"

      always:
        ## RECORD END-OF-TEST IN e2e RESULT CR
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'EOT'

@@ -0,0 +1,13 @@
test_name: zfspv-shared-mount
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_pvc: "{{ lookup('env','APP_PVC') }}"
data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}"
storage_class: "{{ lookup('env','STORAGE_CLASS') }}"
action: "{{ lookup('env','ACTION') }}"
zfs_operator_ns: "{{ lookup('env', 'ZFS_OPERATOR_NAMESPACE') }}"