feat(e2e-test): Add e2e-tests for zfs-localpv (#298)

Signed-off-by: w3aman <aman.gupta@mayadata.io>
Aman Gupta authored on 2021-06-09 21:21:39 +05:30, committed by GitHub
parent 53f872fcf1
commit 4e73638b5a
137 changed files with 8745 additions and 0 deletions

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/README.md
@@ -0,0 +1,62 @@
## About this experiment
This experiment creates a clone directly from a volume, using the parent PVC as the dataSource, and then uses the cloned volume with a new application. It verifies that the cloned volume holds the same data as the parent volume at the point the clone was taken, and that this data is readily accessible from a new application once the clone volume is mounted.
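A minimal sketch of such a clone PVC (all names and the size here are hypothetical; the actual template used by this experiment is `clone_pvc.j2`, shown further below):
```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: demo-pvc-clone              # hypothetical clone PVC name
  namespace: default                # namespace of the parent PVC
spec:
  storageClassName: openebs-zfspv   # hypothetical zfs-localpv storage class
  dataSource:
    name: demo-pvc                  # parent PVC to clone from
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi                  # must equal the parent PVC size
```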
## Supported platforms:
K8s : 1.18+
OS : Ubuntu, CentOS
ZFS : 0.7, 0.8
## Entry-Criteria
- K8s cluster should be in a healthy state, with all desired nodes in Ready state.
- zfs-controller and node-agent daemonset pods should be in Running state (see the sample checks after this list).
- Application should be deployed successfully, consuming zfs-localpv storage.
- Size of the clone PVC should be equal to that of the original PVC.
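A quick way to verify these criteria (the pod-name patterns below are assumptions; namespaces depend on your install):
```
kubectl get nodes
# zfs-controller and node-agent pods (namespace may be kube-system or openebs, depending on the install)
kubectl get pods --all-namespaces | grep -E 'openebs-zfs-controller|openebs-zfs-node'
kubectl get pvc -n <app-namespace>   # the parent PVC should be in Bound state
```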
## Steps performed
This experiment covers both provisioning and deprovisioning of the zfspv clone, but performs one task at a time based on the ACTION env value < provision or deprovision >.
Provision:
- Create the clone by applying the PVC yaml with the parent PVC name in the dataSource.
- Verify that the clone PVC gets bound.
- Deploy a new application and verify that the clone volume gets successfully mounted on it.
- Verify data consistency: the clone should contain the same data as the parent volume at the time the clone was taken (see the sketch after this list).
Deprovision:
- Delete the application which is using the cloned volume.
- Verify that the clone PVC is deleted successfully.
- Verify that the zvolume is deleted successfully.
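As a rough illustration of the data-consistency check for the busybox case (the experiment itself drives this through the shared `busybox_data_persistence.yml` / `mysql_data_persistence.yml` utils; the file name and flow below are only a sketch, and `/busybox` is the mount path from `busybox.j2`):
```
# write test data into the parent volume before the clone is created
kubectl exec -n <app-namespace> <parent-app-pod> -- sh -c 'echo hello > /busybox/test.txt'
# after the clone is mounted, the same file should be present with identical content
kubectl exec -n <app-namespace> <clone-app-pod> -- cat /busybox/test.txt
```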
## How to run
- This experiment accepts parameters in the form of Kubernetes job environment variables.
- To run this zfs-localpv clone-directly-from-pvc experiment, first clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then apply the rbac and crds for the e2e-framework.
```
kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
```
Then update the needed test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
```
kubectl create -f run_e2e_test.yml
```
Descriptions of all the env variables are provided as comments in the same file.
After creating the Kubernetes job, once the job's pod is instantiated, we can follow the logs of the pod that is executing the test-case.
```
kubectl get pods -n e2e
kubectl logs -f <zfspv-clone-from-pvc-xxxxx-xxxxx> -n e2e
```
To get the test-case result, fetch the corresponding e2e custom resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).
```
kubectl get e2er
kubectl get e2er zfspv-clone-from-pvc -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
kubectl get e2er zfspv-clone-from-pvc -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
```
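The same job spec drives the cleanup flow; a sketch of switching to deprovision (the ACTION env is defined in `run_e2e_test.yml`):
```
# edit run_e2e_test.yml and set the ACTION env value to 'deprovision', then:
kubectl create -f run_e2e_test.yml
```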

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/busybox.j2
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ app_name }}-clone
  namespace: "{{ app_ns }}"
  labels:
    app: clone-app-from-pvc
spec:
  selector:
    matchLabels:
      app: clone-app-from-pvc
  template:
    metadata:
      labels:
        app: clone-app-from-pvc
    spec:
      containers:
        - name: app-busybox
          imagePullPolicy: IfNotPresent
          image: gcr.io/google-containers/busybox
          command: ["/bin/sh"]
          args: ["-c", "while true; do sleep 10; done"]
          env:
          volumeMounts:
            - name: data-vol
              mountPath: /busybox
      volumes:
        - name: data-vol
          persistentVolumeClaim:
            claimName: "{{ clone_pvc_name }}"

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/clone_pvc.j2
@@ -0,0 +1,15 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ clone_pvc_name }}
  namespace: {{ app_ns }}
spec:
  storageClassName: {{ storage_class }}
  dataSource:
    name: {{ parent_pvc_name }}
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: {{ clone_pvc_size }} ## clone PVC size should be equal to the parent PVC size

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/percona.j2
@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ app_name }}-clone
  namespace: {{ app_ns }}
  labels:
    app: clone-app-from-pvc
spec:
  replicas: 1
  selector:
    matchLabels:
      app: clone-app-from-pvc
  template:
    metadata:
      labels:
        app: clone-app-from-pvc
    spec:
      containers:
        - resources:
            limits:
              cpu: 0.5
          name: percona
          image: openebs/tests-custom-percona:latest
          imagePullPolicy: IfNotPresent
          args:
            - "--ignore-db-dir"
            - "lost+found"
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: k8sDem0
          ports:
            - containerPort: 3306
              name: percona
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: data-vol
          #<!-- BEGIN ANSIBLE MANAGED BLOCK -->
          livenessProbe:
            exec:
              command: ["bash", "sql-test.sh"]
            initialDelaySeconds: 60
            periodSeconds: 1
            timeoutSeconds: 10
          #<!-- END ANSIBLE MANAGED BLOCK -->
      volumes:
        - name: data-vol
          persistentVolumeClaim:
            claimName: {{ clone_pvc_name }}

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/run_e2e_test.yml
@@ -0,0 +1,65 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zfspv-clone-from-pvc
  namespace: e2e
data:
  parameters.yml: |

---
apiVersion: batch/v1
kind: Job
metadata:
  generateName: zfspv-clone-from-pvc-
  namespace: e2e
spec:
  template:
    metadata:
      labels:
        test: zfspv-clone-from-pvc
    spec:
      serviceAccountName: e2e
      restartPolicy: Never
      containers:
        - name: ansibletest
          image: openebs/zfs-localpv-e2e:ci
          imagePullPolicy: IfNotPresent
          env:
            - name: ANSIBLE_STDOUT_CALLBACK
              value: default
            - name: APP_NAMESPACE           ## Namespace in which the application is deployed
              value: ''
            - name: APP_LABEL               ## Parent application label
              value: ''                     ## Give the value in the format (key=value)
            - name: ZFS_OPERATOR_NAMESPACE  ## Namespace in which all resources created by the zfs driver are present
              value: ''                     ## e.g. the zfsvolume (zv) will be in this namespace
            - name: PARENT_PVC_NAME         ## Name of the parent pvc being used by the application
              value: ''
            - name: CLONE_PVC_NAME          ## The cloned pvc will be created with this name, in the same namespace where the snapshot is present
              value: ''
            - name: APP_NAME                ## Name of the application to be deployed using the cloned PVC
              value: ''                     ## Supported values are: `busybox` and `percona`
            - name: ACTION                  ## Use 'deprovision' for clone cleanup
              value: 'provision'
            - name: DATA_PERSISTENCE        ## Give the value according to the application
              value: ""                     ## For `busybox`: `busybox` & for `percona`: `mysql`
          command: ["/bin/bash"]
          args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test.yml -i /etc/ansible/hosts -vv; exit 0"]
          volumeMounts:
            - name: parameters
              mountPath: /mnt/
      volumes:
        - name: parameters
          configMap:
            name: zfspv-clone-from-pvc

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test.yml
@@ -0,0 +1,283 @@
- hosts: localhost
  connection: local
  gather_facts: False
  vars_files:
    - test_vars.yml
    - /mnt/parameters.yml

  tasks:
    - block:

        ## Generating the testname for the zfspv clone directly from pvc test
        - include_tasks: /e2e-tests/hack/create_testname.yml

        ## Record SOT (start of test) in the e2e result e2e-cr (e2e custom resource)
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'SOT'

        - block:

            - name: Get the application pod name
              shell: >
                kubectl get pod -n {{ app_ns }} -l {{ app_label }}
                --no-headers -o custom-columns=:.metadata.name | shuf -n1
              args:
                executable: /bin/bash
              register: app_pod_name

            - name: Check if the application pod is in Running state
              shell: >
                kubectl get pod {{ app_pod_name.stdout }} -n {{ app_ns }}
                --no-headers -o custom-columns=:.status.phase
              args:
                executable: /bin/bash
              register: app_pod_status
              failed_when: "'Running' not in app_pod_status.stdout"

            - name: Get the capacity of the parent pvc {{ parent_pvc_name }}
              shell: >
                kubectl get pvc {{ parent_pvc_name }} -n {{ app_ns }}
                --no-headers -o custom-columns=:.status.capacity.storage
              args:
                executable: /bin/bash
              register: parent_pvc_size

            ## clone pvc size should be the same as the parent pvc size
            - name: Record the clone pvc size
              set_fact:
                clone_pvc_size: "{{ parent_pvc_size.stdout }}"

            - name: Get the storage class name used for provisioning the {{ parent_pvc_name }} pvc
              shell: >
                kubectl get pvc {{ parent_pvc_name }} -n {{ app_ns }} --no-headers
                -o custom-columns=:.spec.storageClassName
              args:
                executable: /bin/bash
              register: stg_class

            - name: Record the storage class name
              set_fact:
                storage_class: "{{ stg_class.stdout }}"

            - block:
                - name: Create some test data in the application
                  include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
                  vars:
                    status: 'LOAD'
                    ns: "{{ app_ns }}"
                    pod_name: "{{ app_pod_name.stdout }}"
              when: data_persistence == 'busybox'

            - block:
                - name: Create some test data in the application
                  include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml"
                  vars:
                    status: 'LOAD'
                    ns: "{{ app_ns }}"
                    pod_name: "{{ app_pod_name.stdout }}"
              when: data_persistence == 'mysql'

            - name: Update the clone_pvc template with the test-specific values to create the clone
              template:
                src: clone_pvc.j2
                dest: clone_pvc.yml

            - name: Create the clone
              shell: >
                kubectl create -f clone_pvc.yml
              args:
                executable: /bin/bash
              register: status
              failed_when: "status.rc != 0"

            - block:
                - name: Update the {{ app_name }} deployment yaml with the test-specific values
                  template:
                    src: busybox.j2
                    dest: busybox.yml

                - name: Deploy the {{ app_name }} application using the cloned PVC
                  shell: >
                    kubectl create -f busybox.yml
                  args:
                    executable: /bin/bash
              when: app_name == "busybox"

            - block:
                - name: Update the {{ app_name }} deployment yaml with the test-specific values
                  template:
                    src: percona.j2
                    dest: percona.yml

                - name: Deploy the {{ app_name }} application using the cloned PVC
                  shell: >
                    kubectl create -f percona.yml
                  args:
                    executable: /bin/bash
              when: app_name == "percona"

            - name: Check if the cloned PVC is bound
              shell: >
                kubectl get pvc {{ clone_pvc_name }} -n {{ app_ns }}
                --no-headers -o custom-columns=:.status.phase
              args:
                executable: /bin/bash
              register: clone_pvc_status
              until: "'Bound' in clone_pvc_status.stdout"
              delay: 3
              retries: 50

            - name: Get the name of the {{ app_name }} application pod which is using the clone pvc
              shell: >
                kubectl get pods -n {{ app_ns }} -l app=clone-app-from-pvc --no-headers
                -o=custom-columns=NAME:".metadata.name"
              args:
                executable: /bin/bash
              register: pod_name

            - name: Record the {{ app_name }} application pod name
              set_fact:
                clone_pod_name: "{{ pod_name.stdout }}"

            - name: Check that the {{ app_name }} application pod is in Running state
              shell: >
                kubectl get pods {{ clone_pod_name }} -n {{ app_ns }}
                -o jsonpath='{.status.phase}'
              register: pod_status
              until: "'Running' in pod_status.stdout"
              delay: 3
              retries: 50

            - name: Get the container status of the {{ app_name }} application pod
              shell: >
                kubectl get pods {{ clone_pod_name }} -n {{ app_ns }}
                -o jsonpath='{.status.containerStatuses[].state}' | grep running
              args:
                executable: /bin/bash
              register: containerStatus
              until: "'running' in containerStatus.stdout"
              delay: 2
              retries: 50

            - block:
                - name: Verify data persistence
                  include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml"
                  vars:
                    status: 'VERIFY'
                    ns: "{{ app_ns }}"
                    label: app=clone-app-from-pvc
                    pod_name: "{{ clone_pod_name }}"
              when: data_persistence == 'mysql'

            - block:
                - name: Verify data persistence
                  include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
                  vars:
                    status: 'VERIFY'
                    ns: "{{ app_ns }}"
                    label: app=clone-app-from-pvc
                    pod_name: "{{ clone_pod_name }}"
              when: data_persistence == 'busybox'

          when: lookup('env','ACTION') == 'provision'

        - block:

            - name: Get the ZV name for the cloned PVC
              shell: >
                kubectl get pvc {{ clone_pvc_name }} -n {{ app_ns }} -o jsonpath='{.spec.volumeName}'
              args:
                executable: /bin/bash
              register: zv_name

            - name: Get the name of the {{ app_name }} application pod which is using the cloned pvc
              shell: >
                kubectl get pods -n {{ app_ns }} -l app=clone-app-from-pvc --no-headers
                -o=custom-columns=NAME:".metadata.name"
              args:
                executable: /bin/bash
              register: clone_pod_name

            - block:
                - name: Update the {{ app_name }} deployment yaml with the test-specific values
                  template:
                    src: busybox.j2
                    dest: busybox.yml

                - name: Delete the {{ app_name }} application which is using the cloned pvc
                  shell: >
                    kubectl delete -f busybox.yml
                  args:
                    executable: /bin/bash
                  register: status
                  failed_when: "status.rc != 0"
              when: app_name == 'busybox'

            - block:
                - name: Update the {{ app_name }} deployment yaml with the test-specific values
                  template:
                    src: percona.j2
                    dest: percona.yml

                - name: Delete the {{ app_name }} application which is using the cloned pvc
                  shell: >
                    kubectl delete -f percona.yml
                  args:
                    executable: /bin/bash
                  register: status
                  failed_when: "status.rc != 0"
              when: app_name == 'percona'

            - name: Check that the {{ app_name }} application pod which was using the cloned pvc is deleted successfully
              shell: >
                kubectl get pods -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: app_status
              until: "clone_pod_name.stdout not in app_status.stdout"
              delay: 3
              retries: 50

            - name: Delete the cloned pvc
              shell: >
                kubectl delete pvc {{ clone_pvc_name }} -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: clone_pvc_status
              failed_when: "clone_pvc_status.rc != 0"

            - name: Check if the cloned pvc is deleted
              shell: >
                kubectl get pvc -n {{ app_ns }}
              args:
                executable: /bin/bash
              register: clone_pvc_status
              until: "clone_pvc_name not in clone_pvc_status.stdout"
              delay: 3
              retries: 50

            - name: Check if the ZV for the cloned pvc is deleted
              shell: >
                kubectl get zv -n {{ zfs_operator_ns }}
              args:
                executable: /bin/bash
              register: zv_status
              until: "zv_name.stdout not in zv_status.stdout"
              delay: 3
              retries: 30

          when: lookup('env','ACTION') == 'deprovision'

        - set_fact:
            flag: "Pass"

      rescue:
        - set_fact:
            flag: "Fail"

      always:
        ## RECORD END-OF-TEST (EOT) IN THE e2e RESULT CR
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'EOT'

e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test_vars.yml
@@ -0,0 +1,17 @@
test_name: zfspv-clone-from-pvc
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
parent_pvc_name: "{{ lookup('env', 'PARENT_PVC_NAME') }}"
clone_pvc_name: "{{ lookup('env','CLONE_PVC_NAME') }}"
app_name: "{{ lookup('env','APP_NAME') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
action: "{{ lookup('env','ACTION') }}"
zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}"
data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}"