diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c1128b..6e2539f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -102,6 +102,35 @@ jobs: make ci make sanity + ansible: + runs-on: ubuntu-latest + needs: ['lint', 'unit-test', 'bdd-test'] + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set Image Org + # sets the default IMAGE_ORG to openebs + run: | + [ -z "${{ secrets.IMAGE_ORG }}" ] && IMAGE_ORG=openebs || IMAGE_ORG=${{ secrets.IMAGE_ORG}} + echo "IMAGE_ORG=${IMAGE_ORG}" >> $GITHUB_ENV + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push the ansible image + uses: docker/build-push-action@v2 + with: + file: ./e2e-tests/Dockerfile + push: true + load: false + platforms: linux/amd64 + tags: | + ${{ env.IMAGE_ORG }}/zfs-localpv-e2e:ci + csi-driver: runs-on: ubuntu-latest needs: ['lint', 'bdd-test'] diff --git a/e2e-tests/Dockerfile b/e2e-tests/Dockerfile new file mode 100644 index 0000000..fa13672 --- /dev/null +++ b/e2e-tests/Dockerfile @@ -0,0 +1,48 @@ +# Copyright 2020-2021 The OpenEBS Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
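+# This image is the Ansible test runner used by the e2e Kubernetes jobs added in this change: +# it bundles ansible, kubectl and the ./e2e-tests playbooks so that each run_e2e_test.yml job can execute its test playbook against the cluster.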
+ +FROM ubuntu:18.04 + +LABEL maintainer="OpenEBS" + +#Installing necessary ubuntu packages +RUN rm -rf /var/lib/apt/lists/* && \ + apt-get clean && \ + apt-get update --fix-missing || true && \ + apt-get install -y python python-pip netcat iproute2 jq sshpass bc git\ + curl openssh-client + +#Installing ansible +RUN pip install ansible==2.7.3 + +#Installing openshift +RUN pip install openshift==0.11.2 + +#Installing jmespath +RUN pip install jmespath + +RUN touch /mnt/parameters.yml + +#Installing Kubectl +ENV KUBE_LATEST_VERSION="v1.20.0" +RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \ + chmod +x /usr/local/bin/kubectl + +#Adding hosts entries and making ansible folders +RUN mkdir /etc/ansible/ /ansible && \ + echo "[local]" >> /etc/ansible/hosts && \ + echo "127.0.0.1" >> /etc/ansible/hosts + +#Copying Necessary Files +COPY ./e2e-tests ./e2e-tests \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/busybox_deployment.yml b/e2e-tests/apps/busybox/deployers/busybox_deployment.yml new file mode 100644 index 0000000..34f11ae --- /dev/null +++ b/e2e-tests/apps/busybox/deployers/busybox_deployment.yml @@ -0,0 +1,42 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-busybox + labels: + lkey: lvalue +spec: + selector: + matchLabels: + lkey: lvalue + template: + metadata: + labels: + lkey: lvalue + spec: + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: testclaim +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: testclaim +spec: + storageClassName: testclass + accessModes: + - ReadWriteOnce + resources: + requests: + storage: teststorage \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml b/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml new file mode 100644 index 0000000..4664a85 --- /dev/null +++ b/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml @@ -0,0 +1,36 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: app-busybox + labels: + lkey: lvalue +spec: + selector: + matchLabels: + lkey: lvalue + template: + metadata: + labels: + lkey: lvalue + spec: + containers: + - name: app-busybox + image: gcr.io/google-containers/busybox + imagePullPolicy: IfNotPresent + command: + - sh + - -c + - 'date > /busybox/date.txt; sync; sleep 5; sync; tail -f /dev/null;' + volumeMounts: + - name: testclaim + mountPath: /busybox + volumeClaimTemplates: + - metadata: + name: testclaim + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: testclass + resources: + requests: + storage: teststorage \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/run_e2e_test.yml b/e2e-tests/apps/busybox/deployers/run_e2e_test.yml new file mode 100644 index 0000000..3b986fe --- /dev/null +++ b/e2e-tests/apps/busybox/deployers/run_e2e_test.yml @@ -0,0 +1,55 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: busybox-deploy- + namespace: e2e +spec: + template: + metadata: + name: busybox-deploy + labels: + app: busybox + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: 
ANSIBLE_STDOUT_CALLBACK + value: default + + # Name of the storage class to use for volume provisioning + - name: STORAGE_CLASS + value: 'zfspv-sc' + + # This is the namespace where busybox application will be deployed + - name: APP_NAMESPACE + value: 'busybox' + + # Application label for busybox deployment/statefulset in `key=value` format + - name: APP_LABEL + value: 'app=busybox' + + # Application PVC name + - name: APP_PVC + value: 'busybox-pvc' + + # Persistent volume storage capacity (e.g., 5Gi) + - name: PV_CAPACITY + value: '5Gi' + + # Use: `statefulset` to deploy busybox application as statefulset + # Use: `deployment` to deploy busybox application as deployment + - name: DEPLOY_TYPE + value: 'deployment' + + # Use: `provision` to deploy the application + # Use: `deprovision` to deprovision the application + - name: ACTION + value: 'provision' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/test.yml b/e2e-tests/apps/busybox/deployers/test.yml new file mode 100644 index 0000000..898582c --- /dev/null +++ b/e2e-tests/apps/busybox/deployers/test.yml @@ -0,0 +1,78 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for deployment + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + - block: + ## Prerequisite tasks such as namespace creation and replacing placeholders + ## with test-specific values, before deploying the application + - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml + vars: + application: "{{ application_statefulset }}" + + ## Deploying the application + - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml + vars: + application: "{{ application_statefulset }}" + + when: "'deprovision' not in action" + + - name: Deprovisioning the Application + include_tasks: /e2e-tests/utils/k8s/deprovision_statefulset.yml + vars: + app_deployer: "{{ application_statefulset }}" + when: "'deprovision' is in action" + + when: lookup('env','DEPLOY_TYPE') == 'statefulset' + + - block: + - block: + ## Prerequisite tasks such as namespace creation and replacing placeholders + ## with test-specific values, before deploying the application + - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml + vars: + application: "{{ application_deployment }}" + + ## Deploying the application + - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml + vars: + application: "{{ application_deployment }}" + + when: "'deprovision' not in action" + + - name: Deprovisioning the Application + include_tasks: /e2e-tests/utils/k8s/deprovision_deployment.yml + vars: + app_deployer: "{{ application_deployment }}" + when: "'deprovision' is in action" + + when: lookup('env','DEPLOY_TYPE') == 'deployment' + + - set_fact: + flag: "Pass" + + rescue: + - name: Setting fail flag + set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/apps/busybox/deployers/test_vars.yml b/e2e-tests/apps/busybox/deployers/test_vars.yml new file mode 100644 index 0000000..7264b97 --- /dev/null +++ 
b/e2e-tests/apps/busybox/deployers/test_vars.yml @@ -0,0 +1,19 @@ +test_name: "busybox-{{ action }}-{{ app_ns }}" + +application_name: "busybox" + +application_statefulset: busybox_statefulset.yml + +application_deployment: busybox_deployment.yml + +storage_class: "{{ lookup('env','STORAGE_CLASS') }}" + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +app_pvc: "{{ lookup('env','APP_PVC') }}" + +deploy_type: "{{ lookup('env','DEPLOY_TYPE') }}" + +action: "{{ lookup('env','ACTION') }}" \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/busybox_liveness.yml b/e2e-tests/apps/busybox/liveness/busybox_liveness.yml new file mode 100644 index 0000000..84781b8 --- /dev/null +++ b/e2e-tests/apps/busybox/liveness/busybox_liveness.yml @@ -0,0 +1,80 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: app-namespace + labels: + name: app-namespace +rules: +- apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: app-namespace + namespace: app-namespace + labels: + name: app-namespace + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: app-namespace + labels: + name: app-namespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: app-namespace +subjects: +- kind: ServiceAccount + name: app-namespace + namespace: app-namespace + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: busybox-liveness- + namespace: app-namespace +spec: + template: + metadata: + name: busybox-liveness + namespace: app-namespace + labels: + liveness: busybox-liveness + + # label used for mass-liveness check upon infra-chaos + infra-aid: liveness + + spec: + serviceAccountName: app-namespace + restartPolicy: Never + + containers: + - name: busybox-liveness + image: openebs/busybox-client + imagePullPolicy: Always + + env: + - name: LIVENESS_TIMEOUT_SECONDS + value: "liveness-timeout-seconds" + + # number of retries when liveness fails + - name: LIVENESS_RETRY_COUNT + value: "liveness-retry-count" + + # Namespace in which busybox is running + - name: NAMESPACE + value: app-namespace + + - name: POD_NAME + value: pod-name + + command: ["/bin/bash"] + args: ["-c", "./liveness.sh; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/run_e2e_test.yml b/e2e-tests/apps/busybox/liveness/run_e2e_test.yml new file mode 100644 index 0000000..f5e4262 --- /dev/null +++ b/e2e-tests/apps/busybox/liveness/run_e2e_test.yml @@ -0,0 +1,57 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: busybox-liveness- + namespace: e2e +spec: + activeDeadlineSeconds: 5400 + template: + metadata: + name: busybox-liveness + namespace: e2e + labels: + liveness: busybox-liveness + + # label used for mass-liveness check upon infra-chaos + infra-aid: liveness + + spec: + serviceAccountName: e2e + restartPolicy: Never + + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + + - name: LIVENESS_TIMEOUT_SECONDS + value: "10" + + # number of retries when liveness fails + - name: LIVENESS_RETRY_COUNT + value: "5" + + # This is the namespace where busybox application is running + - name: APP_NAMESPACE + value: 'busybox' + + # Application label for busybox in `key=value` format + - name: APP_LABEL 
+ value: 'app=busybox' + + # Use: `provision` to apply the liveness-probe checks for busybox application + # Use: `deprovision` to deprovision the liveness-probe + - name: ACTION + value: 'provision' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/liveness/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/test.yml b/e2e-tests/apps/busybox/liveness/test.yml new file mode 100644 index 0000000..e581a90 --- /dev/null +++ b/e2e-tests/apps/busybox/liveness/test.yml @@ -0,0 +1,96 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + - block: + + - name: Record test instance/run ID + set_fact: + run_id: "{{ lookup('env','RUN_ID') }}" + + - name: Construct testname appended with runID + set_fact: + test_name: "{{ test_name }}-{{ run_id }}" + + when: lookup('env','RUN_ID') + + ## RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + + - name: Getting the application pod name + shell: kubectl get pod -n {{ namespace }} -l {{ app_label }} -o jsonpath={.items[0].metadata.name} + register: pod_name + + - name: Replacing the placeholder for pod-name + replace: + path: "{{ busybox_liveness }}" + regexp: "pod-name" + replace: "{{ pod_name.stdout }}" + + - name: Replacing the placeholder for namespace + replace: + path: "{{ busybox_liveness }}" + regexp: "app-namespace" + replace: "{{ namespace }}" + + - name: Replacing the placeholder for liveness-retry-count + replace: + path: "{{ busybox_liveness }}" + regexp: "liveness-retry-count" + replace: "{{ liveness_retry }}" + + - name: Replacing the placeholder for liveness-timeout + replace: + path: "{{ busybox_liveness }}" + regexp: "liveness-timeout-seconds" + replace: "{{ liveness_timeout }}" + + - name: Creating busybox-liveness job + shell: kubectl create -f {{ busybox_liveness }} + + - name: Verifying whether liveness pod is started successfully + shell: kubectl get pod -n {{ namespace }} -l liveness=busybox-liveness -o jsonpath={.items[0].status.phase} + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 5 + retries: 40 + + - set_fact: + flag: "Pass" + + when: "'deprovision' not in action" + + - block: + - name: Getting the busybox liveness job + shell: kubectl get job -l liveness=busybox-liveness -n {{ namespace }} -o jsonpath='{.items[0].metadata.name}' + register: liveness_job + + - name: Deleting busybox liveness job + shell: kubectl delete job {{ liveness_job.stdout }} -n {{ namespace }} + + - set_fact: + flag: "Pass" + + when: "'deprovision' is in action" + + rescue: + - set_fact: + flag: "Fail" + + always: + + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' + \ No newline at end of file diff --git a/e2e-tests/apps/busybox/liveness/test_vars.yml b/e2e-tests/apps/busybox/liveness/test_vars.yml new file mode 100644 index 0000000..4d331c9 --- /dev/null +++ b/e2e-tests/apps/busybox/liveness/test_vars.yml @@ -0,0 +1,15 @@ +test_name: busybox-liveness + +namespace: "{{ lookup('env','APP_NAMESPACE') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +busybox_liveness: busybox_liveness.yml + +liveness_retry: "{{ lookup('env','LIVENESS_RETRY_COUNT') }}" + +liveness_timeout: "{{ lookup('env','LIVENESS_TIMEOUT_SECONDS') }}" + +liveness_log: "liveness-running" + +action: "{{ 
lookup('env','ACTION') }}" \ No newline at end of file diff --git a/e2e-tests/apps/percona/deployers/percona.yml b/e2e-tests/apps/percona/deployers/percona.yml new file mode 100644 index 0000000..51e19cd --- /dev/null +++ b/e2e-tests/apps/percona/deployers/percona.yml @@ -0,0 +1,72 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: percona + labels: + lkey: lvalue +spec: + selector: + matchLabels: + lkey: lvalue + template: + metadata: + labels: + lkey: lvalue + spec: + containers: + - resources: + limits: + cpu: 0.5 + name: percona + image: openebs/tests-custom-percona:latest + imagePullPolicy: IfNotPresent + args: + - "--ignore-db-dir" + - "lost+found" + env: + - name: MYSQL_ROOT_PASSWORD + value: k8sDem0 + ports: + - containerPort: 3306 + name: percona + volumeMounts: + - mountPath: /var/lib/mysql + name: data-vol + # + livenessProbe: + exec: + command: ["bash", "sql-test.sh"] + initialDelaySeconds: 60 + periodSeconds: 1 + timeoutSeconds: 10 + # + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: testclaim +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: testclaim +spec: + storageClassName: testclass + accessModes: + - ReadWriteOnce + resources: + requests: + storage: teststorage +--- +apiVersion: v1 +kind: Service +metadata: + name: percona-mysql + labels: + lkey: lvalue +spec: + ports: + - port: 3306 + targetPort: 3306 + selector: + lkey: lvalue \ No newline at end of file diff --git a/e2e-tests/apps/percona/deployers/run_e2e_test.yml b/e2e-tests/apps/percona/deployers/run_e2e_test.yml new file mode 100644 index 0000000..7cde4b3 --- /dev/null +++ b/e2e-tests/apps/percona/deployers/run_e2e_test.yml @@ -0,0 +1,52 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: percona-deploy- + namespace: e2e +spec: + template: + metadata: + name: percona-deploy + labels: + app: percona-deployment + + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # Name of the storage class to use for volume provisioning + - name: STORAGE_CLASS + value: 'zfspv-sc' + + # This is the namespace where percona application will be deployed + - name: APP_NAMESPACE + value: 'percona' + + # Application label for percona deployment in `key=value` format + - name: APP_LABEL + value: 'app=percona' + + # Application PVC name + - name: APP_PVC + value: 'percona-pvc' + + # Persistent volume storage capacity (for e.g, 5Gi) + - name: PV_CAPACITY + value: '5Gi' + + # Use: `provision` to deploy the application + # Use: `deprovision` to deprovision the application + - name: ACTION + value: 'provision' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/percona/deployers/test.yml b/e2e-tests/apps/percona/deployers/test.yml new file mode 100644 index 0000000..c6dd54c --- /dev/null +++ b/e2e-tests/apps/percona/deployers/test.yml @@ -0,0 +1,57 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for deployment + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## RECORD START-OF-TEST IN e2e RESULT CR + - include_tasks: "/e2e-tests/hack/update_e2e_result_resource.yml" + vars: + status: 'SOT' + + - block: + ## Prerequisite tasks such as, namespace 
creation and replacing placeholder + ## with test specific values, before deploying application + - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml + vars: + application: "{{ application_deployment }}" + + ## Deploying the application + - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml + vars: + application: "{{ application_deployment }}" + + ## Fetching the pod name + - include_tasks: /e2e-tests/utils/k8s/fetch_app_pod.yml + + ## Checking the db is ready for connection + - include_tasks: /e2e-tests/utils/applications/mysql/check_db_connection.yml + + when: "'deprovision' not in action" + + - name: Deprovisioning the Application + include_tasks: /e2e-tests/utils/k8s/deprovision_deployment.yml + vars: + app_deployer: "{{ application_deployment }}" + when: "'deprovision' is in action" + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' diff --git a/e2e-tests/apps/percona/deployers/test_vars.yml b/e2e-tests/apps/percona/deployers/test_vars.yml new file mode 100644 index 0000000..874b695 --- /dev/null +++ b/e2e-tests/apps/percona/deployers/test_vars.yml @@ -0,0 +1,15 @@ +test_name: "percona-{{ action }}-{{ app_ns }}" + +application_deployment: percona.yml + +application_name: "percona" + +storage_class: "{{ lookup('env','STORAGE_CLASS') }}" + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +app_pvc: "{{ lookup('env','APP_PVC') }}" + +action: "{{ lookup('env','ACTION') }}" \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/replace.yml b/e2e-tests/apps/percona/workload/replace.yml new file mode 100644 index 0000000..fe246f8 --- /dev/null +++ b/e2e-tests/apps/percona/workload/replace.yml @@ -0,0 +1,57 @@ +- name: Replace the label in loadgen job spec. 
+ replace: + path: "{{ percona_loadgen }}" + regexp: "loadgen_lkey: loadgen_lvalue" + replace: "{{ loadgen_lkey }}: {{ loadgen_lvalue }}" + +- name: Replace the db-user placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_user" + replace: "{{ db_user }}" + +- name: Replace the password placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_password" + replace: "{{ db_password }}" + +- name: Replace the duration placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_duration" + replace: "{{ load_duration }}" + +- name: Replace the warehouse placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_warehouse" + replace: "{{ test_warehouse }}" + +- name: Replace the test connections placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_connections" + replace: "{{ test_connections }}" + +- name: Replace the test warmup-period placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_warmup_period" + replace: "{{ test_warmup_period }}" + +- name: Replace the test interval placeholder in tpcc-config file + replace: + path: "{{ tpcc_conf }}" + regexp: "test_interval" + replace: "{{ test_interval }}" + +- name: Getting the Service IP of Application + shell: kubectl get svc -n {{ app_ns }} -l {{ app_service_label }} -o jsonpath='{.items[0].spec.clusterIP}' + register: ip + +- name: Replace the Service IP placeholder + replace: + path: "{{ percona_loadgen }}" + regexp: "service_ip" + replace: "{{ ip.stdout }}" \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/run_e2e_test.yml b/e2e-tests/apps/percona/workload/run_e2e_test.yml new file mode 100644 index 0000000..8126df6 --- /dev/null +++ b/e2e-tests/apps/percona/workload/run_e2e_test.yml @@ -0,0 +1,60 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: percona-loadgen- + namespace: e2e +spec: + template: + metadata: + name: percona-loadgen + namespace: e2e + labels: + loadgen: percona-loadjob + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # This is the namespace where percona application is running + - name: APP_NAMESPACE + value: 'percona' + + - name: APP_LABEL + value: 'app=percona' + + - name: LOADGEN_LABEL + value: loadgen=percona-loadgen + + # Database user name + - name: DB_USER + value: root + + - name: DB_PASSWORD + value: k8sDem0 + + # Bench duration (in min) + # TODO: Use a tpcc-template to define workload w/ more granularity + - name: LOAD_DURATION + value: "600" + + - name: TPCC_WAREHOUSES + value: "1" + + - name: TPCC_CONNECTIONS + value: "18" + + - name: TPCC_WARMUP_PERIOD + value: "10" + + - name: LOAD_INTERVAL + value: "10" + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/test.yml b/e2e-tests/apps/percona/workload/test.yml new file mode 100644 index 0000000..311b6f1 --- /dev/null +++ b/e2e-tests/apps/percona/workload/test.yml @@ -0,0 +1,81 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for deployment + - include_tasks: /e2e-tests/hack/create_testname.yml + + # RECORD START-OF-TEST IN 
e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Checking the status of test specific namespace. + include_tasks: /e2e-tests/utils/k8s/status_testns.yml + + - name: Get the application label value from env + set_fact: + app_lkey: "{{ app_label.split('=')[0] }}" + app_lvalue: "{{ app_label.split('=')[1] }}" + + - name: Checking whether application is running + include_tasks: /e2e-tests/utils/k8s/status_app_pod.yml + + - name: Obtaining the loadgen pod label from env. + set_fact: + loadgen_lkey: "{{ loadgen_label.split('=')[0] }}" + loadgen_lvalue: "{{ loadgen_label.split('=')[1] }}" + + - name: Replace default values/placeholder with test-specific values + include_tasks: ./replace.yml + + - name: Checking for configmap + shell: kubectl get configmap -n {{ app_ns }} + register: configmap + + - name: Creating a kubernetes config map to hold the tpcc benchmark config + shell: kubectl create configmap tpcc-config --from-file {{ tpcc_conf }} -n {{ app_ns }} + when: "'tpcc-config' not in configmap.stdout" + + - name: Create Percona Loadgen Job + shell: kubectl apply -f {{ percona_loadgen }} -n {{ app_ns }} + + - name: Verify load-gen pod is running + shell: kubectl get pods -n {{ app_ns }} -l {{ loadgen_label }} -o jsonpath='{.items[0].status.phase}' + args: + executable: /bin/bash + register: result + until: "'Running' in result.stdout" + delay: 5 + retries: 60 + + - name: Getting the Percona POD name + shell: kubectl get po -n {{ app_ns }} -l {{ app_label }} -o jsonpath='{.items[0].metadata.name}' + register: pod_name + + - name: Verifying load-generation + shell: kubectl exec -it {{ pod_name.stdout }} -n {{ app_ns }} -- mysql -u{{ db_user }} -p{{ db_password }} -e "show databases" + register: output + until: "'tpcc-' in output.stdout" + delay: 5 + retries: 120 + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' diff --git a/e2e-tests/apps/percona/workload/test_vars.yml b/e2e-tests/apps/percona/workload/test_vars.yml new file mode 100644 index 0000000..03fe5a5 --- /dev/null +++ b/e2e-tests/apps/percona/workload/test_vars.yml @@ -0,0 +1,27 @@ +test_name: percona-loadgen-{{ app_ns }} + +percona_loadgen: tpcc_bench.yml + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +app_service_label: "{{ lookup('env','APP_LABEL') }}" + +loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}" + +db_user: "{{ lookup('env','DB_USER') }}" + +db_password: "{{ lookup('env','DB_PASSWORD') }}" + +load_duration: "{{ lookup('env','LOAD_DURATION') }}" + +test_warehouse: "{{ lookup('env','TPCC_WAREHOUSES') }}" + +test_connections: "{{ lookup('env','TPCC_CONNECTIONS') }}" + +test_warmup_period: "{{ lookup('env','TPCC_WARMUP_PERIOD') }}" + +test_interval: "{{ lookup('env','LOAD_INTERVAL') }}" + +tpcc_conf: tpcc.conf \ No newline at end of file diff --git a/e2e-tests/apps/percona/workload/tpcc.conf b/e2e-tests/apps/percona/workload/tpcc.conf new file mode 100644 index 0000000..98fd3d0 --- /dev/null +++ b/e2e-tests/apps/percona/workload/tpcc.conf @@ -0,0 +1,9 @@ +{ + "db_user": "test_user", + "db_password": "test_password", + "warehouses": "test_warehouse", + "connections": "test_connections", + "warmup_period": "test_warmup_period", + "run_duration": "test_duration", + "interval": "test_interval" +} \ No newline at end of file diff --git 
a/e2e-tests/apps/percona/workload/tpcc_bench.yml b/e2e-tests/apps/percona/workload/tpcc_bench.yml new file mode 100644 index 0000000..0539a50 --- /dev/null +++ b/e2e-tests/apps/percona/workload/tpcc_bench.yml @@ -0,0 +1,27 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: tpcc-bench +spec: + template: + metadata: + name: tpcc-bench + labels: + loadgen_lkey: loadgen_lvalue + spec: + restartPolicy: Never + containers: + - name: tpcc-bench + image: openebs/tests-tpcc-client + command: ["/bin/bash"] + args: ["-c", "./tpcc-runner.sh service_ip tpcc.conf; exit 0"] + volumeMounts: + - name: tpcc-configmap + mountPath: /tpcc-mysql/tpcc.conf + subPath: tpcc.conf + tty: true + volumes: + - name: tpcc-configmap + configMap: + name: tpcc-config \ No newline at end of file diff --git a/e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml b/e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml new file mode 100644 index 0000000..2d680f6 --- /dev/null +++ b/e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml @@ -0,0 +1,132 @@ +--- +- block: + + - name: Setup pumba chaos infrastructure + shell: > + kubectl apply -f /e2e-tests/chaoslib/pumba/pumba.yml -n {{ namespace }} + args: + executable: /bin/bash + register: result + + - name: Confirm that the pumba ds is running on all desired nodes + shell: > + kubectl get pod -l app=pumba --no-headers -o custom-columns=:status.phase + -n {{ namespace }} | sort | uniq + args: + executable: /bin/bash + register: result + until: "result.stdout == 'Running'" + delay: 1 + retries: 60 + ignore_errors: true + + - name: Get the application pod name + shell: > + kubectl get pod -l {{ label }} -n {{ namespace }} + -o=custom-columns=NAME:".metadata.name" --no-headers | shuf | head -1 + args: + executable: /bin/bash + register: pod_name + + - name: Record application pod name + set_fact: + app_pod: "{{ pod_name.stdout }}" + + - name: Identify the node name where application pod is scheduled + shell: > + kubectl get pod {{ app_pod }} -n {{ namespace }} + --no-headers -o custom-columns=:spec.nodeName + args: + executable: /bin/bash + register: result + + - name: Record the node name + set_fact: + app_node: "{{ result.stdout }}" + + - name: Get application container name + shell: > + kubectl get pods -l {{ label }} -n {{ namespace }} + -o jsonpath='{.items[0].spec.containers[0].name}' + args: + executable: /bin/bash + register: container + + - name: Record the app_container + set_fact: + app_container: "{{ container.stdout }}" + + - name: Record the pumba pod scheduled on same node as of application pod + shell: > + kubectl get pod -l app=pumba -o wide -n {{ namespace }} + | grep {{ app_node }} | awk '{print $1}' + args: + executable: /bin/bash + register: pumba_pod + + - name: Record container restartCount + shell: > + kubectl get pod {{ app_pod }} -n {{ namespace }} + -o=jsonpath='{.status.containerStatuses[?(@.name=="{{ app_container }}")].restartCount}' + args: + executable: /bin/bash + register: restartCnt_prev + + - name: Force kill the application pod container using pumba + shell: > + kubectl exec {{ pumba_pod.stdout}} -n {{ namespace }} + -- pumba kill --signal SIGKILL re2:k8s_{{ app_container }}_{{ app_pod }}; + args: + executable: /bin/bash + ignore_errors: true + register: result + + - name: Verify container restartCount + shell: > + kubectl get pod {{ app_pod }} -n {{ namespace }} + -o=jsonpath='{.status.containerStatuses[?(@.name=="{{ app_container }}")].restartCount}' + args: + executable: /bin/bash + register: restartCnt + until: "restartCnt.stdout|int > 
restartCnt_prev.stdout|int" + delay: 2 + retries: 30 + + when: action == "killapp" + +- block: + + - name: Check if pumba pod is indeed running + shell: > + kubectl get pod -l app=pumba --no-headers -o custom-columns=:status.phase + -n {{ namespace }} | sort | uniq + args: + executable: /bin/bash + register: result + until: "result.stdout == 'Running'" + delay: 1 + retries: 60 + ignore_errors: true + + - block: + + - name: Delete the pumba daemonset + shell: > + kubectl delete -f /e2e-tests/chaoslib/pumba/pumba.yml -n {{ namespace }} + args: + executable: /bin/bash + register: result + + - name: Confirm that the pumba ds is deleted successfully + shell: > + kubectl get pod -l app=pumba --no-headers -n {{ namespace }} + args: + executable: /bin/bash + register: result + until: "'Running' not in result.stdout" + delay: 1 + retries: 150 + + when: result.stdout is defined and result.stdout == "Running" + + when: action == "deletepumba" \ No newline at end of file diff --git a/e2e-tests/chaoslib/pumba/pumba.yml b/e2e-tests/chaoslib/pumba/pumba.yml new file mode 100644 index 0000000..3aa0ab5 --- /dev/null +++ b/e2e-tests/chaoslib/pumba/pumba.yml @@ -0,0 +1,37 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: pumba +spec: + selector: + matchLabels: + app: pumba + template: + metadata: + labels: + app: pumba + com.gaiaadm.pumba: "true" # prevent pumba from killing itself + name: pumba + spec: + containers: + - image: gaiaadm/pumba:0.4.8 + imagePullPolicy: IfNotPresent + name: pumba + # Pumba command: modify it to suite your needs + # Dry run: Randomly try to kill some container every 3 minutes + command: ["pumba", "--dry", "--random", "--interval", "3m", "kill", "--signal", "SIGTERM"] + resources: + requests: + cpu: 10m + memory: 5M + limits: + cpu: 100m + memory: 20M + volumeMounts: + - name: dockersocket + mountPath: /var/run/docker.sock + volumes: + - hostPath: + path: /var/run/docker.sock + name: dockersocket \ No newline at end of file diff --git a/e2e-tests/chaoslib/service_failure/service_chaos.yml b/e2e-tests/chaoslib/service_failure/service_chaos.yml new file mode 100644 index 0000000..0e06106 --- /dev/null +++ b/e2e-tests/chaoslib/service_failure/service_chaos.yml @@ -0,0 +1,142 @@ +- block: + + - name: Identify the node on which application pod is scheduled + shell: > + kubectl get pod {{ app_pod }} -n {{ app_ns }} + --no-headers -o custom-columns=:spec.nodeName + args: + executable: /bin/bash + register: node_name + + - name: Record the node name on which application pod is scheduled + set_fact: + app_node: "{{ node_name.stdout }}" + + - name: Get the IP Address of the node on which application pod is scheduled + shell: > + kubectl get nodes {{ app_node }} --no-headers -o jsonpath='{.status.addresses[0].address}' + args: + executable: /bin/bash + register: node_ip_address + + - name: Record the IP Address of the node on which application pod is scheduled + set_fact: + node_ip_add: "{{ node_ip_address.stdout }}" + + - block: + + - name: stop the {{ svc_type }} service on node where application pod is scheduled + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} + "echo {{ node_pwd }} | sudo -S su -c 'systemctl stop {{ svc_type }}.service'" + args: + executable: /bin/bash + + - name: Check for the {{ svc_type }} service status + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} + "echo {{ node_pwd }} | sudo -S su -c 'systemctl status {{ svc_type }}.service'" | grep 'inactive' + 
args: + executable: /bin/bash + register: svc_status + until: "'inactive' in svc_status.stdout" + delay: 5 + retries: 15 + + - name: Check the node {{ app_node }} status on which {{ svc_type }} failure chaos is induced + shell: + kubectl get nodes {{ app_node }} + args: + executable: /bin/bash + register: node_status + until: "'NotReady' in node_status.stdout" + delay: 10 + retries: 30 + + - name: Check if the new application pod is scheduled after {{ svc_type }} failure + shell: > + kubectl get pods -n {{ app_ns }} -l {{ app_label }} --no-headers | wc -l + args: + executable: /bin/bash + register: app_pod_count + until: "'2' in app_pod_count.stdout" + delay: 15 + retries: 30 + + - name: Get the new application pod name + shell: > + kubectl get pod -n {{ app_ns }} -l {{ app_label }} --no-headers | grep -v Terminating | awk '{print $1}' + args: + executable: /bin/bash + register: new_app_pod_name + + - name: Record the new application pod name + set_fact: + new_app_pod: "{{ new_app_pod_name.stdout }}" + + - name: Check for the newly created application pod status + shell: > + kubectl get pod {{ new_app_pod }} -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: new_app_pod_status + until: "'Pending' in new_app_pod_status.stdout" + delay: 5 + retries: 20 + + when: svc_type=="kubelet" or svc_type=="docker" + + when: action == "svc_stop" + + +- block: + + - name: Start the {{ svc_type }} services + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} + "echo {{ node_pwd }} | sudo -S su -c 'systemctl start {{ svc_type }}.service'" + args: + executable: /bin/bash + + - name: Check for the {{ svc_type }} services status + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} + "echo {{ node_pwd }} | sudo -S su -c 'systemctl status {{ svc_type }}.service'" | grep 'active (running)' + args: + executable: /bin/bash + register: svc_status + until: "'active (running)' in svc_status.stdout" + delay: 5 + retries: 15 + + - name: Check for the node status after starting {{ svc_type }} service + shell: > + kubectl get nodes {{ app_node }} --no-headers + args: + executable: /bin/bash + register: node_status + until: "'NotReady' not in node_status.stdout" + delay: 10 + retries: 30 + + - name: Verify that previous pods are deleted successfully after restart of {{ svc_type }} + shell: > + kubectl get pods -n {{ app_ns }} -l {{ app_label }} --no-headers | wc -l + args: + executable: /bin/bash + register: app_pod_count + until: "'1' in app_pod_count.stdout" + delay: 5 + retries: 60 + + - name: Get the status of newly created application pod + shell: > + kubectl get pod {{ new_app_pod }} -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: application_pod + until: "'Running' in application_pod.stdout" + delay: 10 + retries: 50 + + when: action == "svc_start" \ No newline at end of file diff --git a/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml b/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml new file mode 100644 index 0000000..1e341c2 --- /dev/null +++ b/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml @@ -0,0 +1,20 @@ +--- +# This util can handle 'power on' and 'power off' operations on VMware based virtual machines. 
+# The parameters required by this util are +# - ESX IP +# - ESX root Password +# - The target virtual machine name +# - Operation, either 'on' or 'off' +# +- name: Obtain the VM ID + shell: sshpass -p {{ esx_pwd }} ssh -o StrictHostKeyChecking=no root@{{ esx_ip }} vim-cmd vmsvc/getallvms | awk '{print $1 " " $2}' | grep {{ target_node }} | awk '{print $1}' + args: + executable: /bin/bash + register: id + +- name: Perform operation on the target vm + shell: sshpass -p {{ esx_pwd }} ssh -o StrictHostKeyChecking=no root@{{ esx_ip }} vim-cmd vmsvc/power.{{operation}} {{ id.stdout }} + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" \ No newline at end of file diff --git a/e2e-tests/experiments/chaos/app_pod_failure/README.md b/e2e-tests/experiments/chaos/app_pod_failure/README.md new file mode 100644 index 0000000..88a3184 --- /dev/null +++ b/e2e-tests/experiments/chaos/app_pod_failure/README.md @@ -0,0 +1,63 @@ +## About this experiment + +This experiment validates the stability and fault-tolerance of an application pod consuming zfs-localpv storage. In this test, chaos is induced on the application pod's container using the pumba chaos utils. It is used as a disruptive test: it causes loss of access to storage by failing the application pod, and then verifies the recovery workflow of the application pod. + +## Supported platforms + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- One application should be deployed consuming zfs-localpv storage. +- Application services are accessible & pods are healthy +- Application writes are successful +- zfs-controller and csi node-agent daemonset pods should be in running state. + +## Exit-Criteria + +- Application services are accessible & pods are healthy +- Data written prior to chaos is successfully retrieved/read +- Data consistency is maintained as per integrity check utils +- Storage target pods are healthy + +## Steps performed + +- Get the application pod name and check its Running status +- Dump some dummy data into the application mount point to check data consistency after chaos injection. +- Create a daemonset of pumba utils and get the name of the pumba pod scheduled on the same node as the application pod. The utils used in this test are located in the `e2e-tests/chaoslib/pumba` directory. +- Using the SIGKILL command via the pumba pod, disrupt the application container's access to the storage. As part of recovery, the container restarts. +- Check the container restart count to validate successful chaos injection. +- Validate the data consistency by checking the md5sum of the test data. +- Delete the pumba daemonset. + +## How to run + +- This experiment accepts its parameters in the form of Kubernetes job environment variables. +- To run this application pod failure experiment, clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then first apply the rbac and crds for the e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +Then update the needed test-specific values in the run_e2e_test.yml file and create the Kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +Descriptions of all the env variables are provided as comments in the same file. + +After creating the Kubernetes job, once the job's pod is instantiated, we can check the logs of the pod that is executing the test case. 
+ +``` +kubectl get pods -n e2e +kubectl logs -f <test-job-pod-name> -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail). + +``` +kubectl get e2er +kubectl get e2er application-pod-failure -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er application-pod-failure -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/chaos/app_pod_failure/data_persistence.j2 b/e2e-tests/experiments/chaos/app_pod_failure/data_persistence.j2 new file mode 100644 index 0000000..68ba06a --- /dev/null +++ b/e2e-tests/experiments/chaos/app_pod_failure/data_persistence.j2 @@ -0,0 +1,5 @@ +{% if data_persistence is defined and data_persistence == 'mysql' %} + consistencyutil: /e2e-tests/utils/applications/mysql/mysql_data_persistence.yml + {% elif data_persistence is defined and data_persistence == 'busybox' %} + consistencyutil: /e2e-tests/utils/applications/busybox/busybox_data_persistence.yml +{% endif %} diff --git a/e2e-tests/experiments/chaos/app_pod_failure/run_e2e_test.yml b/e2e-tests/experiments/chaos/app_pod_failure/run_e2e_test.yml new file mode 100644 index 0000000..ddd6338 --- /dev/null +++ b/e2e-tests/experiments/chaos/app_pod_failure/run_e2e_test.yml @@ -0,0 +1,57 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-pod-failure + namespace: e2e +data: + parameters.yml: | + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: application-pod-failure- + namespace: e2e +spec: + template: + metadata: + labels: + name: application-pod-failure + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # This is the namespace where application pod is deployed + # on which we have to perform this pod-failure chaos + - name: APP_NAMESPACE + value: '' + + # Application pod label + - name: APP_LABEL + value: '' + + # Specify the container runtime used, to pick the relevant chaos util + - name: CONTAINER_RUNTIME + value: docker + + # Check if the data is consistent. 
Currently supported values are 'mysql' and 'busybox' + - name: DATA_PERSISTENCE + value: "" + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/chaos/app_pod_failure/test.yml -i /etc/ansible/hosts -vv; exit 0"] + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: app-pod-failure \ No newline at end of file diff --git a/e2e-tests/experiments/chaos/app_pod_failure/test.yml b/e2e-tests/experiments/chaos/app_pod_failure/test.yml new file mode 100644 index 0000000..a9252ad --- /dev/null +++ b/e2e-tests/experiments/chaos/app_pod_failure/test.yml @@ -0,0 +1,105 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + - block: + + ## Generating the testname for application pod failure chaos test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Identify the data consistency util to be invoked + template: + src: data_persistence.j2 + dest: data_persistence.yml + + - include_vars: + file: data_persistence.yml + + - name: Record the data consistency util path + set_fact: + data_consistency_util_path: "{{ consistencyutil }}" + when: data_persistence != '' + + - name: Display the app information passed via the test job + debug: + msg: + - "The application info is as follows:" + - "Namespace : {{ app_ns }}" + - "Label : {{ app_label }}" + + - block: + + - name: Get application pod name + shell: > + kubectl get pods -n {{ app_ns }} -l {{ app_label }} --no-headers + -o=custom-columns=NAME:".metadata.name" | shuf -n 1 + args: + executable: /bin/bash + register: app_pod_name + + - name: Check that application pod is in running state + shell: > + kubectl get pod {{ app_pod_name.stdout }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pod_status + failed_when: "pod_status.stdout != 'Running'" + + - name: Create some test data + include: "{{ data_consistency_util_path }}" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence != '' + + ## APPLICATION FAULT INJECTION + + - include_tasks: /e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml + vars: + action: "killapp" + app_pod: "{{ app_pod_name.stdout }}" + namespace: "{{ app_ns }}" + label: "{{ app_label }}" + + - name: Verify application data persistence + include: "{{ data_consistency_util_path }}" + vars: + status: 'VERIFY' + ns: "{{ app_ns }}" + label: "{{ app_label }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence != '' + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' + chaostype: "" + app: "" + + - include_tasks: /e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml + vars: + action: "deletepumba" + namespace: "{{ app_ns }}" \ No newline at end of file diff --git a/e2e-tests/experiments/chaos/app_pod_failure/test_vars.yml b/e2e-tests/experiments/chaos/app_pod_failure/test_vars.yml new file mode 100644 index 0000000..6f75427 --- /dev/null +++ b/e2e-tests/experiments/chaos/app_pod_failure/test_vars.yml @@ -0,0 +1,9 @@ +test_name: application-pod-failure + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + 
+app_label: "{{ lookup('env','APP_LABEL') }}" + +cri: "{{ lookup('env','CONTAINER_RUNTIME') }}" + +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/RestoreItemAction_configmap.j2 b/e2e-tests/experiments/functional/backup_and_restore/RestoreItemAction_configmap.j2 new file mode 100644 index 0000000..22314a2 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/RestoreItemAction_configmap.j2 @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + # any name can be used; Velero uses the labels (below) + # to identify it rather than the name + name: change-pvc-node-selector-config + # must be in the velero namespace + namespace: velero + # the below labels should be used verbatim in your + # ConfigMap. + labels: + # this value-less label identifies the ConfigMap as + # config for a plugin (i.e. the built-in restore item action plugin) + velero.io/plugin-config: "" + # this label identifies the name and kind of plugin + # that this ConfigMap is for. + velero.io/change-pvc-node-selector: RestoreItemAction +data: + # add 1+ key-value pairs here, where the key is the old + # node name and the value is the new node name. + {{ source_node }}: {{ destination_node }} \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/backup.yml b/e2e-tests/experiments/functional/backup_and_restore/backup.yml new file mode 100644 index 0000000..0e85a84 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/backup.yml @@ -0,0 +1,68 @@ +--- +- block: + + - name: Create the volume snapshot location which has the information about where the snapshot should be stored + shell: kubectl apply -f ./volume_snapshot_location.yml + args: + executable: /bin/bash + + - name: Check that volume snapshot location class is present + shell: kubectl get volumesnapshotlocation -n velero + args: + executable: /bin/bash + register: vol_snapshot_location + until: "'zfspv-snaplocation' in vol_snapshot_location.stdout" + delay: 2 + retries: 30 + + - name: Creating Backup + shell: > + velero backup create {{ velero_backup_name }} --snapshot-volumes --include-namespaces={{ app_ns }} --volume-snapshot-locations=zfspv-snaplocation --storage-location=default + args: + executable: /bin/bash + + - name: Get the state of Backup + shell: kubectl get backup {{ velero_backup_name }} -n velero -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: backup_state + until: "'Completed' in backup_state.stdout" + delay: 5 + retries: 100 + + when: action == "backup" + + +# Schedule creates a cron job for backup. Notation "--schedule=*/2 * * * *" applies same as kubernetes cron job here. + +- block: + + - name: Creating schedule backup + shell: velero create schedule {{ schedule_name }} --schedule="*/1 * * * *" --snapshot-volumes --include-namespaces={{ app_ns }} --volume-snapshot-locations=zfspv-snaplocation --storage-location=default + + when: action == "schedule_backup" + +- block: + + - name: Create the volume snapshot location which has the information about where the snapshot should be stored + shell: kubectl apply -f ./incremental_backup_vsl.yml + args: + executable: /bin/bash + + ## Incremental backups work with schedules. Following task will create one schedule which will create backups + ## Per minute (as per the cron job format). First backup will be full backup and after that incremental backups + ## and at last one will be full backup. 
This sequence will be repeated till the schedule is enabled. + - name: Creating incremental backup + shell: velero create schedule {{ schedule_name }} --schedule="*/1 * * * *" --snapshot-volumes --include-namespaces={{ app_ns }} --volume-snapshot-locations=incr --storage-location=default + + ## After creating schedule backup creation starts. so waiting for some time for dumping + ## the data before creation of first incremental backup. + - name: sleep some time + shell: sleep 30 + + ## This task will create backups periodically after one minutes. Meanwhile this task will dump some dummy data + ## so that we can verify them after restoring the backups. + - name: Create backups incrementally after dumping data periodically + include_tasks: "./incremental_backup.yml" + + when: action == "incremental_backup" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/credentials_minio b/e2e-tests/experiments/functional/backup_and_restore/credentials_minio new file mode 100644 index 0000000..735edc7 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/credentials_minio @@ -0,0 +1,3 @@ +[default] +aws_access_key_id = minio +aws_secret_access_key = minio123 diff --git a/e2e-tests/experiments/functional/backup_and_restore/data_persistence.j2 b/e2e-tests/experiments/functional/backup_and_restore/data_persistence.j2 new file mode 100644 index 0000000..68ba06a --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/data_persistence.j2 @@ -0,0 +1,5 @@ +{% if data_persistence is defined and data_persistence == 'mysql' %} + consistencyutil: /e2e-tests/utils/applications/mysql/mysql_data_persistence.yml + {% elif data_persistence is defined and data_persistence == 'busybox' %} + consistencyutil: /e2e-tests/utils/applications/busybox/busybox_data_persistence.yml +{% endif %} diff --git a/e2e-tests/experiments/functional/backup_and_restore/incremental_backup.yml b/e2e-tests/experiments/functional/backup_and_restore/incremental_backup.yml new file mode 100644 index 0000000..e05fdc0 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/incremental_backup.yml @@ -0,0 +1,85 @@ +- name: Obtain the mount path for the application + shell: > + kubectl get pods -n {{ app_ns }} -l {{ app_label }} + -o custom-columns=:.spec.containers[].volumeMounts[].mountPath --no-headers + args: + executable: /bin/bash + register: mount + +- name: Record the mount path for the application + set_fact: + mount_path: "{{ mount.stdout }}" + +- name: Dump some dummy data in application mount point + shell: > + kubectl exec -ti {{ app_pod_name }} -n {{ app_ns }} -- sh + -c "dd if=/dev/urandom of={{ mount_path }}/incr-file1 bs=4k count=1024 && + md5sum {{ mount_path }}/incr-file1 > {{ mount_path }}/pre-incr-file1-md5 && sync;sync;sync" + args: + executable: /bin/bash + +- name: Wait for some seconds + shell: sleep 60 + +- name: Again dump some dummy data + shell: > + kubectl exec -ti {{ app_pod_name }} -n {{ app_ns }} -- sh + -c "dd if=/dev/urandom of={{ mount_path }}/incr-file2 bs=4k count=1024 && + md5sum {{ mount_path }}/incr-file2 > {{ mount_path }}/pre-incr-file2-md5 && sync;sync;sync" + args: + executable: /bin/bash + +- name: Wait for some time to finish all incremental backup and at last full backup + shell: sleep 180 + +- name: Get the first backup name which is full backup by default + shell: velero get backup | grep {{ schedule_name }} | tail -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: first_full_bkp + +- name: Record 
the first full backup name + set_fact: + first_full_backup: "{{ first_full_bkp.stdout }}" + +- name: Get the first incremental backup name + shell: velero get backup | grep {{ schedule_name }} | tail -n2 | head -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: first_incr_bkp + +- name: Record the first incremental backup name + set_fact: + first_incremental_backup: "{{ first_incr_bkp.stdout }}" + +- name: Get the second incremental backup name + shell: velero get backup | grep {{ schedule_name }} | tail -n3 | head -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: second_incr_bkp + +- name: Record the second incremental backup name + set_fact: + second_incremental_backup: "{{ second_incr_bkp.stdout }}" + +- name: Get the last full backup name which is after two incremental backups + shell: velero get backup | grep {{ schedule_name }} | tail -n4 | head -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: last_full_bkp + +- name: Record the last full backup name + set_fact: + last_full_backup: "{{ last_full_bkp.stdout }}" + +- name: Check status of all four backups + shell: kubectl get backups.velero.io {{ item }} -n velero -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: backup_status + failed_when: "backup_status.stdout != 'Completed'" + loop: + - "{{ first_full_backup }}" + - "{{ first_incremental_backup }}" + - "{{ second_incremental_backup }}" + - "{{ last_full_backup }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/incremental_backup_vsl.yml b/e2e-tests/experiments/functional/backup_and_restore/incremental_backup_vsl.yml new file mode 100644 index 0000000..fdfea36 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/incremental_backup_vsl.yml @@ -0,0 +1,16 @@ +apiVersion: velero.io/v1 +kind: VolumeSnapshotLocation +metadata: + name: incr + namespace: velero +spec: + provider: openebs.io/zfspv-blockstore + config: + bucket: velero + prefix: zfs + incrBackupCount: "2" # number of incremental backup we want to have + namespace: openebs # this is namespace where ZFS-LocalPV creates all the CRs, passed as OPENEBS_NAMESPACE env in the ZFS-LocalPV deployment + provider: aws + region: minio + s3ForcePathStyle: "true" + s3Url: http://minio.velero.svc:9000 \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/incremental_restore.yml b/e2e-tests/experiments/functional/backup_and_restore/incremental_restore.yml new file mode 100644 index 0000000..da2f87c --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/incremental_restore.yml @@ -0,0 +1,83 @@ +- name: Obtain the mount path for the application + shell: > + kubectl get pods -n {{ app_ns }} -l {{ app_label }} + -o custom-columns=:.spec.containers[].volumeMounts[].mountPath --no-headers + args: + executable: /bin/bash + register: mount + +- name: Record the mount path for the application + set_fact: + mount_path: "{{ mount.stdout }}" + +- name: Get the first backup name which is full backup by default + shell: velero get backup | grep {{ schedule_name }} | tail -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: first_full_bkp + +- name: Record the first full backup name + set_fact: + first_full_backup: "{{ first_full_bkp.stdout }}" + +- name: Get the first incremental backup name + shell: velero get backup | grep {{ schedule_name }} | tail -n2 | head -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: first_incr_bkp + +- 
name: Record the first incremental backup name + set_fact: + first_incremental_backup: "{{ first_incr_bkp.stdout }}" + +- name: Get the second incremental backup name + shell: velero get backup | grep {{ schedule_name }} | tail -n3 | head -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: second_incr_bkp + +- name: Record the second incremental backup name + set_fact: + second_incremental_backup: "{{ second_incr_bkp.stdout }}" + +- name: Get the last full backup name which is after two incremental backups + shell: velero get backup | grep {{ schedule_name }} | tail -n4 | head -n1 | awk '{print $1}' + args: + executable: /bin/bash + register: last_full_bkp + +- name: Record the last full backup name + set_fact: + last_full_backup: "{{ last_full_bkp.stdout }}" + +- name: Restore the first incremental backup + include: "./restore.yml" + vars: + velero_backup_name: "{{ first_incremental_backup }}" + app_ns_new: "first-incr-restore-ns" + +- name: Check the data consistency + shell: > + kubectl exec -ti {{ restore_app_pod }} -n first-incr-restore-ns + -- sh -c "cd {{ mount_path }} && ls" + args: + executable: /bin/bash + register: data_status + failed_when: "'incr-file1' not in data_status.stdout" + +- name: Restore the second incremental backup + include: "./restore.yml" + vars: + velero_backup_name: "{{ second_incremental_backup }}" + app_ns_new: "second-incr-restore-ns" + +- name: Check the data consistency + shell: > + kubectl exec -ti {{ restore_app_pod }} -n second-incr-restore-ns + -- sh -c "cd {{ mount_path }} && ls" + args: + executable: /bin/bash + register: data_status + failed_when: + - "'incr-file1' not in data_status.stdout" + - "'incr-file2' not in data_status.stdout" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/restore.yml b/e2e-tests/experiments/functional/backup_and_restore/restore.yml new file mode 100644 index 0000000..69ec051 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/restore.yml @@ -0,0 +1,105 @@ +- block: + + - name: Restoring application + shell: > + velero restore create --from-backup {{ velero_backup_name }} --restore-volumes=true --namespace-mappings {{ app_ns }}:{{ app_ns_new }} + args: + executable: /bin/bash + + when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "true" + +- block: + + - name: Delete all the resources in application namespace + shell: | + kubectl delete deploy,pvc,svc --all -n {{ app_ns }} + kubectl delete ns {{ app_ns }} + + - name: Verify that application namespace is deleted successfully + shell: kubectl get ns --no-headers -o custom-columns=:metadata.name + args: + executable: /bin/bash + register: ns_list + until: "'{{ app_ns }}' not in ns_list.stdout_lines" + delay: 5 + retries: 30 + + - name: Verify that PV is successfully deleted + shell: > + kubectl get pv -o json | jq -r '.items[] | + select(.spec.claimRef.name == "{{ app_pvc }}" and .spec.claimRef.namespace == "{{ app_ns }}" ) + | .metadata.name' + args: + executable: /bin/bash + register: pv_list + until: "'' in pv_list.stdout" + delay: 3 + retries: 30 + + - name: Restoring application + shell: > + velero restore create --from-backup {{ velero_backup_name }} --restore-volumes=true + args: + executable: /bin/bash + + when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "false" + +- name: Getting latest restore name + shell: velero get restore | grep {{ velero_backup_name }} | awk '{print $1}' | tail -n 1 + register: restore_name + +- name: Checking the restore status + shell: kubectl get restore {{ 
restore_name.stdout }} -n velero -o jsonpath='{.status.phase}' + register: restore_state + until: "'Completed' in restore_state.stdout" + delay: 5 + retries: 60 + +- name: Check the new namespace is created and in Active state + shell: kubectl get namespace {{ app_ns_new }} -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: app_ns_new_status + failed_when: "'Active' not in app_ns_new_status.stdout" + +- name: Check that pods in {{ app_ns_new }} are in running state + shell: kubectl get pods -n {{ app_ns_new }} --no-headers -o custom-columns=:status.phase | sort | uniq + args: + executable: /bin/bash + register: pod_status + until: "pod_status.stdout == 'Running'" + delay: 3 + retries: 50 + +- name: Get the application pod name in {{ app_ns_new }} namespace + shell: kubectl get pod -n {{ app_ns_new }} -l {{ app_label }} --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: restore_application_pod + +- name: Record the application pod name in {{ app_ns_new }} namespace + set_fact: + restore_app_pod: "{{ restore_application_pod.stdout }}" + +- block: + + - name: Check if the restore has done on different node + shell: > + kubectl get pod -n {{ app_ns_new }} -l {{ app_label }} --no-headers -o custom-columns=:.spec.nodeName + args: + executable: /bin/bash + register: restore_node_name + failed_when: "'{{ destination_node }}' != '{{ restore_node_name.stdout }}'" + + when: lookup('env','RESTORE_IN_DIFF_NODE') == "true" + + + +- name: Verify the data consistency + include: "{{ data_consistency_util_path }}" + vars: + status: 'VERIFY' + ns: "{{ app_ns_new }}" + label: "{{ app_label }}" + pod_name: "{{ restore_app_pod }}" + when: data_persistence != '' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/run_e2e_test.yml b/e2e-tests/experiments/functional/backup_and_restore/run_e2e_test.yml new file mode 100644 index 0000000..cee77c9 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/run_e2e_test.yml @@ -0,0 +1,86 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: zfspv-backup-store + namespace: e2e +data: + parameters.yml: | + +--- + +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-velero-backup-restore- + namespace: e2e +spec: + template: + metadata: + name: e2e + labels: + test: velero-backup-restore + + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE ## Namespace where application is deployed + value: '' + + - name: APP_LABEL ## Application label + value: '' + + - name: APP_PVC ## PVC name for application + value: '' + + - name: ACTION ## On basis of the value of this env, this playbook will perform tasks. + value: '' ## Supported values: (backup, restore, schedule_backup, incremental_backup, incremental_restore) + + - name: VELERO_BACKUP_NAME ## Velero backup will be created with name of this value. + value: '' + + - name: VELERO_PLUGIN_NAME ## Velero plugin name. For zfs-localpv plugin name is `openebs/velero-plugin:2.1.0` + value: '' ## you can use ci image also. + + - name: VELERO_SCHEDULE_BACKUP_NAME ## If you want to create scheduled backup (based on ACTION env) give the name for it. + value: '' + + - name: VELERO_VERSION ## Velero version (for e.g. 
v1.4.0) + value: '' + + - name: STORAGE_BUCKET ## Supported values: minio + value: '' + + - name: RESTORE_IN_DIFF_NAMESPACE ## For restoring the backup in different namespace provide value accordingly. + value: '' ## Supported values: ( true, false ) + + - name: RESTORE_NAMESPACE ## If `RESTORE_IN_DIFF_NAMESPACE: true` provide the namespace in which you want to restore the backup + value: '' + + - name: RESTORE_IN_DIFF_NODE ## For restoring the backup on different nodes provide value accordingly. + value: '' ## Supported values: ( true, false) + + - name: DATA_PERSISTENCE ## Supported values: (busybox, mysql) + value: '' ## To check data-consistency provide values accordingly. When value is set; some dummy test data will be + ## dumped in application mount point and will verify that data in restored application. + + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/backup_and_restore/test.yml -i /etc/ansible/hosts -vv; exit 0"] + + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: zfspv-backup-store \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/setup_dependency.yml b/e2e-tests/experiments/functional/backup_and_restore/setup_dependency.yml new file mode 100644 index 0000000..4be1c88 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/setup_dependency.yml @@ -0,0 +1,138 @@ +- name: Download velero binary + get_url: + url: "{{ velero_binary_url }}" + dest: "./" + force: yes + register: result + until: "'OK' in result.msg" + delay: 3 + retries: 5 + +- name: Installing velero inside e2e-test container + shell: | + tar -xvf velero-{{ velero_version }}-linux-amd64.tar.gz + mv velero-{{ velero_version }}-linux-amd64/velero /usr/local/bin/ + +- name: Checking the velero version + shell: velero version + register: velero + failed_when: "velero_version not in velero.stdout" + +- block: + + - name: Installing velero server inside cluster + shell: > + velero install \ + --provider aws \ + --bucket velero \ + --secret-file ./credentials_minio \ + --plugins velero/velero-plugin-for-aws:v1.0.0 \ + --use-volume-snapshots=false \ + --use-restic \ + --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://minio.velero.svc:9000 + + - name: Check velero server pod status + shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.phase}' + register: velero_pod_status + until: "'Running' in velero_pod_status.stdout" + delay: 5 + retries: 20 + + - name: Check velero server pod's container status + shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.containerStatuses[0].state}' + register: velero_container_status + until: "'running' in velero_container_status.stdout" + delay: 5 + retries: 20 + + - name: Check that restic daemonset pods are running + shell: kubectl get pods -n velero -l name=restic --no-headers -o custom-columns=:status.phase | sort | uniq + register: restic_pod_status + until: "restic_pod_status.stdout == 'Running'" + delay: 3 + retries: 20 + + - name: Installing minio + shell: kubectl apply -f velero-{{ velero_version }}-linux-amd64/examples/minio/00-minio-deployment.yaml + args: + executable: /bin/bash + + - name: Waiting for minio job to create bucket + shell: kubectl get pod -n velero -l job-name=minio-setup -o jsonpath='{.items[*].status.phase}' + register: minio_job_status + until: "'Succeeded' in minio_job_status.stdout" + delay: 5 + retries: 20 + + - name: 
Checking for minio pod status + shell: kubectl get pod -n velero -l component=minio -ojsonpath='{.items[0].status.phase}' + register: minio_status + until: "'Running' in minio_status.stdout" + delay: 5 + retries: 15 + + when: bucket_type == "minio" + +- name: Get the velero server pod name + shell: kubectl get pod -n velero -l deploy=velero --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: velero_pod + +- name: Check if the velero-plugin for zfs-localpv is already added + shell: > + kubectl get deploy velero -n velero + -o jsonpath='{.spec.template.spec.initContainers[?(@.name=="velero-plugin")].name}' + args: + executable: /bin/bash + register: zfs_localpv_velero_plugin + +# from velero version v1.6.0 velero plugin for openebs is renamed to openebs-velero-plugin +- name: Check if the velero-plugin for zfs-localpv is already added + shell: > + kubectl get deploy velero -n velero + -o jsonpath='{.spec.template.spec.initContainers[?(@.name=="openebs-velero-plugin")].name}' + args: + executable: /bin/bash + register: zfs_localpv_velero_plugin_latest + +- name: Add velero-plugin for zfs-localpv + shell: velero plugin add {{ velero_plugin_name }} + args: + executable: /bin/bash + when: + - zfs_localpv_velero_plugin.stdout != 'velero-plugin' + - zfs_localpv_velero_plugin_latest.stdout != 'openebs-velero-plugin' + +#After installing openebs velero plugin a new velero pod comes up in Running state and the older one will terminates. + +- name: Wait until older velero pod terminates successfully + shell: kubectl get pods -n velero + args: + executable: /bin/bash + register: velero_pod_list + until: "'velero_pod.stdout' not in velero_pod_list.stdout" + delay: 3 + retries: 30 + +- name: Check velero server pod status + shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.phase}' + register: velero_pod_run + until: "'Running' in velero_pod_run.stdout" + delay: 5 + retries: 20 + +- name: Check velero server pod's container status + shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.containerStatuses[0].state}' + register: velero_container + until: "'running' in velero_container.stdout" + delay: 5 + retries: 20 + +- name: Check velero plugin for VolumeSnapshotter is present + shell: velero plugin get + register: snapshotter_plugin + until: "'zfspv-blockstore' in snapshotter_plugin.stdout" + delay: 2 + retries: 40 + \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/test.yml b/e2e-tests/experiments/functional/backup_and_restore/test.yml new file mode 100644 index 0000000..d9f9f7b --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/test.yml @@ -0,0 +1,146 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + + - block: + + ## Generating the testname for zfs volume resize test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Identify the data consistency util to be invoked + template: + src: data_persistence.j2 + dest: data_persistence.yml + + - include_vars: + file: data_persistence.yml + + - name: Record the data consistency util path + set_fact: + data_consistency_util_path: "{{ consistencyutil }}" + when: data_persistence != '' + + - name: Install velero + include_tasks: 
"./setup_dependency.yml" + + - name: Get the application pod name + shell: > + kubectl get pod -n {{ app_ns }} -l {{ app_label }} + --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: application_pod_name + + - name: Record the application pod name + set_fact: + app_pod_name: "{{ application_pod_name.stdout }}" + + - name: Check if the application pod is in running state + shell: > + kubectl get pod {{ app_pod_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: app_pod_status + failed_when: "'Running' not in app_pod_status.stdout" + + - block: + + - name: Create some test data + include: "{{ data_consistency_util_path }}" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name }}" + when: data_persistence != '' + + - name: Create the backup of the namespace in which application is deployed + include_tasks: "./backup.yml" + + when: action == 'backup' or action == 'schedule_backup' or action == 'incremental_backup' + + + - block: + + - name: Get the application node name + shell: > + kubectl get pod {{ app_pod_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.spec.nodeName + args: + executable: /bin/bash + register: application_node_name + + - name: Record the application node name + set_fact: + source_node: "{{ application_node_name.stdout }}" + + - name: Get any one of the nodes from remaining worker nodes in cluster + shell: > + kubectl get nodes --no-headers | grep -v master | grep -v {{ source_node }} | shuf -n 1 | awk '{print $1}' + args: + executable: /bin/bash + register: random_node + + - name: Record this random node as destination node for restoring on differet node + set_fact: + destination_node: "{{ random_node.stdout }}" + + - name: Update the restore item action configmap with test specific values + template: + src: RestoreItemAction_configmap.j2 + dest: RestoreItemAction_configmap.yml + + - name: Apply the configmap + shell: kubectl apply -f RestoreItemAction_configmap.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + when: lookup('env','RESTORE_IN_DIFF_NODE') == "true" + + - block: + + - name: Restore the backup + include_tasks: "./restore.yml" + when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "true" + + - name: Restore the backup + include_tasks: "./restore.yml" + vars: + app_ns_new: "{{ app_ns }}" + when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "false" + + when: action == 'restore' + + - block: + + - name: Restore the incremental backups + include_tasks: "./incremental_restore.yml" + + when: action == 'incremental_restore' + + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + # RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' diff --git a/e2e-tests/experiments/functional/backup_and_restore/test_vars.yml b/e2e-tests/experiments/functional/backup_and_restore/test_vars.yml new file mode 100644 index 0000000..a88e0b5 --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/test_vars.yml @@ -0,0 +1,13 @@ +test_name: "zfspv-velero-backup-restore" +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" +app_ns_new: "{{ lookup('env','RESTORE_NAMESPACE') }}" +app_label: "{{ lookup('env','APP_LABEL') }}" +app_pvc: "{{ lookup('env','APP_PVC') }}" +velero_backup_name: "{{ lookup('env','VELERO_BACKUP_NAME') }}" +velero_plugin_name: "{{ lookup('env','VELERO_PLUGIN_NAME') }}" +velero_version: "{{ 
lookup('env','VELERO_VERSION') }}" +bucket_type: "{{ lookup('env','STORAGE_BUCKET') }}" +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" +action: "{{ lookup('env','ACTION') }}" +schedule_name: "{{ lookup('env','VELERO_SCHEDULE_BACKUP_NAME') }}" +velero_binary_url: "https://github.com/vmware-tanzu/velero/releases/download/{{ lookup('env','VELERO_VERSION') }}/velero-{{ lookup('env','VELERO_VERSION') }}-linux-amd64.tar.gz" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/backup_and_restore/volume_snapshot_location.yml b/e2e-tests/experiments/functional/backup_and_restore/volume_snapshot_location.yml new file mode 100644 index 0000000..b67833c --- /dev/null +++ b/e2e-tests/experiments/functional/backup_and_restore/volume_snapshot_location.yml @@ -0,0 +1,15 @@ +apiVersion: velero.io/v1 +kind: VolumeSnapshotLocation +metadata: + name: zfspv-snaplocation + namespace: velero +spec: + provider: openebs.io/zfspv-blockstore + config: + bucket: velero + prefix: zfs + namespace: openebs # this is the namespace where ZFS-LocalPV creates all the CRs, passed as OPENEBS_NAMESPACE env in the ZFS-LocalPV deployment + provider: aws + region: minio + s3ForcePathStyle: "true" + s3Url: http://minio.velero.svc:9000 \ No newline at end of file diff --git a/e2e-tests/experiments/functional/csi-volume-resize/README.md b/e2e-tests/experiments/functional/csi-volume-resize/README.md new file mode 100644 index 0000000..62eb927 --- /dev/null +++ b/e2e-tests/experiments/functional/csi-volume-resize/README.md @@ -0,0 +1,72 @@ +## About this experiment + +This experiment verifies the csi volume resize feature of zfs-localpv. To resize a volume we just need to update the pvc yaml with the desired size and apply it. We can directly edit the pvc with the ```kubectl edit pvc <pvc-name> -n <namespace>``` command and update the spec.resources.requests.storage field with the desired volume size. Note that a volume can only be resized from a smaller pvc size to a larger one; resizing from a larger size to a smaller one (volume shrink) is not possible. The zfs driver supports online volume expansion, so an application pod restart is not required to use the resized volume. For resize, the storage-class which provisions the pvc should have the `allowVolumeExpansion: true` field. + +For example: +``` +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: zfspv-sc +allowVolumeExpansion: true +parameters: + poolname: "zfs-test-pool" +provisioner: zfs.csi.openebs.io +``` + +## Supported platforms: + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-criteria + +- K8s cluster should be in healthy state including desired worker nodes in ready state. +- zfs-controller and csi node-agent daemonset pods should be in running state. +- storage class with `allowVolumeExpansion: true` enabled should be present. +- Application should be deployed successfully consuming the zfs-localpv storage. + +## Exit-criteria + +- Volume should be resized successfully and application should be accessible seamlessly. +- Application should be able to use the newly resized volume space. + +## Steps performed + +- Check the pvc status, it should be Bound. Get the storage class name and capacity size of this pvc. +- Update the pvc size with the desired volume size, which should not be less than the previous volume size because volume shrink is not supported. +- Check the updated size in the pvc spec. 
+- Since it is online volume expansion, we don't need to restart the application pod, but here we restart it intentionally to validate that the resized space is available after a restart of the application pod. +- To use the resized space this test will dump some dummy data at the application mount point. The size of this dummy data will be the previous volume size + 1 Gi, so make sure enough space is available. +- Finally, this test deletes the dummy data files to free the space. + +## How to run + +- This experiment accepts the parameters in the form of kubernetes job environment variables. +- For running this experiment of csi volume resize, clone the openebs/zfs-localpv repo [https://github.com/openebs/zfs-localpv] and then first apply the rbac and crds for the e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +Then update the needed test-specific values in the run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables are described in the comments in the same file. + +After creating the kubernetes job, once the job's pod is instantiated, we can follow the logs of that pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f <test-pod-name> -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail). + +``` +kubectl get e2er +kubectl get e2er csi-volume-resize -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er csi-volume-resize -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/csi-volume-resize/run_e2e_test.yml b/e2e-tests/experiments/functional/csi-volume-resize/run_e2e_test.yml new file mode 100644 index 0000000..8c5a849 --- /dev/null +++ b/e2e-tests/experiments/functional/csi-volume-resize/run_e2e_test.yml @@ -0,0 +1,43 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: csi-volume-resize- + namespace: e2e +spec: + template: + metadata: + name: csi-volume-resize + labels: + test: csi-volume-resize + + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # This is the namespace where application pod is running + - name: APP_NAMESPACE + value: '' + + # Name of the application pvc + - name: APP_PVC + value: '' + + # Application pod label in `key=value` format + - name: APP_LABEL + value: '' + + # Resized PVC size (for eg. 
10Gi) + - name: NEW_PV_CAPACITY + value: '' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/csi-volume-resize/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/csi-volume-resize/test.yml b/e2e-tests/experiments/functional/csi-volume-resize/test.yml new file mode 100644 index 0000000..1b5f219 --- /dev/null +++ b/e2e-tests/experiments/functional/csi-volume-resize/test.yml @@ -0,0 +1,172 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for csi volume resize test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + + - name: Check if the pvc {{ app_pvc }} is bound + shell: > + kubectl get pvc {{ app_pvc }} -n {{ app_ns }} --no-headers + -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Bound' not in pvc_status.stdout" + + - name: Get the storage class name used for provisioning {{ app_pvc }} pvc + shell: > + kubectl get pvc {{ app_pvc }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.storageClassName + args: + executable: /bin/bash + register: storage_class + + - name: Get the present capacity size of pvc {{ app_pvc }} + shell: > + kubectl get pvc {{ app_pvc }} -n {{ app_ns }} --no-headers + -o custom-columns=:.status.capacity.storage + args: + executable: /bin/bash + register: vol_size + + # This test will work with one pod at a time retrieved by app_label in app_namespace + # If there are multiple pods with same label within one namespace, it takes one + # pod randomly. (for e.g. 
shared mount pods) + - name: Get the application pod name which is consuming {{ app_pvc }} pvc + shell: > + kubectl get pod -n {{ app_ns }} -l {{ app_label }} --no-headers + -o custom-columns=:.metadata.name | shuf -n1 + args: + executable: /bin/bash + register: app_pod + + - name: Obtain the mount path for the application + shell: > + kubectl get pod {{ app_pod.stdout }} -n {{ app_ns }} + -o custom-columns=:.spec.containers[].volumeMounts[].mountPath --no-headers + args: + executable: /bin/bash + register: mount + + - name: Fetch the value part from storage capacity + shell: echo "{{ vol_size.stdout }}" | grep -o -E '[0-9]+' + args: + executable: /bin/bash + register: value_str + + - name: Obtain the PVC spec + shell: > + kubectl get pvc {{ app_pvc }} -n {{ app_ns }} + --no-headers -o yaml > pvc.yml + args: + executable: /bin/bash + + - name: Update the desired capacity in PVC spec + replace: + path: pvc.yml + before: 'storageClassName: {{ storage_class.stdout }}' + regexp: "storage: {{ vol_size.stdout }}" + replace: "storage: {{ desired_vol_size }}" + + - name: Configure PVC with the new capacity + shell: kubectl apply -f pvc.yml + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + + - name: Check if the update PVC is bound + shell: > + kubectl get pvc {{ app_pvc }} -n {{ app_ns }} --no-headers + -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Bound' not in pvc_status.stdout" + + - name: Check if the storage capacity is updated in PVC + shell: > + kubectl get pvc {{ app_pvc }} -n {{ app_ns }} --no-headers + -o custom-columns=:status.capacity.storage + args: + executable: /bin/bash + register: capacity + until: "desired_vol_size in capacity.stdout" + delay: 3 + retries: 60 + + - name: Restart the application pod after resizing the volume + shell: kubectl delete pod {{ app_pod.stdout }} -n {{ app_ns }} + args: + executable: /bin/bash + register: app_pod_status + failed_when: app_pod_status.rc != 0 + + - name: Verify that application pod is deleted successfully. 
+ shell: > + kubectl get pods -n {{ app_ns }} + args: + executable: /bin/bash + register: app_pod_list + until: '"{{ app_pod.stdout }}" not in app_pod_list.stdout' + delay: 3 + retries: 30 + + - name: Get the name of application pod after Restart + shell: > + kubectl get pod -n {{ app_ns }} -l {{ app_label }} --no-headers + -o custom-columns=:.metadata.name | shuf -n1 + args: + executable: /bin/bash + register: app_pod_name + + ## Here we will dump +1Gi data than to previous pvc size + - set_fact: + value_num: '{{ ( (value_str.stdout | int + 1 | int) * 262144) | int }}' + + - name: Dump some more dummy data in the application mount point for using resized volume + shell: > + kubectl exec -it "{{ app_pod_name.stdout }}" -n "{{ app_ns }}" + -- sh -c "cd {{ mount.stdout }} && dd if=/dev/urandom of=volume.txt bs=4k count={{ value_num }}" + args: + executable: /bin/bash + register: load + failed_when: "load.rc != 0" + + - name: Delete the test file from application mount point + shell: > + kubectl exec -it "{{ app_pod_name.stdout }}" -n "{{ app_ns }}" + -- sh -c "cd {{ mount.stdout }} && rm -f volume.txt" + args: + executable: /bin/bash + register: testfile + failed_when: "testfile.rc != 0" + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + + # RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/csi-volume-resize/test_vars.yml b/e2e-tests/experiments/functional/csi-volume-resize/test_vars.yml new file mode 100644 index 0000000..29bf5b8 --- /dev/null +++ b/e2e-tests/experiments/functional/csi-volume-resize/test_vars.yml @@ -0,0 +1,14 @@ +--- +test_name: csi-volume-resize + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +app_pvc: "{{ lookup('env','APP_PVC') }}" + +vol_size: "{{ lookup('env','OLD_PV_CAPACITY') }}" + +desired_vol_size: "{{ lookup('env','NEW_PV_CAPACITY') }}" + +storage_class: "{{ lookup('env','STORAGE_CLASS') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfs-controller-high-availability/README.md b/e2e-tests/experiments/functional/zfs-controller-high-availability/README.md new file mode 100644 index 0000000..86268b8 --- /dev/null +++ b/e2e-tests/experiments/functional/zfs-controller-high-availability/README.md @@ -0,0 +1,63 @@ +## About this experiment + +This functional experiment scale up the zfs-controller statefulset replicas to use it in high availability mode and then verify the zfs-localpv behaviour when one of the replicas go down. This experiment checks the initial number of replicas of zfs-controller statefulset and scale it by one if a free node is present which should be able to schedule the pods. Default value for zfs-controller statefulset replica is one. + +## Supported platforms: + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- k8s cluster should be in healthy state including all desired worker nodes in ready state. +- zfs-localpv driver should be deployed and zfs-controller and csi node-agent daemonset pods should be in running state. +- one spare schedulable node should be present in the cluster so that after scaling up the zfs-controller replica by one, new replica gets scheduled on that node. These replicas will follow the anti-affinity rules so that replica pods will be present on different nodes only. 
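+
+As a point of reference, the scale-up that this experiment automates amounts to roughly the following commands (a sketch only; it assumes the driver is deployed in the `kube-system` namespace with its default object names, which is what this test expects):
+
+```
+# scale the controller statefulset from the default 1 replica to 2
+kubectl scale sts openebs-zfs-controller -n kube-system --replicas=2
+
+# all replicas should be Running and, because of the anti-affinity rules, on different nodes
+kubectl get pods -n kube-system -l app=openebs-zfs-controller -o wide
+
+# the current lease holder is the replica acting as master
+kubectl get lease zfs-csi-openebs-io -n kube-system -o jsonpath='{.spec.holderIdentity}'
+```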
+ +## Exit-Criteria + +- zfs-controller statefulset should be scaled up by one replica. +- All the replicas should be in running state. +- zfs-localpv volumes should be healthy, and data should not be impacted after scaling up the controller. +- This experiment makes one of the zfs-controller statefulset replicas go down; as a result, the replica that was active as master prior to the experiment will have changed to one of the remaining replicas after the experiment completes. This happens because of the lease mechanism, which is used to decide which replica serves as master. Only one replica is master at a time. +- Volume provisioning / deprovisioning should not be impacted if any one replica goes down. + +## Steps performed + +- Get the replica count of the zfs-controller statefulset. +- Scale down the controller replicas to zero, wait until the controller pods get terminated successfully, and then try to provision a volume for a busybox application. +- With zero active replicas of the zfs-controller, the pvc should remain in Pending state. +- If the no. of schedulable nodes is greater than or equal to the previous replica count + 1, the zfs-controller will be scaled up by one replica. Doing this will get the pvc Bound and the application pod will come into Running state. +- Now taint all the nodes with `NoSchedule` so that when we delete the master replica of the zfs-controller it doesn't come back to running state; the lease is then given to some other replica, which takes over as master. +- Now deprovision the application. This time deprovisioning will be done by the replica that is currently active as master. Thus we validate that provisioning and deprovisioning were successfully done by two different replicas of the zfs-controller. The taints are removed before exiting the test execution, and then the running status of all the replicas and csi node-agent pods is checked. +- If enough schedulable nodes are not present to schedule the updated no. of replicas, this test will fail at the task of scaling up the replicas and skip the further tasks. Before exiting, it scales the replicas back up to the same count that was present at the start of this experiment. Doing this will get the pvc Bound and the application pod will come into Running state. The test execution ends after deleting that pvc and application pod. + +## How to run + +- This experiment accepts the parameters in the form of kubernetes job environment variables. +- For running this experiment of zfs-localpv controller high availability, clone the openebs/zfs-localpv repo [https://github.com/openebs/zfs-localpv] and then first apply the rbac and crds for the e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +Then update the needed test-specific values in the run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables are described in the comments in the same file. + +After creating the kubernetes job, once the job's pod is instantiated, we can follow the logs of the pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f <test-pod-name> -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail). 
+ +``` +kubectl get e2er +kubectl get e2er zfs-controller-high-availability -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfs-controller-high-availability -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfs-controller-high-availability/busybox_app.yml b/e2e-tests/experiments/functional/zfs-controller-high-availability/busybox_app.yml new file mode 100644 index 0000000..64c2d29 --- /dev/null +++ b/e2e-tests/experiments/functional/zfs-controller-high-availability/busybox_app.yml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-busybox-ha + labels: + app: test_ha +spec: + selector: + matchLabels: + app: test_ha + template: + metadata: + labels: + app: test_ha + spec: + tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule" + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: pvcha +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvcha +spec: + storageClassName: zfspv-sc + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfs-controller-high-availability/run_e2e_test.yml b/e2e-tests/experiments/functional/zfs-controller-high-availability/run_e2e_test.yml new file mode 100644 index 0000000..127a17d --- /dev/null +++ b/e2e-tests/experiments/functional/zfs-controller-high-availability/run_e2e_test.yml @@ -0,0 +1,30 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfs-controller-high-availability- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfs-controller-high-availability + spec: + serviceAccountName: e2e + restartPolicy: Never + + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # This is the namespace where the zfs driver created all its resources including zvol. + # By default it is in openebs namespace. If we changed it at the time of zfs-driver provisioning + # give that namespace name here for the value of this env. 
+ - name: ZFS_OPERATOR_NAMESPACE + value: 'openebs' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfs-controller-high-availability/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfs-controller-high-availability/test.yml b/e2e-tests/experiments/functional/zfs-controller-high-availability/test.yml new file mode 100644 index 0000000..dbd111b --- /dev/null +++ b/e2e-tests/experiments/functional/zfs-controller-high-availability/test.yml @@ -0,0 +1,315 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for zfs localpv controller high-availability test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Get the no of replicas in zfs-controller statefulset + shell: > + kubectl get sts openebs-zfs-controller -n kube-system -o jsonpath='{.status.replicas}' + args: + executable: /bin/bash + register: controller_rep_count + + - name: Record the replica count of zfs-controller + set_fact: + zfs_ctrl_replicas: "{{ controller_rep_count.stdout }}" + + - name: Get the list of names of all the nodes in cluster + shell: > + kubectl get nodes --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: node_list + + - name: Get the count of the schedulable nodes, which don't have `NoSchedule` taints + shell: > + kubectl get nodes --no-headers -o custom-columns=:.spec.taints + | grep -v NoSchedule | wc -l + args: + executable: /bin/bash + register: schedulable_nodes_count + + - name: Record the number of schedulable nodes in cluster + set_fact: + no_of_schedulable_nodes: "{{ schedulable_nodes_count.stdout }}" + + - name: scale down the replicas to zero of zfs-controller statefulset + shell: > + kubectl scale sts openebs-zfs-controller -n kube-system --replicas=0 + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: check that zfs-controller pods has been terminated successfully + shell: > + kubectl get pods -n kube-system -l app=openebs-zfs-controller + args: + executable: /bin/bash + register: ctrl_pods + until: "'No resources found' in ctrl_pods.stderr" + delay: 3 + retries: 40 + + - name: Provision a test volume when zfs-controller is not active + shell: > + kubectl apply -f busybox_app.yml + args: + executable: /bin/bash + + - name: check the pvc status, it should be in pending state + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Pending' not in pvc_status.stdout" + + - name: Manual wait for 15 seconds, pvc should not get bound in this time + shell: sleep 15 + + - name: again check the pvc status + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Pending' not in pvc_status.stdout" + + - block: + + - name: scale up the zfs-controller statefulset with +1 no of replica count + shell: > + kubectl scale sts openebs-zfs-controller -n kube-system + --replicas="{{ zfs_ctrl_replicas|int + 1 }}" + args: + executable: /bin/bash + + - name: check that zfs-controller statefulset replicas are up and running + shell: > + kubectl get pods -n kube-system -l 
app=openebs-zfs-controller --no-headers + -o custom-columns=:.status.phase | grep Running | wc -l + args: + executable: /bin/bash + register: ready_replicas + until: "{{ ready_replicas.stdout|int }} == {{ zfs_ctrl_replicas|int + 1 }}" + delay: 3 + retries: 50 + + - name: check the pvc status after zfs controller is up and running + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.status.phase}' + args: + executable: /bin/bash + register: pvc_status + until: "'Bound' in pvc_status.stdout" + delay: 3 + retries: 40 + + - name: Get the application pod name + shell: > + kubectl get pods -n e2e -o jsonpath='{.items[?(@.metadata.labels.app=="test_ha")].metadata.name}' + args: + executable: /bin/bash + register: app_pod_name + + - name: Check if the application pod is in running state. + shell: > + kubectl get pods -n e2e -o jsonpath='{.items[?(@.metadata.labels.app=="test_ha")].status.phase}' + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 3 + retries: 40 + + - name: Get the name of the controller pod replica which is active as master at present + shell: > + kubectl get lease zfs-csi-openebs-io -n kube-system -o jsonpath='{.spec.holderIdentity}' + args: + executable: /bin/bash + register: master_replica + + - name: Taint all nodes with `NoSchedule` to keep replica {{ master_replica.stdout }} out of action + shell: > + kubectl taint node {{ item }} key=value:NoSchedule + args: + executable: /bin/bash + register: taint_status + until: "'tainted' in taint_status.stdout " + with_items: "{{ node_list.stdout_lines }}" + + - name: Delete the {{ master_replica.stdout }} replica pod + shell: > + kubectl delete pod {{ master_replica.stdout }} -n kube-system + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Get the new replica name which is in action as master for zfs-controller + shell: > + kubectl get lease zfs-csi-openebs-io -n kube-system -o jsonpath='{.spec.holderIdentity}' + args: + executable: /bin/bash + register: new_master_replica + retries: 40 + delay: 3 + until: master_replica.stdout != new_master_replica.stdout + + - name: Get the zfs-volume name from the pvc name + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.spec.volumeName}' + args: + executable: /bin/bash + register: zfsvol_name + + - name: Deprovision the application + shell: > + kubectl delete -f busybox_app.yml + args: + executable: /bin/bash + + - name: Verify that application pods have been deleted successfully + shell: > + kubectl get pods -n e2e + args: + executable: /bin/bash + register: app_pod + until: "'{{ app_pod_name.stdout }}' not in app_pod.stdout" + delay: 3 + retries: 40 + + - name: verify that pvc has been deleted successfully + shell: > + kubectl get pvc -n e2e + args: + executable: /bin/bash + register: pvc_status + until: "'pvcha' not in pvc_status.stdout" + delay: 3 + retries: 40 + + - name: verify that zfsvol has been deleted successfully + shell: > + kubectl get zv -n {{ zfs_operator_ns }} + args: + executable: /bin/bash + register: zfsvol_status + until: "zfsvol_name.stdout not in zfsvol_status.stdout" + delay: 3 + retries: 40 + + when: "{{ zfs_ctrl_replicas|int + 1 }} <= {{no_of_schedulable_nodes|int}}" + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + + - name: Remove the taint from the nodes + shell: > + kubectl taint node {{ item }} key- + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + with_items: "{{ node_list.stdout_lines }}" + ignore_errors: true + + - 
block: + + - name: Scale up the zfs-controller with same no of replica count + shell: > + kubectl scale sts openebs-zfs-controller -n kube-system --replicas={{ zfs_ctrl_replicas }} + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Verify that the zfs-controller pod and zfs-node daemonset pods are running + shell: > + kubectl get pods -n kube-system -l role=openebs-zfs + --no-headers -o custom-columns=:status.phase | sort | uniq + args: + executable: /bin/bash + register: zfs_driver_components + until: "zfs_driver_components.stdout == 'Running'" + delay: 3 + retries: 50 + + - name: Get the zfs-volume name from the pvc name + shell: > + kubectl get pvc pvcha -n e2e -o jsonpath='{.spec.volumeName}' + args: + executable: /bin/bash + register: zfsvol_name + + - name: Deprovision the application + shell: > + kubectl delete -f busybox_app.yml + args: + executable: /bin/bash + + - name: Verify that application pods have been deleted successfully + shell: > + kubectl get pods -n e2e -l app=test_ha + args: + executable: /bin/bash + register: app_pod + until: "'No resources found' in app_pod.stderr" + delay: 3 + retries: 40 + + - name: verify that pvc has been deleted successfully + shell: > + kubectl get pvc -n e2e + args: + executable: /bin/bash + register: pvc_status + until: "'pvcha' not in pvc_status.stdout" + delay: 3 + retries: 40 + + - name: verify that zfsvol has been deleted successfully + shell: > + kubectl get zv -n {{ zfs_operator_ns }} + args: + executable: /bin/bash + register: zfsvol_status + until: "zfsvol_name.stdout not in zfsvol_status.stdout" + delay: 3 + retries: 40 + + when: "{{ zfs_ctrl_replicas|int + 1 }} > {{no_of_schedulable_nodes|int}}" + + - name: Verify that the zfs-controller pod and zfs-node daemonset pods are running + shell: > + kubectl get pods -n kube-system -l role=openebs-zfs + --no-headers -o custom-columns=:status.phase | sort | uniq + args: + executable: /bin/bash + register: zfs_driver_components + until: "zfs_driver_components.stdout == 'Running'" + delay: 3 + retries: 50 + + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfs-controller-high-availability/test_vars.yml b/e2e-tests/experiments/functional/zfs-controller-high-availability/test_vars.yml new file mode 100644 index 0000000..bb607da --- /dev/null +++ b/e2e-tests/experiments/functional/zfs-controller-high-availability/test_vars.yml @@ -0,0 +1,3 @@ +test_name: zfs-controller-high-availability + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/README.md b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/README.md new file mode 100644 index 0000000..5d89a2d --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/README.md @@ -0,0 +1,62 @@ +## About this experiment + +This experiment creates the clone directly from the volume as datasource and use that cloned volume for some application. This experiment verifies that clone volume should have the same data for which snaphsot was taken and this data should be easily accessible from some new application when this clone volume is mounted on it. 
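+
+For reference, the clone PVC that this experiment templates out of `clone_pvc.j2` looks roughly like the sketch below (the names, namespace and size here are placeholders supplied through the job envs such as CLONE_PVC_NAME, APP_NAMESPACE and PARENT_PVC_NAME; the requested size must match the parent pvc):
+
+```
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: clone-pvc            # placeholder (CLONE_PVC_NAME)
+  namespace: app-ns          # placeholder (APP_NAMESPACE)
+spec:
+  storageClassName: zfspv-sc # same storage class as the parent pvc
+  dataSource:
+    name: parent-pvc         # the existing pvc to clone (PARENT_PVC_NAME)
+    kind: PersistentVolumeClaim
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 4Gi           # must equal the parent pvc size
+```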
+ +## Supported platforms: +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- K8s cluster should be in healthy state including all desired nodes in ready state. +- zfs-controller and node-agent daemonset pods should be in running state. +- Application should be deployed successfully consuming zfs-localpv storage. +- The size of the clone-pvc should be equal to that of the original pvc. + +## Steps performed + +This experiment consists of provisioning and deprovisioning of the zfspv clone but performs one task at a time based on the ACTION env value < provision or deprovision >. + +Provision: + +- Create the clone by applying the pvc yaml with the parent pvc name in the datasource. +- Verify that the clone-pvc gets bound. +- Deploy a new application and verify that the clone volume gets successfully mounted on it. +- Verify the data consistency, i.e. the clone should contain the same data as the parent volume at the time of cloning. + +Deprovision: + +- Delete the application which is using the cloned volume. +- Verify that the clone pvc is deleted successfully. +- Verify that the zfs volume (zv) is deleted successfully. + +## How to run + +- This experiment accepts the parameters in the form of kubernetes job environment variables. +- For running this experiment of zfs-localpv clone directly from pvc, first clone the openebs/zfs-localpv repo [https://github.com/openebs/zfs-localpv] and then apply the rbac and crds for the e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +Then update the needed test-specific values in the run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables are described in the comments in the same file. + +After creating the kubernetes job, once the job's pod is instantiated, we can follow the logs of the pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f <test-pod-name> -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail). 
+ +``` +kubectl get e2er +kubectl get e2er zfspv-clone-from-pvc -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfspv-clone-from-pvc -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/busybox.j2 b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/busybox.j2 new file mode 100644 index 0000000..933e7ac --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/busybox.j2 @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ app_name }}-clone + namespace: "{{ app_ns }}" + labels: + app: clone-app-from-pvc +spec: + selector: + matchLabels: + app: clone-app-from-pvc + template: + metadata: + labels: + app: clone-app-from-pvc + spec: + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: "{{ clone_pvc_name }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/clone_pvc.j2 b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/clone_pvc.j2 new file mode 100644 index 0000000..8bbbe2d --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/clone_pvc.j2 @@ -0,0 +1,15 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ clone_pvc_name }} + namespace: {{ app_ns }} +spec: + storageClassName: {{ storage_class }} + dataSource: + name: {{ parent_pvc_name }} + kind: PersistentVolumeClaim + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ clone_pvc_size }} ## clone PVC size should match the size of the snapshot \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/percona.j2 b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/percona.j2 new file mode 100644 index 0000000..acd9419 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/percona.j2 @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ app_name }}-clone + namespace: {{ app_ns }} + labels: + app: clone-app-from-pvc +spec: + replicas: 1 + selector: + matchLabels: + app: clone-app-from-pvc + template: + metadata: + labels: + app: clone-app-from-pvc + spec: + containers: + - resources: + limits: + cpu: 0.5 + name: percona + image: openebs/tests-custom-percona:latest + imagePullPolicy: IfNotPresent + args: + - "--ignore-db-dir" + - "lost+found" + env: + - name: MYSQL_ROOT_PASSWORD + value: k8sDem0 + ports: + - containerPort: 3306 + name: percona + volumeMounts: + - mountPath: /var/lib/mysql + name: data-vol + # + livenessProbe: + exec: + command: ["bash", "sql-test.sh"] + initialDelaySeconds: 60 + periodSeconds: 1 + timeoutSeconds: 10 + # + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: {{ clone_pvc_name }} \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/run_e2e_test.yml b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/run_e2e_test.yml new file mode 100644 index 0000000..a46f9ab --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/run_e2e_test.yml @@ -0,0 +1,65 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 
zfspv-clone-from-pvc + namespace: e2e +data: + parameters.yml: | + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-clone-from-pvc- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfspv-clone-from-pvc + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE ## Namespace in which application is deployed + value: '' + + - name: APP_LABEL ## Parent application label + value: '' ## Give value in format (key=value) + + - name: ZFS_OPERATOR_NAMESPACE ## Namespace in which all the resources created by zfs driver will be present + value: '' ## for e.g. zfsvolume (zv) will be in this namespace + + - name: PARENT_PVC_NAME ## Give value of parent pvc name which is using by the application + value: '' + + - name: CLONE_PVC_NAME ## Cloned pvc will be created by this name in the same namespace where spapshot is present + value: '' + + - name: APP_NAME ## Provide the application name which will be deployed using cloned PVC + value: '' ## Supported values are: `busybox` and `percona` + + - name: ACTION ## Use 'deprovision' for clone cleanup + value: 'provision' + + - name: DATA_PERSISTENCE ## Give values according to the application + value: "" ## For `Busybox` : `busybox` & For `Percona` : `mysql` + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test.yml -i /etc/ansible/hosts -vv; exit 0"] + + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: zfspv-clone-from-pvc \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test.yml b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test.yml new file mode 100644 index 0000000..9b62a7a --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test.yml @@ -0,0 +1,283 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + - block: + + ## Generating the testname for zfspc clone directly from pvc test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + + - name: Get the application pod name + shell: > + kubectl get pod -n {{ app_ns }} -l {{ app_label }} + --no-headers -o custom-columns=:.metadata.name | shuf -n1 + args: + executable: /bin/bash + register: app_pod_name + + - name: Check if the application pod is in running state + shell: > + kubectl get pod {{ app_pod_name.stdout }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: app_pod_status + failed_when: "'Running' not in app_pod_status.stdout" + + - name: Get the capacity size of parent pvc {{ parent_pvc_name }} + shell: > + kubectl get pvc {{ parent_pvc_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.capacity.storage + args: + executable: /bin/bash + register: parent_pvc_size + + ## clone pvc size should be same as parent pvc size + - name: Record clone pvc size + set_fact: + clone_pvc_size: "{{ parent_pvc_size.stdout }}" + + - name: Get the storage class name used for provisioning {{ parent_pvc_name }} pvc + shell: > + kubectl get 
pvc {{ parent_pvc_name }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.storageClassName + args: + executable: /bin/bash + register: stg_class + + - name: Record the storage class name + set_fact: + storage_class: "{{ stg_class.stdout }}" + + - block: + - name: Create some test data into the application + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence == 'busybox' + + - block: + - name: Create some test data into the application + include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence == 'mysql' + + - name: Update the clone_pvc template with the test specific values to create clone + template: + src: clone_pvc.j2 + dest: clone_pvc.yml + + - name: Create the clone + shell: > + kubectl create -f clone_pvc.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: busybox.j2 + dest: busybox.yml + + - name: Deploy the {{ app_name }} application using cloned PVC + shell: > + kubectl create -f busybox.yml + args: + executable: /bin/bash + when: app_name == "busybox" + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: percona.j2 + dest: percona.yml + + - name: Deploy the {{ app_name }} application using cloned PVC + shell: > + kubectl create -f percona.yml + args: + executable: /bin/bash + when: app_name == "percona" + + - name: Check if the cloned PVC is bound + shell: > + kubectl get pvc {{ clone_pvc_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: clone_pvc_status + until: "'Bound' in clone_pvc_status.stdout" + delay: 3 + retries: 50 + + - name: Get {{ app_name }} application pod name which is using clone pvc + shell: > + kubectl get pods -n {{ app_ns }} -l app=clone-app-from-pvc --no-headers + -o=custom-columns=NAME:".metadata.name" + args: + executable: /bin/bash + register: pod_name + + - name: Record the {{ app_name }} application pod name + set_fact: + clone_pod_name: "{{ pod_name.stdout }}" + + - name: Checking {{ app_name }} application pod is in running state + shell: > + kubectl get pods {{clone_pod_name}} -n {{ app_ns }} + -o jsonpath='{.status.phase}' + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 3 + retries: 50 + + - name: Get the container status of {{ app_name }} application pod + shell: > + kubectl get pods {{ clone_pod_name }} -n {{ app_ns }} + -o jsonpath='{.status.containerStatuses[].state}' | grep running + args: + executable: /bin/bash + register: containerStatus + until: "'running' in containerStatus.stdout" + delay: 2 + retries: 50 + + - block: + - name: Verify the data persistency + include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml" + vars: + status: 'VERIFY' + ns: "{{ app_ns }}" + label: app=clone-app-from-pvc + pod_name: "{{ clone_pod_name }}" + when: data_persistence == 'mysql' + + - block: + - name: Verify the data persistency + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + vars: + status: 'VERIFY' + ns: "{{ app_ns }}" + label: app=clone-app-from-pvc + pod_name: "{{ clone_pod_name }}" + when: data_persistence == 'busybox' + + 
when: lookup('env','ACTION') == 'provision' + + - block: + - name: Get the ZV name for the cloned PVC + shell: > + kubectl get pvc {{ clone_pvc_name }} -n {{ app_ns }} -o jsonpath='{.spec.volumeName}' + args: + executable: /bin/bash + register: zv_name + + - name: Get {{ app_name }} application pod name which is using cloned pvc + shell: > + kubectl get pods -n {{ app_ns }} -l app=clone-app-from-pvc --no-headers + -o=custom-columns=NAME:".metadata.name" + args: + executable: /bin/bash + register: clone_pod_name + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: busybox.j2 + dest: busybox.yml + + - name: delete the {{ app_name }} application which is using cloned pvc + shell: > + kubectl delete -f busybox.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + when: app_name == 'busybox' + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: percona.j2 + dest: percona.yml + + - name: delete the {{ app_name }} application which is using cloned pvc + shell: > + kubectl delete -f percona.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + when: app_name == 'percona' + + - name: Check if the {{ app_name }} application pod which is using cloned pvc is deleted successfully + shell: > + kubectl get pods -n {{ app_ns }} + args: + executable: /bin/bash + register: app_status + until: "clone_pod_name.stdout not in app_status.stdout" + delay: 3 + retries: 50 + + - name: Delete the cloned pvc + shell: > + kubectl delete pvc {{ clone_pvc_name }} -n {{ app_ns }} + args: + executable: /bin/bash + register: clone_pvc_status + failed_when: "clone_pvc_status.rc != 0" + + - name: Check if the cloned pvc is deleted + shell: > + kubectl get pvc -n {{ app_ns }} + args: + executable: /bin/bash + register: clone_pvc_status + until: "clone_pvc_name not in clone_pvc_status.stdout" + delay: 3 + retries: 50 + + - name: Check if the ZV for cloned pvc is deleted + shell: > + kubectl get zv -n {{ zfs_operator_ns }} + args: + executable: /bin/bash + register: zv_status + until: "zv_name.stdout not in zv_status.stdout" + delay: 3 + retries: 30 + + when: lookup('env','ACTION') == 'deprovision' + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test_vars.yml b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test_vars.yml new file mode 100644 index 0000000..ba27047 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone-directly-from-pvc/test_vars.yml @@ -0,0 +1,17 @@ +test_name: zfspv-clone-from-pvc + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +parent_pvc_name: "{{ lookup('env', 'PARENT_PVC_NAME') }}" + +clone_pvc_name: "{{ lookup('env','CLONE_PVC_NAME') }}" + +app_name: "{{ lookup('env','APP_NAME') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +action: "{{ lookup('env','ACTION') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" + +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/README.md b/e2e-tests/experiments/functional/zfspv-clone/README.md new file mode 100644 index 0000000..63d5b1e --- /dev/null +++ 
b/e2e-tests/experiments/functional/zfspv-clone/README.md
@@ -0,0 +1,66 @@
+## About this experiment
+
+This experiment creates a clone from a previously created volume snapshot and uses the cloned volume with an application. It verifies that the clone volume is provisioned on the same node where the original volume was provisioned. It also verifies that the clone volume contains the same data that was present when the snapshot was taken, and that this data is accessible from a new application once the clone volume is mounted on it.
+
+## Supported platforms:
+
+K8s : 1.18+
+
+OS : Ubuntu, CentOS
+
+ZFS : 0.7, 0.8
+
+Please note that for Kubernetes versions below 1.17, the `VolumeSnapshotDataSource` feature gate needs to be enabled on the kubelet and kube-apiserver.
+
+## Entry-Criteria
+
+- K8s cluster should be in a healthy state with all desired nodes in ready state.
+- zfs-controller and node-agent daemonset pods should be in running state.
+- volume snapshot should be present and in the ready-to-use state.
+- volume snapshot should be in the same namespace where the application and pvc are present.
+- size of the clone-pvc should be equal to that of the original pvc.
+
+## Steps performed
+
+This experiment consists of provisioning and deprovisioning of zfspv-clone, but performs one task at a time based on the ACTION env value < provision or deprovision >.
+
+Provision:
+
+- Check if the volume snapshot is present.
+- Create the clone by applying the pvc yaml with the snapshot name in the datasource.
+- Verify that the pvc gets bound.
+- Deploy a new application and verify that the clone volume gets successfully mounted on it.
+- Verify data consistency: the clone should contain the same data as the volume snapshot.
+
+Deprovision:
+
+- Delete the application which is using the cloned volume.
+- Verify that the clone pvc is deleted successfully.
+- Verify that the zvolume is deleted successfully.
+
+## How to run
+
+- This experiment accepts its parameters in the form of Kubernetes Job environment variables.
+- To run this zfs-localpv clone test, first clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then apply the rbac and crds for the e2e-framework.
+```
+kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
+kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
+```
+then update the needed test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
+```
+kubectl create -f run_e2e_test.yml
+```
+A description of all the env variables is provided as comments in the same file.
+After the Kubernetes job is created and the job’s pod is instantiated, we can follow the logs of the pod that executes the test case.
+
+```
+kubectl get pods -n e2e
+kubectl logs -f -n e2e
+```
+To get the test-case result, get the corresponding e2e custom resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).
+ +``` +kubectl get e2er +kubectl get e2er zfspv-clone -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfspv-clone -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/busybox.j2 b/e2e-tests/experiments/functional/zfspv-clone/busybox.j2 new file mode 100644 index 0000000..9deb72c --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone/busybox.j2 @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ app_name }}-clone + namespace: "{{ app_ns }}" + labels: + name: clone-app +spec: + selector: + matchLabels: + name: clone-app + template: + metadata: + labels: + name: clone-app + spec: + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: "{{ clone_pvc_name }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/clone_pvc.j2 b/e2e-tests/experiments/functional/zfspv-clone/clone_pvc.j2 new file mode 100644 index 0000000..1797707 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone/clone_pvc.j2 @@ -0,0 +1,16 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ clone_pvc_name }} + namespace: {{ app_ns }} +spec: + storageClassName: {{ storage_class }} + dataSource: + name: {{ snapshot_name }} + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ clone_pvc_size }} ## clone PVC size should match the size of the snapshot \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/percona.j2 b/e2e-tests/experiments/functional/zfspv-clone/percona.j2 new file mode 100644 index 0000000..8c18194 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone/percona.j2 @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ app_name }}-clone + namespace: {{ app_ns }} + labels: + name: clone-app +spec: + replicas: 1 + selector: + matchLabels: + name: clone-app + template: + metadata: + labels: + name: clone-app + spec: + containers: + - resources: + limits: + cpu: 0.5 + name: percona + image: openebs/tests-custom-percona:latest + imagePullPolicy: IfNotPresent + args: + - "--ignore-db-dir" + - "lost+found" + env: + - name: MYSQL_ROOT_PASSWORD + value: k8sDem0 + ports: + - containerPort: 3306 + name: percona + volumeMounts: + - mountPath: /var/lib/mysql + name: data-vol + # + livenessProbe: + exec: + command: ["bash", "sql-test.sh"] + initialDelaySeconds: 60 + periodSeconds: 1 + timeoutSeconds: 10 + # + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: {{ clone_pvc_name }} \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/run_e2e_test.yml b/e2e-tests/experiments/functional/zfspv-clone/run_e2e_test.yml new file mode 100644 index 0000000..2f4c9e3 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone/run_e2e_test.yml @@ -0,0 +1,56 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-clone- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfspv-clone + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + 
env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE ## Namespace in which application is deployed + value: '' + + - name: ZFS_OPERATOR_NAMESPACE ## Namespace in which all the resources created by zfs driver will be present + value: '' ## for e.g. zfsvolume (zv) will be in this namespace + + - name: STORAGE_CLASS ## Storage class name by which original volume was provisioned + value: '' + + - name: SNAPSHOT_NAME ## Snapshot name from which clone has to be created + value: '' + + - name: CLONE_PVC_NAME ## Cloned pvc will be created by this name in the same namespace where spapshot is present + value: '' + + - name: APP_NAME ## Provide the application name which will be deployed using cloned PVC + value: '' ## Supported values are: `busybox` and `percona` + + - name: ACTION ## Use 'deprovision' for clone cleanup + value: 'provision' + + - name: DATA_PERSISTENCE ## Give values according to the application + value: "" ## For `Busybox` : `busybox` & For `Percona` : `mysql` + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-clone/test.yml -i /etc/ansible/hosts -vv; exit 0"] + + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: zfspv-snapshot-clone \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/test.yml b/e2e-tests/experiments/functional/zfspv-clone/test.yml new file mode 100644 index 0000000..78c1886 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone/test.yml @@ -0,0 +1,251 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + - block: + + ## Generating the testname for zfspv clone test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + - name: Check if the snapshot {{ snapshot_name }} is present to create clone + shell: > + kubectl get volumesnapshot.snapshot -n {{ app_ns }} + args: + executable: /bin/bash + register: snapshot_status + failed_when: snapshot_name not in snapshot_status.stdout + + - name: Get the source pvc name of snapshot {{ snapshot_name }} + shell: > + kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:spec.source.persistentVolumeClaimName + args: + executable: /bin/bash + register: source_pvc + + # clone pvc size should be same as of source pvc + - name: Get the capacity size of source pvc + shell: > + kubectl get pvc {{ source_pvc.stdout }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.capacity.storage + args: + executable: /bin/bash + register: source_pvc_size + + - name: Record clone pvc size same as of source pvc size + set_fact: + clone_pvc_size: "{{ source_pvc_size.stdout }}" + + - name: Update the clone_pvc template with the test specific values to create clone + template: + src: clone_pvc.j2 + dest: clone_pvc.yml + + - name: Create the clone + shell: > + kubectl create -f clone_pvc.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: busybox.j2 + dest: busybox.yml + + - name: Deploy the {{ app_name }} application using cloned PVC + shell: > + kubectl create -f busybox.yml + args: + executable: /bin/bash + when: 
app_name == "busybox" + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: percona.j2 + dest: percona.yml + + - name: Deploy the {{ app_name }} application using cloned PVC + shell: > + kubectl create -f percona.yml + args: + executable: /bin/bash + when: app_name == "percona" + + - name: Check if the clone PVC is bound + shell: > + kubectl get pvc {{ clone_pvc_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: clone_pvc_status + until: "'Bound' in clone_pvc_status.stdout" + delay: 5 + retries: 30 + + - name: Get clone {{ app_name }} application pod name + shell: > + kubectl get pods -n {{ app_ns }} -l name=clone-app --no-headers + -o=custom-columns=NAME:".metadata.name" + args: + executable: /bin/bash + register: pod_name + + - name: Record the clone {{ app_name }} application pod name + set_fact: + app_pod_name: "{{ pod_name.stdout }}" + + - name: Checking clone {{ app_name }} application pod is in running state + shell: > + kubectl get pods {{app_pod_name}} -n {{ app_ns }} + -o jsonpath='{.status.phase}' + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 5 + retries: 50 + + - name: Get the container status of clone {{ app_name }} application pod + shell: > + kubectl get pods {{ app_pod_name }} -n {{ app_ns }} + -o jsonpath='{.status.containerStatuses[].state}' | grep running + args: + executable: /bin/bash + register: containerStatus + until: "'running' in containerStatus.stdout" + delay: 2 + retries: 50 + + - block: + - name: Verify the data persistency + include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml" + vars: + status: 'VERIFY' + ns: "{{ app_ns }}" + label: name=clone-app + pod_name: "{{ app_pod_name }}" + when: data_persistence == 'mysql' + + - block: + - name: Verify the data persistency + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + vars: + status: 'VERIFY' + ns: "{{ app_ns }}" + label: name=clone-app + pod_name: "{{ app_pod_name }}" + when: data_persistence == 'busybox' + + when: lookup('env','ACTION') == 'provision' + + - block: + - name: Get the ZV name for the cloned PVC + shell: > + kubectl get pvc {{ clone_pvc_name }} -n {{ app_ns }} -o jsonpath='{.spec.volumeName}' + args: + executable: /bin/bash + register: zv_name + + - name: Get {{ app_name }} application pod name which is using cloned pvc + shell: > + kubectl get pods -n {{ app_ns }} -l name=clone-app --no-headers + -o=custom-columns=NAME:".metadata.name" + args: + executable: /bin/bash + register: pod_name + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: busybox.j2 + dest: busybox.yml + + - name: delete the {{ app_name }} application which is using cloned pvc + shell: > + kubectl delete -f busybox.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + when: app_name == 'busybox' + + - block: + - name: Update the {{ app_name }} deployment yaml with test specific values + template: + src: percona.j2 + dest: percona.yml + + - name: delete the {{ app_name }} application which is using cloned pvc + shell: > + kubectl delete -f percona.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + when: app_name == 'percona' + + - name: Check if the {{ app_name }} application pod which is using cloned pvc is deleted successfully + shell: > + kubectl get pods -n {{ app_ns }} + args: + 
executable: /bin/bash + register: app_status + until: "pod_name.stdout not in app_status.stdout" + delay: 15 + retries: 30 + + - name: Delete the cloned pvc + shell: > + kubectl delete pvc {{ clone_pvc_name }} -n {{ app_ns }} + args: + executable: /bin/bash + register: pvc_status + failed_when: "pvc_status.rc != 0" + + - name: Check if the cloned pvc is deleted + shell: > + kubectl get pvc -n {{ app_ns }} + args: + executable: /bin/bash + register: clone_pvc_status + until: "clone_pvc_name not in clone_pvc_status.stdout" + delay: 5 + retries: 30 + + - name: Check if the ZV for cloned pvc is deleted + shell: > + kubectl get zv -n {{ zfs_operator_ns }} + args: + executable: /bin/bash + register: zv_status + until: "zv_name.stdout not in zv_status.stdout" + delay: 5 + retries: 15 + + when: lookup('env','ACTION') == 'deprovision' + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-clone/test_vars.yml b/e2e-tests/experiments/functional/zfspv-clone/test_vars.yml new file mode 100644 index 0000000..0557023 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-clone/test_vars.yml @@ -0,0 +1,17 @@ +test_name: zfspv-clone + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +storage_class: "{{ lookup('env','STORAGE_CLASS') }}" + +snapshot_name: "{{ lookup('env','SNAPSHOT_NAME') }}" + +clone_pvc_name: "{{ lookup('env','CLONE_PVC_NAME') }}" + +app_name: "{{ lookup('env','APP_NAME') }}" + +action: "{{ lookup('env','ACTION') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" + +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/README.md b/e2e-tests/experiments/functional/zfspv-custom-topology/README.md new file mode 100644 index 0000000..b04ffff --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-custom-topology/README.md @@ -0,0 +1,59 @@ +## About the experiment + +- After zfs-driver:v0.7.x user can label the nodes with the required topology, the zfs-localpv driver will support all the node labels as topology keys. This experiment verifies this custom-topology support for zfs-localpv. Volume should be provisioned on only such nodes which have been labeled with the keys set via the storage-class. +- In this experiment we cover two scenarios such as one with immediate volume binding and other with late binding (i.e. WaitForFirstConsumer). If we add a label to node after zfs-localpv driver deployment and using late binding mode, then a restart of all the node agents are required so that the driver can pick the labels and add them as supported topology key. Restart is not required in case of immediate volumebinding irrespective of if we add labels after zfs-driver deployment or before. + +## Supported platforms: +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +ZFS-LocalPV version: 0.7+ + +## Entry-Criteria + +- K8s cluster should be in healthy state including all desired nodes in ready state. +- zfs-controller and node-agent daemonset pods should be in running state. + +## Steps performed + +- select any of the two nodes randomly from the k8s cluster and label them with some key. +- deploy five applications using the pvc, provisioned by storage class in which volume binding mode is immediate. 
+- verify that the pvc is bound and the application pod is in running state.
+- verify that the volume is provisioned only on those nodes which were labeled prior to provisioning.
+- after that, deploy five more applications using the pvc provisioned by the storage class whose volume binding mode is WaitForFirstConsumer.
+- check that the pvc remains in pending state.
+- restart the csi node-agent pods on all nodes.
+- verify that the new topology keys are now present in the csi-nodes.
+- now the pvc should come into Bound state and the application should be in running state.
+- verify that the volume is provisioned only on those nodes which were labeled.
+- At the end of the test, remove the node labels and restart the csi node-agent pods so that the custom labels are removed from the csi-nodes.
+
+## How to run
+
+- This experiment accepts its parameters in the form of Kubernetes Job environment variables.
+- To run this zfspv custom-topology test, first clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then apply the rbac and crds for the e2e-framework.
+```
+kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
+kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
+```
+then update the needed test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
+```
+kubectl create -f run_e2e_test.yml
+```
+A description of all the env variables is provided as comments in the same file.
+After the Kubernetes job is created and the job’s pod is instantiated, we can follow the logs of the pod that executes the test case.
+
+```
+kubectl get pods -n e2e
+kubectl logs -f -n e2e
+```
+To get the test-case result, get the corresponding e2e custom resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).
+
+```
+kubectl get e2er
+kubectl get e2er zfspv-custom-topology -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
+kubectl get e2er zfspv-custom-topology -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
+```
\ No newline at end of file
diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/app_gen_immediate.sh b/e2e-tests/experiments/functional/zfspv-custom-topology/app_gen_immediate.sh
new file mode 100644
index 0000000..9c6d6e7
--- /dev/null
+++ b/e2e-tests/experiments/functional/zfspv-custom-topology/app_gen_immediate.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+mkdir app_yamls_immediate
+
+for i in $(seq 1 5)
+do
+  sed "s/pvc-custom-topology/pvc-custom-topology-$i/g" busybox.yml > app_yamls_immediate/busybox-$i.yml
+  sed -i "s/busybox-deploy-custom-topology/busybox-deploy-custom-topology-$i/g" app_yamls_immediate/busybox-$i.yml
+  sed -i "s/storageClassName: zfspv-custom-topology/storageClassName: zfspv-custom-topology-immediate/g" app_yamls_immediate/busybox-$i.yml
+done
\ No newline at end of file
diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/app_gen_wfc.sh b/e2e-tests/experiments/functional/zfspv-custom-topology/app_gen_wfc.sh
new file mode 100644
index 0000000..2f5f8a2
--- /dev/null
+++ b/e2e-tests/experiments/functional/zfspv-custom-topology/app_gen_wfc.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+mkdir app_yamls_wfc
+
+for i in $(seq 1 5)
+do
+  sed "s/pvc-custom-topology/pvc-custom-topology-$i/g" busybox.yml > app_yamls_wfc/busybox-$i.yml
+  sed -i "s/busybox-deploy-custom-topology/busybox-deploy-custom-topology-$i/g" app_yamls_wfc/busybox-$i.yml
+  sed -i "s/storageClassName: zfspv-custom-topology/storageClassName: zfspv-custom-topology-wfc/g" app_yamls_wfc/busybox-$i.yml
+done
\ No newline at
end of file diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/busybox.yml b/e2e-tests/experiments/functional/zfspv-custom-topology/busybox.yml new file mode 100644 index 0000000..7969ffe --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-custom-topology/busybox.yml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: busybox-deploy-custom-topology + labels: + test: zfspv-custom-topology +spec: + selector: + matchLabels: + test: zfspv-custom-topology + template: + metadata: + labels: + test: zfspv-custom-topology + spec: + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: pvc-custom-topology + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvc-custom-topology +spec: + storageClassName: zfspv-custom-topology + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/run_e2e_test.yml b/e2e-tests/experiments/functional/zfspv-custom-topology/run_e2e_test.yml new file mode 100644 index 0000000..543f5d6 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-custom-topology/run_e2e_test.yml @@ -0,0 +1,33 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-custom-topology- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfspv-custom-topology + spec: + serviceAccountName: e2e + restartPolicy: Never + + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE + value: 'custom-ns' + + - name: ZPOOL_NAME + value: 'zfs-test-pool' + + - name: NODE_LABEL + value: 'test=custom-topology' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-custom-topology/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/storage_class.j2 b/e2e-tests/experiments/functional/zfspv-custom-topology/storage_class.j2 new file mode 100644 index 0000000..d998c87 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-custom-topology/storage_class.j2 @@ -0,0 +1,31 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: zfspv-custom-topology-wfc +allowVolumeExpansion: true +parameters: + fstype: "zfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io +volumeBindingMode: WaitForFirstConsumer +allowedTopologies: +- matchLabelExpressions: + - key: {{ lkey }} + values: + - {{ lvalue }} + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: zfspv-custom-topology-immediate +allowVolumeExpansion: true +parameters: + fstype: "zfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io +allowedTopologies: +- matchLabelExpressions: + - key: {{ lkey }} + values: + - {{ lvalue }} \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/test.yml b/e2e-tests/experiments/functional/zfspv-custom-topology/test.yml new file mode 100644 index 0000000..4beab59 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-custom-topology/test.yml @@ -0,0 +1,304 @@ +- hosts: localhost + connection: local + gather_facts: 
False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for zfspv custom-topology support test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Get any of the two nodes from cluster which are not having `noSchedule` taints + shell: > + kubectl get nodes --no-headers -o custom-columns=:.metadata.name,:.spec.taints | + grep -v NoSchedule | shuf -n 2 | awk '{print $1}' + args: + executable: /bin/bash + register: node_list + + - name: Label the nodes with custom labels + shell: > + kubectl label node {{ item }} {{ node_label }} + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + with_items: "{{ node_list.stdout_lines }}" + + - name: Split the node label into key and values + set_fact: + lkey: "{{ node_label.split('=')[0] }}" + lvalue: "{{ node_label.split('=')[1] }}" + + - name: Update the storage_class template with test specific values + template: + src: storage_class.j2 + dest: storage_class.yml + + - name: Create the storage class yaml + shell: kubectl create -f storage_class.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Create namespace + shell: kubectl create ns {{ app_ns }}-immediate + args: + executable: /bin/bash + + - name: Apply the script for generating multiple busybox application yamls + shell: bash app_gen_immediate.sh + args: + executable: /bin/bash + + - name: Apply the busybox yamls + shell: > + kubectl apply -f app_yamls_immediate/ -n {{ app_ns}}-immediate + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Get the pvc list + shell: kubectl get pvc -n {{ app_ns }}-immediate --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: pvc_list + + - name: Check the PVC status. 
+ shell: kubectl get pvc {{ item }} -n {{ app_ns }}-immediate --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + with_items: "{{ pvc_list.stdout_lines }}" + until: " pvc_status.stdout == 'Bound'" + delay: 5 + retries: 30 + + - name: Get the application pod list + shell: kubectl get pods -n {{ app_ns }}-immediate -l test=zfspv-custom-topology --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_pod_list + + - name: Check the application pod status + shell: > + kubectl get pods {{ item }} -n {{ app_ns }}-immediate --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: app_pod_status + with_items: "{{ app_pod_list.stdout_lines }}" + until: "app_pod_status.stdout == 'Running'" + delay: 5 + retries: 20 + + - name: Check the container status + shell: > + kubectl get pods {{ item }} -n {{ app_ns }}-immediate --no-headers -o custom-columns=:.status.containerStatuses[*].state + args: + executable: /bin/bash + register: container_status + with_items: "{{ app_pod_list.stdout_lines }}" + until: "'running' in container_status.stdout" + delay: 2 + retries: 30 + + - name: Verify that application pod is scheduled on node on which custom label is applied + shell: kubectl get pods {{ item }} -n {{ app_ns }}-immediate --no-headers -o custom-columns=:.spec.nodeName + args: + executable: /bin/bash + register: node_name + with_items: "{{ app_pod_list.stdout_lines }}" + failed_when: "'{{ node_name.stdout }}' not in node_list.stdout" + + - name: Deprovision the application + shell: kubectl delete -f app_yamls_immediate/ -n {{ app_ns}}-immediate + args: + executable: /bin/bash + register: deprovision_status + failed_when: "deprovision_status.rc != 0" + + - name: Delete the namespace + shell: kubectl delete ns {{ app_ns }}-immediate + args: + executable: /bin/bash + register: namespace_status + failed_when: "namespace_status.rc != 0" + + - name: Create namespace + shell: kubectl create ns {{ app_ns}}-wfc + args: + executable: /bin/bash + + - name: Apply the script for generating multiple busybox application yamls + shell: bash app_gen_wfc.sh + args: + executable: /bin/bash + + - name: Apply the busybox yamls + shell: > + kubectl apply -f app_yamls_wfc/ -n {{ app_ns }}-wfc + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + ## Restart of node-agent pods is required to get aware of node_labels + ## Meanwhile PVC will be remain in pending state. + + - name: Check all the pvc is in pending state. 
+ shell: kubectl get pvc -n {{ app_ns }}-wfc --no-headers -o custom-columns=:.status.phase | sort | uniq + args: + executable: /bin/bash + register: pvc_status + failed_when: "pvc_status.stdout != 'Pending'" + + - name: Restart the zfs node-agent pods in kube-system namespace + shell: kubectl delete pods -n kube-system -l app=openebs-zfs-node + args: + executable: /bin/bash + + - name: Wait for 10 sec + shell: + sleep 10 + + - name: Check for the zfs node-agent pods to come into Running state + shell: > + kubectl get pods -n kube-system -l app=openebs-zfs-node + --no-headers -o custom-columns=:.status.phase | sort | uniq + args: + executable: /bin/bash + register: zfs_node_pod_status + until: "zfs_node_pod_status.stdout == 'Running'" + delay: 5 + retries: 20 + + - name: Verify new topology key is now available in csi_nodes + shell: kubectl get csinode {{ item }} --no-headers -o custom-columns=:.spec.drivers[*].topologyKeys + args: + executable: /bin/bash + register: csi_node_keys + until: "'{{ lkey }}' in csi_node_keys.stdout" + delay: 2 + retries: 20 + with_items: "{{ node_list.stdout_lines }}" + + - name: Get the pvc list + shell: kubectl get pvc -n {{ app_ns }}-wfc --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: pvc_list + + - name: Check the status of pvc + shell: kubectl get pvc {{ item }} -n {{ app_ns }}-wfc --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + with_items: "{{ pvc_list.stdout_lines }}" + until: "pvc_status.stdout == 'Bound'" + delay: 2 + retries: 30 + + - name: Get the application pod list + shell: kubectl get pods -n {{ app_ns }}-wfc -l test=zfspv-custom-topology --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_pod_list + + - name: Check the application pod status + shell: > + kubectl get pods {{ item }} -n {{ app_ns }}-wfc --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: app_pod_status + with_items: "{{ app_pod_list.stdout_lines }}" + until: "app_pod_status.stdout == 'Running'" + delay: 5 + retries: 20 + + - name: Check the container status + shell: > + kubectl get pods {{ item }} -n {{ app_ns }}-wfc --no-headers -o custom-columns=:.status.containerStatuses[*].state + args: + executable: /bin/bash + register: container_status + with_items: "{{ app_pod_list.stdout_lines }}" + until: "'running' in container_status.stdout" + delay: 2 + retries: 30 + + - name: Verify that application pod is scheduled on node on which custom label is applied + shell: kubectl get pods {{ item }} -n {{ app_ns }}-wfc --no-headers -o custom-columns=:.spec.nodeName + args: + executable: /bin/bash + register: node_name + with_items: "{{ app_pod_list.stdout_lines }}" + failed_when: "'{{ node_name.stdout }}' not in node_list.stdout" + + - name: Deprovision the application + shell: kubectl delete -f app_yamls_wfc/ -n {{ app_ns}}-wfc + args: + executable: /bin/bash + register: deprovision_status + failed_when: "deprovision_status.rc != 0" + + - name: Delete the namespace + shell: kubectl delete ns {{ app_ns }}-wfc + args: + executable: /bin/bash + register: namespace_status + failed_when: "namespace_status.rc != 0" + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + + - name: Remove the labels from node after end of test + shell: kubectl label node {{ item }} {{ lkey }}- + args: + executable: /bin/bash + register: label_status + with_items: "{{ node_list.stdout_lines }}" + failed_when: 
"label_status.rc != 0" + + - name: Restart the zfs node-agent pods in kube-system namespace to remove label from csi-nodes + shell: kubectl delete pods -n kube-system -l app=openebs-zfs-node + args: + executable: /bin/bash + + - name: Check for the zfs node-agent pods to come into Running state + shell: > + kubectl get pods -n kube-system -l app=openebs-zfs-node + --no-headers -o custom-columns=:.status.phase | sort | uniq + args: + executable: /bin/bash + register: zfs_node_pod_status + until: "zfs_node_pod_status.stdout == 'Running'" + delay: 5 + retries: 20 + + - name: Delete the storage class + shell: kubectl delete -f storage_class.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + # RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-custom-topology/test_vars.yml b/e2e-tests/experiments/functional/zfspv-custom-topology/test_vars.yml new file mode 100644 index 0000000..da998ef --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-custom-topology/test_vars.yml @@ -0,0 +1,7 @@ +test_name: zfspv-custom-topology + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +zpool_name: "{{ lookup('env','ZPOOL_NAME') }}" + +node_label: "{{ lookup('env','NODE_LABEL') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-raw-block-volume/README.md b/e2e-tests/experiments/functional/zfspv-raw-block-volume/README.md new file mode 100644 index 0000000..c1f807d --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-raw-block-volume/README.md @@ -0,0 +1,69 @@ +## About this experiment + +This experiment verifies the provision and deprovision of raw block volumes by zfs-localpv. There are some specialized applications that require direct access to a block device because, for example, the file system layer introduces unneeded overhead. The most common case is databases, which prefer to organize their data directly on the underlying storage. In this experiment we are not using any such application for testing, but using a simple busybox application to verify successful provisioning and deprovisioning of raw block volume. + +To provisione the Raw Block volume, we should create a storageclass without any fstype as Raw block volume does not have any fstype. + +``` +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: zfspv-raw-block + allowVolumeExpansion: true + parameters: + poolname: "zfspv-pool" + provisioner: zfs.csi.openebs.io +``` +Note: For running this experiment above storage-class should be present. This storage class will be created as a part of zfs-localpv provisioner experiment. If zfs-localpv components are not deployed using e2e-test script located at `openebs/zfs-localpv/e2e-tests/experiment/zfs-localpv-provisioiner` please make sure you create the storage class from above mentioned yaml. + +## Supported platforms: + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- K8s cluster should be in healthy state including all desired nodes in ready state. +- zfs-controller and node-agent daemonset pods should be in running state. +- storage class without any fstype should be present. +- a directory should be present on node with name `raw_block_volume`. + +## Steps performed + +- deploy the busybox application with given a devicePath. +- verify that application pvc gets bound and application pod is in running state. 
+- dump some data into raw block device and take the md5sum of data. +- restart the application and verify the data consistency. +- After that update the pvc with double value of previous pvc size, to validate resize support for raw block volumes. +- when resize is successful, then dump some dummy data into application to use the resized space. +- At last deprovision the application and check its successful deletion. + +## How to run + +- This experiment accepts the parameters in form of kubernetes job environmental variables. +- For running this experiment of zfspv raw block volume creation, first clone openens/zfs-localpv[https://github.com/openebs/zfs-localpv] repo and then apply rbac and crds for e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +then update the needed test specific values in run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables description is provided with the comments in the same file. +After creating kubernetes job, when the job’s pod is instantiated, we can see the logs of that pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er ) and check its phase (Running or Completed) and result (Pass or Fail). + +``` +kubectl get e2er +kubectl get e2er zfspv-raw-block-volume -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfspv-raw-block-volume -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-raw-block-volume/busybox.j2 b/e2e-tests/experiments/functional/zfspv-raw-block-volume/busybox.j2 new file mode 100644 index 0000000..ab0cf6b --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-raw-block-volume/busybox.j2 @@ -0,0 +1,53 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-busybox + labels: + app: raw-block-vol +spec: + selector: + matchLabels: + app: raw-block-vol + template: + metadata: + labels: + app: raw-block-vol + spec: + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeDevices: + - name: data-raw-block-vol + devicePath: /dev/sdc + + volumeMounts: + - name: data-mount-vol + mountPath: /busybox + + volumes: + - name: data-raw-block-vol + persistentVolumeClaim: + claimName: block-vol-pvc + + - name: data-mount-vol + hostPath: + path: /raw_block_volume + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: block-vol-pvc +spec: + volumeMode: Block + storageClassName: {{ storage_class }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ pvc_size }} \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-raw-block-volume/run_e2e_test.yml b/e2e-tests/experiments/functional/zfspv-raw-block-volume/run_e2e_test.yml new file mode 100644 index 0000000..517fa5a --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-raw-block-volume/run_e2e_test.yml @@ -0,0 +1,43 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-block-volume- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfspv-block-volume + spec: + serviceAccountName: e2e + restartPolicy: Never + + containers: + - name: ansibletest 
+ image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + # This is the namespace where raw block volume consuming application will be deployed + - name: APP_NAMESPACE + value: 'block-ns' + + # storage class name for raw block volume + # this storage class doesn't have any `fstype` parameter + - name: STORAGE_CLASS + value: 'zfspv-raw-block' + + # size of the pvc for `volumeMode: Block` + - name: PVC_SIZE + value: '5Gi' + + # This is the namespace where zfs-driver creates all its resources. + # By default it is `openebs` namespace + - name: ZFS_OPERATOR_NAMESPACE + value: 'openebs' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-raw-block-volume/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-raw-block-volume/test.yml b/e2e-tests/experiments/functional/zfspv-raw-block-volume/test.yml new file mode 100644 index 0000000..0ce0233 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-raw-block-volume/test.yml @@ -0,0 +1,248 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for zfspv raw block volume test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Create the namespace for block-volume of zfspv + shell: > + kubectl create ns {{ app_ns }} + args: + executable: /bin/bash + + - name: Update the busybox application template with the test specific values + template: + src: busybox.j2 + dest: busybox.yml + + - name: Deploy the application using block volume pvc + shell: > + kubectl create -f busybox.yml -n {{ app_ns }} + args: + executable: /bin/bash + + - name: Check if the block volume PVC is bound + shell: > + kubectl get pvc block-vol-pvc -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + until: "'Bound' in pvc_status.stdout" + delay: 5 + retries: 30 + + - name: Get the zvolume name + shell: kubectl get pvc block-vol-pvc -n {{ app_ns }} -o custom-columns=:.spec.volumeName + args: + executable: /bin/bash + register: zv_name + + - name: Get the application pod name + shell: > + kubectl get pod -n {{ app_ns }} -l app=raw-block-vol + --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_pod_name + + - name: Check if the application pod is in running state + shell: > + kubectl get pod {{ app_pod_name.stdout }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 5 + retries: 50 + + - name: Create some test data into the raw block device and take the md5sum of data + shell: > + kubectl exec -ti {{ app_pod_name.stdout }} -n {{ app_ns }} + -- sh -c "{{ item }}" + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + with_items: + - "dd if=/dev/urandom of=/dev/sdc bs=4k count=262144" + - "md5sum /dev/sdc > /busybox/pre-md5" + + - name: Restart the busybox application + shell: kubectl delete pod {{ app_pod_name.stdout }} -n {{ app_ns }} + args: + executable: /bin/bash + + - name: Get the application pod name after restart + shell: > + kubectl get pod -n {{ app_ns }} -l app=raw-block-vol + 
--no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: new_pod_name + + - name: Check if the application pod is in running state + shell: > + kubectl get pod {{ new_pod_name.stdout }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + register: pod_status + until: "'Running' in pod_status.stdout" + delay: 5 + retries: 50 + + - name: Again take the md5sum of the data after restarting the application pod + shell: > + kubectl exec -ti {{ new_pod_name.stdout }} -n {{ app_ns }} + -- sh -c "md5sum /dev/sdc > /busybox/post-md5" + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + + - name: Verify whether data is consistence after restarting the application pod + shell: > + kubectl exec -ti {{ new_pod_name.stdout }} -n {{ app_ns }} + -- sh -c "diff /busybox/pre-md5 /busybox/post-md5" + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0 or result.stdout != ''" + + - name: Obtain the mount path for the application + shell: > + kubectl get pod {{ new_pod_name.stdout }} -n {{ app_ns }} + -o custom-columns=:.spec.containers[].volumeMounts[].mountPath --no-headers + args: + executable: /bin/bash + register: mount + + - name: Fetch the Storage from PVC using namespace + shell: kubectl get pvc -n {{ app_ns }} -o jsonpath={.items[0].spec.resources.requests.storage} + args: + executable: /bin/bash + register: storage_capacity + + - name: Fetch the alphabet(G,M,m,g) from storage capacity + shell: echo "{{ storage_capacity.stdout }}" | grep -o -E '[0-9]+' + args: + executable: /bin/bash + register: value_pvc + + ## Here we will resize the volume to double value of present pvc size + - set_fact: + desired_vol_size: '{{ (value_pvc.stdout | int * 2 | int) | int }}' + + - name: Obtain the PVC spec + shell: > + kubectl get pvc block-vol-pvc -n {{ app_ns }} + --no-headers -o yaml > pvc.yml + args: + executable: /bin/bash + + - name: Update the desired capacity in PVC spec + replace: + path: pvc.yml + before: 'storageClassName: {{ storage_class }}' + regexp: "storage: {{ pvc_size }}" + replace: "storage: {{ desired_vol_size }}Gi" + + - name: Configure PVC with the new capacity + shell: kubectl apply -f pvc.yml + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + + - name: Check if the desired PVC is bound + shell: > + kubectl get pvc block-vol-pvc -n {{ app_ns }} --no-headers + -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + failed_when: "'Bound' not in pvc_status.stdout" + + - name: Check if the storage capacity is updated in PVC + shell: > + kubectl get pvc block-vol-pvc -n {{ app_ns }} --no-headers + -o custom-columns=:status.capacity.storage + args: + executable: /bin/bash + register: capacity + until: "desired_vol_size in capacity.stdout" + delay: 10 + retries: 50 + + ## Here we will dump +1Gi data than to previous pvc size + - set_fact: + value_num: '{{ ( (value_pvc.stdout | int + 1 | int) * 1024) | int }}' + + - name: Dump some more dummy data in the application mount point for using resized volume + shell: > + kubectl exec -it "{{ new_pod_name.stdout }}" -n "{{ app_ns }}" + -- sh -c "cd {{ mount.stdout }} && dd if=/dev/urandom of=volume.txt bs=1024k count={{ value_num }}" + args: + executable: /bin/bash + register: load + failed_when: "load.rc != 0" + + - name: Deprovision the busybox application + shell: kubectl delete -f busybox.yml -n {{ app_ns }} + args: + executable: /bin/bash + + - name: Verify that busybox application 
is successfully deprovisioned
+        shell: kubectl get pods -n {{ app_ns }}
+        args:
+          executable: /bin/bash
+        register: app_pod_status
+        until: "new_pod_name.stdout not in app_pod_status.stdout"
+        delay: 5
+        retries: 30   # retry count was missing; assumed to match the other deletion checks below
+
+      - name: Verify that pvc is deleted successfully
+        shell: kubectl get pvc -n {{ app_ns }}
+        args:
+          executable: /bin/bash
+        register: pvc_status
+        until: "'block-vol-pvc' not in pvc_status.stdout"
+        delay: 3
+        retries: 30
+
+      - name: Verify the zvolume is deleted successfully
+        shell: kubectl get zv -n {{ zfs_operator_ns }}
+        args:
+          executable: /bin/bash
+        register: zvol_list
+        until: "zv_name.stdout not in zvol_list.stdout"
+        delay: 3
+        retries: 30
+
+      - name: Delete the application namespace
+        shell: kubectl delete ns {{ app_ns }}
+        args:
+          executable: /bin/bash
+
+      - set_fact:
+          flag: "Pass"
+
+      rescue:
+        - set_fact:
+            flag: "Fail"
+
+      always:
+        ## RECORD END-OF-TEST IN e2e RESULT CR
+        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
+          vars:
+            status: 'EOT'
\ No newline at end of file
diff --git a/e2e-tests/experiments/functional/zfspv-raw-block-volume/test_vars.yml b/e2e-tests/experiments/functional/zfspv-raw-block-volume/test_vars.yml
new file mode 100644
index 0000000..60b94ed
--- /dev/null
+++ b/e2e-tests/experiments/functional/zfspv-raw-block-volume/test_vars.yml
@@ -0,0 +1,9 @@
+test_name: zfspv-raw-block-volume
+
+app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
+
+storage_class: "{{ lookup('env','STORAGE_CLASS') }}"
+
+pvc_size: "{{ lookup('env','PVC_SIZE') }}"
+
+zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}"
diff --git a/e2e-tests/experiments/functional/zfspv-shared-mount/README.md b/e2e-tests/experiments/functional/zfspv-shared-mount/README.md
new file mode 100644
index 0000000..8db8710
--- /dev/null
+++ b/e2e-tests/experiments/functional/zfspv-shared-mount/README.md
@@ -0,0 +1,65 @@
+## About the experiment
+
+- This functional test verifies zfs-localpv's shared mount volume support, where multiple pods use the same volume. Applications that want to share a volume can use a storage class like the one below.
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: openebs-zfspv
+parameters:
+  shared: "yes"
+  fstype: "zfs"
+  poolname: "< zpool_name >"
+provisioner: zfs.csi.openebs.io
+```
+Note: For running this experiment, the above storage class should be present. This storage class is created as part of the zfs-localpv provisioner experiment. If the zfs-localpv components were not deployed using the e2e-test script located at `openebs/zfs-localpv/e2e-tests/experiment/zfs-localpv-provisioiner`, please make sure you create the storage class from the above yaml.
+
+## Supported platforms:
+
+K8s : 1.18+
+
+OS : Ubuntu, CentOS
+
+ZFS : 0.7, 0.8
+
+## Entry-Criteria
+
+- K8s cluster should be in a healthy state with all the nodes in ready state.
+- zfs-controller and node-agent daemonset pods should be in running state.
+- storage class with `shared: yes` enabled should be present.
+
+## Steps performed in this experiment:
+
+1. First deploy the busybox application using the `shared: yes` enabled storage class.
+2. Then dump some dummy data into the application pod's mount point.
+3. Scale the busybox deployment replicas so that multiple pods (here, replicas = 2) share the volume.
+4. Verify data consistency from the scaled application pod: the data should be accessible from both pods and should remain consistent after the application pod is restarted (a rough command-level sketch of these steps follows).
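+
+A rough, command-level sketch of these steps is shown below for illustration only. The deployment name `busybox-shared-mount` and the mount path `/busybox` come from the manifests used by this experiment, while the namespace and the exact pod names are placeholders:
+
+```
+# write some dummy data from the first pod into the shared mount
+kubectl exec -n <app-namespace> <busybox-shared-mount-pod-1> -- sh -c "date > /busybox/shared.txt"
+
+# scale the deployment so that a second pod mounts the same shared volume
+kubectl scale deploy/busybox-shared-mount -n <app-namespace> --replicas=2
+
+# read the same data back from the second pod
+kubectl exec -n <app-namespace> <busybox-shared-mount-pod-2> -- cat /busybox/shared.txt
+```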
+ +## How to run + +- This experiment accepts the parameters in form of kubernetes job environmental variables. +- For running this experiment of zfspv shared mount, first clone openens/zfs-localpv[https://github.com/openebs/zfs-localpv] repo and then apply rbac and crds for e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +then update the needed test specific values in run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables description is provided with the comments in the same file. + +After creating kubernetes job, when the job’s pod is instantiated, we can see the logs of that pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er ) and check its phase (Running or Completed) and result (Pass or Fail). + +``` +kubectl get e2er +kubectl get e2er zfspv-shared-mount -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfspv-shared-mount -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-shared-mount/busybox_share.j2 b/e2e-tests/experiments/functional/zfspv-shared-mount/busybox_share.j2 new file mode 100644 index 0000000..9776801 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-shared-mount/busybox_share.j2 @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: busybox-shared-mount + namespace: "{{ app_ns }}" + labels: + app: shared-mount +spec: + selector: + matchLabels: + app: shared-mount + template: + metadata: + labels: + app: shared-mount + spec: + containers: + - name: app-busybox + imagePullPolicy: IfNotPresent + image: gcr.io/google-containers/busybox + command: ["/bin/sh"] + args: ["-c", "while true; do sleep 10;done"] + env: + volumeMounts: + - name: data-vol + mountPath: /busybox + volumes: + - name: data-vol + persistentVolumeClaim: + claimName: "{{ app_pvc }}" + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: "{{ app_ns }}" + name: "{{ app_pvc }}" +spec: + storageClassName: "{{ storage_class }}" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-shared-mount/run_e2e_test.yml b/e2e-tests/experiments/functional/zfspv-shared-mount/run_e2e_test.yml new file mode 100644 index 0000000..ffa13ab --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-shared-mount/run_e2e_test.yml @@ -0,0 +1,59 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: zfspv-shared-mount + namespace: e2e +data: + parameters.yml: | + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-shared-mount- + namespace: e2e +spec: + template: + metadata: + labels: + test: shared-mount-volume + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE ## Namespace in which application is deployed + value: '' + + - name: APP_PVC ## PVC name of the application + value: '' + + - name: STORAGE_CLASS ## Give the storage class supporting shared volume mount + value: '' + + - name: ZFS_OPERATOR_NAMESPACE ## Namespace in which all the resources 
created by zfs driver will be present + value: '' ## for e.g. zfsvolume (zv) will be in this namespace + + - name: DATA_PERSISTENCE ## Give values according to the application + value: '' ## For `Busybox` : `busybox` + + - name: ACTION ## `provision` OR `deprovision` + value: '' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-shared-mount/test.yml -i /etc/ansible/hosts -vv; exit 0"] + + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: zfspv-shared-mount \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-shared-mount/test.yml b/e2e-tests/experiments/functional/zfspv-shared-mount/test.yml new file mode 100644 index 0000000..8cf65aa --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-shared-mount/test.yml @@ -0,0 +1,243 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + - block: + + ## Generating the testname for zfspv shared mount volume test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + + - name: Create application namespace + shell: > + kubectl create ns {{ app_ns }} + args: + executable: /bin/bash + + - name: Update the busybox application template with test specific values + template: + src: busybox_share.j2 + dest: busybox_share.yml + + - name: Deploy the busybox application using above storage-class + shell: > + kubectl apply -f busybox_share.yml + args: + executable: /bin/bash + + - name: Check the pvc status + shell: > + kubectl get pvc -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: pvc_status + until: pvc_status.stdout == 'Bound' + delay: 2 + retries: 20 + + - name: Get the application deployment name + shell: > + kubectl get deploy -n {{ app_ns }} --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_deploy_name + + - name: Get the application pod name + shell: > + kubectl get pod -n {{ app_ns }} -l app=shared-mount --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_pod_name + + - name: Check if the application pod is running + shell: > + kubectl get pod {{ app_pod_name.stdout }} -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: app_pod_status + until: "app_pod_status.stdout == 'Running'" + delay: 3 + retries: 30 + + - name: Create some test data into the application pod + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name.stdout }}" + + - name: Scale the deployment replicas to use shared mount volume by multiple pods + shell: > + kubectl scale deploy/{{ app_deploy_name.stdout }} -n {{ app_ns }} --replicas=2 + args: + executable: /bin/bash + + - name: Check the no of replicas in deployment spec + shell: > + kubectl get deploy/{{ app_deploy_name.stdout }} -n {{ app_ns }} --no-headers + -o custom-columns=:.status.readyReplicas + args: + executable: /bin/bash + register: replica_count + until: "replica_count.stdout == '2'" + delay: 2 + retries: 20 + + - name: Get the new application pod name after scaling the deployment replicas + shell: > + kubectl get pod -n {{ 
app_ns }} -l app=shared-mount --no-headers + -o custom-columns=:.metadata.name | grep -v {{ app_pod_name.stdout }} + args: + executable: /bin/bash + register: scaled_app_pod_name + + - name: Check the container status of the new application pod + shell: > + kubectl get pod {{ scaled_app_pod_name.stdout }} -n {{ app_ns }} --no-headers + -o jsonpath='{.status.containerStatuses[].state}' | grep running + args: + executable: /bin/bash + register: containerStatus + until: "'running' in containerStatus.stdout" + delay: 2 + retries: 50 + + - name: Label the scaled application pod + shell: > + kubectl label pod {{ scaled_app_pod_name.stdout }} -n {{ app_ns }} name=share-pod + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Verify if the shared volume data is accessible from both the application pods + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + vars: + status: 'VERIFY' + ns: "{{ app_ns }}" + label: name=share-pod + pod_name: "{{ app_pod_name.stdout }}" + + - name: Delete the dumped data files from scaled application pod + shell: > + kubectl exec -ti {{ scaled_app_pod_name.stdout }} -n {{ app_ns }} -- sh -c + 'rm -rf /busybox/*' + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Again dumping some dummy data, this time from scaled application pod + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ scaled_app_pod_name.stdout }}" + + - name: Get the application pod name + shell: > + kubectl get pod -n {{ app_ns }} --no-headers -o custom-columns=:.metadata.name | grep -v {{ scaled_app_pod_name.stdout }} + args: + executable: /bin/bash + register: app_pod_name + + - name: Label the application pod + shell: > + kubectl label pod {{ app_pod_name.stdout }} -n {{ app_ns }} name=previous-pod + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + ## To keep the application pod label and deployment label same we label the deployment as well + ## This will help in filtering while running volume-snapshot test. 
+            - name: Label the application deployment
+              shell: >
+                kubectl label deploy/{{ app_deploy_name.stdout }} -n {{ app_ns }} name=previous-pod
+              args:
+                executable: /bin/bash
+              register: status
+              failed_when: "status.rc != 0"
+
+            - name: Verify the data consistency from the previous pod
+              include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml"
+              vars:
+                status: 'VERIFY'
+                ns: "{{ app_ns }}"
+                label: name=previous-pod
+                pod_name: "{{ scaled_app_pod_name.stdout }}"
+
+          when: action == 'provision'
+
+        - block:
+
+            - name: Get the zvolume name from the pvc name
+              shell: >
+                kubectl get pvc {{ app_pvc }} -n {{ app_ns }} -o jsonpath='{.spec.volumeName}'
+              args:
+                executable: /bin/bash
+              register: zvol_name
+
+            - name: Update the busybox deployment template with test specific values
+              template:
+                src: busybox_share.j2
+                dest: busybox_share.yml
+
+            - name: Delete the application deployment
+              shell: >
+                kubectl delete -f busybox_share.yml
+              args:
+                executable: /bin/bash
+              register: status
+
+            - name: Verify that application pods have been deleted successfully
+              shell: >
+                kubectl get pods -n {{ app_ns }}
+              args:
+                executable: /bin/bash
+              register: app_pod_status
+              until: "'No resources found' in app_pod_status.stderr"
+              delay: 3
+              retries: 30
+
+            - name: Verify the successful deletion of pvc in {{ app_ns }} namespace
+              shell: >
+                kubectl get pvc -n {{ app_ns }}
+              args:
+                executable: /bin/bash
+              register: pvc_status
+              failed_when: "app_pvc in pvc_status.stdout"
+
+            - name: Verify the successful deletion of zvolume
+              shell: >
+                kubectl get zv -n {{ zfs_operator_ns }}
+              args:
+                executable: /bin/bash
+              register: zv_status
+              until: "zvol_name.stdout not in zv_status.stdout"
+              delay: 3
+              retries: 30
+
+          when: action == 'deprovision'
+
+        - set_fact:
+            flag: "Pass"
+
+      rescue:
+        - set_fact:
+            flag: "Fail"
+
+      always:
+        ## RECORD END-OF-TEST IN e2e RESULT CR
+        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
+          vars:
+            status: 'EOT'
\ No newline at end of file
diff --git a/e2e-tests/experiments/functional/zfspv-shared-mount/test_vars.yml b/e2e-tests/experiments/functional/zfspv-shared-mount/test_vars.yml
new file mode 100644
index 0000000..cdec365
--- /dev/null
+++ b/e2e-tests/experiments/functional/zfspv-shared-mount/test_vars.yml
@@ -0,0 +1,13 @@
+test_name: zfspv-shared-mount
+
+app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
+
+app_pvc: "{{ lookup('env','APP_PVC') }}"
+
+data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}"
+
+storage_class: "{{ lookup('env','STORAGE_CLASS') }}"
+
+action: "{{ lookup('env','ACTION') }}"
+
+zfs_operator_ns: "{{ lookup('env', 'ZFS_OPERATOR_NAMESPACE') }}"
\ No newline at end of file
diff --git a/e2e-tests/experiments/functional/zfspv-snapshot/README.md b/e2e-tests/experiments/functional/zfspv-snapshot/README.md
new file mode 100644
index 0000000..b309cab
--- /dev/null
+++ b/e2e-tests/experiments/functional/zfspv-snapshot/README.md
@@ -0,0 +1,64 @@
+## About this experiment
+
+This experiment creates a volume snapshot of a zfs-localpv volume, which can later be used to create a clone. The snapshot is created in the same namespace as the application pvc. Note that this experiment scales the application down before taking the snapshot; this is done to create an application-consistent volume snapshot. After the snapshot is created, the application is scaled up again.
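+
+For reference, the manifest this experiment renders from its `volume_snapshot.j2` template looks roughly like the sketch below; the name, namespace, snapshot class and PVC come from the job's env variables and are shown here as placeholders.
+
+```
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshot
+metadata:
+  name: <snapshot-name>
+  namespace: <app-namespace>
+spec:
+  volumeSnapshotClassName: <zfspv-snapshot-class>
+  source:
+    persistentVolumeClaimName: <app-pvc>
+```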
+
+## Supported platforms:
+
+K8s : 1.18+
+
+OS : Ubuntu, CentOS
+
+ZFS : 0.7, 0.8
+
+## Entry-Criteria
+
+- K8s cluster should be in healthy state including all desired nodes in ready state.
+- zfs-controller and node-agent daemonset pods should be in running state.
+- Application should be deployed successfully consuming the zfs-localpv storage.
+- Volume snapshot class of zfs csi driver should be present to create the snapshot.
+
+## Steps performed
+
+This experiment consists of provisioning and deprovisioning of the volume snapshot, but performs one task at a time based on the ACTION env value < provision or deprovision >.
+
+Provision:
+
+- Check the application pod status, it should be in running state.
+- If the DATA_PERSISTENCE check is enabled then dump some data into the application pod mount point.
+- Check if the volume snapshot class is present.
+- Scale down the application and wait till the pod terminates successfully.
+- Create the volume snapshot in the application namespace itself.
+- Check the created snapshot resource and make sure the readyToUse field is true.
+- Scale up the application again.
+
+Deprovision:
+
+- Delete the volume snapshot from the application namespace.
+- Verify that the volume snapshot content is no longer present.
+
+## How to run
+
+- This experiment accepts the parameters in form of kubernetes job environmental variables.
+- For running this experiment of zfspv snapshot, clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and then first apply the rbac and crds for the e2e-framework.
+```
+kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
+kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
+```
+Then update the needed test-specific values in the run_e2e_test.yml file and create the kubernetes job.
+```
+kubectl create -f run_e2e_test.yml
+```
+The description of all the env variables is provided as comments in the same file.
+After creating the kubernetes job, when the job's pod is instantiated, we can see the logs of that pod which is executing the test-case.
+
+```
+kubectl get pods -n e2e
+kubectl logs -f -n e2e
+```
+To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).
+ +``` +kubectl get e2er +kubectl get e2er zfspv-snapshot -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfspv-snapshot -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-snapshot/run_e2e_test.yml b/e2e-tests/experiments/functional/zfspv-snapshot/run_e2e_test.yml new file mode 100644 index 0000000..baf9544 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-snapshot/run_e2e_test.yml @@ -0,0 +1,65 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: zfspv-snapshot-clone + namespace: e2e +data: + parameters.yml: | + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfspv-snapshot- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfspv-snapshot + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE ## Namespace in which application is deployed + value: '' + + - name: APP_PVC ## PersistentVolumeClaim Name for the application + value: '' + + - name: APP_LABEL ## Label value of the application + value: '' + + - name: ZFS_OPERATOR_NAMESPACE ## Namespace in which all the resources created by zfs driver will be present + value: '' ## for e.g. zfsvolume (zv) will be in this namespace + + - name: SNAPSHOT_CLASS ## Name of zfspv volumesnapshotclass + value: '' + + - name: SNAPSHOT_NAME ## Snapshot will be created with this name in application namespace + value: '' + + - name: ACTION ## Use 'deprovision' for snapshot cleanup + value: 'provision' + + - name: DATA_PERSISTENCE ## Give values according to the application + value: '' ## For `Busybox` : `busybox` & For `Percona` : `mysql` + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zfspv-snapshot/test.yml -i /etc/ansible/hosts -vv; exit 0"] + + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: zfspv-snapshot-clone \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-snapshot/test.yml b/e2e-tests/experiments/functional/zfspv-snapshot/test.yml new file mode 100644 index 0000000..ea0a385 --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-snapshot/test.yml @@ -0,0 +1,228 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + - block: + + ## Generating the testname for zfspv snapshot e2e-test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + - name: Get the name of application pod + shell: > + kubectl get pod -n {{ app_ns }} -l {{ app_label }} + --no-headers -o custom-columns=:.metadata.name | shuf -n1 + args: + executable: /bin/bash + register: app_pod_name + + - name: Check if the application pod are in running state + shell: > + kubectl get pods {{ app_pod_name.stdout }} -n {{ app_ns }} + --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: app_pod_status + failed_when: "'Running' not in app_pod_status.stdout" + + - block: + - name: Create some test data into the application + include_tasks: "/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml" + 
vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence == 'busybox' + + - block: + - name: Create some test data into the application + include_tasks: "/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml" + vars: + status: 'LOAD' + ns: "{{ app_ns }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence == 'mysql' + + - name: Update the snapshot template with the test specific variables + template: + src: volume_snapshot.j2 + dest: volume_snapshot.yml + + - name: Check if the volume snapshot class is present + shell: > + kubectl get volumesnapshotclass + args: + executable: /bin/bash + register: snapshot_class_status + failed_when: "snapshot_class not in snapshot_class_status.stdout" + + - name: Get the application deployment name + shell: > + kubectl get deployment -n {{ app_ns }} -l {{ app_label }} --no-headers + -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_deployment_name + + - name: Get the replica count for application deployment + shell: > + kubectl get deployment {{ app_deployment_name.stdout }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.replicas + args: + executable: /bin/bash + register: replica_count + + - name: Scale down the application before taking the zfs vol-snapshot + shell: > + kubectl scale deployment/{{ app_deployment_name.stdout }} -n {{ app_ns }} --replicas=0 + args: + executable: /bin/bash + + - name: Verify that modified replica count is zero + shell: > + kubectl get deployment {{ app_deployment_name.stdout }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.replicas + args: + executable: /bin/bash + register: modify_replica_count + until: "modify_replica_count.stdout == \"0\"" + delay: 3 + retries: 60 + + - name: Verify that the application pod is not present after scaling down the deployment + shell: > + kubectl get pods -n {{ app_ns }} + args: + executable: /bin/bash + register: app_pod_status + until: "app_pod_name.stdout not in app_pod_status.stdout" + delay: 3 + retries: 60 + + ## As we are checking the status of only one pod if is terminated successfully + ## but in case of shared mount support other pods may not be terminate at the same time + ## to avoid such condition here we have manual wait for 30 seconds. 
+ - name: Manual wait for some time + shell: sleep 30 + + - name: create zfspv volumes snapshot + shell: > + kubectl create -f volume_snapshot.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Get the uid of the snapshot taken + shell: > + kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.metadata.uid + args: + executable: /bin/bash + register: snap_uid + + - set_fact: + snapshot_uid: "{{ snap_uid.stdout }}" + + - name: Confirm that volumesnapshot {{ snapshot_name }} is ready to use + shell: > + kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }} + -o jsonpath='{.status.readyToUse}' + args: + executable: /bin/bash + register: isSnapshotReady + until: "isSnapshotReady.stdout == 'true'" + delay: 3 + retries: 50 + + - name: Check the status for openebs resource for the created snapshot {{ snapshot_name }} + shell: > + kubectl get zfssnap -n {{ zfs_operator_ns }} + -o jsonpath='{.items[?(@.metadata.name=="snapshot-{{ snapshot_uid }}")].status.state}' + args: + executable: /bin/bash + register: zfssnap_status + until: "zfssnap_status.stdout == 'Ready'" + delay: 3 + retries: 50 + + - name: Scale up the application deployment after taking zfs-volume-snapshot + shell: > + kubectl scale deployment/{{ app_deployment_name.stdout }} -n {{ app_ns }} --replicas={{ replica_count.stdout}} + args: + executable: /bin/bash + + - name: Verify that all the replicas are ready of application deployment + shell: > + kubectl get deployment {{ app_deployment_name.stdout }} -n {{ app_ns }} --no-headers + -o custom-columns=:.status.readyReplicas + args: + executable: /bin/bash + register: ready_replica_count + until: ready_replica_count.stdout == replica_count.stdout + delay: 3 + retries: 50 + + when: action == 'provision' + + - block: + + - name: Obtain the SnapshotContent of the snapshot + shell: > + kubectl get volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }} + --no-headers -o custom-columns=:.spec.snapshotContentName + args: + executable: /bin/bash + register: snapshot_content + + - set_fact: + snapshotcontent: "{{ snapshot_content.stdout }}" + + - name: Delete the volume snapshot + shell: > + kubectl delete volumesnapshot.snapshot {{ snapshot_name }} -n {{ app_ns }} + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Verify if the volume snapshot is deleted successfully + shell: > + kubectl get volumesnapshot.snapshot -n {{ app_ns }} + args: + executable: /bin/bash + register: ss_name + failed_when: "snapshot_name in ss_name.stdout" + + - name: Verify if the volumesnapshotcontent is deleted + shell: > + kubectl get volumesnapshotcontent -n {{ app_ns }} + args: + executable: /bin/bash + register: ss_content + failed_when: "snapshotcontent in ss_content.stdout" + + when: action == 'deprovision' + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-snapshot/test_vars.yml b/e2e-tests/experiments/functional/zfspv-snapshot/test_vars.yml new file mode 100644 index 0000000..b389caf --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-snapshot/test_vars.yml @@ -0,0 +1,17 @@ +test_name: zfspv-snapshot + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +pvc_name: "{{ lookup('env','APP_PVC') }}" + 
+snapshot_class: "{{ lookup('env','SNAPSHOT_CLASS') }}" + +snapshot_name: "{{ lookup('env','SNAPSHOT_NAME') }}" + +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" + +action: "{{ lookup('env','ACTION') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zfspv-snapshot/volume_snapshot.j2 b/e2e-tests/experiments/functional/zfspv-snapshot/volume_snapshot.j2 new file mode 100644 index 0000000..7da55da --- /dev/null +++ b/e2e-tests/experiments/functional/zfspv-snapshot/volume_snapshot.j2 @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: {{ snapshot_name }} + namespace: {{ app_ns }} +spec: + volumeSnapshotClassName: {{ snapshot_class }} + source: + persistentVolumeClaimName: {{ pvc_name }} \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-properties-verify/README.md b/e2e-tests/experiments/functional/zv-properties-verify/README.md new file mode 100644 index 0000000..3168036 --- /dev/null +++ b/e2e-tests/experiments/functional/zv-properties-verify/README.md @@ -0,0 +1,52 @@ +## About this experiment + +This experiment verifies that zvolume properties are same as set via the stoarge-class. + +## Supported platforms: + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- K8s cluster should be in healthy state including all desired nodes in ready state. +- zfs-controller and node-agent daemonset pods should be in running state. + +## Steps performed + +- Get the zvolume name and the storage class name by which volume was provisioned. +- After that following properties are verified to be same from zvol properties as well as from storage class. + 1. File-system type + 2. Compression + 3. Dedup + 4. Recordsize / volblocksize + +## How to run + +- This experiment accepts the parameters in form of kubernetes job environmental variables. +- For running this experiment of zv properties verify, clone openens/zfs-localpv[https://github.com/openebs/zfs-localpv] repo and then first apply rbac and crds for e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +then update the needed test specific values in run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables description is provided with the comments in the same file. +After creating kubernetes job, when the job’s pod is instantiated, we can see the logs of that pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er ) and check its phase (Running or Completed) and result (Pass or Fail). 
+ +``` +kubectl get e2er +kubectl get e2er zv-properties-verify -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zv-properties-verify -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-properties-verify/run_e2e_test.yml b/e2e-tests/experiments/functional/zv-properties-verify/run_e2e_test.yml new file mode 100644 index 0000000..c685395 --- /dev/null +++ b/e2e-tests/experiments/functional/zv-properties-verify/run_e2e_test.yml @@ -0,0 +1,36 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zv-properties-verify- + namespace: e2e +spec: + template: + metadata: + labels: + test: zv-properties-verify + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + ## Namespace in which application is deployed + - name: APP_NAMESPACE + value: '' + + ## PersistentVolumeClaim Name for the application + - name: APP_PVC + value: '' + + # This is the namespace where the zfs driver created all its resources. + # By default it is in openebs namespace. + - name: ZFS_OPERATOR_NAMESPACE + value: 'openebs' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zv-properties-verify/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-properties-verify/test.yml b/e2e-tests/experiments/functional/zv-properties-verify/test.yml new file mode 100644 index 0000000..996d630 --- /dev/null +++ b/e2e-tests/experiments/functional/zv-properties-verify/test.yml @@ -0,0 +1,152 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for zfs volume properties verify test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + + - name: Get the zvolume name + shell: > + kubectl get pvc {{ pvc_name }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.volumeName + args: + executable: /bin/bash + register: zvol_name + + - name: Record the zvolume name + set_fact: + zv_name: "{{ zvol_name.stdout }}" + + - name: Get the storage class name used to create volume + shell: > + kubectl get pvc {{ pvc_name }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.storageClassName + args: + executable: /bin/bash + register: stg_class_name + + - name: Record the storage class name + set_fact: + sc_name: "{{ stg_class_name.stdout }}" + + - name: Get the value of compression parameter from the storage class + shell: > + kubectl get sc {{ sc_name }} --no-headers + -o custom-columns=:.parameters.compression + args: + executable: /bin/bash + register: compression_parameter + + - name: Compare this value with the compression field in zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.compression + args: + executable: /bin/bash + register: result + failed_when: compression_parameter.stdout != result.stdout + + - name: Get the value of dedup parameter from the storage class + shell: > + kubectl get sc {{ sc_name }} --no-headers + -o custom-columns=:.parameters.dedup + args: + executable: /bin/bash + 
register: dedup_parameter + + - name: Compare this value with the dedup field in zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.dedup + args: + executable: /bin/bash + register: result + failed_when: dedup_parameter.stdout != result.stdout + + - name: Get the file system type to be created on application mount from the storage class + shell: > + kubectl get sc {{ sc_name }} --no-headers + -o custom-columns=:.parameters.fstype + args: + executable: /bin/bash + register: fstype_parameter + + - name: Compare this value with the fstype field in zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.fsType + args: + executable: /bin/bash + register: result + failed_when: fstype_parameter.stdout != result.stdout + + - block: + + - name: Get the value of recordsize from the storage class when fstype is zfs + shell: > + kubectl get sc {{ sc_name }} --no-headers + -o custom-columns=:.parameters.recordsize + args: + executable: /bin/bash + register: recordsize_parameter + + - name: Compare this value with the recordsize field in zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.recordsize + args: + executable: /bin/bash + register: result + failed_when: recordsize_parameter.stdout != result.stdout + + when: fstype_parameter.stdout == "zfs" + + - block: + + - name: Get the value of volblocksize from the storage class when fstype is xfs or ext + shell: > + kubectl get sc {{ sc_name }} --no-headers + -o custom-columns=:.parameters.volblocksize + args: + executable: /bin/bash + register: volblocksize_parameter + + - name: Compare this value with the volblocksize field in the zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.volblocksize + args: + executable: /bin/bash + register: result + failed_when: volblocksize_parameter.stdout != result.stdout + + when: + - fstype_parameter.stdout == "xfs" or fstype_parameter.stdout == "ext4" or + fstype_parameter.stdout == "ext3" or fstype_parameter.stdout == "ext2" or + fstype_parameter.stdout == "btrfs" + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + # RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-properties-verify/test_vars.yml b/e2e-tests/experiments/functional/zv-properties-verify/test_vars.yml new file mode 100644 index 0000000..750e12a --- /dev/null +++ b/e2e-tests/experiments/functional/zv-properties-verify/test_vars.yml @@ -0,0 +1,7 @@ +test_name: zv-properties-verify + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +pvc_name: "{{ lookup('env','APP_PVC') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-property-runtime-modify/README.md b/e2e-tests/experiments/functional/zv-property-runtime-modify/README.md new file mode 100644 index 0000000..92e001f --- /dev/null +++ b/e2e-tests/experiments/functional/zv-property-runtime-modify/README.md @@ -0,0 +1,49 @@ +## About this experiment + +This experiment verifies runtime modification of zvolume properties which were set via storage-class. 
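+
+For reference, the change this experiment automates can be sketched by hand as below; the zv name, namespace and pool name are placeholders, and the `zfs get` command must be run on the node where the volume lives (the test does this through a privileged daemonset pod).
+
+```
+# edit the desired properties (e.g. compression, dedup) on the ZFSVolume (zv) resource
+kubectl edit zv <zv-name> -n <zfs-operator-namespace>
+# on the node, confirm the dataset picked up the new values
+zfs get compression,dedup <zpool-name>/<zv-name>
+```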
+ +## Supported platforms: + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- K8s cluster should be in healthy state including all desired nodes in ready state. +- zfs-controller and node-agent daemonset pods should be in running state. + +## Steps performed + +- Get the zvolume name and then obtain properties like compression, dedup from that zvolume. +- After that update these parameter properties and apply the zvolume yaml. +- Verify update values from zvolume and from node as well where volume was provisioned. + +## How to run + +- This experiment accepts the parameters in form of kubernetes job environmental variables. +- For running this experiment of run time modification of zv properties, clone openens/zfs-localpv[https://github.com/openebs/zfs-localpv] repo and then first apply rbac and crds for e2e-framework. +``` +kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml +kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml +``` +then update the needed test specific values in run_e2e_test.yml file and create the kubernetes job. +``` +kubectl create -f run_e2e_test.yml +``` +All the env variables description is provided with the comments in the same file. +After creating kubernetes job, when the job’s pod is instantiated, we can see the logs of that pod which is executing the test-case. + +``` +kubectl get pods -n e2e +kubectl logs -f -n e2e +``` +To get the test-case result, get the corresponding e2e custom-resource `e2eresult` (short name: e2er ) and check its phase (Running or Completed) and result (Pass or Fail). + +``` +kubectl get e2er +kubectl get e2er zv-property-runtime-modify -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zv-property-runtime-modify -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-property-runtime-modify/run_e2e_test.yml b/e2e-tests/experiments/functional/zv-property-runtime-modify/run_e2e_test.yml new file mode 100644 index 0000000..af40969 --- /dev/null +++ b/e2e-tests/experiments/functional/zv-property-runtime-modify/run_e2e_test.yml @@ -0,0 +1,51 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zv-property-runtime-modify- + namespace: e2e +spec: + template: + metadata: + labels: + test: zv-property-runtime-modify + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE ## Namespace in which application is deployed + value: '' + + - name: APP_LABEL ## Application label + value: '' + + - name: FILE_SYSTEM_TYPE ## Give the file_system_name (values: zfs, ext4 or xfs) + value: '' + + - name: ZFS_OPERATOR_NAMESPACE ## Namespace in which all the resources created by zfs driver will be present + value: '' ## for e.g. 
zfsvolume (zv) will be in this namespace + + - name: APP_PVC ## PersistentVolumeClaim Name for the application + value: '' + + ## Give values to modify the zvolume parameters value at runtime + ## Supported values ("on", "off", "lzjb", "gzip", "gzip-[1-9]", "zle" and "lz4") + - name: NEW_COMPRESSION_PARAMETER + value: '' + + ## supported values ("on" and "off") + - name: NEW_DEDUP_PARAMETER + value: '' + + ## Provide value of zpool name from which desired dataset/zvolume is provisioned + - name: ZPOOL_NAME + value: '' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/zv-property-runtime-modify/test.yml -i /etc/ansible/hosts -vv; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-property-runtime-modify/test.yml b/e2e-tests/experiments/functional/zv-property-runtime-modify/test.yml new file mode 100644 index 0000000..822f3d1 --- /dev/null +++ b/e2e-tests/experiments/functional/zv-property-runtime-modify/test.yml @@ -0,0 +1,188 @@ +- hosts: localhost + connection: local + gather_facts: False + + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for zv property runtime modify test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - block: + - name: Update the daemonset spec template with test specific values + template: + src: zv_property_ds.j2 + dest: zv_property_ds.yml + + - name: Create a daemonset with privileged access to verify zvol properties at node level + shell: > + kubectl create -f ./zv_property_ds.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Confirm that the ds pods are running on all nodes + shell: > + kubectl get pod -l test=zv-property-modify-{{ fs_type }} + --no-headers -o custom-columns=:status.phase | sort | uniq + args: + executable: /bin/bash + register: result + until: "result.stdout == 'Running'" + delay: 5 + retries: 20 + + - name: Get the zvolume name + shell: > + kubectl get pvc {{ pvc_name }} -n {{ app_ns }} --no-headers + -o custom-columns=:.spec.volumeName + args: + executable: /bin/bash + register: zvol_name + + - name: Record the zvolume name + set_fact: + zv_name: "{{ zvol_name.stdout }}" + + - name: Get the node name on which volume is provisioned + shell: > + kubectl get zv {{ zvol_name.stdout }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.ownerNodeID + args: + executable: /bin/bash + register: vol_node_name + + - name: Get the daemonset pod name which is scheduled on the same node as of volume node + shell: > + kubectl get pod -l test=zv-property-modify-{{ fs_type }} --no-headers + -o jsonpath='{.items[?(@.spec.nodeName=="{{ vol_node_name.stdout }}")].metadata.name}' + args: + executable: /bin/bash + register: ds_pod_name + + - name: Record the daemonset pod name scheduled on the same node with application pod + set_fact: + ds_pod: "{{ ds_pod_name.stdout }}" + + - name: Get the compression parameter value from the zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.compression + args: + executable: /bin/bash + register: compress_val + + - name: Get the Dedup parameter value from the zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.dedup + args: + executable: /bin/bash + 
register: dedup_val + + - name: Get the yaml file for zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} -o yaml > zv.yml + args: + executable: /bin/bash + + - name: Modify the compression parameter value + replace: + path: zv.yml + regexp: 'compression: "{{ compress_val.stdout }}"' + replace: 'compression: "{{ new_compress_val }}"' + + - name: Modify the dedup parameter value + replace: + path: zv.yml + regexp: 'dedup: "{{ dedup_val.stdout }}"' + replace: 'dedup: "{{ new_dedup_val }}"' + + - name: Apply the modified yaml to update the new value of zvolume parameters + shell: > + kubectl apply -f zv.yml + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + + - name: Verify that compression parameter value is modified in zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.compression + args: + executable: /bin/bash + register: modified_compress_val + until: modified_compress_val.stdout == "{{ new_compress_val }}" + delay: 2 + retries: 20 + + - name: Verify that compression parameter value is modified in dataset/zvolume on node + shell: > + kubectl exec -ti {{ ds_pod }} -- bash -c 'zfs get all {{ zpool_name }}/{{ zv_name }} | grep compression' + args: + executable: /bin/bash + register: modified_compress_val + until: "new_compress_val in modified_compress_val.stdout" + delay: 2 + retries: 20 + + - name: Verify that dedup parameter value is modified in zvolume + shell: > + kubectl get zv {{ zv_name }} -n {{ zfs_operator_ns }} --no-headers + -o custom-columns=:.spec.dedup + args: + executable: /bin/bash + register: modified_dedup_val + until: modified_dedup_val.stdout == "{{ new_dedup_val }}" + delay: 2 + retries: 20 + + - name: Verify that compression parameter value is modified in dataset/zvolume on node + shell: > + kubectl exec -ti {{ ds_pod }} -- bash -c 'zfs get all {{ zpool_name }}/{{ zv_name }} | grep dedup' + args: + executable: /bin/bash + register: modified_dedup_val + until: "new_dedup_val in modified_dedup_val.stdout" + delay: 2 + retries: 20 + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + + - name: Get the name of daemonset + shell: > + kubectl get ds -n e2e -o jsonpath='{.items[?(@.spec.selector.matchLabels.test=="zv-property-modify-{{ fs_type }}")].metadata.name}' + args: + executable: /bin/bash + register: ds_name + + - name: Delete the daemonset with privileged access to verify zvol properties at node level + shell: > + kubectl delete ds {{ ds_name.stdout }} -n e2e + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + ## Record EOT (end of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-property-runtime-modify/test_vars.yml b/e2e-tests/experiments/functional/zv-property-runtime-modify/test_vars.yml new file mode 100644 index 0000000..393cd68 --- /dev/null +++ b/e2e-tests/experiments/functional/zv-property-runtime-modify/test_vars.yml @@ -0,0 +1,17 @@ +test_name: zv-property-runtime-modify + +app_ns: "{{ lookup('env','APP_NAMESPACE') }}" + +app_label: "{{ lookup('env','APP_LABEL') }}" + +fs_type: "{{ lookup('env','FILE_SYSTEM_TYPE') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" + +pvc_name: "{{ lookup('env','APP_PVC') }}" + +new_compress_val: "{{ lookup('env','NEW_COMPRESSION_PARAMETER') }}" + 
+new_dedup_val: "{{ lookup('env','NEW_DEDUP_PARAMETER') }}" + +zpool_name: "{{ lookup('env','ZPOOL_NAME') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/functional/zv-property-runtime-modify/zv_property_ds.j2 b/e2e-tests/experiments/functional/zv-property-runtime-modify/zv_property_ds.j2 new file mode 100644 index 0000000..02d0d2a --- /dev/null +++ b/e2e-tests/experiments/functional/zv-property-runtime-modify/zv_property_ds.j2 @@ -0,0 +1,34 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + generateName: zv-property-modify-{{ fs_type }}- +spec: + selector: + matchLabels: + test: zv-property-modify-{{ fs_type }} + template: + metadata: + labels: + test: zv-property-modify-{{ fs_type }} + spec: + containers: + - name: zfsutils + image: quay.io/w3aman/zfsutils:ci + imagePullPolicy: IfNotPresent + command: ['sh', '-c', 'echo Hello! && sleep 1800'] + volumeMounts: + - name: udev + mountPath: /run/udev + - name: device + mountPath: /dev + securityContext: + privileged: true + tty: true + volumes: + - hostPath: + path: /run/udev + name: udev + - hostPath: + path: /dev + name: device \ No newline at end of file diff --git a/e2e-tests/experiments/infra-chaos/node_failure/README.md b/e2e-tests/experiments/infra-chaos/node_failure/README.md new file mode 100644 index 0000000..c97bcb6 --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/node_failure/README.md @@ -0,0 +1,85 @@ +## Experiment Metadata + +| Type | Description | Storage | K8s Platform | +| ----- | ------------------------------------------------------------ | ------- | ----------------- | +| Chaos | Power off the node where application pod is hosted and observe application behavior | OpenEBS | on-premise-VMware | + +## Entry-Criteria + +- Application services are accessible & pods are healthy +- Application writes are successful + +## Exit-Criteria + +- Application pod should be evicted and rescheduled on other node. +- Data written prior to chaos is successfully retrieved/read +- Database consistency is maintained as per db integrity check utils +- Storage target pods are healthy + +### Notes + +- Typically used as a disruptive test, to cause loss of access to storage target by killing the node where application pod is scheduled. +- The container should be created again and it should be healthy. + +## Associated Utils + +- `vm_power_operations.yml`,`mysql_data_persistence.yml`,`busybox_data_persistence.yml` + + + +### Procedure + +This scenario validates the behaviour of application and OpenEBS persistent volumes in the amidst of chaos induced on the node where the application pod is scheduled. It is performed by shutting down the node(virtual machine) created on VMware hypervisor. After attaining podevictiontimeout(5 minutes by default), the application pod is expected to be scheduled on other available node. Due to abrupt shutdown, the old application pod still remain in unknown state. As an impact, volume mount in the newly scheduled pod fails due to multi-attach error. As a workaround for this, the node CR will be deleted which kills the old pod. Then, the application pod is expected to run successfully after 5 minutes. + +Based on the value of env `DATA_PERSISTENCE`, the corresponding data consistency util will be executed. At present, only busybox and percona-mysql are supported. 
Along with specifying the env in the litmus experiment, the user needs to pass a name for the configmap and the data-consistency-specific parameters required via the configmap, in the following format:
+
+```
+  parameters.yml: |
+    blocksize: 4k
+    blockcount: 1024
+    testfile: difiletest
+```
+
+It is recommended to use the test name for the configmap and to mount the corresponding configmap as a volume in the litmus pod. The above snippet holds the parameters required for validating data consistency in the busybox application.
+
+For percona-mysql, the following parameters are to be injected into the configmap.
+
+```
+  parameters.yml: |
+    dbuser: root
+    dbpassword: k8sDem0
+    dbname: tdb
+```
+
+The configmap data will be utilised by the litmus experiment as its variables while executing the scenario.
+
+Based on the data provided, litmus checks if the data is consistent after recovering from the induced chaos.
+
+The ESX password has to be updated through the k8s secret created. The litmus runner can retrieve the password from the secret as an environment variable and utilize it for performing admin operations on the server.
+
+Note: To perform admin operations on VMware, the VM display name in the hypervisor should match its hostname.
+
+## Litmus experiment Environment Variables
+
+### Application
+
+| Parameter        | Description                                                  |
+| ---------------- | ------------------------------------------------------------ |
+| APP_NAMESPACE    | Namespace in which application pods are deployed             |
+| APP_LABEL        | Unique Labels in `key=value` format of application deployment |
+| APP_PVC          | Name of persistent volume claim used for app's volume mounts |
+| TARGET_NAMESPACE | Namespace where OpenEBS is installed                         |
+| DATA_PERSISTENCE | Specify the application name against which data consistency has to be ensured. Example: busybox |
+
+### Chaos
+
+| Parameter    | Description                                                  |
+| ------------ | ------------------------------------------------------------ |
+| PLATFORM     | The platform where k8s cluster is created. Currently, only 'vmware' is supported. |
+| ESX_HOST_IP  | The IP address of ESX server where the virtual machines are hosted. |
+| ESX_PASSWORD | To be passed via the k8s secret created.
| + diff --git a/e2e-tests/experiments/infra-chaos/node_failure/data_persistence.j2 b/e2e-tests/experiments/infra-chaos/node_failure/data_persistence.j2 new file mode 100644 index 0000000..68ba06a --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/node_failure/data_persistence.j2 @@ -0,0 +1,5 @@ +{% if data_persistence is defined and data_persistence == 'mysql' %} + consistencyutil: /e2e-tests/utils/applications/mysql/mysql_data_persistence.yml + {% elif data_persistence is defined and data_persistence == 'busybox' %} + consistencyutil: /e2e-tests/utils/applications/busybox/busybox_data_persistence.yml +{% endif %} diff --git a/e2e-tests/experiments/infra-chaos/node_failure/run_e2e_test.yml b/e2e-tests/experiments/infra-chaos/node_failure/run_e2e_test.yml new file mode 100644 index 0000000..340c1a5 --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/node_failure/run_e2e_test.yml @@ -0,0 +1,108 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-failure + namespace: e2e +data: + parameters.yml: | + +--- +apiVersion: v1 +kind: Secret +metadata: + name: host-password + namespace: e2e +type: Opaque +data: + password: + +--- +apiVersion: v1 +kind: Secret +metadata: + name: node-password + namespace: e2e +type: Opaque +data: + passwordNode: + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: node-failure- + namespace: e2e +spec: + template: + metadata: + labels: + test: node-failure + spec: + serviceAccountName: e2e + restartPolicy: Never + + #nodeSelector: + # kubernetes.io/hostname: + + tolerations: + - key: "infra-aid" + operator: "Equal" + value: "observer" + effect: "NoSchedule" + + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: APP_NAMESPACE + value: '' + + - name: APP_LABEL + value: '' + + - name: APP_PVC + value: '' + + # The IP address of ESX HOST + - name: ESX_HOST_IP + value: "" + + - name: ZFS_OPERATOR_NAMESPACE + value: '' + + - name: USERNAME + value: '' + + - name: ZPOOL_NAME + value: '' + + - name: ESX_PASSWORD + valueFrom: + secretKeyRef: + name: host-password + key: password + + - name: NODE_PASSWORD + valueFrom: + secretKeyRef: + name: node-password + key: passwordNode + + - name: DATA_PERSISTENCE + value: "" + + command: ["/bin/bash"] + args: ["-c", "ANSIBLE_LOCAL_TEMP=$HOME/.ansible/tmp ANSIBLE_REMOTE_TEMP=$HOME/.ansible/tmp ansible-playbook ./e2e-tests/experiments/infra-chaos/node_failure/test.yml -i /etc/ansible/hosts -vv; exit 0"] + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: node-failure \ No newline at end of file diff --git a/e2e-tests/experiments/infra-chaos/node_failure/test.yml b/e2e-tests/experiments/infra-chaos/node_failure/test.yml new file mode 100644 index 0000000..1a6657e --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/node_failure/test.yml @@ -0,0 +1,236 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + + - block: + + ## Generating the testname for node failure chaos test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Identify the data consistency util to be invoked + template: + src: data_persistence.j2 + dest: data_persistence.yml + + - include_vars: + file: 
data_persistence.yml + + - name: Record the data consistency util path + set_fact: + data_consistency_util_path: "{{ consistencyutil }}" + when: data_persistence != '' + + - name: Get application pod name + shell: > + kubectl get pod -n {{ namespace }} -l {{ label }} --no-headers + -o=custom-columns=NAME:".metadata.name" + args: + executable: /bin/bash + register: app_pod_name + + - name: Record the application pod name + set_fact: + application_pod: "{{ app_pod_name.stdout }}" + + - name: Obtain PVC name from the application mount + shell: > + kubectl get pods "{{ app_pod_name.stdout }}" -n "{{ namespace }}" + -o custom-columns=:.spec.volumes[*].persistentVolumeClaim.claimName --no-headers + args: + executable: /bin/bash + register: pvc + + - name: Obtain the Persistent Volume name + shell: > + kubectl get pvc "{{ pvc.stdout }}" -n "{{ namespace }}" --no-headers + -o custom-columns=:.spec.volumeName + args: + executable: /bin/bash + register: pv + failed_when: 'pv.stdout == ""' + + - name: Record the pv name + set_fact: + pv_name: "{{ pv.stdout }}" + + ## Generate dummy test data on the application + - name: Generate data on the specified application. + include: "{{ data_consistency_util_path }}" + vars: + status: 'LOAD' + ns: "{{ namespace }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence != '' + + ## Obtain the node name where application pod is running + - name: Get Application pod Node to perform chaos + shell: > + kubectl get pod {{ app_pod_name.stdout }} -n {{ namespace }} + --no-headers -o custom-columns=:spec.nodeName + args: + executable: /bin/bash + register: app_node + + - name: Record the application pod node name + set_fact: + app_node_name: "{{ app_node.stdout }}" + + ## Execute the chaos util to turn off the target node + - include_tasks: "/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml" + vars: + esx_ip: "{{ host_ip }}" + target_node: "{{ app_node.stdout }}" + operation: "off" + + - name: Check the node status + shell: kubectl get nodes {{ app_node.stdout }} --no-headers + args: + executable: /bin/bash + register: state + until: "'NotReady' in state.stdout" + delay: 15 + retries: 30 + + - name: Check if the new application pod is scheduled after node failure + shell: > + kubectl get pods -n {{ namespace }} -l {{ label }} --no-headers | wc -l + args: + executable: /bin/bash + register: app_pod_count + until: "'2' in app_pod_count.stdout" + delay: 15 + retries: 30 + + - name: Get the new application pod name + shell: > + kubectl get pod -n {{ namespace }} -l {{ label }} --no-headers | grep -v Terminating | awk '{print $1}' + args: + executable: /bin/bash + register: new_app_pod_name + + - name: Record the new application pod name + set_fact: + new_app_pod: "{{ new_app_pod_name.stdout }}" + + - name: Check for the newly created application pod status + shell: > + kubectl get pod {{ new_app_pod }} -n {{ namespace }} --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: new_app_pod_status + failed_when: "'Pending' not in new_app_pod_status.stdout" + + - include_tasks: "/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml" + vars: + esx_ip: "{{ host_ip }}" + target_node: "{{ app_node_name }}" + operation: "on" + + - name: Check the node status + shell: kubectl get node {{ app_node_name }} --no-headers + args: + executable: /bin/bash + register: node_status + until: "'NotReady' not in node_status.stdout" + delay: 10 + retries: 30 + + - name: verify that previous application pod is successfully deleted + shell: 
kubectl get pod -n {{ namespace }} -l {{ label }} --no-headers + args: + executable: /bin/bash + register: app_pod_status + until: "'{{ application_pod }}' not in app_pod_status.stdout" + delay: 5 + retries: 40 + + - name: Get the IP Address of the node on which application pod is scheduled + shell: > + kubectl get nodes {{ app_node_name }} --no-headers -o jsonpath='{.status.addresses[0].address}' + args: + executable: /bin/bash + register: node_ip_address + + - name: Record the IP Address of the node on which application pod is scheduled + set_fact: + node_ip_add: "{{ node_ip_address.stdout }}" + + - name: Check if zpool is present + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} "zpool list" + args: + executable: /bin/bash + register: zpool_status + + - name: Import the zpool after turning on the VM's + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} + "echo {{ node_pwd }} | sudo -S su -c 'zpool import -f {{ zpool_name }}'" + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + when: "'{{ zpool_name }}' not in zpool_status.stdout" + + - name: verify that zfs dataset is available now + shell: > + sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} "zfs list" + args: + executable: /bin/bash + register: zfs_dataset + until: "'{{ zpool_name }}/{{ pv_name }}' in zfs_dataset.stdout" + delay: 10 + retries: 30 + + - name: check the newly scheduled application pod status + shell: kubectl get pod {{ new_app_pod }} -n {{ namespace }} --no-headers -o custom-columns=:.status.phase + args: + executable: /bin/bash + register: new_app_pod_status + until: "'Running' in new_app_pod_status.stdout" + delay: 5 + retries: 50 + + - block: + + - name: Obtain the rescheduled pod name + shell: > + kubectl get pods -n {{ namespace }} -l {{ label }} --no-headers + -o custom-columns=:metadata.name + args: + executable: /bin/bash + register: rescheduled_app_pod + + - name: Verify application data persistence + include: "{{ data_consistency_util_path }}" + vars: + status: 'VERIFY' + ns: "{{ namespace }}" + pod_name: "{{ rescheduled_app_pod.stdout }}" + + when: data_persistence != '' + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/infra-chaos/node_failure/test_vars.yml b/e2e-tests/experiments/infra-chaos/node_failure/test_vars.yml new file mode 100644 index 0000000..e71094e --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/node_failure/test_vars.yml @@ -0,0 +1,24 @@ +--- +# Test specific parameters + +test_name: node-failure + +namespace: "{{ lookup('env','APP_NAMESPACE') }}" + +pvc: "{{ lookup('env','APP_PVC') }}" + +label: "{{ lookup('env','APP_LABEL') }}" + +host_ip: "{{ lookup('env','ESX_HOST_IP') }}" + +esx_pwd: "{{ lookup('env','ESX_PASSWORD') }}" + +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" + +user: "{{ lookup('env','USERNAME') }}" + +zpool_name: "{{ lookup('env','ZPOOL_NAME') }}" + +node_pwd: "{{ lookup('env','NODE_PASSWORD') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/infra-chaos/service_failure/README.md b/e2e-tests/experiments/infra-chaos/service_failure/README.md new file mode 100644 index 0000000..e69de29 diff --git 
a/e2e-tests/experiments/infra-chaos/service_failure/data_persistence.j2 b/e2e-tests/experiments/infra-chaos/service_failure/data_persistence.j2 new file mode 100644 index 0000000..68ba06a --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/service_failure/data_persistence.j2 @@ -0,0 +1,5 @@ +{% if data_persistence is defined and data_persistence == 'mysql' %} + consistencyutil: /e2e-tests/utils/applications/mysql/mysql_data_persistence.yml + {% elif data_persistence is defined and data_persistence == 'busybox' %} + consistencyutil: /e2e-tests/utils/applications/busybox/busybox_data_persistence.yml +{% endif %} diff --git a/e2e-tests/experiments/infra-chaos/service_failure/run_e2e_test.yml b/e2e-tests/experiments/infra-chaos/service_failure/run_e2e_test.yml new file mode 100644 index 0000000..1c420b7 --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/service_failure/run_e2e_test.yml @@ -0,0 +1,93 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: service-failure + namespace: e2e +data: + parameters.yml: | + +--- +apiVersion: v1 +kind: Secret +metadata: + name: node-password + namespace: e2e +type: Opaque +data: + password: + +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: service-failure-chaos- + namespace: e2e +spec: + template: + metadata: + labels: + name: service-failure-chaos + spec: + serviceAccountName: e2e + restartPolicy: Never + + #nodeSelector: + # kubernetes.io/hostname: + + tolerations: + - key: "infra-aid" + operator: "Equal" + value: "observer" + effect: "NoSchedule" + + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: ZFS_OPERATOR_NAMESPACE + value: "" + + - name: APP_NAMESPACE + value: "" + + - name: APP_LABEL + value: "" + + - name: APP_PVC + value: "" + + ## specify the service type to perform chaos + ## for kubelet failure use value: kubelet + ## for container runtime failure use value: (docker,containerd,cri-o) + - name: SVC_TYPE + value: "" + + - name: USERNAME + value: "" + + ## To check data persistence against specific application provide value as below: + ## (For busybox value: "busybox" and For percona value: "mysql") + - name: DATA_PERSISTENCE + value: "" + + - name: NODE_PASSWORD + valueFrom: + secretKeyRef: + name: node-password + key: password + + command: ["/bin/bash"] + args: ["-c", "ANSIBLE_LOCAL_TEMP=$HOME/.ansible/tmp ANSIBLE_REMOTE_TEMP=$HOME/.ansible/tmp ansible-playbook ./e2e-tests/experiments/infra-chaos/service_failure/test.yml -i /etc/ansible/hosts -vv; exit 0"] + + volumeMounts: + - name: parameters + mountPath: /mnt/ + volumes: + - name: parameters + configMap: + name: service-failure \ No newline at end of file diff --git a/e2e-tests/experiments/infra-chaos/service_failure/test.yml b/e2e-tests/experiments/infra-chaos/service_failure/test.yml new file mode 100644 index 0000000..732a3ad --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/service_failure/test.yml @@ -0,0 +1,83 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + - /mnt/parameters.yml + + tasks: + - block: + + ## Generating the testname for service failure chaos test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Identify the data consistency util to be invoked + template: + src: 
data_persistence.j2 + dest: data_persistence.yml + + - include_vars: + file: data_persistence.yml + + - name: Record the data consistency util path + set_fact: + data_consistency_util_path: "{{ consistencyutil }}" + when: data_persistence != '' + + - name: Get the application pod name + shell: > + kubectl get pods -n {{ namespace }} -l {{ label }} --no-headers + -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: app_pod_name + + - name: Create some test data + include: "{{ data_consistency_util_path }}" + vars: + status: 'LOAD' + ns: "{{ namespace }}" + pod_name: "{{ app_pod_name.stdout }}" + when: data_persistence != '' + + - include_tasks: /e2e-tests/chaoslib/service_failure/service_chaos.yml + vars: + app_ns: "{{ namespace }}" + app_label: "{{ label }}" + action: "svc_stop" + app_pod: "{{ app_pod_name.stdout }}" + + - include_tasks: /e2e-tests/chaoslib/service_failure/service_chaos.yml + vars: + app_ns: "{{ namespace }}" + app_label: "{{ label }}" + action: "svc_start" + app_pod: "{{ app_pod_name.stdout }}" + + - name: Verify application data persistence + include: "{{ data_consistency_util_path }}" + vars: + status: 'VERIFY' + ns: "{{ namespace }}" + pod_name: "{{ new_app_pod }}" + when: data_persistence != '' + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## RECORD END-OF-TEST IN e2e RESULT CR + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/infra-chaos/service_failure/test_vars.yml b/e2e-tests/experiments/infra-chaos/service_failure/test_vars.yml new file mode 100644 index 0000000..7856efd --- /dev/null +++ b/e2e-tests/experiments/infra-chaos/service_failure/test_vars.yml @@ -0,0 +1,9 @@ +test_name: "{{ svc_type }}-service-failure" +namespace: "{{ lookup('env','APP_NAMESPACE') }}" +label: "{{ lookup('env','APP_LABEL') }}" +pvc: "{{ lookup('env','APP_PVC') }}" +svc_type: "{{ lookup('env','SVC_TYPE') }}" +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" +data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}" +node_pwd: "{{ lookup('env','NODE_PASSWORD') }}" +user: "{{ lookup('env','USERNAME') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/upgrade-zfs-localpv/README.md b/e2e-tests/experiments/upgrade-zfs-localpv/README.md new file mode 100644 index 0000000..f32b73a --- /dev/null +++ b/e2e-tests/experiments/upgrade-zfs-localpv/README.md @@ -0,0 +1,50 @@ +## About this experiment + +This experiment upgrades the zfs-localpv driver components from any previous version to the latest desired stable version or to the master branch ci images. + +## Supported platforms: + +K8s : 1.18+ + +OS : Ubuntu, CentOS + +ZFS : 0.7, 0.8 + +## Entry-Criteria + +- K8s nodes should be ready. +- Do not provision/deprovision any volumes during the upgrade, if we can not control it, then we can scale down the openebs-zfs-controller stateful set to zero replica which will pause all the provisioning/deprovisioning request. And once upgrade is done, the upgraded Driver will continue the provisioning/deprovisioning process. + +## Exit-Criteria + +- zfs-driver should be upgraded to desired version. +- All the components related to zfs-localpv driver including zfs-controller and csi node-agents should be running and upraded to desired version as well. +- All the zfs volumes should be healthy and data prior to the upgrade should not be impacted. 
+- After the upgrade, volume provisioning and other related tasks should work with no regressions.
+
+## How to run
+
+- This experiment accepts its parameters as Kubernetes job environment variables.
+- To run this upgrade experiment, clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and first apply the RBAC and CRDs for the e2e framework.
+```
+kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
+kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
+```
+Then update the required test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
+```
+kubectl create -f run_e2e_test.yml
+```
+Descriptions of all the env variables are provided as comments in the same file.
+After creating the Kubernetes job, once the job's pod is instantiated, we can follow the logs of the pod that executes the test case.
+
+```
+kubectl get pods -n e2e
+kubectl logs -f <pod-name> -n e2e
+```
+To get the test-case result, get the corresponding e2e custom resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).
+
+```
+kubectl get e2er
+kubectl get e2er upgrade-zfs-localpv -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase
+kubectl get e2er upgrade-zfs-localpv -n e2e --no-headers -o custom-columns=:.spec.testStatus.result
+```
\ No newline at end of file
diff --git a/e2e-tests/experiments/upgrade-zfs-localpv/run_e2e_test.yml b/e2e-tests/experiments/upgrade-zfs-localpv/run_e2e_test.yml
new file mode 100644
index 0000000..a83a8c9
--- /dev/null
+++ b/e2e-tests/experiments/upgrade-zfs-localpv/run_e2e_test.yml
@@ -0,0 +1,41 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  generateName: upgrade-zfs-localpv-
+  namespace: e2e
+spec:
+  template:
+    metadata:
+      labels:
+        test: zfs-localpv-upgrade
+    spec:
+      serviceAccountName: e2e
+      restartPolicy: Never
+      containers:
+      - name: ansibletest
+        image: openebs/zfs-localpv-e2e:ci
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ANSIBLE_STDOUT_CALLBACK
+            value: default
+
+          ## Give the versioned branch name for the zfs-localpv provisioner from the openebs/zfs-localpv repo,
+          ## e.g. v1.4.x, v1.5.x, or master
+          - name: TO_VERSION_ZFS_BRANCH
+            value: ''
+
+          ## Provide the ZFS_DRIVER image to which the upgrade is to be done. To use CI images, use the ci tag.
+          ## Give the full image name (e.g. openebs/zfs-driver:<tag>)
+          - name: TO_VERSION_ZFS_DRIVER_IMAGE
+            value: ''
+
+          # This is the namespace where the zfs driver creates all its resources.
+          # By default it is the openebs namespace. If you have been using a different namespace,
+          # provide that value here. Never attempt to modify this namespace during the upgrade, as the old
+          # resources will not be available under the new namespace.
+ - name: ZFS_OPERATOR_NAMESPACE + value: 'openebs' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/upgrade-zfs-localpv/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/upgrade-zfs-localpv/test.yml b/e2e-tests/experiments/upgrade-zfs-localpv/test.yml new file mode 100644 index 0000000..5c95f02 --- /dev/null +++ b/e2e-tests/experiments/upgrade-zfs-localpv/test.yml @@ -0,0 +1,213 @@ +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + + - block: + + ## Generating the testname for zfs localpv upgrade test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Get the list of pods of zfs-localpv components (zfs-controller and zfs-node agent deamonset) + shell: > + kubectl get pods -n kube-system -l role=openebs-zfs + --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: zfs_localpv_components + + - name: Verify that the zfs-localpv components are in running state + shell: > + kubectl get pods {{ item }} -n kube-system --no-headers -o custom-columns=:status.phase + args: + executable: /bin/bash + register: ZFS_driver_components + failed_when: "ZFS_driver_components.stdout != 'Running'" + with_items: "{{ zfs_localpv_components.stdout_lines }}" + ignore_errors: true + + - name: Get the version tag for zfs-driver + shell: > + kubectl get sts openebs-zfs-controller -n kube-system + -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-zfs-plugin")].image}' | cut -d ":" -f2 + args: + executable: /bin/bash + register: zfs_driver_tag + + - name: Get the replica count for zfs-controller statefulset + shell: > + kubectl get sts openebs-zfs-controller -n kube-system -o jsonpath='{.status.replicas}' + args: + executable: /bin/bash + register: no_of_zfs_ctrl_replicas + + - name: Record the number of zfs-controller replicas + set_fact: + zfs_ctrl_replicas: "{{ no_of_zfs_ctrl_replicas.stdout }}" + + - name: Get the list of node-agent pods in openebs-zfs-node daemonset + shell: > + kubectl get po -n kube-system -l app=openebs-zfs-node --no-headers -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: ds_pods + + - block: + ## This task creates new CRDs as zfs-LocalPV related CRs are now grouped + ## under `zfs.openebs.io` from v0.6 release. + - name: Apply the new CRDs for zfs-LocalPV + shell: > + kubectl apply -f https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/crd.yaml + args: + executable: /bin/bash + register: new_crds + failed_when: "new_crds.rc != 0" + + ## This task create new CRs for zfs-volume and zfs-snapshot with updated + ## apiversion to `zfs.openebs.io`. Previously this was `openebs.io`. 
+ - name: Download the Upgrade script for creating new CRs with apiversion as `zfs.openebs.io` + get_url: + url: https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/upgrade.sh + dest: ./upgrade.sh + force: yes + register: result + until: "'OK' in result.msg" + delay: 5 + retries: 3 + + - name: Apply the upgrade script + shell: sh ./upgrade.sh {{ zfs_operator_ns }} + args: + executable: /bin/bash + + when: + - zfs_driver_tag.stdout == "v0.4" or zfs_driver_tag.stdout == "0.4.1" or zfs_driver_tag.stdout == "v0.5" + - "'v0.4.x' not in to_version_zfs_branch" + - "'v0.5.x' not in to_version_zfs_branch" + + - name: Download the zfs-operator file + get_url: + url: https://raw.githubusercontent.com/openebs/zfs-localpv/{{ to_version_zfs_branch }}/deploy/zfs-operator.yaml + dest: ./new_zfs_operator.yml + force: yes + register: result + until: "'OK' in result.msg" + delay: 5 + retries: 3 + + - name: Update the openebs zfs-driver image + replace: + path: ./new_zfs_operator.yml + regexp: openebs/zfs-driver:ci + replace: "{{ lookup('env','TO_VERSION_ZFS_DRIVER_IMAGE') }}" + when: lookup('env','TO_VERSION_ZFS_DRIVER_IMAGE') | length > 0 + + - name: Update the number of zfs-controller statefulset replicas + replace: + path: ./new_zfs_operator.yml + regexp: "replicas: 1" + replace: "replicas: {{ zfs_ctrl_replicas }}" + + - name: Apply the zfs_operator file to deploy zfs-driver components to the newer version + shell: + kubectl apply -f ./new_zfs_operator.yml + args: + executable: /bin/bash + + - name: Wait for some time to old zfs-driver components to go into Terminating state. + shell: > + sleep 30 + + - name: Verify zfs-node agent previous pods are not present in kube-system namespace + shell: > + kubectl get pods -n kube-system -l app=openebs-zfs-node --no-headers + args: + executable: /bin/bash + register: new_ds_pods + until: "'{{ item }}' not in new_ds_pods.stdout" + delay: 5 + retries: 40 + with_items: "{{ ds_pods.stdout_lines }}" + + - name: Verify zfs-node agent newer pods are in running status + shell: > + kubectl get pods -n kube-system -l app=openebs-zfs-node + --no-headers -o custom-columns=:status.phase | sort | uniq + args: + executable: /bin/bash + register: new_ds_pods + until: "new_ds_pods.stdout == 'Running'" + delay: 5 + retries: 30 + + - name: Verify that zfs-node agent daemonset image is upgraded + shell: > + kubectl get ds openebs-zfs-node -n kube-system + -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-zfs-plugin")].image}' + args: + executable: /bin/bash + register: ds_image + failed_when: ds_image.stdout != to_version_zfs_driver_image + + - name: Check for the count of zfs-controller ready replicas + shell: > + kubectl get sts openebs-zfs-controller -n kube-system -o jsonpath='{.status.readyReplicas}' + args: + executable: /bin/bash + register: ready_replicas + until: "ready_replicas.stdout|int == zfs_ctrl_replicas|int" + delay: 5 + retries: 20 + + - name: Verify that zfs-driver version from the zfs-controller statefulset image is upgraded + shell: > + kubectl get sts openebs-zfs-controller -n kube-system + -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-zfs-plugin")].image}' + args: + executable: /bin/bash + register: zfs_ctrl_image + failed_when: zfs_ctrl_image.stdout != to_version_zfs_driver_image + + - block: + + - name: Download the cleanup script for removing the resources with old CRs and delete old CRDs + get_url: + url: https://raw.githubusercontent.com/openebs/zfs-localpv/master/upgrade/cleanup.sh + dest: ./cleanup.sh + 
force: yes + register: result + until: "'OK' in result.msg" + delay: 5 + retries: 3 + + - name: Apply the cleanup script + shell: sh ./cleanup.sh {{ zfs_operator_ns }} + args: + executable: /bin/bash + + when: + - zfs_driver_tag.stdout == "v0.4" or zfs_driver_tag.stdout == "0.4.1" or zfs_driver_tag.stdout == "v0.5" + - "'v0.4.x' not in to_version_zfs_branch" + - "'v0.5.x' not in to_version_zfs_branch" + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/upgrade-zfs-localpv/test_vars.yml b/e2e-tests/experiments/upgrade-zfs-localpv/test_vars.yml new file mode 100644 index 0000000..01a62ce --- /dev/null +++ b/e2e-tests/experiments/upgrade-zfs-localpv/test_vars.yml @@ -0,0 +1,7 @@ +test_name: upgrade-zfs-localpv + +to_version_zfs_branch: "{{ lookup('env','TO_VERSION_ZFS_BRANCH') }}" + +to_version_zfs_driver_image: "{{ lookup('env','TO_VERSION_ZFS_DRIVER_IMAGE') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/Dockerfile b/e2e-tests/experiments/zfs-localpv-provisioner/Dockerfile new file mode 100644 index 0000000..0e81cb0 --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/Dockerfile @@ -0,0 +1,30 @@ +# Copyright 2020-2021 The OpenEBS Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +########################################################################## +# This Dockerfile is used to create the image `quay.io/w3aman/zfsutils:ci`# +# which is being used in the daemonset in the file `zfs_utils_ds.yml` # +# Here we install zfs utils in the image so that zfs command can be run # +# from the container, mainly to create zpool on desired nodes. # +########################################################################## + +FROM ubuntu:20.04 + +RUN apt-get update + +RUN apt-get install sudo -y + +RUN apt-get install zfsutils-linux -y + +CMD [ "bash" ] \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/README.md b/e2e-tests/experiments/zfs-localpv-provisioner/README.md new file mode 100644 index 0000000..cd67ac3 --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/README.md @@ -0,0 +1,64 @@ +## About this experiment + +This experiment deploys the zfs-localpv provisioner in kube-system namespace which includes zfs-controller statefulset (with default value of replica count 1) and csi-node agent deamonset. Apart from this, zpool creation on nodes and generic use-case storage-classes and snapshot class for dynamic provisioning of the volumes based on values provided via env's in run_e2e_test.yml file gets created in this experiment. 
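For context, here is a minimal sketch of how a workload might consume one of the storage classes this experiment renders from `openebs-zfspv-sc.j2` (shown further below). The claim name, namespace, and requested capacity are illustrative only; `zfspv-sc` assumes the ZFS-filesystem class created with the configured zpool name.

```
# Illustrative PVC against the "zfspv-sc" StorageClass created by this experiment.
# The name, namespace, and requested size are made-up example values.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: demo-zfspv-claim
  namespace: default
spec:
  storageClassName: zfspv-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi
```

Since these storage classes use `volumeBindingMode: WaitForFirstConsumer`, such a claim stays Pending until a pod that mounts it is scheduled onto a node where the zpool is available.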
+
+## Supported platforms:
+
+K8s: 1.18+
+
+OS: Ubuntu, CentOS
+
+ZFS: 0.7, 0.8
+
+## Entry-Criteria
+
+- The K8s cluster should be in a healthy state, with all the desired worker nodes in Ready state.
+- An external disk should be attached to the nodes so that a zpool can be created on top of it.
+- If we don't use this experiment to deploy the zfs-localpv provisioner, we can directly apply the zfs-operator file with the command below; make sure a zpool is created on the desired nodes before provisioning volumes.
+```kubectl apply -f https://openebs.github.io/charts/zfs-operator.yaml```
+
+## Exit-Criteria
+
+- The zfs-localpv components should be deployed successfully, and all the pods, including the zfs-controller and the CSI node-agent daemonset, should be in Running state.
+
+## Steps performed
+
+- Zpool creation on nodes:
+  - If the `ZPOOL_CREATION` env value is set to `true`, a zpool is created on the nodes.
+  - The nodes on which the zpool will be created are selected via the `ZPOOL_NODE_NAMES` env; if it is blank, the zpool will be created on all worker nodes.
+  - The selected nodes are labeled (if all nodes are used, labeling is skipped as unnecessary) so that privileged daemonset pods can be scheduled on them and create the zpool on the respective nodes by executing the `zpool create` command.
+  - The daemonset is deleted and the label is removed from the nodes after zpool creation.
+- Download the operator file for the zfs-localpv driver from `ZFS_BRANCH`.
+- Update the zfs-operator namespace if `ZFS_OPERATOR_NAMESPACE` specifies something other than the default value `openebs`.
+- Update the zfs-driver image tag (if a tag other than ci is specified).
+- Apply the operator YAML and wait for the zfs-controller and CSI node-agent pods to come up in Running state.
+- Create general-use-case storage classes for dynamic volume provisioning.
+- Create one volume snapshot class for capturing ZFS volume snapshots.
+
+## How to run
+
+- This experiment accepts its parameters as Kubernetes job environment variables.
+- To run this experiment of deploying the zfs-localpv provisioner, clone the [openebs/zfs-localpv](https://github.com/openebs/zfs-localpv) repo and first apply the RBAC and CRDs for the e2e framework.
+```
+kubectl apply -f zfs-localpv/e2e-tests/hack/rbac.yaml
+kubectl apply -f zfs-localpv/e2e-tests/hack/crds.yaml
+```
+Then update the required test-specific values in the run_e2e_test.yml file and create the Kubernetes job.
+```
+kubectl create -f run_e2e_test.yml
+```
+Descriptions of all the env variables are provided as comments in the same file.
+
+After creating the Kubernetes job, once the job's pod is instantiated, we can follow the logs of the pod that executes the test case.
+
+```
+kubectl get pods -n e2e
+kubectl logs -f <pod-name> -n e2e
+```
+To get the test-case result, get the corresponding e2e custom resource `e2eresult` (short name: e2er) and check its phase (Running or Completed) and result (Pass or Fail).
+ +``` +kubectl get e2er +kubectl get e2er zfs-localpv-provisioner -n e2e --no-headers -o custom-columns=:.spec.testStatus.phase +kubectl get e2er zfs-localpv-provisioner -n e2e --no-headers -o custom-columns=:.spec.testStatus.result +``` \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/openebs-zfspv-sc.j2 b/e2e-tests/experiments/zfs-localpv-provisioner/openebs-zfspv-sc.j2 new file mode 100644 index 0000000..cd2ca14 --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/openebs-zfspv-sc.j2 @@ -0,0 +1,138 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-sc-ext4" +allowVolumeExpansion: true +parameters: + volblocksize: "{{ record_size }}" + compression: "{{ compress }}" + dedup: "{{ de_dup }}" + fstype: "ext4" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io +volumeBindingMode: WaitForFirstConsumer +## To create ZPOOL on only some of the node then mention node names in values with allowedTopologies +##allowedTopologies: +##- matchLabelExpressions: +## - key: kubernetes.io/hostname +## values: + + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-sc-xfs" +allowVolumeExpansion: true +parameters: + volblocksize: "{{ vol_block_size }}" + compression: "{{ compress }}" + dedup: "{{ de_dup }}" + fstype: "xfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io +volumeBindingMode: WaitForFirstConsumer +## To create ZPOOL on only some of the node then mention node names in values with allowedTopologies +##allowedTopologies: +##- matchLabelExpressions: +## - key: kubernetes.io/hostname +## values: + + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-sc" +allowVolumeExpansion: true +parameters: + recordsize: "{{ record_size }}" + compression: "{{ compress }}" + dedup: "{{ de_dup }}" + fstype: "zfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io +volumeBindingMode: WaitForFirstConsumer +## To create ZPOOL on only some of the node then mention node names in values with allowedTopologies +##allowedTopologies: +##- matchLabelExpressions: +## - key: kubernetes.io/hostname +## values: + + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-sc-btrfs" +parameters: + volblocksize: "{{ record_size }}" + compression: "{{ compress }}" + dedup: "{{ de_dup }}" + fstype: "btrfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io +volumeBindingMode: WaitForFirstConsumer +## To create ZPOOL on only some of the node then mention node names in values with allowedTopologies +##allowedTopologies: +##- matchLabelExpressions: +## - key: kubernetes.io/hostname +## values: + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-raw-block" +allowVolumeExpansion: true +parameters: + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-shared" +allowVolumeExpansion: true +parameters: + shared: "yes" + fstype: "zfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-xfs-shared" +allowVolumeExpansion: true +parameters: + shared: "yes" + fstype: "xfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-ext4-shared" +allowVolumeExpansion: true +parameters: + shared: "yes" + fstype: "ext4" + 
poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "zfspv-btrfs-shared" +parameters: + shared: "yes" + fstype: "btrfs" + poolname: "{{ zpool_name }}" +provisioner: zfs.csi.openebs.io \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/run_e2e_test.yml b/e2e-tests/experiments/zfs-localpv-provisioner/run_e2e_test.yml new file mode 100644 index 0000000..61eb15d --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/run_e2e_test.yml @@ -0,0 +1,129 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + generateName: zfs-localpv-provisioner- + namespace: e2e +spec: + template: + metadata: + labels: + test: zfs-localpv-provisioner + spec: + serviceAccountName: e2e + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/zfs-localpv-e2e:ci + imagePullPolicy: IfNotPresent + env: + - name: ANSIBLE_STDOUT_CALLBACK + #value: log_plays + value: default + + # This test will download the zfs-localpv operator file from this branch. + # Change the env value according to versioned branch name for zfs-localpv provisioner + # from openebs/zfs-localpv repo. for e.g. (v1.4.x , v1.5.x OR master) + # by default test-specific value of `ZFS_BRANCH` is master. + - name: ZFS_BRANCH + value: 'master' + + # After v1.5.0 in each branch of openebs/zfs-localpv repo zfs-localpv driver image is set to + # `ci` tag `openebs/zfs-driver:ci`. Give the full image name here with desired image tag to replace + # it with `ci` tag. for e.g. (openebs/zfs-driver:1.5.0). Leaving this env empty will + # apply the operator yaml with by default present `ci` tag i.e. `openebs/zfs-driver:ci` + - name: ZFS_DRIVER_IMAGE + value: '' + + # This is the namespace where the zfs driver will create all its resources. + # By default it is in openebs namespace. If we want to change it to use a different + # namespace change the value of this env with desired namespace name. + - name: ZFS_OPERATOR_NAMESPACE + value: 'openebs' + + # In addition to provisioning of zfs-localpv driver if we want to create zpool on worker nodes, + # use `true` as the value for this env else leave it blank or false. If zpool is already present and no need of zpool + # creation via this test script then then set this value as `false`. + # by default this `env` value is `false` and will skip zpool creation on nodes. + - name: ZPOOL_CREATION + value: '' + + # In case if we have use value as `true` in `ZPOOL_CREATION` env, provide here + # the name for zpool by which name it will be created via this test script else leave blank. + # If we don't want to create volume group on nodes via this test but still + # wants to create some generally used storage_classes for provisioning of zfs volumes + # provide here the zpool name which you have already setted up and it will be + # used in storage class template. + # by default test-specific value of zpool name is `zfs-test-pool`. + - name: ZPOOL_NAME + value: 'zfs-test-pool' + + # If we want to create encrypted zpool provide value `on` else `off` + # by default value is `off` + - name: ZPOOL_ENCRYPTION + value: 'off' + + # For creating encrypted zpool this test uses the keyformat as passphrase. + # to create one such passphrase provide here a character string of minimum length as 8 (for e.g. test1234) + # which will be used in automatically when zpool create command promts for passphrase. 
+ # by default this test will use password as `test1234` for zpool encryption + # you can use a different one for your zpools. + - name: ZPOOL_ENCRYPTION_PASSWORD + value: 'test1234' + + # This is the env to decide which type of zpool we want to create, or we have already set up + # this type of zpool. by default test specific value for this env is `striped`. + # supported values are (stripe, mirror, raidz, raidz2 and raidz3) + - name: ZPOOL_TYPE + value: 'stripe' + + # In case if we have use value as `true` in `ZPOOL_CREATION` env, provide here + # the name of the disks to use them for creation of zpool, else leave blank. for e.g. `/dev/sdb` + # If we want to use more than one disk (when mirrored or raidz pools) give the names in space seperated format + # for e.g. "/dev/sdb /dev/sdc" + - name: ZPOOL_DISKS + value: '' + + # In case if we have use value as `true` in `ZPOOL_CREATION` env, provide here + # the name of nodes on which we want zpools to be created. Leaving this blank + # will create zpools on all the schedulabel nodes. + # Provide node names in comma seperated format for e.g. ('node-1,node-2,node-3') + - name: ZPOOL_NODE_NAMES + value: '' + + # If we want to create some generally used storage_classes and snapshot class for provisioning + # of zfs volumes and taking zfs snapshots provide `true` as the value for this env. Leaving this value + # blank will consider as false. by default test-specific value for this env is `true`. + - name: STORAGE_CLASS_CREATION + value: 'true' + + # Snapshot class will be created with name which will be provided here + # by default test specific value is + - name: SNAPSHOT_CLASS + value: 'zfs-snapshot-class' + + # If data compression is needed use value: 'on' else 'off' + # by default test-specific value is `off` + - name: COMPRESSION + value: 'off' + + # If data duplication is needed give value: 'on' else 'off' + # by default test-specific value is `off` + - name: DEDUP + value: 'off' + + # This env value will be used in storage classes templates in case of xfs and ext or btrfs file system, + # where we create a ZVOL a raw block device carved out of ZFS Pool. + # provide the blocksize with which you want to create the block devices. by default test-specific value + # will be `4k`. Supported values: Any power of 2 from 512 bytes to 128 Kbytes + - name: VOLBLOCKSIZE + value: '4k' + + # This env value will be used in storage classes templates in case of zfs file system + # provide recordsize which is the maximum block size for files and will be used to create ZFS datasets + # by default test-specific value will be `4k`. 
Supported values: Any power of 2 from 512 bytes to 128 Kbytes + - name: RECORDSIZE + value: '4k' + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./e2e-tests/experiments/zfs-localpv-provisioner/test.yml -i /etc/ansible/hosts -v; exit 0"] \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/snapshot-class.j2 b/e2e-tests/experiments/zfs-localpv-provisioner/snapshot-class.j2 new file mode 100644 index 0000000..62147ce --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/snapshot-class.j2 @@ -0,0 +1,8 @@ +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1beta1 +metadata: + name: {{ snapshot_class }} + annotations: + snapshot.storage.kubernetes.io/is-default-class: "true" +driver: zfs.csi.openebs.io +deletionPolicy: Delete \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/test.yml b/e2e-tests/experiments/zfs-localpv-provisioner/test.yml new file mode 100644 index 0000000..6f9e5d4 --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/test.yml @@ -0,0 +1,110 @@ +--- +- hosts: localhost + connection: local + gather_facts: False + + vars_files: + - test_vars.yml + + tasks: + - block: + + ## Generating the testname for zfs localpv provisioner test + - include_tasks: /e2e-tests/hack/create_testname.yml + + ## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'SOT' + + - name: Create zpool on each desired worker node + include_tasks: /e2e-tests/experiments/zfs-localpv-provisioner/zpool_creation.yml + when: lookup('env','ZPOOL_CREATION') == 'true' + + - name: Download OpenEBS zfs-localpv operator file + get_url: + url: https://raw.githubusercontent.com/openebs/zfs-localpv/{{ zfs_branch }}/deploy/zfs-operator.yaml + dest: ./zfs_operator.yml + force: yes + register: result + until: "'OK' in result.msg" + delay: 5 + retries: 3 + + - name: Update the openebs zfs-driver image tag + replace: + path: ./zfs_operator.yml + regexp: openebs/zfs-driver:ci + replace: "{{ lookup('env','ZFS_DRIVER_IMAGE') }}" + when: lookup('env','ZFS_DRIVER_IMAGE') | length > 0 + + - name: Update the namespace where we want to create zfs-localpv driver resources + shell: > + sed -i -e "/name: OPENEBS_NAMESPACE/{n;s/value: openebs/value: {{ zfs_operator_ns }}/g}" zfs_operator.yml && + sed -z "s/kind: Namespace\nmetadata:\n name: openebs/kind: Namespace\nmetadata:\n name: {{ zfs_operator_ns }}/" -i zfs_operator.yml + args: + executable: /bin/bash + register: update_status + failed_when: "update_status.rc != 0" + when: "zfs_operator_ns != 'openebs'" + + - name: Apply the zfs_operator file to deploy zfs-driver components + shell: + kubectl apply -f ./zfs_operator.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Verify that the zfs-controller pod and zfs-node daemonset pods are running + shell: > + kubectl get pods -n kube-system -l role=openebs-zfs + --no-headers -o custom-columns=:status.phase | sort | uniq + args: + executable: /bin/bash + register: zfs_driver_components + until: "zfs_driver_components.stdout == 'Running'" + delay: 5 + retries: 30 + + - block: + + - name: Update the storage class template with test specific values. 
+ template: + src: openebs-zfspv-sc.j2 + dest: openebs-zfspv-sc.yml + + - name: Create Storageclasses + shell: kubectl apply -f openebs-zfspv-sc.yml + args: + executable: /bin/bash + register: sc_result + failed_when: "sc_result.rc != 0" + + - name: Update volume snapshot class template with the test specific variables. + template: + src: snapshot-class.j2 + dest: snapshot-class.yml + + - name: Create VolumeSnapshotClass + shell: kubectl apply -f snapshot-class.yml + args: + executable: /bin/bash + register: volsc_result + failed_when: "volsc_result.rc != 0" + + when: "{{ lookup('env','STORAGE_CLASS_CREATION') }} == true" + + - set_fact: + flag: "Pass" + + rescue: + - name: Setting fail flag + set_fact: + flag: "Fail" + + always: + ## Record EOT (end of test) in e2e result e2e-cr (e2e-custom-resource) + - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml + vars: + status: 'EOT' \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/test_vars.yml b/e2e-tests/experiments/zfs-localpv-provisioner/test_vars.yml new file mode 100644 index 0000000..3204dcb --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/test_vars.yml @@ -0,0 +1,29 @@ +test_name: zfs-localpv-provisioner + +zfs_branch: "{{ lookup('env','ZFS_BRANCH') }}" + +zfs_driver_image: "{{ lookup('env','ZFS_DRIVER_IMAGE') }}" + +zfs_operator_ns: "{{ lookup('env','ZFS_OPERATOR_NAMESPACE') }}" + +zpool_name: "{{ lookup('env','ZPOOL_NAME') }}" + +zpool_encryption: "{{ lookup('env','ZPOOL_ENCRYPTION') }}" + +enc_pwd: "{{ lookup('env','ZPOOL_ENCRYPTION_PASSWORD') }}" + +zpool_type: "{{ lookup('env','ZPOOL_TYPE') }}" + +zpool_disks: "{{ lookup('env','ZPOOL_DISKS') }}" + +node_names: "{{ lookup('env','ZPOOL_NODE_NAMES') }}" + +snapshot_class: "{{ lookup('env','SNAPSHOT_CLASS') }}" + +compress: "{{ lookup('env','COMPRESSION') }}" + +de_dup: "{{ lookup('env','DEDUP') }}" + +record_size: "{{ lookup('env','RECORDSIZE') }}" + +vol_block_size: "{{ lookup('env','VOLBLOCKSIZE') }}" \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/zfs_utils_ds.yml b/e2e-tests/experiments/zfs-localpv-provisioner/zfs_utils_ds.yml new file mode 100644 index 0000000..3b4035a --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/zfs_utils_ds.yml @@ -0,0 +1,68 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: e2e-zfspv-bin + namespace: e2e +data: + zfs: | + #!/bin/sh + if [ -x /host/sbin/zfs ]; then + chroot /host /sbin/zfs "$@" + elif [ -x /host/usr/sbin/zfs ]; then + chroot /host /usr/sbin/zfs "$@" + else + chroot /host zfs "$@" + fi + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: zpool-creation +spec: + selector: + matchLabels: + app: zfs-utils + template: + metadata: + labels: + app: zfs-utils + spec: + #nodeSelector: + #test: zfs-utils + containers: + - name: zfsutils + image: quay.io/w3aman/zfsutils:ci + imagePullPolicy: IfNotPresent + command: ['sh', '-c', 'echo Hello! 
&& sleep 1800'] + volumeMounts: + - name: udev + mountPath: /run/udev + - name: device + mountPath: /dev + - name: chroot-zfs + mountPath: /sbin/zfs + subPath: zfs + - name: host-root + mountPath: /host + mountPropagation: "HostToContainer" + readOnly: true + securityContext: + privileged: true + tty: true + volumes: + - hostPath: + path: /run/udev + name: udev + - hostPath: + path: /dev + name: device + - name: chroot-zfs + configMap: + defaultMode: 0555 + name: e2e-zfspv-bin + - name: host-root + hostPath: + path: / + type: Directory \ No newline at end of file diff --git a/e2e-tests/experiments/zfs-localpv-provisioner/zpool_creation.yml b/e2e-tests/experiments/zfs-localpv-provisioner/zpool_creation.yml new file mode 100644 index 0000000..e4c344a --- /dev/null +++ b/e2e-tests/experiments/zfs-localpv-provisioner/zpool_creation.yml @@ -0,0 +1,130 @@ +--- +- block: + - name: Get the list of nodes from the value of env's for zpool creation + set_fact: + node_list: "{{ node_names.split(',') }}" + when: "node_names != ''" + + - name: Get the list of all those nodes which are in Ready state and having no taints in cluster + shell: > + kubectl get nodes -o json | jq -r 'try .items[] | select(.spec.taints|not) + | select(.status.conditions[].reason=="KubeletReady" and .status.conditions[].status=="True") + | .metadata.name' + register: schedulabel_nodes + when: "node_names == ''" + + # zpool creation command is `zpool create ` + # if it is striped pool then will be replace by empty because + # command for striped pool is `zpool create ` and for other + # type like mirror or raidz it will be replace by `zpool_type` env value. + - name: Record the pool type value from env's + set_fact: + zpool_type_val: "{% if zpool_type == '' or zpool_type == 'stripe' %}{% else %} '{{ zpool_type }}' {% endif %}" + + - block: + + - name: Label the nodes for privileged DaemonSet pods to schedule on it + shell: > + kubectl label node {{ item }} test=zfs-utils + args: + executable: /bin/bash + register: label_status + failed_when: "label_status.rc != 0" + with_items: "{{ node_list }}" + + - name: Update the DaemonSet yaml to use nodes label selector + shell: > + sed -i -e "s|#nodeSelector|nodeSelector|g" \ + -e "s|#test: zfs-utils|test: zfs-utils|g" /e2e-tests/experiments/zfs-localpv-provisioner/zfs_utils_ds.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + when: "node_names != ''" + + - name: Create a DaemonSet with privileged access for volume group creation on nodes + shell: > + kubectl apply -f /e2e-tests/experiments/zfs-localpv-provisioner/zfs_utils_ds.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Check if DaemonSet pods are in running state on all desired nodes + shell: > + kubectl get pods -n e2e -l app=zfs-utils + --no-headers -o custom-columns=:.status.phase | sort | uniq + args: + executable: /bin/bash + register: result + until: "result.stdout == 'Running'" + delay: 3 + retries: 40 + + - name: Get the list of DaemonSet pods + shell: > + kubectl get pods -n e2e -l app=zfs-utils --no-headers + -o custom-columns=:.metadata.name + args: + executable: /bin/bash + register: ds_pods_list + + - name: Create non-encrypted zpool on desired worker nodes + shell: > + kubectl exec -ti {{ item }} -- bash -c 'zpool create {{ zpool_name }} {{ zpool_type_val }} {{ zpool_disks }}' + args: + executable: /bin/bash + register: zpool_status + failed_when: "zpool_status.rc != 0" + with_items: "{{ ds_pods_list.stdout_lines }}" + when: 
zpool_encryption == 'off' or zpool_encryption == '' + + - name: Create encrypted zpool on desired worker nodes + shell: > + kubectl exec -ti {{ item }} -- bash -c "echo {{ enc_pwd }} | sudo -S su -c + 'zpool create -O encryption=on -O keyformat=passphrase -O keylocation=prompt {{ zpool_name }} {{ zpool_type_val }} {{ zpool_disks }}'" + args: + executable: /bin/bash + register: enc_zpool_status + failed_when: "enc_zpool_status.rc != 0" + with_items: "{{ ds_pods_list.stdout_lines }}" + when: "zpool_encryption == 'on'" + + always: + + # Here always block tasks will execute everytime irrespective of previous tasks result + # so here we will delete daemonset pods and remove label which were created on nodes. + # Here purpose for using `ignore_errors: true` is that if this test fails even before + # creating daemonset or labeling the node then deleting them will fail as they don't exist. + + - name: Delete the DaemonSet + shell: > + kubectl delete -f /e2e-tests/experiments/zfs-localpv-provisioner/zfs_utils_ds.yml + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + ignore_errors: true + + - name: Remove the label from nodes + shell: > + kubectl label node {{ item }} test- + args: + executable: /bin/bash + register: label_status + failed_when: "label_status.rc != 0" + with_items: "{{ node_list }}" + when: "node_names != ''" + ignore_errors: true + + - name: Remove the label from nodes + shell: > + kubectl label node {{ item }} test- + args: + executable: /bin/bash + register: label_status + failed_when: "label_status.rc != 0" + with_items: "{{ schedulabel_nodes.stdout_lines }}" + when: "node_names == ''" + ignore_errors: true \ No newline at end of file diff --git a/e2e-tests/hack/crds.yaml b/e2e-tests/hack/crds.yaml new file mode 100644 index 0000000..673273c --- /dev/null +++ b/e2e-tests/hack/crds.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: e2eresults.e2e.io +spec: + # group name to use for REST API: /apis// + group: e2e.io + # version name to use for REST API: /apis// + version: v1alpha1 + # either Namespaced or Cluster + scope: Cluster + names: + # plural name to be used in the URL: /apis/// + plural: e2eresults + # singular name to be used as an alias on the CLI and for display + singular: e2eresult + # kind is normally the CamelCased singular type. Your resource manifests use this. 
+ kind: e2eResult + # shortNames allow shorter string to match your resource name on the CLI + shortNames: + - e2er diff --git a/e2e-tests/hack/create_testname.yml b/e2e-tests/hack/create_testname.yml new file mode 100644 index 0000000..d64da97 --- /dev/null +++ b/e2e-tests/hack/create_testname.yml @@ -0,0 +1,11 @@ +--- +- block: + - name: Record test instance/run ID + set_fact: + run_id: "{{ lookup('env','RUN_ID') }}" + + - name: Construct testname appended with runID + set_fact: + test_name: "{{ test_name }}-{{ run_id }}" + + when: lookup('env','RUN_ID') diff --git a/e2e-tests/hack/e2e-result.j2 b/e2e-tests/hack/e2e-result.j2 new file mode 100644 index 0000000..2f26b47 --- /dev/null +++ b/e2e-tests/hack/e2e-result.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: e2e.io/v1alpha1 +kind: e2eResult +metadata: + # name of the e2e testcase + name: {{ test }} +spec: + testStatus: + phase: {{ phase }} + result: {{ verdict }} \ No newline at end of file diff --git a/e2e-tests/hack/rbac.yaml b/e2e-tests/hack/rbac.yaml new file mode 100644 index 0000000..5d1ee77 --- /dev/null +++ b/e2e-tests/hack/rbac.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: e2e +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: e2e + namespace: e2e + labels: + name: e2e +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: e2e + labels: + name: e2e +rules: +- apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: e2e + labels: + name: e2e +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: e2e +subjects: +- kind: ServiceAccount + name: e2e + namespace: e2e \ No newline at end of file diff --git a/e2e-tests/hack/update_e2e_result_resource.yml b/e2e-tests/hack/update_e2e_result_resource.yml new file mode 100644 index 0000000..c5fcabf --- /dev/null +++ b/e2e-tests/hack/update_e2e_result_resource.yml @@ -0,0 +1,45 @@ +--- +- block: + - name: Generate the e2e result CR to reflect SOT (Start of Test) + template: + src: e2e-result.j2 + dest: e2e-result.yaml + vars: + test: "{{ test_name }}" + phase: in-progress + verdict: none + + - name: Analyze the e2e cr yaml + shell: cat e2e-result.yaml + + - name: Apply the e2e result CR + k8s: + state: present + src: e2e-result.yaml + register: e2er_status + failed_when: "e2er_status is failed" + + when: status == "SOT" + +- block: + - name: Generate the e2e result CR to reflect EOT (End of Test) + template: + src: e2e-result.j2 + dest: e2e-result.yaml + vars: + test: "{{ test_name }}" + phase: completed + verdict: "{{ flag }}" + + - name: Analyze the e2e cr yaml + shell: cat e2e-result.yaml + + - name: Apply the e2e result CR + k8s: + state: present + src: e2e-result.yaml + merge_type: merge + register: e2er_status + failed_when: "e2er_status is failed" + + when: status == "EOT" \ No newline at end of file diff --git a/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml b/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml new file mode 100644 index 0000000..425b5da --- /dev/null +++ b/e2e-tests/utils/applications/busybox/busybox_data_persistence.yml @@ -0,0 +1,108 @@ +--- +- block: + + - name: Create some test data in the busybox app + shell: > + kubectl exec {{ pod_name }} -n {{ ns }} + -- sh -c "{{ item }}" + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + with_items: + - "dd if=/dev/urandom of=/busybox/{{ testfile }} bs={{ blocksize }} count={{ 
blockcount }}" + - "md5sum /busybox/{{ testfile }} > /busybox/{{ testfile }}-pre-chaos-md5" + - "sync;sync;sync" + + when: status == "LOAD" + +- block: + + - name: Kill the application pod + shell: > + kubectl delete pod {{ pod_name }} -n {{ ns }} + args: + executable: /bin/bash + + - name: Verify if the application pod is deleted + shell: > + kubectl get pods -n {{ ns }} + args: + executable: /bin/bash + register: podstatus + until: '"{{ pod_name }}" not in podstatus.stdout' + retries: 2 + delay: 150 + + - name: Obtain the newly created pod name for application + shell: > + kubectl get pods -n {{ ns }} -l {{ label }} -o jsonpath='{.items[].metadata.name}' + args: + executable: /bin/bash + register: newpod_name + + - name: Checking application pod is in running state + shell: kubectl get pods -n {{ ns }} -o jsonpath='{.items[?(@.metadata.name=="{{ newpod_name.stdout }}")].status.phase}' + register: result + until: "((result.stdout.split()|unique)|length) == 1 and 'Running' in result.stdout" + delay: 2 + retries: 150 + + - name: Get the container status of application. + shell: > + kubectl get pods -n {{ ns }} -o jsonpath='{.items[?(@.metadata.name=="{{ newpod_name.stdout }}")].status.containerStatuses[].state}' | grep running + args: + executable: /bin/bash + register: containerStatus + until: "'running' in containerStatus.stdout" + delay: 2 + retries: 150 + + - name: Check the md5sum of stored data file + shell: > + kubectl exec {{ newpod_name.stdout }} -n {{ ns }} + -- sh -c "md5sum /busybox/{{ testfile }} > /busybox/{{ testfile }}-post-chaos-md5" + args: + executable: /bin/bash + register: status + failed_when: "status.rc != 0" + + - name: Verify whether data is consistent + shell: > + kubectl exec {{ newpod_name.stdout }} -n {{ ns }} + -- sh -c "diff /busybox/{{ testfile }}-pre-chaos-md5 /busybox/{{ testfile }}-post-chaos-md5" + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0 or result.stdout != ''" + + when: status == "VERIFY" + +- block: + + - name: Obtain the current pod name for application + shell: > + kubectl get pods -n {{ ns }} -l {{ label }} -o jsonpath='{.items[].metadata.name}' + args: + executable: /bin/bash + register: newpod_name + + - name: Delete/drop the files + shell: > + kubectl exec {{ newpod_name.stdout }} -n {{ ns }} + -- sh -c "rm -f /busybox/{{ testfile }}*" + args: + executable: /bin/bash + register: status + + - name: Verify successful file delete + shell: > + kubectl exec {{ newpod_name.stdout }} -n {{ ns }} + -- ls /busybox/ + args: + executable: /bin/bash + register: result + failed_when: "testfile in result.stdout" + + when: status == "DELETE" + diff --git a/e2e-tests/utils/applications/mysql/check_db_connection.yml b/e2e-tests/utils/applications/mysql/check_db_connection.yml new file mode 100644 index 0000000..dbd3fd5 --- /dev/null +++ b/e2e-tests/utils/applications/mysql/check_db_connection.yml @@ -0,0 +1,8 @@ +#Check if the database is ready for connection, upper bound wait time: 900s +- name: Check if db is ready for connections + shell: kubectl logs {{ pod_name.resources.0.metadata.name }} -n {{ app_ns }} | grep 'ready for connections' | wc -l + register: initcheck + until: initcheck.stdout == "2" + delay: 5 + retries: 180 + diff --git a/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml b/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml new file mode 100644 index 0000000..50ffa99 --- /dev/null +++ b/e2e-tests/utils/applications/mysql/mysql_data_persistence.yml @@ -0,0 +1,108 @@ +--- +- block: + 
+ - name: Create some test data in the mysql database + shell: > + kubectl exec {{ pod_name }} -n {{ ns }} + -- {{ item }} + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + with_items: + - mysql -u{{ dbuser }} -p{{ dbpassword }} -e 'create database {{ dbname }};' + - mysql -u{{ dbuser }} -p{{ dbpassword }} -e 'create table ttbl (Data VARCHAR(20));' {{ dbname }} + - mysql -u{{ dbuser }} -p{{ dbpassword }} -e 'insert into ttbl (Data) VALUES ("tdata");' {{ dbname }} + + when: status == "LOAD" + +- block: + + - name: Kill the application pod + shell: > + kubectl delete pod {{ pod_name }} -n {{ ns }} + args: + executable: /bin/bash + + - name: Verify if the application pod is deleted + shell: > + kubectl get pods -n {{ ns }} + args: + executable: /bin/bash + register: podstatus + until: '"{{ pod_name }}" not in podstatus.stdout' + retries: 2 + delay: 150 + + - name: Obtain the newly created pod name for application + shell: > + kubectl get pods -n {{ ns }} -l {{ label }} -o jsonpath='{.items[].metadata.name}' + args: + executable: /bin/bash + register: newpod_name + + - name: Checking application pod is in running state + shell: kubectl get pods -n {{ ns }} -o jsonpath='{.items[?(@.metadata.name=="{{ newpod_name.stdout }}")].status.phase}' + register: result + until: "((result.stdout.split()|unique)|length) == 1 and 'Running' in result.stdout" + delay: 2 + retries: 150 + + - name: Get the container status of application. + shell: > + kubectl get pods -n {{ ns }} -o jsonpath='{.items[?(@.metadata.name=="{{ newpod_name.stdout }}")].status.containerStatuses[].state}' | grep running + args: + executable: /bin/bash + register: containerStatus + until: "'running' in containerStatus.stdout" + delay: 2 + retries: 150 + + - name: Check if db is ready for connections + shell: kubectl logs {{ newpod_name.stdout }} -n {{ ns }} | grep 'ready for connections' + register: initcheck + until: "'ready for connections' in initcheck.stdout" + delay: 5 + retries: 180 + + - name: Checking for the Corrupted tables + shell: > + kubectl exec {{ newpod_name.stdout }} -n {{ ns }} + -- mysqlcheck -c {{ dbname }} -u{{ dbuser }} -p{{ dbpassword }} + args: + executable: /bin/bash + register: status + failed_when: "'OK' not in status.stdout" + + - name: Verify mysql data persistence + shell: > + kubectl exec {{ newpod_name.stdout }} -n {{ ns }} + -- mysql -u{{ dbuser }} -p{{ dbpassword }} -e 'select * from ttbl' {{ dbname }}; + args: + executable: /bin/bash + register: result + failed_when: "'tdata' not in result.stdout" + + when: status == "VERIFY" + +- block: + + - name: Delete/drop MySQL database + shell: > + kubectl exec {{ pod_name }} -n {{ ns }} + -- mysql -u{{ dbuser }} -p{{ dbpassword }} -e 'drop database {{ dbname }}'; + args: + executable: /bin/bash + register: status + + - name: Verify successful db delete + shell: > + kubectl exec {{ pod_name }} -n {{ ns }} + -- mysql -u{{ dbuser }} -p{{ dbpassword }} -e 'show databases'; + args: + executable: /bin/bash + register: result + failed_when: "dbname in result.stdout" + + when: status == "DELETE" + diff --git a/e2e-tests/utils/k8s/create_ns.yml b/e2e-tests/utils/k8s/create_ns.yml new file mode 100644 index 0000000..4b2fc8d --- /dev/null +++ b/e2e-tests/utils/k8s/create_ns.yml @@ -0,0 +1,16 @@ +--- +- name: Obtain list of existing namespaces + shell: > + kubectl get ns --no-headers -o custom-columns=:metadata.name + args: + executable: /bin/bash + register: ns_list + +- name: Create test specific namespace. 
+ shell: kubectl create ns {{ app_ns }} + args: + executable: /bin/bash + when: app_ns != 'e2e' and app_ns not in ns_list.stdout_lines + + # Check status of namespace +- include_tasks: /e2e-tests/utils/k8s/status_testns.yml \ No newline at end of file diff --git a/e2e-tests/utils/k8s/deploy_single_app.yml b/e2e-tests/utils/k8s/deploy_single_app.yml new file mode 100644 index 0000000..6f8ae92 --- /dev/null +++ b/e2e-tests/utils/k8s/deploy_single_app.yml @@ -0,0 +1,12 @@ +--- +#Deploying application on k8's cluster and cross checking whether the +#application is deployed successfully. +- name: Deploying {{ application_name }} + k8s: + state: present + src: "{{ application }}" + namespace: "{{ app_ns }}" + merge_type: merge + register: result + +- include_tasks: /e2e-tests/utils/k8s/status_app_pod.yml diff --git a/e2e-tests/utils/k8s/deprovision_deployment.yml b/e2e-tests/utils/k8s/deprovision_deployment.yml new file mode 100644 index 0000000..731c499 --- /dev/null +++ b/e2e-tests/utils/k8s/deprovision_deployment.yml @@ -0,0 +1,106 @@ +--- +# This Utility task file can delete the application and its underlying resources such as pvc and service from K8s cluster +# This accepts application namespace, application label and application manifest file as input parameters. +# The parameters used are +# - app_deployer ( Deployment spec yaml file ) +# - app_ns ( application namespace ) +# - app_label ( application label) +# +- block: + + - name: Check if the application to be deleted is available. + k8s_facts: + kind: Pod + label_selectors: + - "{{ app_label }}" + namespace: "{{ app_ns }}" + register: po_name + until: "{{ po_name | json_query('resources[*].status.phase') | unique | length==1}}" + delay: 5 + retries: 60 + + - name: Obtaining the PVC name using application label. + set_fact: + pvc_name: "{{ po_name.resources.0.spec.volumes.0.persistentVolumeClaim.claimName }}" + pod_name: "{{ po_name.resources.0.metadata.name }}" + + - name: Obtaining the PV name from PVC name. + k8s_facts: + kind: PersistentVolumeClaim + namespace: "{{ app_ns }}" + name: "{{ pvc_name }}" + register: pv_name + + - set_fact: + pvname: "{{ pv_name | json_query('resources[0].spec.volumeName') }}" + + ## Replacing the item names in the respective deployer spec file. + - name: Replace the PVC name in application deployer spec. + replace: + path: "{{ app_deployer }}" + regexp: "testclaim" + replace: "{{ lookup('env','APP_PVC') }}" + when: app_pvc is defined + + - name: Replace the storageclass placeholder with provider + replace: + path: "{{ app_deployer }}" + regexp: "testclass" + replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}" + when: storage_class is defined + + - block: + + - name: Get the application label values from env + set_fact: + app_lkey: "{{ app_label.split('=')[0] }}" + app_lvalue: "{{ app_label.split('=')[1] }}" + + - name: Replace the application label placeholder + replace: + path: "{{ app_deployer }}" + regexp: "lkey: lvalue" + replace: "{{ app_lkey }}: {{ app_lvalue }}" + when: app_label is defined + + - name: Delete the application deployment. + shell: kubectl delete -f {{ app_deployer }} -n {{ app_ns }} + args: + executable: /bin/bash + ignore_errors: true + + - name: Check if the PVC is deleted. 
+ k8s_facts: + kind: PersistentVolumeClaim + namespace: "{{ app_ns }}" + label_selectors: + - "{{ app_label }}" + register: resource_list + until: resource_list.resources | length < 1 + delay: 5 + retries: 120 + +- name: Check if the pods are deleted in the namespaces + shell: > + kubectl get pods -n {{ app_ns }} + args: + executable: /bin/bash + register: result + until: "pod_name not in result.stdout" + delay: 5 + retries: 60 + +- name: Delete the namespace. + k8s: + state: absent + kind: Namespace + name: "{{ app_ns }}" + +- name: Check if the PV is deleted. + k8s_facts: + kind: PersistentVolume + name: "{{ pvname }}" + label_selectors: + - "{{ app_label }}" + register: pv_result + failed_when: "pv_result.resources | length > 1" \ No newline at end of file diff --git a/e2e-tests/utils/k8s/deprovision_statefulset.yml b/e2e-tests/utils/k8s/deprovision_statefulset.yml new file mode 100644 index 0000000..b1e0b4b --- /dev/null +++ b/e2e-tests/utils/k8s/deprovision_statefulset.yml @@ -0,0 +1,72 @@ +--- +- block: + + - name: Check if the statefulset application exists. + shell: kubectl get pods -n {{ app_ns }} -l {{ app_label }} + register: pods + failed_when: "'No resources found' in pods.stdout" + + - name: Obtaining PVCs related to the application. + shell: kubectl get pvc -n {{ app_ns }} -l {{ app_label }} --no-headers -o custom-columns=:.metadata.name + register: pvc_list + + - name: Obtaining the PV names. + shell: kubectl get pvc -l {{ app_label }} -n {{ app_ns }} --no-headers -o custom-columns=:.spec.volumeName + register: pv_list + + ## Replacing the item names in the respective deployer spec file. + - name: Replace the PVC name in application deployer spec. + replace: + path: "{{ app_deployer }}" + regexp: "testclaim" + replace: "{{ lookup('env','APP_PVC') }}" + when: app_pvc is defined + + - name: Replace the storageclass placeholder with provider + replace: + path: "{{ app_deployer }}" + regexp: "testclass" + replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}" + when: storage_class is defined + + - block: + + - name: Get the application label values from env + set_fact: + app_lkey: "{{ app_label.split('=')[0] }}" + app_lvalue: "{{ app_label.split('=')[1] }}" + + - name: Replace the application label placeholder + replace: + path: "{{ app_deployer }}" + regexp: "lkey: lvalue" + replace: "{{ app_lkey }}: {{ app_lvalue }}" + + when: app_label is defined + + - name: Delete the application and its related service. + shell: kubectl delete -f {{ app_deployer }} -n {{ app_ns }} + register: app_status + until: 'app_status.rc == 0' + delay: 5 + retries: 60 + + - name: Deleting the PVC + shell: kubectl delete pvc {{ item }} -n {{ app_ns }} + args: + executable: /bin/bash + with_items: + - "{{ pvc_list.stdout_lines }}" + + - name: Check if the PVCs are deleted + shell: kubectl get pvc -n {{ app_ns }} + register: list_pvc + until: "'No resources found' in list_pvc.stderr" + delay: 30 + retries: 15 + +- name: Delete the namespace. 
+ shell: kubectl delete ns {{ app_ns }} + args: + executable: /bin/bash + diff --git a/e2e-tests/utils/k8s/fetch_app_pod.yml b/e2e-tests/utils/k8s/fetch_app_pod.yml new file mode 100644 index 0000000..c43878b --- /dev/null +++ b/e2e-tests/utils/k8s/fetch_app_pod.yml @@ -0,0 +1,13 @@ +--- +#Fetching the details of the application pod +- name: Getting the {{ application_name }} POD name + k8s_facts: + kind: Pod + namespace: "{{ app_ns }}" + label_selectors: + - "{{ app_label }}" + register: pod_name + +- debug: + msg: "{{ pod_name | json_query('resources[*].metadata.name') }}" + diff --git a/e2e-tests/utils/k8s/pre_create_app_deploy.yml b/e2e-tests/utils/k8s/pre_create_app_deploy.yml new file mode 100644 index 0000000..d9ddfe8 --- /dev/null +++ b/e2e-tests/utils/k8s/pre_create_app_deploy.yml @@ -0,0 +1,40 @@ +--- +- block: + - name: Check whether the provider storageclass is present + shell: kubectl get sc "{{ lookup('env','STORAGE_CLASS') }}" + args: + executable: /bin/bash + register: result + failed_when: "result.rc != 0" + + - name: Replace the storageclass placeholder with test specific value + replace: + path: "{{ application }}" + regexp: "testclass" + replace: "{{ lookup('env','STORAGE_CLASS') }}" + + - name: Replace the application pvc placeholder with test specific value + replace: + path: "{{ application }}" + regexp: "testclaim" + replace: "{{ lookup('env','APP_PVC') }}" + + - name: Replace the persistent volume capcity placeholder with test specific value + replace: + path: "{{ application }}" + regexp: "teststorage" + replace: "{{ lookup('env','PV_CAPACITY') }}" + + - name: Get the application label value from env + set_fact: + app_lkey: "{{ app_label.split('=')[0] }}" + app_lvalue: "{{ app_label.split('=')[1] }}" + + - name: Replace the application label placeholder in deployment spec + replace: + path: "{{ application }}" + regexp: "lkey: lvalue" + replace: "{{ app_lkey }}: {{ app_lvalue }}" + + # Create test specific namespace + - include_tasks: /e2e-tests/utils/k8s/create_ns.yml diff --git a/e2e-tests/utils/k8s/status_app_pod.yml b/e2e-tests/utils/k8s/status_app_pod.yml new file mode 100644 index 0000000..3f08752 --- /dev/null +++ b/e2e-tests/utils/k8s/status_app_pod.yml @@ -0,0 +1,18 @@ +--- +- name: Checking {{ application_name }} pod is in running state + shell: kubectl get pods -n {{ app_ns }} -o jsonpath='{.items[?(@.metadata.labels.{{app_lkey}}=="{{app_lvalue}}")].status.phase}' + register: result + until: "((result.stdout.split()|unique)|length) == 1 and 'Running' in result.stdout" + delay: 3 + retries: 60 + +- name: Get the container status of application. + shell: > + kubectl get pod -n {{ app_ns }} -l {{app_lkey}}="{{app_lvalue}}" + -o custom-columns=:..containerStatuses[].state --no-headers | grep -w "running" + args: + executable: /bin/bash + register: containerStatus + until: "'running' in containerStatus.stdout" + delay: 3 + retries: 60 diff --git a/e2e-tests/utils/k8s/status_testns.yml b/e2e-tests/utils/k8s/status_testns.yml new file mode 100644 index 0000000..68b20d5 --- /dev/null +++ b/e2e-tests/utils/k8s/status_testns.yml @@ -0,0 +1,9 @@ +--- +- name: Checking the status of test specific namespace. + k8s_facts: + kind: Namespace + name: "{{ app_ns }}" + register: npstatus + until: "'Active' in npstatus.resources.0.status.phase" + delay: 30 + retries: 10
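The utility task files above are meant to be composed from the experiment playbooks via `include_tasks`. As a rough, hypothetical sketch (the playbook below is not part of this change; the namespace, label, and manifest path are illustrative, and `STORAGE_CLASS`, `APP_PVC`, and `PV_CAPACITY` are expected to come from the job environment, as in the run_e2e_test.yml files):

```
---
# Hypothetical example wiring the k8s utils into an application-deploy flow.
- hosts: localhost
  connection: local
  gather_facts: False

  vars:
    app_ns: busybox-test                 # illustrative namespace
    app_label: "app=test-busybox"        # split into app_lkey/app_lvalue by pre_create_app_deploy.yml
    application: /e2e-tests/apps/busybox/deployers/busybox_deployment.yml
    application_name: busybox

  tasks:
    # Substitute the storageclass/PVC/capacity placeholders and create the namespace
    - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml

    # Apply the manifest and wait for the pod and its container to reach Running
    - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml

    # Register the application pod details for later tasks
    - include_tasks: /e2e-tests/utils/k8s/fetch_app_pod.yml
```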