feat(e2e-test): Add e2e-tests for zfs-localpv (#298)

Signed-off-by: w3aman <aman.gupta@mayadata.io>

parent 53f872fcf1
commit 4e73638b5a

137 changed files with 8745 additions and 0 deletions
e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml (new file)
@@ -0,0 +1,132 @@
---
- block:

    - name: Setup pumba chaos infrastructure
      shell: >
        kubectl apply -f /e2e-tests/chaoslib/pumba/pumba.yml -n {{ namespace }}
      args:
        executable: /bin/bash
      register: result

    - name: Confirm that the pumba ds is running on all desired nodes
      shell: >
        kubectl get pod -l app=pumba --no-headers -o custom-columns=:status.phase
        -n {{ namespace }} | sort | uniq
      args:
        executable: /bin/bash
      register: result
      until: "result.stdout == 'Running'"
      delay: 1
      retries: 60
      ignore_errors: true

    - name: Get the application pod name
      shell: >
        kubectl get pod -l {{ label }} -n {{ namespace }}
        -o=custom-columns=NAME:".metadata.name" --no-headers | shuf | head -1
      args:
        executable: /bin/bash
      register: pod_name

    - name: Record application pod name
      set_fact:
        app_pod: "{{ pod_name.stdout }}"

    - name: Identify the node name where the application pod is scheduled
      shell: >
        kubectl get pod {{ app_pod }} -n {{ namespace }}
        --no-headers -o custom-columns=:spec.nodeName
      args:
        executable: /bin/bash
      register: result

    - name: Record the node name
      set_fact:
        app_node: "{{ result.stdout }}"

    - name: Get application container name
      shell: >
        kubectl get pods -l {{ label }} -n {{ namespace }}
        -o jsonpath='{.items[0].spec.containers[0].name}'
      args:
        executable: /bin/bash
      register: container

    - name: Record the app_container
      set_fact:
        app_container: "{{ container.stdout }}"

    - name: Record the pumba pod scheduled on the same node as the application pod
      shell: >
        kubectl get pod -l app=pumba -o wide -n {{ namespace }}
        | grep {{ app_node }} | awk '{print $1}'
      args:
        executable: /bin/bash
      register: pumba_pod

    - name: Record container restartCount
      shell: >
        kubectl get pod {{ app_pod }} -n {{ namespace }}
        -o=jsonpath='{.status.containerStatuses[?(@.name=="{{ app_container }}")].restartCount}'
      args:
        executable: /bin/bash
      register: restartCnt_prev

    - name: Force kill the application pod container using pumba
      shell: >
        kubectl exec {{ pumba_pod.stdout }} -n {{ namespace }}
        -- pumba kill --signal SIGKILL re2:k8s_{{ app_container }}_{{ app_pod }};
      args:
        executable: /bin/bash
      ignore_errors: true
      register: result

    - name: Verify container restartCount
      shell: >
        kubectl get pod {{ app_pod }} -n {{ namespace }}
        -o=jsonpath='{.status.containerStatuses[?(@.name=="{{ app_container }}")].restartCount}'
      args:
        executable: /bin/bash
      register: restartCnt
      until: "restartCnt.stdout|int > restartCnt_prev.stdout|int"
      delay: 2
      retries: 30

  when: action == "killapp"

- block:

    - name: Check if pumba pod is indeed running
      shell: >
        kubectl get pod -l app=pumba --no-headers -o custom-columns=:status.phase
        -n {{ namespace }} | sort | uniq
      args:
        executable: /bin/bash
      register: result
      until: "result.stdout == 'Running'"
      delay: 1
      retries: 60
      ignore_errors: true

    - block:

        - name: Delete the pumba daemonset
          shell: >
            kubectl delete -f /e2e-tests/chaoslib/pumba/pumba.yml -n {{ namespace }}
          args:
            executable: /bin/bash
          register: result

        - name: Confirm that the pumba ds is deleted successfully
          shell: >
            kubectl get pod -l app=pumba --no-headers -n {{ namespace }}
          args:
            executable: /bin/bash
          register: result
          until: "'Running' not in result.stdout"
          delay: 1
          retries: 150

      when: result.stdout is defined and result.stdout == "Running"

  when: action == "deletepumba"
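This util is parameterized entirely by the `action`, `namespace`, and `label` variables. A minimal sketch of how a test playbook might include it (the variable values below are hypothetical, not taken from this commit):

- name: Force kill the app container via pumba
  include_tasks: /e2e-tests/chaoslib/pumba/pod_failure_by_sigkill.yaml
  vars:
    action: "killapp"            # "deletepumba" tears the pumba ds down again
    namespace: "app-busybox-ns"  # hypothetical test namespace
    label: "app=busybox"         # hypothetical application label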
e2e-tests/chaoslib/pumba/pumba.yml (new file)
@@ -0,0 +1,37 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: pumba
spec:
  selector:
    matchLabels:
      app: pumba
  template:
    metadata:
      labels:
        app: pumba
        com.gaiaadm.pumba: "true"  # prevent pumba from killing itself
      name: pumba
    spec:
      containers:
        - image: gaiaadm/pumba:0.4.8
          imagePullPolicy: IfNotPresent
          name: pumba
          # Pumba command: modify it to suit your needs
          # Dry run: randomly try to kill some container every 3 minutes
          command: ["pumba", "--dry", "--random", "--interval", "3m", "kill", "--signal", "SIGTERM"]
          resources:
            requests:
              cpu: 10m
              memory: 5M
            limits:
              cpu: 100m
              memory: 20M
          volumeMounts:
            - name: dockersocket
              mountPath: /var/run/docker.sock
      volumes:
        - hostPath:
            path: /var/run/docker.sock
          name: dockersocket
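Note that the daemonset ships pumba in `--dry` mode; the kill util above bypasses it by exec'ing `pumba kill` in the pod directly. A sketch of a non-dry command line, reusing only the flags and the `re2:` target prefix seen elsewhere in this commit (the container pattern is hypothetical):

command: ["pumba", "--random", "--interval", "3m", "kill", "--signal", "SIGKILL", "re2:k8s_busybox"]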
e2e-tests/chaoslib/service_failure/service_chaos.yml (new file)
@@ -0,0 +1,142 @@
- block:

    - name: Identify the node on which the application pod is scheduled
      shell: >
        kubectl get pod {{ app_pod }} -n {{ app_ns }}
        --no-headers -o custom-columns=:spec.nodeName
      args:
        executable: /bin/bash
      register: node_name

    - name: Record the node name on which the application pod is scheduled
      set_fact:
        app_node: "{{ node_name.stdout }}"

    - name: Get the IP address of the node on which the application pod is scheduled
      shell: >
        kubectl get nodes {{ app_node }} --no-headers -o jsonpath='{.status.addresses[0].address}'
      args:
        executable: /bin/bash
      register: node_ip_address

    - name: Record the IP address of the node on which the application pod is scheduled
      set_fact:
        node_ip_add: "{{ node_ip_address.stdout }}"

    - block:

        - name: Stop the {{ svc_type }} service on the node where the application pod is scheduled
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
            "echo {{ node_pwd }} | sudo -S su -c 'systemctl stop {{ svc_type }}.service'"
          args:
            executable: /bin/bash

        - name: Check the {{ svc_type }} service status
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
            "echo {{ node_pwd }} | sudo -S su -c 'systemctl status {{ svc_type }}.service'" | grep 'inactive'
          args:
            executable: /bin/bash
          register: svc_status
          until: "'inactive' in svc_status.stdout"
          delay: 5
          retries: 15

        - name: Check the status of node {{ app_node }} on which {{ svc_type }} failure chaos is induced
          shell: >
            kubectl get nodes {{ app_node }}
          args:
            executable: /bin/bash
          register: node_status
          until: "'NotReady' in node_status.stdout"
          delay: 10
          retries: 30

        - name: Check that a new application pod is scheduled after {{ svc_type }} failure
          shell: >
            kubectl get pods -n {{ app_ns }} -l {{ app_label }} --no-headers | wc -l
          args:
            executable: /bin/bash
          register: app_pod_count
          until: "'2' in app_pod_count.stdout"
          delay: 15
          retries: 30

        - name: Get the new application pod name
          shell: >
            kubectl get pod -n {{ app_ns }} -l {{ app_label }} --no-headers | grep -v Terminating | awk '{print $1}'
          args:
            executable: /bin/bash
          register: new_app_pod_name

        - name: Record the new application pod name
          set_fact:
            new_app_pod: "{{ new_app_pod_name.stdout }}"

        - name: Check the newly created application pod status
          shell: >
            kubectl get pod {{ new_app_pod }} -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase
          args:
            executable: /bin/bash
          register: new_app_pod_status
          until: "'Pending' in new_app_pod_status.stdout"
          delay: 5
          retries: 20

      when: svc_type == "kubelet" or svc_type == "docker"

  when: action == "svc_stop"


- block:

    - name: Start the {{ svc_type }} service
      shell: >
        sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
        "echo {{ node_pwd }} | sudo -S su -c 'systemctl start {{ svc_type }}.service'"
      args:
        executable: /bin/bash

    - name: Check the {{ svc_type }} service status
      shell: >
        sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
        "echo {{ node_pwd }} | sudo -S su -c 'systemctl status {{ svc_type }}.service'" | grep 'active (running)'
      args:
        executable: /bin/bash
      register: svc_status
      until: "'active (running)' in svc_status.stdout"
      delay: 5
      retries: 15

    - name: Check the node status after starting the {{ svc_type }} service
      shell: >
        kubectl get nodes {{ app_node }} --no-headers
      args:
        executable: /bin/bash
      register: node_status
      until: "'NotReady' not in node_status.stdout"
      delay: 10
      retries: 30

    - name: Verify that previous pods are deleted successfully after restart of {{ svc_type }}
      shell: >
        kubectl get pods -n {{ app_ns }} -l {{ app_label }} --no-headers | wc -l
      args:
        executable: /bin/bash
      register: app_pod_count
      until: "'1' in app_pod_count.stdout"
      delay: 5
      retries: 60

    - name: Get the status of the newly created application pod
      shell: >
        kubectl get pod {{ new_app_pod }} -n {{ app_ns }} --no-headers -o custom-columns=:.status.phase
      args:
        executable: /bin/bash
      register: application_pod
      until: "'Running' in application_pod.stdout"
      delay: 10
      retries: 50

  when: action == "svc_start"
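A sketch of driving this util from a test playbook, first stopping and later restarting kubelet on the application node (the variable values and ssh credentials below are hypothetical, not from this commit):

- name: Stop kubelet on the application node
  include_tasks: /e2e-tests/chaoslib/service_failure/service_chaos.yml
  vars:
    action: "svc_stop"               # re-run with "svc_start" to recover
    svc_type: "kubelet"              # or "docker"
    app_ns: "app-busybox-ns"         # hypothetical namespace
    app_label: "app=busybox"         # hypothetical label
    app_pod: "{{ pod_name.stdout }}" # hypothetical earlier lookup
    user: "vagrant"                  # hypothetical node ssh user
    node_pwd: "{{ node_password }}"  # hypothetical secret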
e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml (new file)
@@ -0,0 +1,20 @@
---
# This util can handle 'power on' and 'power off' operations on VMware based virtual machines.
# The parameters required by this util are:
#   - ESX IP
#   - ESX root password
#   - the target virtual machine name
#   - the operation, either 'on' or 'off'
#
- name: Obtain the VM ID
  shell: sshpass -p {{ esx_pwd }} ssh -o StrictHostKeyChecking=no root@{{ esx_ip }} vim-cmd vmsvc/getallvms | awk '{print $1 " " $2}' | grep {{ target_node }} | awk '{print $1}'
  args:
    executable: /bin/bash
  register: id

- name: Perform the operation on the target VM
  shell: sshpass -p {{ esx_pwd }} ssh -o StrictHostKeyChecking=no root@{{ esx_ip }} vim-cmd vmsvc/power.{{ operation }} {{ id.stdout }}
  args:
    executable: /bin/bash
  register: result
  failed_when: "result.rc != 0"
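Like the other utils, this one expects its parameters from the caller; a minimal invocation sketch (the ESX address, password variable, and VM name below are hypothetical):

- name: Power off the VM backing the target node
  include_tasks: /e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml
  vars:
    esx_ip: "10.20.30.40"          # hypothetical ESX host
    esx_pwd: "{{ esx_password }}"  # hypothetical secret
    target_node: "e2e-worker-1"    # hypothetical VM name
    operation: "off"               # use "on" to power it back up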