---
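## Node failure chaos test for ZFS-LocalPV: power off the node hosting the
## application pod via its ESX host, verify that a replacement pod stays
## Pending while the node is down, power the node back on, re-import the
## zpool (loading the encryption key if needed), and verify data consistency.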
- hosts: localhost
  connection: local
  gather_facts: False

  vars_files:
    - test_vars.yml
    - /mnt/parameters.yml

  tasks:
    - block:

        ## Generate the testname for the node failure chaos test
        - include_tasks: /e2e-tests/hack/create_testname.yml

        ## Record SOT (start of test) in the e2e result custom resource
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'SOT'

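        ## data_persistence.j2 renders the data-consistency util to use for
        ## the application under test; an empty 'data_persistence' value
        ## skips the data load/verify steps below.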
        - name: Identify the data consistency util to be invoked
          template:
            src: data_persistence.j2
            dest: data_persistence.yml

        - include_vars:
            file: data_persistence.yml

        - name: Record the data consistency util path
          set_fact:
            data_consistency_util_path: "{{ consistencyutil }}"
          when: data_persistence != ''

        - name: Get application pod name
          shell: >
            kubectl get pod -n {{ namespace }} -l {{ label }} --no-headers
            -o=custom-columns=NAME:".metadata.name"
          args:
            executable: /bin/bash
          register: app_pod_name

        - name: Record the application pod name
          set_fact:
            application_pod: "{{ app_pod_name.stdout }}"

        - name: Obtain the PVC name from the application mount
          shell: >
            kubectl get pods "{{ app_pod_name.stdout }}" -n "{{ namespace }}"
            -o custom-columns=:.spec.volumes[*].persistentVolumeClaim.claimName --no-headers
          args:
            executable: /bin/bash
          register: pvc

        - name: Obtain the Persistent Volume name
          shell: >
            kubectl get pvc "{{ pvc.stdout }}" -n "{{ namespace }}" --no-headers
            -o custom-columns=:.spec.volumeName
          args:
            executable: /bin/bash
          register: pv
          failed_when: 'pv.stdout == ""'

        - name: Record the PV name
          set_fact:
            pv_name: "{{ pv.stdout }}"

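        ## pv_name is used later to confirm that the ZFS dataset
        ## "{{ zpool_name }}/{{ pv_name }}" reappears after the zpool import.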
        ## Generate dummy test data on the application
        - name: Generate data on the specified application
          include: "{{ data_consistency_util_path }}"
          vars:
            status: 'LOAD'
            ns: "{{ namespace }}"
            pod_name: "{{ app_pod_name.stdout }}"
          when: data_persistence != ''

        ## Obtain the name of the node where the application pod is running
        - name: Get the application pod's node to perform chaos on
          shell: >
            kubectl get pod {{ app_pod_name.stdout }} -n {{ namespace }}
            --no-headers -o custom-columns=:spec.nodeName
          args:
            executable: /bin/bash
          register: app_node

        - name: Record the application pod node name
          set_fact:
            app_node_name: "{{ app_node.stdout }}"

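        ## vm_power_operations.yml is the chaos util that toggles the power
        ## state of the VM backing target_node via the ESX host at esx_ip.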
        ## Execute the chaos util to power off the target node
        - include_tasks: "/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml"
          vars:
            esx_ip: "{{ host_ip }}"
            target_node: "{{ app_node.stdout }}"
            operation: "off"

        - name: Wait for the node to report NotReady status
          shell: kubectl get nodes {{ app_node.stdout }} --no-headers
          args:
            executable: /bin/bash
          register: state
          until: "'NotReady' in state.stdout"
          delay: 15
          retries: 30

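        ## While the node is down, the old pod is stuck in Terminating and
        ## the controller spawns a replacement, so two pods are expected to
        ## match the label.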
        - name: Check whether a replacement application pod is scheduled after the node failure
          shell: >
            kubectl get pods -n {{ namespace }} -l {{ label }} --no-headers | wc -l
          args:
            executable: /bin/bash
          register: app_pod_count
          until: "'2' in app_pod_count.stdout"
          delay: 15
          retries: 30

        - name: Get the new application pod name
          shell: >
            kubectl get pod -n {{ namespace }} -l {{ label }} --no-headers | grep -v Terminating | awk '{print $1}'
          args:
            executable: /bin/bash
          register: new_app_pod_name

        - name: Record the new application pod name
          set_fact:
            new_app_pod: "{{ new_app_pod_name.stdout }}"

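        ## The replacement pod is expected to stay Pending while the node is
        ## down: the ZFS volume is local to the failed node, so the pod
        ## cannot run anywhere else.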
        - name: Check that the newly created application pod remains Pending
          shell: >
            kubectl get pod {{ new_app_pod }} -n {{ namespace }} --no-headers -o custom-columns=:.status.phase
          args:
            executable: /bin/bash
          register: new_app_pod_status
          failed_when: "'Pending' not in new_app_pod_status.stdout"

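        ## Power the target node back on using the same chaos util.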
        - include_tasks: "/e2e-tests/chaoslib/vmware_chaos/vm_power_operations.yml"
          vars:
            esx_ip: "{{ host_ip }}"
            target_node: "{{ app_node_name }}"
            operation: "on"

        - name: Wait for the node to become Ready again
          shell: kubectl get node {{ app_node_name }} --no-headers
          args:
            executable: /bin/bash
          register: node_status
          until: "'NotReady' not in node_status.stdout"
          delay: 10
          retries: 30

        - name: Verify that the previous application pod is successfully deleted
          shell: kubectl get pod -n {{ namespace }} -l {{ label }} --no-headers
          args:
            executable: /bin/bash
          register: app_pod_status
          until: "'{{ application_pod }}' not in app_pod_status.stdout"
          delay: 5
          retries: 40

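        ## The remaining steps SSH into the node to repair the zpool, so
        ## fetch its IP address first.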
        - name: Get the IP address of the node on which the application pod is scheduled
          shell: >
            kubectl get nodes {{ app_node_name }} --no-headers -o jsonpath='{.status.addresses[0].address}'
          args:
            executable: /bin/bash
          register: node_ip_address

        - name: Record the IP address of the node on which the application pod is scheduled
          set_fact:
            node_ip_add: "{{ node_ip_address.stdout }}"

        - name: Check if the zpool is present
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} "zpool list"
          args:
            executable: /bin/bash
          register: zpool_status

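        ## After an ungraceful power-off the pool may not be imported
        ## automatically, so force-import it if it is missing from the
        ## 'zpool list' output.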
        - name: Import the zpool after turning the VM back on
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
            "echo {{ node_pwd }} | sudo -S su -c 'zpool import -f {{ zpool_name }}'"
          args:
            executable: /bin/bash
          register: status
          failed_when: "status.rc != 0"
          when: "'{{ zpool_name }}' not in zpool_status.stdout"

        - name: Verify that the zfs dataset is available again
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} "zfs list"
          args:
            executable: /bin/bash
          register: zfs_dataset
          until: "'{{ zpool_name }}/{{ pv_name }}' in zfs_dataset.stdout"
          delay: 10
          retries: 30

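        ## For an encrypted pool the key is not loaded automatically on
        ## import; if 'zfs get keystatus' reports it as unavailable, load
        ## the passphrase so the datasets become usable.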
        - name: Check the encryption keystatus on the node
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} "zfs get keystatus | grep {{ zpool_name }}"
          args:
            executable: /bin/bash
          register: keystatus
          failed_when: "keystatus.rc != 0"

        - name: Load the key passphrase into the datasets on the node
          shell: >
            sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
            "echo {{ node_pwd }} | sudo -S su -c 'echo {{ enc_pwd }} | zfs load-key -L prompt {{ zpool_name }}'"
          args:
            executable: /bin/bash
          register: key_load_status
          failed_when: "key_load_status.rc != 0"
          when: "'unavailable' in keystatus.stdout"

        - name: Check that the newly scheduled application pod reaches Running state
          shell: kubectl get pod {{ new_app_pod }} -n {{ namespace }} --no-headers -o custom-columns=:.status.phase
          args:
            executable: /bin/bash
          register: new_app_pod_status
          until: "'Running' in new_app_pod_status.stdout"
          delay: 5
          retries: 50

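        ## Verify that the data written before the chaos survived the node
        ## failure.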
        - block:

            - name: Obtain the rescheduled pod name
              shell: >
                kubectl get pods -n {{ namespace }} -l {{ label }} --no-headers
                -o custom-columns=:metadata.name
              args:
                executable: /bin/bash
              register: rescheduled_app_pod

            - name: Verify application data persistence
              include: "{{ data_consistency_util_path }}"
              vars:
                status: 'VERIFY'
                ns: "{{ namespace }}"
                pod_name: "{{ rescheduled_app_pod.stdout }}"

          when: data_persistence != ''

        - set_fact:
            flag: "Pass"

      rescue:
        - set_fact:
            flag: "Fail"

      always:

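        ## Record EOT (end of test) in the e2e result custom resource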
        - include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
          vars:
            status: 'EOT'