refactor(e2e): load key's passphrase in dataset after node-power-failure test case

Signed-off-by: w3aman <aman.gupta@mayadata.io>
This commit is contained in:
w3aman 2021-07-27 16:31:51 +05:30
parent c0653530bd
commit 96ae41c094
4 changed files with 60 additions and 3 deletions

View file

@@ -25,12 +25,29 @@
- block: - block:
- name: stop the {{ svc_type }} service on node where application pod is scheduled - name: stop the docker service on node where application pod is scheduled
shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'systemctl stop docker.socket'"
args:
executable: /bin/bash
when: svc_type == "docker"
- name: stop the container runtime (if containerd, or crio) services on the application node
shell: > shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'systemctl stop {{ svc_type }}.service'" "echo {{ node_pwd }} | sudo -S su -c 'systemctl stop {{ svc_type }}.service'"
args: args:
executable: /bin/bash executable: /bin/bash
when: svc_type == "containerd" or svc_type == "crio"
- name: stop the kubelet service on node where application pod is scheduled
shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'systemctl stop kubelet.service'"
args:
executable: /bin/bash
when: svc_type == "kubelet"
- name: Check for the {{ svc_type }} service status - name: Check for the {{ svc_type }} service status
shell: > shell: >
@@ -91,12 +108,29 @@
- block: - block:
- name: Start the {{ svc_type }} services - name: Start the docker services
shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'systemctl start docker.socket'"
args:
executable: /bin/bash
when: svc_type == "docker"
- name: Start the container runtime (if containerd, or crio) services
shell: > shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'systemctl start {{ svc_type }}.service'" "echo {{ node_pwd }} | sudo -S su -c 'systemctl start {{ svc_type }}.service'"
args: args:
executable: /bin/bash executable: /bin/bash
when: svc_type == "containerd" or svc_type == "crio"
- name: Start the kubelet services
shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'systemctl start kubelet.service'"
args:
executable: /bin/bash
when: svc_type == "kubelet"
- name: Check for the {{ svc_type }} services status - name: Check for the {{ svc_type }} services status
shell: > shell: >

View file

@@ -82,6 +82,9 @@ spec:
- name: ZPOOL_NAME - name: ZPOOL_NAME
value: '' value: ''
- name: ZPOOL_ENCRYPTION_PASSWORD
value: 'test1234'
- name: ESX_PASSWORD - name: ESX_PASSWORD
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:

View file

@@ -194,6 +194,24 @@
delay: 10 delay: 10
retries: 30 retries: 30
- name: Check encryption keystatus on node
shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }} "zfs get keystatus | grep {{ zpool_name }}"
args:
executable: /bin/bash
register: keystatus
failed_when: "keystatus.rc != 0"
- name: Load key's passphrase into datasets on the node
shell: >
sshpass -p {{ node_pwd }} ssh -o StrictHostKeyChecking=no {{ user }}@{{ node_ip_add }}
"echo {{ node_pwd }} | sudo -S su -c 'echo {{ enc_pwd }} | zfs load-key -L prompt {{ zpool_name }}'"
args:
executable: /bin/bash
register: key_load_status
failed_when: "key_load_status.rc != 0"
when: "'unavailable' in keystatus.stdout"
- name: check the newly scheduled application pod status - name: check the newly scheduled application pod status
shell: kubectl get pod {{ new_app_pod }} -n {{ namespace }} --no-headers -o custom-columns=:.status.phase shell: kubectl get pod {{ new_app_pod }} -n {{ namespace }} --no-headers -o custom-columns=:.status.phase
args: args:

View file

@@ -21,4 +21,6 @@ user: "{{ lookup('env','USERNAME') }}"
zpool_name: "{{ lookup('env','ZPOOL_NAME') }}" zpool_name: "{{ lookup('env','ZPOOL_NAME') }}"
enc_pwd: "{{ lookup('env','ZPOOL_ENCRYPTION_PASSWORD') }}"
node_pwd: "{{ lookup('env','NODE_PASSWORD') }}" node_pwd: "{{ lookup('env','NODE_PASSWORD') }}"