feat(e2e-test): Add e2e-tests for zfs-localpv (#298)

Signed-off-by: w3aman <aman.gupta@mayadata.io>
Author: Aman Gupta, 2021-06-09 21:21:39 +05:30 (committed by GitHub)
parent 53f872fcf1
commit 4e73638b5a
137 changed files with 8745 additions and 0 deletions

@@ -0,0 +1,21 @@
apiVersion: v1
kind: ConfigMap
metadata:
# any name can be used; Velero uses the labels (below)
# to identify it rather than the name
name: change-pvc-node-selector-config
# must be in the velero namespace
namespace: velero
# the below labels should be used verbatim in your
# ConfigMap.
labels:
# this value-less label identifies the ConfigMap as
# config for a plugin (i.e. the built-in restore item action plugin)
velero.io/plugin-config: ""
# this label identifies the name and kind of plugin
# that this ConfigMap is for.
velero.io/change-pvc-node-selector: RestoreItemAction
data:
# add 1+ key-value pairs here, where the key is the old
# node name and the value is the new node name.
{{ source_node }}: {{ destination_node }}
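For illustration, once this template is rendered with the test's source and destination node facts, the data section maps the old node name to the new one; with hypothetical node names it would look like:
data:
  # hypothetical example: volumes backed up from worker-node-1 are restored to worker-node-2
  worker-node-1: worker-node-2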

@@ -0,0 +1,68 @@
---
- block:
- name: Create the volume snapshot location which has the information about where the snapshot should be stored
shell: kubectl apply -f ./volume_snapshot_location.yml
args:
executable: /bin/bash
- name: Check that the volume snapshot location is present
shell: kubectl get volumesnapshotlocation -n velero
args:
executable: /bin/bash
register: vol_snapshot_location
until: "'zfspv-snaplocation' in vol_snapshot_location.stdout"
delay: 2
retries: 30
- name: Creating Backup
shell: >
velero backup create {{ velero_backup_name }} --snapshot-volumes --include-namespaces={{ app_ns }} --volume-snapshot-locations=zfspv-snaplocation --storage-location=default
args:
executable: /bin/bash
- name: Get the state of Backup
shell: kubectl get backup {{ velero_backup_name }} -n velero -o jsonpath='{.status.phase}'
args:
executable: /bin/bash
register: backup_state
until: "'Completed' in backup_state.stdout"
delay: 5
retries: 100
when: action == "backup"
# Schedule creates a cron job for the backup. The --schedule flag uses standard Kubernetes cron notation, e.g. "*/1 * * * *" runs every minute.
- block:
- name: Creating schedule backup
shell: velero create schedule {{ schedule_name }} --schedule="*/1 * * * *" --snapshot-volumes --include-namespaces={{ app_ns }} --volume-snapshot-locations=zfspv-snaplocation --storage-location=default
when: action == "schedule_backup"
- block:
- name: Create the volume snapshot location which has the information about where the snapshot should be stored
shell: kubectl apply -f ./incremental_backup_vsl.yml
args:
executable: /bin/bash
## Incremental backups work with schedules. The following task creates a schedule that takes a backup
## every minute (per the cron expression). The first backup is a full backup, followed by incremental
## backups and then another full backup; this cycle repeats for as long as the schedule exists.
- name: Creating incremental backup
shell: velero create schedule {{ schedule_name }} --schedule="*/1 * * * *" --snapshot-volumes --include-namespaces={{ app_ns }} --volume-snapshot-locations=incr --storage-location=default
## Backups start as soon as the schedule is created, so wait a while and dump some data
## before the first incremental backup is taken.
- name: Sleep for some time before dumping data
shell: sleep 30
## The schedule keeps creating backups every minute. Meanwhile the following task dumps some dummy data
## so that it can be verified after restoring the backups.
- name: Create backups incrementally after dumping data periodically
include_tasks: "./incremental_backup.yml"
when: action == "incremental_backup"
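The schedule tasks above return as soon as the schedule object is created. As a minimal sketch (not part of this commit, and assuming Velero's velero.io/schedule-name label on schedule-created backups), the playbook could wait for the first scheduled backup to complete instead of relying only on fixed sleeps:
- name: Wait for the first backup created by the schedule to complete
  shell: >
    kubectl get backups.velero.io -n velero -l velero.io/schedule-name={{ schedule_name }}
    --no-headers -o custom-columns=:status.phase
  args:
    executable: /bin/bash
  register: scheduled_backup_phase
  until: "'Completed' in scheduled_backup_phase.stdout"
  delay: 5
  retries: 30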

@@ -0,0 +1,3 @@
[default]
aws_access_key_id = minio
aws_secret_access_key = minio123

@@ -0,0 +1,5 @@
{% if data_persistence is defined and data_persistence == 'mysql' %}
consistencyutil: /e2e-tests/utils/applications/mysql/mysql_data_persistence.yml
{% elif data_persistence is defined and data_persistence == 'busybox' %}
consistencyutil: /e2e-tests/utils/applications/busybox/busybox_data_persistence.yml
{% endif %}

@@ -0,0 +1,85 @@
- name: Obtain the mount path for the application
shell: >
kubectl get pods -n {{ app_ns }} -l {{ app_label }}
-o custom-columns=:.spec.containers[].volumeMounts[].mountPath --no-headers
args:
executable: /bin/bash
register: mount
- name: Record the mount path for the application
set_fact:
mount_path: "{{ mount.stdout }}"
- name: Dump some dummy data in application mount point
shell: >
kubectl exec -ti {{ app_pod_name }} -n {{ app_ns }} -- sh
-c "dd if=/dev/urandom of={{ mount_path }}/incr-file1 bs=4k count=1024 &&
md5sum {{ mount_path }}/incr-file1 > {{ mount_path }}/pre-incr-file1-md5 && sync;sync;sync"
args:
executable: /bin/bash
- name: Wait for some seconds
shell: sleep 60
- name: Again dump some dummy data
shell: >
kubectl exec -ti {{ app_pod_name }} -n {{ app_ns }} -- sh
-c "dd if=/dev/urandom of={{ mount_path }}/incr-file2 bs=4k count=1024 &&
md5sum {{ mount_path }}/incr-file2 > {{ mount_path }}/pre-incr-file2-md5 && sync;sync;sync"
args:
executable: /bin/bash
- name: Wait for all incremental backups and the final full backup to finish
shell: sleep 180
- name: Get the first backup name, which is a full backup by default
shell: velero get backup | grep {{ schedule_name }} | tail -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: first_full_bkp
- name: Record the first full backup name
set_fact:
first_full_backup: "{{ first_full_bkp.stdout }}"
- name: Get the first incremental backup name
shell: velero get backup | grep {{ schedule_name }} | tail -n2 | head -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: first_incr_bkp
- name: Record the first incremental backup name
set_fact:
first_incremental_backup: "{{ first_incr_bkp.stdout }}"
- name: Get the second incremental backup name
shell: velero get backup | grep {{ schedule_name }} | tail -n3 | head -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: second_incr_bkp
- name: Record the second incremental backup name
set_fact:
second_incremental_backup: "{{ second_incr_bkp.stdout }}"
- name: Get the last full backup name, taken after the two incremental backups
shell: velero get backup | grep {{ schedule_name }} | tail -n4 | head -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: last_full_bkp
- name: Record the last full backup name
set_fact:
last_full_backup: "{{ last_full_bkp.stdout }}"
- name: Check status of all four backups
shell: kubectl get backups.velero.io {{ item }} -n velero -o jsonpath='{.status.phase}'
args:
executable: /bin/bash
register: backup_status
failed_when: "backup_status.stdout != 'Completed'"
loop:
- "{{ first_full_backup }}"
- "{{ first_incremental_backup }}"
- "{{ second_incremental_backup }}"
- "{{ last_full_backup }}"

@@ -0,0 +1,16 @@
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
name: incr
namespace: velero
spec:
provider: openebs.io/zfspv-blockstore
config:
bucket: velero
prefix: zfs
incrBackupCount: "2" # number of incremental backups to take after each full backup
namespace: openebs # this is the namespace where ZFS-LocalPV creates all its CRs; it is passed as the OPENEBS_NAMESPACE env in the ZFS-LocalPV deployment
provider: aws
region: minio
s3ForcePathStyle: "true"
s3Url: http://minio.velero.svc:9000
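For clarity, with incrBackupCount set to "2" the schedule that uses this location produces the sequence described in backup.yml:
# backup 1: full, backup 2: incremental, backup 3: incremental, backup 4: full, then the cycle repeats
which is why the incremental test records and verifies exactly four backups.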

@@ -0,0 +1,83 @@
- name: Obtain the mount path for the application
shell: >
kubectl get pods -n {{ app_ns }} -l {{ app_label }}
-o custom-columns=:.spec.containers[].volumeMounts[].mountPath --no-headers
args:
executable: /bin/bash
register: mount
- name: Record the mount path for the application
set_fact:
mount_path: "{{ mount.stdout }}"
- name: Get the first backup name, which is a full backup by default
shell: velero get backup | grep {{ schedule_name }} | tail -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: first_full_bkp
- name: Record the first full backup name
set_fact:
first_full_backup: "{{ first_full_bkp.stdout }}"
- name: Get the first incremental backup name
shell: velero get backup | grep {{ schedule_name }} | tail -n2 | head -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: first_incr_bkp
- name: Record the first incremental backup name
set_fact:
first_incremental_backup: "{{ first_incr_bkp.stdout }}"
- name: Get the second incremental backup name
shell: velero get backup | grep {{ schedule_name }} | tail -n3 | head -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: second_incr_bkp
- name: Record the second incremental backup name
set_fact:
second_incremental_backup: "{{ second_incr_bkp.stdout }}"
- name: Get the last full backup name, taken after the two incremental backups
shell: velero get backup | grep {{ schedule_name }} | tail -n4 | head -n1 | awk '{print $1}'
args:
executable: /bin/bash
register: last_full_bkp
- name: Record the last full backup name
set_fact:
last_full_backup: "{{ last_full_bkp.stdout }}"
- name: Restore the first incremental backup
include: "./restore.yml"
vars:
velero_backup_name: "{{ first_incremental_backup }}"
app_ns_new: "first-incr-restore-ns"
- name: Check the data consistency
shell: >
kubectl exec -ti {{ restore_app_pod }} -n first-incr-restore-ns
-- sh -c "cd {{ mount_path }} && ls"
args:
executable: /bin/bash
register: data_status
failed_when: "'incr-file1' not in data_status.stdout"
- name: Restore the second incremental backup
include: "./restore.yml"
vars:
velero_backup_name: "{{ second_incremental_backup }}"
app_ns_new: "second-incr-restore-ns"
- name: Check the data consistency
shell: >
kubectl exec -ti {{ restore_app_pod }} -n second-incr-restore-ns
-- sh -c "cd {{ mount_path }} && ls"
args:
executable: /bin/bash
register: data_status
failed_when:
- "'incr-file1' not in data_status.stdout"
- "'incr-file2' not in data_status.stdout"

@@ -0,0 +1,105 @@
- block:
- name: Restoring application
shell: >
velero restore create --from-backup {{ velero_backup_name }} --restore-volumes=true --namespace-mappings {{ app_ns }}:{{ app_ns_new }}
args:
executable: /bin/bash
when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "true"
- block:
- name: Delete all the resources in application namespace
shell: |
kubectl delete deploy,pvc,svc --all -n {{ app_ns }}
kubectl delete ns {{ app_ns }}
- name: Verify that application namespace is deleted successfully
shell: kubectl get ns --no-headers -o custom-columns=:metadata.name
args:
executable: /bin/bash
register: ns_list
until: "'{{ app_ns }}' not in ns_list.stdout_lines"
delay: 5
retries: 30
- name: Verify that PV is successfully deleted
shell: >
kubectl get pv -o json | jq -r '.items[] |
select(.spec.claimRef.name == "{{ app_pvc }}" and .spec.claimRef.namespace == "{{ app_ns }}" )
| .metadata.name'
args:
executable: /bin/bash
register: pv_list
until: "'' in pv_list.stdout"
delay: 3
retries: 30
- name: Restoring application
shell: >
velero restore create --from-backup {{ velero_backup_name }} --restore-volumes=true
args:
executable: /bin/bash
when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "false"
- name: Getting latest restore name
shell: velero get restore | grep {{ velero_backup_name }} | awk '{print $1}' | tail -n 1
register: restore_name
- name: Checking the restore status
shell: kubectl get restore {{ restore_name.stdout }} -n velero -o jsonpath='{.status.phase}'
register: restore_state
until: "'Completed' in restore_state.stdout"
delay: 5
retries: 60
- name: Check the new namespace is created and in Active state
shell: kubectl get namespace {{ app_ns_new }} -o jsonpath='{.status.phase}'
args:
executable: /bin/bash
register: app_ns_new_status
failed_when: "'Active' not in app_ns_new_status.stdout"
- name: Check that pods in {{ app_ns_new }} are in running state
shell: kubectl get pods -n {{ app_ns_new }} --no-headers -o custom-columns=:status.phase | sort | uniq
args:
executable: /bin/bash
register: pod_status
until: "pod_status.stdout == 'Running'"
delay: 3
retries: 50
- name: Get the application pod name in {{ app_ns_new }} namespace
shell: kubectl get pod -n {{ app_ns_new }} -l {{ app_label }} --no-headers -o custom-columns=:.metadata.name
args:
executable: /bin/bash
register: restore_application_pod
- name: Record the application pod name in {{ app_ns_new }} namespace
set_fact:
restore_app_pod: "{{ restore_application_pod.stdout }}"
- block:
- name: Check that the restore happened on a different node
shell: >
kubectl get pod -n {{ app_ns_new }} -l {{ app_label }} --no-headers -o custom-columns=:.spec.nodeName
args:
executable: /bin/bash
register: restore_node_name
failed_when: "'{{ destination_node }}' != '{{ restore_node_name.stdout }}'"
when: lookup('env','RESTORE_IN_DIFF_NODE') == "true"
- name: Verify the data consistency
include: "{{ data_consistency_util_path }}"
vars:
status: 'VERIFY'
ns: "{{ app_ns_new }}"
label: "{{ app_label }}"
pod_name: "{{ restore_app_pod }}"
when: data_persistence != ''
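One more check that could follow here, as a sketch only (assuming the restored PVC keeps its original claim name), is to confirm that the PVC is Bound in the target namespace before running the data-consistency util:
- name: Check that the restored PVC is Bound
  shell: kubectl get pvc {{ app_pvc }} -n {{ app_ns_new }} -o jsonpath='{.status.phase}'
  args:
    executable: /bin/bash
  register: restored_pvc_phase
  until: "'Bound' in restored_pvc_phase.stdout"
  delay: 3
  retries: 30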

@@ -0,0 +1,86 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: zfspv-backup-store
namespace: e2e
data:
parameters.yml: |
---
apiVersion: batch/v1
kind: Job
metadata:
generateName: zfspv-velero-backup-restore-
namespace: e2e
spec:
template:
metadata:
name: e2e
labels:
test: velero-backup-restore
spec:
serviceAccountName: e2e
restartPolicy: Never
containers:
- name: ansibletest
image: openebs/zfs-localpv-e2e:ci
imagePullPolicy: IfNotPresent
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: default
- name: APP_NAMESPACE ## Namespace where application is deployed
value: ''
- name: APP_LABEL ## Application label
value: ''
- name: APP_PVC ## PVC name for application
value: ''
- name: ACTION ## The value of this env decides which tasks the playbook performs.
value: '' ## Supported values: (backup, restore, schedule_backup, incremental_backup, incremental_restore)
- name: VELERO_BACKUP_NAME ## The Velero backup will be created with this name.
value: ''
- name: VELERO_PLUGIN_NAME ## Velero plugin image. For zfs-localpv the plugin is `openebs/velero-plugin:2.1.0`
value: '' ## the ci image can also be used.
- name: VELERO_SCHEDULE_BACKUP_NAME ## Name for the scheduled backup (used when ACTION creates a schedule).
value: ''
- name: VELERO_VERSION ## Velero version (e.g. v1.4.0)
value: ''
- name: STORAGE_BUCKET ## Supported values: minio
value: ''
- name: RESTORE_IN_DIFF_NAMESPACE ## Set to true to restore the backup into a different namespace.
value: '' ## Supported values: ( true, false )
- name: RESTORE_NAMESPACE ## If RESTORE_IN_DIFF_NAMESPACE is true, the namespace into which the backup should be restored.
value: ''
- name: RESTORE_IN_DIFF_NODE ## Set to true to restore the backup onto a different node.
value: '' ## Supported values: ( true, false )
- name: DATA_PERSISTENCE ## Supported values: (busybox, mysql)
value: '' ## When set, some dummy test data is dumped into the application mount point before backup
## and verified in the restored application.
command: ["/bin/bash"]
args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/backup_and_restore/test.yml -i /etc/ansible/hosts -vv; exit 0"]
volumeMounts:
- name: parameters
mountPath: /mnt/
volumes:
- name: parameters
configMap:
name: zfspv-backup-store
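To run the test, the empty value fields above are filled in and the manifest is submitted. The values below are hypothetical examples only; note that because the Job uses generateName, it has to be created with kubectl create rather than kubectl apply:
# hypothetical example values:
#   APP_NAMESPACE: app-busybox-ns
#   APP_LABEL: app=busybox
#   APP_PVC: busybox-pvc
#   ACTION: backup
#   VELERO_BACKUP_NAME: zfspv-backup-1
#   VELERO_PLUGIN_NAME: openebs/velero-plugin:2.1.0
#   VELERO_VERSION: v1.4.0
#   STORAGE_BUCKET: minio
#   DATA_PERSISTENCE: busybox
# then submit the job:
#   kubectl create -f <this-job-manifest>.yml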

@@ -0,0 +1,138 @@
- name: Download velero binary
get_url:
url: "{{ velero_binary_url }}"
dest: "./"
force: yes
register: result
until: "'OK' in result.msg"
delay: 3
retries: 5
- name: Installing velero inside e2e-test container
shell: |
tar -xvf velero-{{ velero_version }}-linux-amd64.tar.gz
mv velero-{{ velero_version }}-linux-amd64/velero /usr/local/bin/
- name: Checking the velero version
shell: velero version
register: velero
failed_when: "velero_version not in velero.stdout"
- block:
- name: Installing velero server inside cluster
shell: >
velero install \
--provider aws \
--bucket velero \
--secret-file ./credentials_minio \
--plugins velero/velero-plugin-for-aws:v1.0.0 \
--use-volume-snapshots=false \
--use-restic \
--backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://minio.velero.svc:9000
- name: Check velero server pod status
shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.phase}'
register: velero_pod_status
until: "'Running' in velero_pod_status.stdout"
delay: 5
retries: 20
- name: Check velero server pod's container status
shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.containerStatuses[0].state}'
register: velero_container_status
until: "'running' in velero_container_status.stdout"
delay: 5
retries: 20
- name: Check that restic daemonset pods are running
shell: kubectl get pods -n velero -l name=restic --no-headers -o custom-columns=:status.phase | sort | uniq
register: restic_pod_status
until: "restic_pod_status.stdout == 'Running'"
delay: 3
retries: 20
- name: Installing minio
shell: kubectl apply -f velero-{{ velero_version }}-linux-amd64/examples/minio/00-minio-deployment.yaml
args:
executable: /bin/bash
- name: Waiting for minio job to create bucket
shell: kubectl get pod -n velero -l job-name=minio-setup -o jsonpath='{.items[*].status.phase}'
register: minio_job_status
until: "'Succeeded' in minio_job_status.stdout"
delay: 5
retries: 20
- name: Checking for minio pod status
shell: kubectl get pod -n velero -l component=minio -ojsonpath='{.items[0].status.phase}'
register: minio_status
until: "'Running' in minio_status.stdout"
delay: 5
retries: 15
when: bucket_type == "minio"
- name: Get the velero server pod name
shell: kubectl get pod -n velero -l deploy=velero --no-headers -o custom-columns=:.metadata.name
args:
executable: /bin/bash
register: velero_pod
- name: Check if the velero-plugin for zfs-localpv is already added
shell: >
kubectl get deploy velero -n velero
-o jsonpath='{.spec.template.spec.initContainers[?(@.name=="velero-plugin")].name}'
args:
executable: /bin/bash
register: zfs_localpv_velero_plugin
# From Velero version v1.6.0 onwards the velero plugin for OpenEBS is renamed to openebs-velero-plugin
- name: Check if the velero-plugin for zfs-localpv is already added
shell: >
kubectl get deploy velero -n velero
-o jsonpath='{.spec.template.spec.initContainers[?(@.name=="openebs-velero-plugin")].name}'
args:
executable: /bin/bash
register: zfs_localpv_velero_plugin_latest
- name: Add velero-plugin for zfs-localpv
shell: velero plugin add {{ velero_plugin_name }}
args:
executable: /bin/bash
when:
- zfs_localpv_velero_plugin.stdout != 'velero-plugin'
- zfs_localpv_velero_plugin_latest.stdout != 'openebs-velero-plugin'
# After adding the openebs velero plugin, a new velero pod comes up in Running state and the older one terminates.
- name: Wait until older velero pod terminates successfully
shell: kubectl get pods -n velero
args:
executable: /bin/bash
register: velero_pod_list
until: "'velero_pod.stdout' not in velero_pod_list.stdout"
delay: 3
retries: 30
- name: Check velero server pod status
shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.phase}'
register: velero_pod_run
until: "'Running' in velero_pod_run.stdout"
delay: 5
retries: 20
- name: Check velero server pod's container status
shell: kubectl get pod -n velero -l deploy=velero -o jsonpath='{.items[0].status.containerStatuses[0].state}'
register: velero_container
until: "'running' in velero_container.stdout"
delay: 5
retries: 20
- name: Check velero plugin for VolumeSnapshotter is present
shell: velero plugin get
register: snapshotter_plugin
until: "'zfspv-blockstore' in snapshotter_plugin.stdout"
delay: 2
retries: 40
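A further readiness check that could follow here, as a sketch (not part of this commit; the status.phase field on BackupStorageLocation is only populated by recent Velero versions), is to wait for the default backup storage location to become Available before creating backups:
- name: Wait for the default backup storage location to become Available
  shell: kubectl get backupstoragelocation default -n velero -o jsonpath='{.status.phase}'
  args:
    executable: /bin/bash
  register: bsl_phase
  until: "'Available' in bsl_phase.stdout"
  delay: 5
  retries: 30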

@@ -0,0 +1,146 @@
- hosts: localhost
connection: local
gather_facts: False
vars_files:
- test_vars.yml
- /mnt/parameters.yml
tasks:
- block:
## Generating the testname for this velero backup and restore test
- include_tasks: /e2e-tests/hack/create_testname.yml
## Record SOT (start of test) in e2e result e2e-cr (e2e-custom-resource)
- include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
vars:
status: 'SOT'
- name: Identify the data consistency util to be invoked
template:
src: data_persistence.j2
dest: data_persistence.yml
- include_vars:
file: data_persistence.yml
- name: Record the data consistency util path
set_fact:
data_consistency_util_path: "{{ consistencyutil }}"
when: data_persistence != ''
- name: Install velero
include_tasks: "./setup_dependency.yml"
- name: Get the application pod name
shell: >
kubectl get pod -n {{ app_ns }} -l {{ app_label }}
--no-headers -o custom-columns=:.metadata.name
args:
executable: /bin/bash
register: application_pod_name
- name: Record the application pod name
set_fact:
app_pod_name: "{{ application_pod_name.stdout }}"
- name: Check if the application pod is in running state
shell: >
kubectl get pod {{ app_pod_name }} -n {{ app_ns }}
--no-headers -o custom-columns=:.status.phase
args:
executable: /bin/bash
register: app_pod_status
failed_when: "'Running' not in app_pod_status.stdout"
- block:
- name: Create some test data
include: "{{ data_consistency_util_path }}"
vars:
status: 'LOAD'
ns: "{{ app_ns }}"
pod_name: "{{ app_pod_name }}"
when: data_persistence != ''
- name: Create the backup of the namespace in which application is deployed
include_tasks: "./backup.yml"
when: action == 'backup' or action == 'schedule_backup' or action == 'incremental_backup'
- block:
- name: Get the application node name
shell: >
kubectl get pod {{ app_pod_name }} -n {{ app_ns }}
--no-headers -o custom-columns=:.spec.nodeName
args:
executable: /bin/bash
register: application_node_name
- name: Record the application node name
set_fact:
source_node: "{{ application_node_name.stdout }}"
- name: Get any one of the nodes from remaining worker nodes in cluster
shell: >
kubectl get nodes --no-headers | grep -v master | grep -v {{ source_node }} | shuf -n 1 | awk '{print $1}'
args:
executable: /bin/bash
register: random_node
- name: Record this random node as the destination node for restoring on a different node
set_fact:
destination_node: "{{ random_node.stdout }}"
- name: Update the restore item action configmap with test specific values
template:
src: RestoreItemAction_configmap.j2
dest: RestoreItemAction_configmap.yml
- name: Apply the configmap
shell: kubectl apply -f RestoreItemAction_configmap.yml
args:
executable: /bin/bash
register: status
failed_when: "status.rc != 0"
when: lookup('env','RESTORE_IN_DIFF_NODE') == "true"
- block:
- name: Restore the backup
include_tasks: "./restore.yml"
when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "true"
- name: Restore the backup
include_tasks: "./restore.yml"
vars:
app_ns_new: "{{ app_ns }}"
when: lookup('env','RESTORE_IN_DIFF_NAMESPACE') == "false"
when: action == 'restore'
- block:
- name: Restore the incremental backups
include_tasks: "./incremental_restore.yml"
when: action == 'incremental_restore'
- set_fact:
flag: "Pass"
rescue:
- set_fact:
flag: "Fail"
always:
# RECORD END-OF-TEST IN e2e RESULT CR
- include_tasks: /e2e-tests/hack/update_e2e_result_resource.yml
vars:
status: 'EOT'

@@ -0,0 +1,13 @@
test_name: "zfspv-velero-backup-restore"
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_ns_new: "{{ lookup('env','RESTORE_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
app_pvc: "{{ lookup('env','APP_PVC') }}"
velero_backup_name: "{{ lookup('env','VELERO_BACKUP_NAME') }}"
velero_plugin_name: "{{ lookup('env','VELERO_PLUGIN_NAME') }}"
velero_version: "{{ lookup('env','VELERO_VERSION') }}"
bucket_type: "{{ lookup('env','STORAGE_BUCKET') }}"
data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}"
action: "{{ lookup('env','ACTION') }}"
schedule_name: "{{ lookup('env','VELERO_SCHEDULE_BACKUP_NAME') }}"
velero_binary_url: "https://github.com/vmware-tanzu/velero/releases/download/{{ lookup('env','VELERO_VERSION') }}/velero-{{ lookup('env','VELERO_VERSION') }}-linux-amd64.tar.gz"

@@ -0,0 +1,15 @@
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
name: zfspv-snaplocation
namespace: velero
spec:
provider: openebs.io/zfspv-blockstore
config:
bucket: velero
prefix: zfs
namespace: openebs # this is the namespace where ZFS-LocalPV creates all its CRs; it is passed as the OPENEBS_NAMESPACE env in the ZFS-LocalPV deployment
provider: aws
region: minio
s3ForcePathStyle: "true"
s3Url: http://minio.velero.svc:9000