diff --git a/charts/velero/ci/test-values-back-compat.yaml b/charts/velero/ci/test-values-back-compat.yaml
new file mode 100644
index 000000000..0ae3469f9
--- /dev/null
+++ b/charts/velero/ci/test-values-back-compat.yaml
@@ -0,0 +1,26 @@
+installCRDs: true
+
+# Set provider name and backup storage location bucket name
+configuration:
+  provider: aws
+  backupStorageLocation:
+    bucket: velero
+    config:
+      region: us-west-1
+      profile: test
+  volumeSnapshotLocation:
+    provider: aws
+    config:
+      bucket: velero
+      region: us-west-1
+
+# Set a service account so that the CRD cleanup job has proper permissions to delete CRDs
+serviceAccount:
+  server:
+    name: velero
+
+# Whether or not to clean up CustomResourceDefinitions when deleting a release.
+# Cleaning up CRDs will delete the BackupStorageLocation and VolumeSnapshotLocation instances, which would have to be reconfigured.
+# Backup data in object storage will _not_ be deleted; however, Backup instances in the Kubernetes API will.
+# Always clean up CRDs in CI.
+cleanUpCRDs: true
diff --git a/charts/velero/templates/deployment.yaml b/charts/velero/templates/deployment.yaml
index 9d8747a2d..f462960e1 100644
--- a/charts/velero/templates/deployment.yaml
+++ b/charts/velero/templates/deployment.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.provider -}}
+{{- if or .Values.provider .Values.configuration.provider -}}
 {{- $providers := list .Values.provider .Values.backupStorageLocation.provider .Values.volumeSnapshotLocation.provider .Values.configuration.backupStorageLocation.provider .Values.configuration.volumeSnapshotLocation.provider | compact | uniq -}}
 {{- $provider := first $providers -}}
 {{- $useSecret := or .Values.credentials.existingSecret (or .Values.credentials.secretContents .Values.credentials.extraEnvVars) -}}
diff --git a/charts/velero/tests/__snapshot__/back-compat-snapshot_test.yaml.snap b/charts/velero/tests/__snapshot__/back-compat-snapshot_test.yaml.snap
new file mode 100644
index 000000000..86525367c
--- /dev/null
+++ b/charts/velero/tests/__snapshot__/back-compat-snapshot_test.yaml.snap
@@ -0,0 +1,163 @@
+manifest should match snapshot:
+  1: |
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Tiller
+        app.kubernetes.io/name: velero
+        helm.sh/chart: velero-2.14.0
+      name: RELEASE-NAME-velero
+    spec:
+      replicas: 1
+      selector:
+        matchLabels:
+          app.kubernetes.io/instance: RELEASE-NAME
+          app.kubernetes.io/name: velero
+      template:
+        metadata:
+          annotations:
+            prometheus.io/path: /metrics
+            prometheus.io/port: "8085"
+            prometheus.io/scrape: "true"
+          labels:
+            app.kubernetes.io/instance: RELEASE-NAME
+            app.kubernetes.io/managed-by: Tiller
+            app.kubernetes.io/name: velero
+            helm.sh/chart: velero-2.14.0
+            name: velero
+        spec:
+          containers:
+          - args:
+            - server
+            command:
+            - /velero
+            env:
+            - name: VELERO_SCRATCH_DIR
+              value: /scratch
+            - name: VELERO_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.namespace
+            - name: LD_LIBRARY_PATH
+              value: /plugins
+            image: velero/velero:v1.4.2
+            imagePullPolicy: IfNotPresent
+            name: velero
+            ports:
+            - containerPort: 8085
+              name: monitoring
+            volumeMounts:
+            - mountPath: /plugins
+              name: plugins
+          initContainers:
+          - image: velero/velero-plugin-for-aws:v1.1.0
+            imagePullPolicy: IfNotPresent
+            name: velero-plugin-for-aws
+            volumeMounts:
+            - mountPath: /target
+              name: plugins
+          restartPolicy: Always
+          serviceAccountName: velero
+          volumes:
+          - emptyDir: {}
+            name: plugins
+          - emptyDir: {}
+            name: scratch
+  2: |
+    apiVersion: velero.io/v1
+    kind: BackupStorageLocation
+    metadata:
+      annotations:
+        helm.sh/hook: post-install,post-upgrade
+        helm.sh/hook-delete-policy: before-hook-creation
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Tiller
+        app.kubernetes.io/name: velero
+        helm.sh/chart: velero-2.14.0
+      name: default
+    spec:
+      objectStorage:
+        bucket: null
+      provider: null
+  3: |
+    apiVersion: apps/v1
+    kind: DaemonSet
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Tiller
+        app.kubernetes.io/name: velero
+        helm.sh/chart: velero-2.14.0
+      name: restic
+    spec:
+      selector:
+        matchLabels:
+          name: restic
+      template:
+        metadata:
+          labels:
+            app.kubernetes.io/instance: RELEASE-NAME
+            app.kubernetes.io/managed-by: Tiller
+            app.kubernetes.io/name: velero
+            helm.sh/chart: velero-2.14.0
+            name: restic
+        spec:
+          containers:
+          - args:
+            - restic
+            - server
+            command:
+            - /velero
+            env:
+            - name: VELERO_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: VELERO_SCRATCH_DIR
+              value: /scratch
+            image: velero/velero:v1.4.2
+            imagePullPolicy: IfNotPresent
+            name: restic
+            securityContext:
+              privileged: false
+            volumeMounts:
+            - mountPath: /host_pods
+              mountPropagation: HostToContainer
+              name: host-pods
+            - mountPath: /scratch
+              name: scratch
+          securityContext:
+            runAsUser: 0
+          serviceAccountName: velero
+          volumes:
+          - hostPath:
+              path: /var/lib/kubelet/pods
+            name: host-pods
+          - emptyDir: {}
+            name: scratch
+  4: |
+    apiVersion: velero.io/v1
+    kind: VolumeSnapshotLocation
+    metadata:
+      annotations:
+        helm.sh/hook: post-install,post-upgrade
+        helm.sh/hook-delete-policy: before-hook-creation
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Tiller
+        app.kubernetes.io/name: velero
+        helm.sh/chart: velero-2.14.0
+      name: default
+    spec:
+      config:
+        bucket: velero
+        region: us-west-1
+      provider: aws
diff --git a/charts/velero/tests/back-compat-snapshot_test.yaml b/charts/velero/tests/back-compat-snapshot_test.yaml
new file mode 100644
index 000000000..3d0524339
--- /dev/null
+++ b/charts/velero/tests/back-compat-snapshot_test.yaml
@@ -0,0 +1,13 @@
+templates:
+  - deployment.yaml
+  - backupstoragelocation.yaml
+  - restic-daemonset.yaml
+  - volumesnapshotlocation.yaml
+tests:
+  - it: manifest should match snapshot
+    values:
+      - ../ci/test-values-back-compat.yaml
+    set:
+      restic.enabled: true
+    asserts:
+      - matchSnapshot: {}