Skip to content

Commit

Permalink
WIP: Adding backcompat testing
Browse files Browse the repository at this point in the history
  • Loading branch information
kav committed Oct 24, 2020
1 parent 6b5e7d1 commit 12dd785
Show file tree
Hide file tree
Showing 4 changed files with 203 additions and 1 deletion.
26 changes: 26 additions & 0 deletions charts/velero/ci/test-values-back-compat.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# CI values exercising the chart's back-compat `configuration.*` layout
# (used by tests/back-compat-snapshot_test.yaml).
installCRDs: true

# Set provider name and backup storage location bucket name
configuration:
  provider: aws
  backupStorageLocation:
    bucket: velero
    config:
      region: us-west-1
      profile: test
  volumeSnapshotLocation:
    provider: aws
    config:
      # NOTE(review): a `bucket` key under a VolumeSnapshotLocation config is
      # unusual (buckets belong to backup storage locations) — confirm the AWS
      # plugin actually consumes it, or drop it.
      bucket: velero
      region: us-west-1

# Set a service account so that the CRD clean up job has proper permissions to delete CRDs
serviceAccount:
  server:
    name: velero

# Whether or not to clean up CustomResourceDefinitions when deleting a release.
# Cleaning up CRDs will delete the BackupStorageLocation and VolumeSnapshotLocation instances, which would have to be reconfigured.
# Backup data in object storage will _not_ be deleted, however Backup instances in the Kubernetes API will be.
# Always clean up CRDs in CI.
cleanUpCRDs: true
2 changes: 1 addition & 1 deletion charts/velero/templates/deployment.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
{{- if .Values.provider -}}
{{- if or .Values.provider .Values.configuration.provider -}}
{{- $providers := list .Values.provider .Values.backupStorageLocation.provider .Values.volumeSnapshotLocation.provider .Values.configuration.backupStorageLocation.provider .Values.configuration.volumeSnapshotLocation.provider | compact | uniq -}}
{{- $provider := first $providers -}}
{{- $useSecret := or .Values.credentials.existingSecret (or .Values.credentials.secretContents .Values.credentials.extraEnvVars) -}}
Expand Down
163 changes: 163 additions & 0 deletions charts/velero/tests/__snapshot__/back-compat-snapshot_test.yaml.snap
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
# NOTE(review): snapshot cache generated by helm-unittest — regenerate with
# `helm unittest -u` rather than editing by hand; comments in this file are
# discarded on regeneration.
manifest should match snapshot:
  1: |
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app.kubernetes.io/instance: RELEASE-NAME
        app.kubernetes.io/managed-by: Tiller
        app.kubernetes.io/name: velero
        helm.sh/chart: velero-2.14.0
      name: RELEASE-NAME-velero
    spec:
      replicas: 1
      selector:
        matchLabels:
          app.kubernetes.io/instance: RELEASE-NAME
          app.kubernetes.io/name: velero
      template:
        metadata:
          annotations:
            prometheus.io/path: /metrics
            prometheus.io/port: "8085"
            prometheus.io/scrape: "true"
          labels:
            app.kubernetes.io/instance: RELEASE-NAME
            app.kubernetes.io/managed-by: Tiller
            app.kubernetes.io/name: velero
            helm.sh/chart: velero-2.14.0
            name: velero
        spec:
          containers:
          - args:
            - server
            command:
            - /velero
            env:
            - name: VELERO_SCRATCH_DIR
              value: /scratch
            - name: VELERO_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: LD_LIBRARY_PATH
              value: /plugins
            image: velero/velero:v1.4.2
            imagePullPolicy: IfNotPresent
            name: velero
            ports:
            - containerPort: 8085
              name: monitoring
            volumeMounts:
            - mountPath: /plugins
              name: plugins
          initContainers:
          - image: velero/velero-plugin-for-aws:v1.1.0
            imagePullPolicy: IfNotPresent
            name: velero-plugin-for-aws
            volumeMounts:
            - mountPath: /target
              name: plugins
          restartPolicy: Always
          serviceAccountName: velero
          volumes:
          - emptyDir: {}
            name: plugins
          - emptyDir: {}
            name: scratch
  # NOTE(review): bucket and provider render as null here — the
  # backupstoragelocation template does not appear to pick up the
  # configuration.backupStorageLocation values in this WIP state; confirm
  # whether this snapshot captures intended back-compat output or a gap.
  2: |
    apiVersion: velero.io/v1
    kind: BackupStorageLocation
    metadata:
      annotations:
        helm.sh/hook: post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation
      labels:
        app.kubernetes.io/instance: RELEASE-NAME
        app.kubernetes.io/managed-by: Tiller
        app.kubernetes.io/name: velero
        helm.sh/chart: velero-2.14.0
      name: default
    spec:
      objectStorage:
        bucket: null
      provider: null
  3: |
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      labels:
        app.kubernetes.io/instance: RELEASE-NAME
        app.kubernetes.io/managed-by: Tiller
        app.kubernetes.io/name: velero
        helm.sh/chart: velero-2.14.0
      name: restic
    spec:
      selector:
        matchLabels:
          name: restic
      template:
        metadata:
          labels:
            app.kubernetes.io/instance: RELEASE-NAME
            app.kubernetes.io/managed-by: Tiller
            app.kubernetes.io/name: velero
            helm.sh/chart: velero-2.14.0
            name: restic
        spec:
          containers:
          - args:
            - restic
            - server
            command:
            - /velero
            env:
            - name: VELERO_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: VELERO_SCRATCH_DIR
              value: /scratch
            image: velero/velero:v1.4.2
            imagePullPolicy: IfNotPresent
            name: restic
            securityContext:
              privileged: false
            volumeMounts:
            - mountPath: /host_pods
              mountPropagation: HostToContainer
              name: host-pods
            - mountPath: /scratch
              name: scratch
          securityContext:
            runAsUser: 0
          serviceAccountName: velero
          volumes:
          - hostPath:
              path: /var/lib/kubelet/pods
            name: host-pods
          - emptyDir: {}
            name: scratch
  4: |
    apiVersion: velero.io/v1
    kind: VolumeSnapshotLocation
    metadata:
      annotations:
        helm.sh/hook: post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation
      labels:
        app.kubernetes.io/instance: RELEASE-NAME
        app.kubernetes.io/managed-by: Tiller
        app.kubernetes.io/name: velero
        helm.sh/chart: velero-2.14.0
      name: default
    spec:
      config:
        bucket: velero
        region: us-west-1
      provider: aws
13 changes: 13 additions & 0 deletions charts/velero/tests/back-compat-snapshot_test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# helm-unittest suite: renders the chart with the back-compat CI values and
# checks the output of each listed template against the stored snapshot in
# __snapshot__/back-compat-snapshot_test.yaml.snap.
templates:
  - deployment.yaml
  - backupstoragelocation.yaml
  - restic-daemonset.yaml
  - volumesnapshotlocation.yaml
tests:
  - it: manifest should match snapshot
    # Reuse the same values file the chart CI installs with.
    values:
      - ../ci/test-values-back-compat.yaml
    # Enable restic so restic-daemonset.yaml renders (it is gated off by default).
    set:
      restic.enabled: true
    asserts:
      - matchSnapshot: {}

0 comments on commit 12dd785

Please sign in to comment.