From 3f79d47f3bf567fc893c53d81839bffb76f957cc Mon Sep 17 00:00:00 2001 From: emosbaugh <371319+emosbaugh@users.noreply.github.com> Date: Thu, 7 Nov 2024 04:06:04 +0000 Subject: [PATCH] Create new Rook version --- addons/rook/1.15.5/Manifest | 13 + .../cluster/cephfs/cephfs-storageclass.yaml | 44 + .../1.15.5/cluster/cephfs/filesystem.yaml | 157 + .../cephfs/patches/cephfs-storageclass.yaml | 7 + .../cephfs/patches/filesystem-singlenode.yaml | 34 + .../patches/tmpl-filesystem-Json6902.yaml | 10 + .../cephfs/patches/tmpl-filesystem.yaml | 19 + addons/rook/1.15.5/cluster/cluster.yaml | 360 + addons/rook/1.15.5/cluster/kustomization.yaml | 15 + addons/rook/1.15.5/cluster/object-user.yaml | 11 + addons/rook/1.15.5/cluster/object.yaml | 161 + .../patches/ceph-cluster-tolerate.yaml | 11 + .../cluster/patches/cluster-nodes.tmpl.yaml | 12 + .../cluster/patches/object-Json6902.yaml | 4 + .../1.15.5/cluster/patches/tmpl-cluster.yaml | 26 + .../1.15.5/cluster/patches/tmpl-object.yaml | 21 + .../patches/tmpl-rbd-storageclass.yaml | 17 + .../1.15.5/cluster/tmpl-rbd-storageclass.yaml | 92 + addons/rook/1.15.5/crds.yaml | 14212 ++++++++++++++++ addons/rook/1.15.5/host-preflight.yaml | 40 + addons/rook/1.15.5/install.sh | 1157 ++ .../monitoring/ceph-cluster-dashboard.yaml | 2076 +++ .../rook/1.15.5/monitoring/kustomization.yaml | 3 + .../monitoring/rook-ceph-servicemonitor.yaml | 20 + addons/rook/1.15.5/operator/cluster-rbac.yaml | 363 + addons/rook/1.15.5/operator/clusterrole.yaml | 648 + .../1.15.5/operator/clusterrolebinding.yaml | 135 + .../configmap-rook-config-override.yaml | 12 + addons/rook/1.15.5/operator/configmap.yaml | 50 + addons/rook/1.15.5/operator/deployment.yaml | 82 + .../rook/1.15.5/operator/kustomization.yaml | 16 + addons/rook/1.15.5/operator/namespace.yaml | 5 + .../patches/deployment-privileged.yaml | 12 + .../patches/deployment-tolerations.yaml | 23 + addons/rook/1.15.5/operator/role.yaml | 89 + addons/rook/1.15.5/operator/rolebinding.yaml | 53 + .../operator/securityContextConstraints.yaml | 3 + .../rook/1.15.5/operator/serviceaccount.yaml | 83 + addons/rook/1.15.5/operator/toolbox.yaml | 131 + hack/testdata/manifest/clean | 2 +- scripts/Manifest | 2 +- web/src/installers/versions.js | 1 + 42 files changed, 20230 insertions(+), 2 deletions(-) create mode 100644 addons/rook/1.15.5/Manifest create mode 100644 addons/rook/1.15.5/cluster/cephfs/cephfs-storageclass.yaml create mode 100644 addons/rook/1.15.5/cluster/cephfs/filesystem.yaml create mode 100644 addons/rook/1.15.5/cluster/cephfs/patches/cephfs-storageclass.yaml create mode 100644 addons/rook/1.15.5/cluster/cephfs/patches/filesystem-singlenode.yaml create mode 100644 addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem-Json6902.yaml create mode 100644 addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem.yaml create mode 100644 addons/rook/1.15.5/cluster/cluster.yaml create mode 100644 addons/rook/1.15.5/cluster/kustomization.yaml create mode 100644 addons/rook/1.15.5/cluster/object-user.yaml create mode 100644 addons/rook/1.15.5/cluster/object.yaml create mode 100644 addons/rook/1.15.5/cluster/patches/ceph-cluster-tolerate.yaml create mode 100644 addons/rook/1.15.5/cluster/patches/cluster-nodes.tmpl.yaml create mode 100644 addons/rook/1.15.5/cluster/patches/object-Json6902.yaml create mode 100644 addons/rook/1.15.5/cluster/patches/tmpl-cluster.yaml create mode 100644 addons/rook/1.15.5/cluster/patches/tmpl-object.yaml create mode 100644 addons/rook/1.15.5/cluster/patches/tmpl-rbd-storageclass.yaml create mode 100644 
addons/rook/1.15.5/cluster/tmpl-rbd-storageclass.yaml create mode 100644 addons/rook/1.15.5/crds.yaml create mode 100644 addons/rook/1.15.5/host-preflight.yaml create mode 100644 addons/rook/1.15.5/install.sh create mode 100644 addons/rook/1.15.5/monitoring/ceph-cluster-dashboard.yaml create mode 100644 addons/rook/1.15.5/monitoring/kustomization.yaml create mode 100644 addons/rook/1.15.5/monitoring/rook-ceph-servicemonitor.yaml create mode 100644 addons/rook/1.15.5/operator/cluster-rbac.yaml create mode 100644 addons/rook/1.15.5/operator/clusterrole.yaml create mode 100644 addons/rook/1.15.5/operator/clusterrolebinding.yaml create mode 100644 addons/rook/1.15.5/operator/configmap-rook-config-override.yaml create mode 100644 addons/rook/1.15.5/operator/configmap.yaml create mode 100644 addons/rook/1.15.5/operator/deployment.yaml create mode 100644 addons/rook/1.15.5/operator/kustomization.yaml create mode 100644 addons/rook/1.15.5/operator/namespace.yaml create mode 100644 addons/rook/1.15.5/operator/patches/deployment-privileged.yaml create mode 100644 addons/rook/1.15.5/operator/patches/deployment-tolerations.yaml create mode 100644 addons/rook/1.15.5/operator/role.yaml create mode 100644 addons/rook/1.15.5/operator/rolebinding.yaml create mode 100644 addons/rook/1.15.5/operator/securityContextConstraints.yaml create mode 100644 addons/rook/1.15.5/operator/serviceaccount.yaml create mode 100644 addons/rook/1.15.5/operator/toolbox.yaml diff --git a/addons/rook/1.15.5/Manifest b/addons/rook/1.15.5/Manifest new file mode 100644 index 0000000000..20a7e044e7 --- /dev/null +++ b/addons/rook/1.15.5/Manifest @@ -0,0 +1,13 @@ +yum lvm2 +yumol lvm2 +apt lvm2 + +image rook-ceph docker.io/rook/ceph:v1.15.5 +image ceph-ceph quay.io/ceph/ceph:v18.2.4 +image cephcsi-cephcsi quay.io/cephcsi/cephcsi:v3.12.2 +image sig-storage-csi-node-driver-registrar registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1 +image sig-storage-csi-resizer registry.k8s.io/sig-storage/csi-resizer:v1.11.1 +image sig-storage-csi-provisioner registry.k8s.io/sig-storage/csi-provisioner:v5.0.1 +image sig-storage-csi-snapshotter registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1 +image sig-storage-csi-attacher registry.k8s.io/sig-storage/csi-attacher:v4.6.1 +image csiaddons-k8s-sidecar quay.io/csiaddons/k8s-sidecar:v0.9.1 diff --git a/addons/rook/1.15.5/cluster/cephfs/cephfs-storageclass.yaml b/addons/rook/1.15.5/cluster/cephfs/cephfs-storageclass.yaml new file mode 100644 index 0000000000..8decbaeca4 --- /dev/null +++ b/addons/rook/1.15.5/cluster/cephfs/cephfs-storageclass.yaml @@ -0,0 +1,44 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-cephfs +provisioner: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name +parameters: + # clusterID is the namespace where the rook cluster is running + # If you change this namespace, also change the namespace below where the secret namespaces are defined + clusterID: rook-ceph # namespace:cluster + + # CephFS filesystem name into which the volume shall be created + fsName: rook-shared-fs + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: rook-shared-fs-replicated + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. 
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster + + # (optional) Set it to true to encrypt each volume with encryption keys + # from a key management system (KMS) + # encrypted: "true" + + # (optional) Use external key management system (KMS) for encryption key by + # specifying a unique ID matching a KMS ConfigMap. The ID is only used for + # correlation to configmap entry. + # encryptionKMSID: + + # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) + # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse + # or by setting the default mounter explicitly via --volumemounter command-line argument. + # mounter: kernel +reclaimPolicy: Delete +allowVolumeExpansion: true +mountOptions: + # uncomment the following line for debugging + #- debug diff --git a/addons/rook/1.15.5/cluster/cephfs/filesystem.yaml b/addons/rook/1.15.5/cluster/cephfs/filesystem.yaml new file mode 100644 index 0000000000..ccf683ca4f --- /dev/null +++ b/addons/rook/1.15.5/cluster/cephfs/filesystem.yaml @@ -0,0 +1,157 @@ +################################################################################################################# +# Create a filesystem with settings with replication enabled for a production environment. +# A minimum of 3 OSDs on different nodes are required in this example. +# If one mds daemon per node is too restrictive, see the podAntiAffinity below. +# kubectl create -f filesystem.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: rook-shared-fs + namespace: rook-ceph # namespace:cluster +spec: + # The metadata pool spec. Must use replication. + metadataPool: + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: + none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # The list of data pool specs. Can use replication or erasure coding. + dataPools: + - name: replicated + failureDomain: host + replicated: + size: 3 + # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
+ # Make sure you're *ABSOLUTELY CERTAIN* that is what you want + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: + none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # Whether to preserve filesystem after CephFilesystem CRD deletion + preserveFilesystemOnDelete: true + # The metadata service (mds) configuration + metadataServer: + # The number of active MDS instances + activeCount: 1 + # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. + # If false, standbys will be available, but will not have a warm cache. + activeStandby: true + # The affinity rules to apply to the mds deployment + placement: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - mds-node + # topologySpreadConstraints: + # tolerations: + # - key: mds-node + # operator: Exists + # podAffinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + ## Add this if you want to allow mds daemons for different filesystems to run on one + ## node. The value in "values" must match .metadata.name. + # - key: rook_file_system + # operator: In + # values: + # - rook-shared-fs + # topologyKey: kubernetes.io/hostname will place MDS across different hosts + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: */zone can be used to spread MDS across different AZ + # Use in k8s cluster if your cluster is v1.16 or lower + # Use in k8s cluster is v1.17 or upper + topologyKey: topology.kubernetes.io/zone + # A key/value list of annotations + # annotations: + # key: value + # A key/value list of labels + # labels: + # key: value + # resources: + # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory + # limits: + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + priorityClassName: system-cluster-critical + livenessProbe: + disabled: false + startupProbe: + disabled: false + # Filesystem mirroring settings + # mirroring: + # enabled: true + # # list of Kubernetes Secrets containing the peer token + # # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers + # # Add the secret name if it already exists else specify the empty list here. + # peers: + # secretNames: + # - secondary-cluster-peer + # # specify the schedule(s) on which snapshots should be taken + # # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules + # snapshotSchedules: + # - path: / + # interval: 24h # daily snapshots + # # The startTime should be mentioned in the format YYYY-MM-DDTHH:MM:SS + # # If startTime is not specified, then by default the start time is considered as midnight UTC. 
+ # # see usage here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#usage + # # startTime: 2022-07-15T11:55:00 + # # manage retention policies + # # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies + # snapshotRetention: + # - path: / + # duration: "h 24" +--- +# create default csi subvolume group +apiVersion: ceph.rook.io/v1 +kind: CephFilesystemSubVolumeGroup +metadata: + name: rook-shared-fs-csi # lets keep the svg crd name same as `filesystem name + csi` for the default csi svg + namespace: rook-ceph # namespace:cluster +spec: + # The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR. + name: csi + # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created + filesystemName: rook-shared-fs + # reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups + # only one out of (export, distributed, random) can be set at a time + # by default pinning is set with value: distributed=1 + # for disabling default values set (distributed=0) + pinning: + distributed: 1 # distributed=<0, 1> (disabled=0) + # export: # export=<0-256> (disabled=-1) + # random: # random=[0.0, 1.0](disabled=0.0) diff --git a/addons/rook/1.15.5/cluster/cephfs/patches/cephfs-storageclass.yaml b/addons/rook/1.15.5/cluster/cephfs/patches/cephfs-storageclass.yaml new file mode 100644 index 0000000000..845f4268bc --- /dev/null +++ b/addons/rook/1.15.5/cluster/cephfs/patches/cephfs-storageclass.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-cephfs +parameters: + pool: rook-shared-fs-data0 diff --git a/addons/rook/1.15.5/cluster/cephfs/patches/filesystem-singlenode.yaml b/addons/rook/1.15.5/cluster/cephfs/patches/filesystem-singlenode.yaml new file mode 100644 index 0000000000..42d749f46a --- /dev/null +++ b/addons/rook/1.15.5/cluster/cephfs/patches/filesystem-singlenode.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: rook-shared-fs + namespace: rook-ceph +spec: + metadataServer: + placement: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: ~ + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: */zone can be used to spread MDS across different AZ + # Use in k8s cluster if your cluster is v1.16 or lower + # Use in k8s cluster is v1.17 or upper + topologyKey: topology.kubernetes.io/zone + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: kubernetes.io/hostname will place MDS across different hosts + topologyKey: kubernetes.io/hostname diff --git a/addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem-Json6902.yaml b/addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem-Json6902.yaml new file mode 100644 index 0000000000..419aa06d26 --- /dev/null +++ b/addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem-Json6902.yaml @@ -0,0 +1,10 @@ +--- +- op: replace + path: /spec/dataPools/0/name + value: data0 +- op: replace + path: /spec/dataPools/0/replicated/size + value: ${CEPH_POOL_REPLICAS} +- op: replace + path: /spec/dataPools/0/replicated/requireSafeReplicaSize + value: false diff --git a/addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem.yaml 
b/addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem.yaml new file mode 100644 index 0000000000..003bc18df3 --- /dev/null +++ b/addons/rook/1.15.5/cluster/cephfs/patches/tmpl-filesystem.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: rook-shared-fs + namespace: rook-ceph +spec: + metadataPool: + replicated: + size: ${CEPH_POOL_REPLICAS} + requireSafeReplicaSize: false + metadataServer: + resources: + limits: + cpu: "500m" + memory: "1024Mi" + requests: + cpu: "500m" + memory: "1024Mi" diff --git a/addons/rook/1.15.5/cluster/cluster.yaml b/addons/rook/1.15.5/cluster/cluster.yaml new file mode 100644 index 0000000000..de194ff153 --- /dev/null +++ b/addons/rook/1.15.5/cluster/cluster.yaml @@ -0,0 +1,360 @@ +################################################################################################################# +# Define the settings for the rook-ceph cluster with common settings for a production cluster. +# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required +# in this example. See the documentation for more details on storage settings available. + +# For example, to create the cluster: +# kubectl create -f crds.yaml -f common.yaml -f operator.yaml +# kubectl create -f cluster.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph # namespace:cluster +spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v17 is Quincy, v18 is Reef. + # RECOMMENDATION: In production, use a specific version tag instead of the general v17 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: quay.io/ceph/ceph:v18.2.4 + # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported. + # Future versions such as `squid` (v19) would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster. + # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
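+  # For example, with the default path set just below, that cleanup amounts to running
+  # `rm -rf /var/lib/rook` on every host before the new cluster is created.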
+ dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades + skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. + # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one + # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would + # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. + # The default wait timeout is 10 minutes. + waitTimeoutForHealthyOSDInMinutes: 10 + # Whether or not requires PGs are clean before an OSD upgrade. If set to `true` OSD upgrade process won't start until PGs are healthy. + # This configuration will be ignored if `skipUpgradeChecks` is `true`. + # Default is false. + upgradeOSDRequiresHealthyPGs: false + mon: + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + mgr: + # When higher availability of the mgr is needed, increase the count to 2. + # In that case, one mgr will be active and one in standby. When Ceph updates which + # mgr is active, Rook will update the mgr services to match the active mgr. + count: 2 + allowMultiplePerNode: false + modules: + # List of modules to optionally enable or disable. + # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR. + - name: rook + enabled: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + ssl: true + # The url of the Prometheus instance + # prometheusEndpoint: ://: + # Whether SSL should be verified if the Prometheus server is using https + # prometheusEndpointSSLVerify: false + # enable prometheus alerting for cluster + monitoring: + # requires Prometheus to be pre-installed + enabled: false + # Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled. + # If true, the prometheus mgr module and Ceph exporter are both disabled. Default is false. + metricsDisabled: false + # Ceph exporter metrics config. + exporter: + # Specifies which performance counters are exported. 
+ # Corresponds to --prio-limit Ceph exporter flag + # 0 - all counters are exported + perfCountersPrioLimit: 5 + # Time to wait before sending requests again to exporter server (seconds) + # Corresponds to --stats-period Ceph exporter flag + statsPeriodSeconds: 5 + network: + connections: + # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network. + # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted. + # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check. + # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only, + # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class. + # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes. + encryption: + enabled: false + # Whether to compress the data in transit across the wire. The default is false. + # See the kernel requirements above for encryption. + compression: + enabled: false + # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled + # and clients will be required to connect to the Ceph cluster with the v2 port (3300). + # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer). + requireMsgr2: false + # enable host networking + #provider: host + # enable the Multus network provider + #provider: multus + #selectors: + # The selector keys are required to be `public` and `cluster`. + # Based on the configuration, the operator will do the following: + # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # + # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # + # public: public-conf --> NetworkAttachmentDefinition object name in Multus + # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + #ipFamily: "IPv6" + # Ceph daemons to listen on both IPv4 and Ipv6 networks + #dualStack: false + # Enable multiClusterService to export the mon and OSD services to peer cluster. + # This is useful to support RBD mirroring between two clusters having overlapping CIDRs. + # Ensure that peer clusters are connected using an MCS API compatible application, like Globalnet Submariner. + #multiClusterService: + # enabled: false + + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # Uncomment daysToRetain to prune ceph crash entries older than the + # specified number of days. + #daysToRetain: 30 + # enable log collector, daemons will log on files and rotate + logCollector: + enabled: true + periodicity: daily # one of: hourly, daily, weekly, monthly + maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. 
+ cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. + confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both case, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicate where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time then the zero source + dataSource: zero + # iteration overwrite N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. + # placement: + # all: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - storage-node + # podAffinity: + # podAntiAffinity: + # topologySpreadConstraints: + # tolerations: + # - key: storage-node + # operator: Exists + # The above placement information can also be specified for mon, osd, and mgr components + # mon: + # Monitor deployments may contain an anti-affinity rule for avoiding monitor + # collocation on the same node. This is a required rule when host network is used + # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a + # preferred rule with weight: 50. + # osd: + # prepareosd: + # mgr: + # cleanup: + annotations: + # all: + # mon: + # mgr: + # osd: + # exporter: + # crashcollector: + # cleanup: + # prepareosd: + # cmdreporter is for jobs to detect ceph and csi versions, and check network status + # cmdreporter: + # clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets. + # And clusterMetadata annotations will not be merged with `all` annotations. + # clusterMetadata: + # kubed.appscode.com/sync: "true" + # If no mgr annotations are set, prometheus scrape annotations will be set by default. + # mgr: + labels: + # all: + # mon: + # osd: + # cleanup: + # mgr: + # prepareosd: + # These labels are applied to ceph-exporter servicemonitor only + # exporter: + # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. 
+ # These labels can be passed as LabelSelector to Prometheus + # monitoring: + # crashcollector: + resources: + #The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory + # mgr: + # limits: + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + # The above example requests/limits can also be added to the other components + # mon: + # osd: + # For OSD it also is a possible to specify requests/limits based on device class + # osd-hdd: + # osd-ssd: + # osd-nvme: + # prepareosd: + # mgr-sidecar: + # crashcollector: + # logcollector: + # cleanup: + # exporter: + # cmd-reporter: + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: false + priorityClassNames: + #all: rook-ceph-default-priority-class + mon: system-node-critical + osd: system-node-critical + mgr: system-cluster-critical + #crashcollector: rook-ceph-crashcollector-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: true + #deviceFilter: + config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" + # deviceClass: "myclass" # specify a device class for OSDs in the cluster + allowDeviceClassUpdate: false # whether to allow changing the device class of an OSD after it is created + allowOsdCrushWeightUpdate: false # whether to allow resizing the OSD crush weight after osd pvc is increased + # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named + # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. + # nodes: + # - name: "172.17.4.201" + # devices: # specific devices to use for storage can be specified for each node + # - name: "sdb" + # - name: "nvme01" # multiple osds can be created on high performance devices + # config: + # osdsPerDevice: "5" + # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths + # config: # configuration can be specified at the node level which overrides the cluster level config + # - name: "172.17.4.301" + # deviceFilter: "^sd." + # Whether to always schedule OSD pods on nodes declared explicitly in the "nodes" section, even if they are + # temporarily not schedulable. If set to true, consider adding placement tolerations for unschedulable nodes. + scheduleAlways: false + # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd + onlyApplyOSDPlacement: false + # Time for which an OSD pod will sleep before restarting, if it stopped due to flapping + # flappingRestartIntervalHours: 24 + # The ratio at which Ceph should block IO if the OSDs are too full. The default is 0.95. + # fullRatio: 0.95 + # The ratio at which Ceph should stop backfilling data if the OSDs are too full. The default is 0.90. + # backfillFullRatio: 0.90 + # The ratio at which Ceph should raise a health warning if the OSDs are almost full. The default is 0.85. 
+ # nearFullRatio: 0.85 + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. + managePodBudgets: true + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. + osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + + # csi defines CSI Driver settings applied per cluster. + csi: + readAffinity: + # Enable read affinity to enable clients to optimize reads from an OSD in the same topology. + # Enabling the read affinity may cause the OSDs to consume some extra memory. + # For more details see this doc: + # https://rook.io/docs/rook/latest/Storage-Configuration/Ceph-CSI/ceph-csi-drivers/#enable-read-affinity-for-rbd-volumes + enabled: false + + # cephfs driver specific settings. + cephfs: + # Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options. + # kernelMountOptions: "" + # Set CephFS Fuse mount options to use https://docs.ceph.com/en/quincy/man/8/ceph-fuse/#options. + # fuseMountOptions: "" + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons. + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false + # Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons. 
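+    # As an illustrative, commented-out sketch (assuming the usual shape of `disabled` plus a
+    # standard `probe` override), timing for a single daemon could be tuned like this:
+    #   mon:
+    #     disabled: false
+    #     probe:
+    #       initialDelaySeconds: 30
+    #       timeoutSeconds: 10
+    #       failureThreshold: 5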
+ startupProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false diff --git a/addons/rook/1.15.5/cluster/kustomization.yaml b/addons/rook/1.15.5/cluster/kustomization.yaml new file mode 100644 index 0000000000..0528eb71d2 --- /dev/null +++ b/addons/rook/1.15.5/cluster/kustomization.yaml @@ -0,0 +1,15 @@ +resources: +- object.yaml +- cluster.yaml + +patchesStrategicMerge: +- patches/ceph-cluster-tolerate.yaml + +patchesJson6902: +- target: + group: ceph.rook.io + version: v1 + kind: CephObjectStore + name: my-store + namespace: rook-ceph + path: patches/object-Json6902.yaml diff --git a/addons/rook/1.15.5/cluster/object-user.yaml b/addons/rook/1.15.5/cluster/object-user.yaml new file mode 100644 index 0000000000..69a9ff2b05 --- /dev/null +++ b/addons/rook/1.15.5/cluster/object-user.yaml @@ -0,0 +1,11 @@ +# https://github.com/rook/rook/blob/v1.4.9/cluster/examples/kubernetes/ceph/object-user.yaml + +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectStoreUser +metadata: + name: kurl + namespace: rook-ceph +spec: + store: rook-ceph-store + displayName: "kurl" diff --git a/addons/rook/1.15.5/cluster/object.yaml b/addons/rook/1.15.5/cluster/object.yaml new file mode 100644 index 0000000000..bee795abb9 --- /dev/null +++ b/addons/rook/1.15.5/cluster/object.yaml @@ -0,0 +1,161 @@ +################################################################################################################# +# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with +# OSDs are required in this example. +# kubectl create -f object.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephObjectStore +metadata: + name: my-store + namespace: rook-ceph # namespace:cluster +spec: + # The pool spec used to create the metadata pools. Must use replication. + metadataPool: + failureDomain: host + replicated: + size: 3 + # Disallow setting pool with replica 1, this could lead to data loss without recovery. + # Make sure you're *ABSOLUTELY CERTAIN* that is what you want + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # The pool spec used to create the data pool. Can use replication or erasure coding. + dataPool: + failureDomain: host + replicated: + size: 3 + # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
+ # Make sure you're *ABSOLUTELY CERTAIN* that is what you want + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # Whether to preserve metadata and data pools on object store deletion + preservePoolsOnDelete: false + # The gateway service configuration + gateway: + # A reference to the secret in the rook namespace where the ssl certificate is stored + # sslCertificateRef: + # A reference to the secret in the rook namespace where the ca bundle is stored + # caBundleRef: + # The port that RGW pods will listen on (http) + port: 80 + # The port that RGW pods will listen on (https). An ssl certificate is required. + # securePort: 443 + # The number of pods in the rgw deployment + instances: 1 + # The affinity rules to apply to the rgw deployment. + placement: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-rgw + # topologyKey: */zone can be used to spread RGW across different AZ + # Use in k8s cluster if your cluster is v1.16 or lower + # Use in k8s cluster is v1.17 or upper + topologyKey: kubernetes.io/hostname + # A key/value list of annotations + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - rgw-node + # topologySpreadConstraints: + # tolerations: + # - key: rgw-node + # operator: Exists + # podAffinity: + # podAntiAffinity: + # A key/value list of annotations + annotations: + # key: value + # A key/value list of labels + labels: + # key: value + resources: + # The requests and limits set here, allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory + # limits: + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + priorityClassName: system-cluster-critical + # # Add arbitrary volume mounts to RGW pods + # additionalVolumeMounts: + # # 'rgw-ldap' secret keys will show up as files in RGW container in dir /var/rgw/ldap + # - subPath: ldap + # volumeSource: + # secret: + # secretName: rgw-ldap + # defaultMode: 0600 + #zone: + #name: zone-a + # service endpoint healthcheck + healthCheck: + # Configure the pod probes for the rgw daemon + startupProbe: + disabled: false + readinessProbe: + disabled: false + # hosting: + # The list of subdomain names for virtual hosting of buckets. + # dnsNames: + # - "mystore.example.com" + + # If a CephObjectStoreUser is created in a namespace other than the Rook cluster namespace, + # the namespace must be added to the list of allowed namespaces, or specify "*" to allow all namespaces. 
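+  #   For illustration (names here are hypothetical, and spec.clusterNamespace is assumed to point
+  #   back at the cluster namespace as in recent Rook releases), such a user could look like:
+  #     apiVersion: ceph.rook.io/v1
+  #     kind: CephObjectStoreUser
+  #     metadata:
+  #       name: app-user
+  #       namespace: other-namespace
+  #     spec:
+  #       store: my-store
+  #       clusterNamespace: rook-ceph # namespace:cluster
+  #   provided "other-namespace" is listed below.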
+ # allowUsersInNamespaces: + # - other-namespace + # security oriented settings + # security: + # To enable the Server Side Encryption configuration properly don't forget to uncomment the Secret at the end of the file + # kms: # configures RGW with AWS-SSE:KMS + # # name of the config map containing all the kms connection details + # connectionDetails: + # KMS_PROVIDER: "vault" + # VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e,g: http://vault.my-domain.com:8200 + # VAULT_BACKEND_PATH: "rook" + # VAULT_SECRET_ENGINE: "kv" + # VAULT_BACKEND: v2 + # # name of the secret containing the kms authentication token + # tokenSecretName: rook-vault-token + # s3: # configures RGW with AWS-SSE:S3 + # # name of the config map containing all the kms connection details + # connectionDetails: + # KMS_PROVIDER: "vault" + # VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e,g: http://vault.my-domain.com:8200 + # VAULT_BACKEND_PATH: "rook" + # VAULT_SECRET_ENGINE: "transit" + # # name of the secret containing the kms authentication token + # tokenSecretName: rook-vault-token +# # UNCOMMENT THIS TO ENABLE A KMS CONNECTION +# # Also, do not forget to replace both: +# # * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use +# # * VAULT_ADDR_CHANGE_ME: with the Vault address +# --- +# apiVersion: v1 +# kind: Secret +# metadata: +# name: rook-vault-token +# namespace: rook-ceph # namespace:cluster +# data: +# token: ROOK_TOKEN_CHANGE_ME diff --git a/addons/rook/1.15.5/cluster/patches/ceph-cluster-tolerate.yaml b/addons/rook/1.15.5/cluster/patches/ceph-cluster-tolerate.yaml new file mode 100644 index 0000000000..188ce973d6 --- /dev/null +++ b/addons/rook/1.15.5/cluster/patches/ceph-cluster-tolerate.yaml @@ -0,0 +1,11 @@ +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph +spec: + placement: + all: + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists diff --git a/addons/rook/1.15.5/cluster/patches/cluster-nodes.tmpl.yaml b/addons/rook/1.15.5/cluster/patches/cluster-nodes.tmpl.yaml new file mode 100644 index 0000000000..4808f366b3 --- /dev/null +++ b/addons/rook/1.15.5/cluster/patches/cluster-nodes.tmpl.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph +spec: + storage: + useAllNodes: false + useAllDevices: false + nodes: +$rook_nodes diff --git a/addons/rook/1.15.5/cluster/patches/object-Json6902.yaml b/addons/rook/1.15.5/cluster/patches/object-Json6902.yaml new file mode 100644 index 0000000000..5fb92c7dc0 --- /dev/null +++ b/addons/rook/1.15.5/cluster/patches/object-Json6902.yaml @@ -0,0 +1,4 @@ +--- +- op: replace + path: /metadata/name + value: rook-ceph-store diff --git a/addons/rook/1.15.5/cluster/patches/tmpl-cluster.yaml b/addons/rook/1.15.5/cluster/patches/tmpl-cluster.yaml new file mode 100644 index 0000000000..1c5a12362b --- /dev/null +++ b/addons/rook/1.15.5/cluster/patches/tmpl-cluster.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph +spec: + mon: + count: 1 + mgr: + count: 1 + resources: + mgr: + requests: + cpu: "200m" + memory: "512Mi" + osd: + requests: + cpu: "500m" + memory: "1024Mi" + mon: + requests: + cpu: "300m" + memory: "1024Mi" + storage: + deviceFilter: "${ROOK_BLOCK_DEVICE_FILTER}" diff --git a/addons/rook/1.15.5/cluster/patches/tmpl-object.yaml b/addons/rook/1.15.5/cluster/patches/tmpl-object.yaml new file mode 100644 index 0000000000..f8769875aa --- /dev/null +++ 
b/addons/rook/1.15.5/cluster/patches/tmpl-object.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectStore +metadata: + name: my-store + namespace: rook-ceph +spec: + metadataPool: + replicated: + size: ${CEPH_POOL_REPLICAS} + requireSafeReplicaSize: false + dataPool: + replicated: + size: ${CEPH_POOL_REPLICAS} + requireSafeReplicaSize: false + gateway: + instances: ${CEPH_POOL_REPLICAS} + resources: + requests: + cpu: "300m" + memory: "1024Mi" diff --git a/addons/rook/1.15.5/cluster/patches/tmpl-rbd-storageclass.yaml b/addons/rook/1.15.5/cluster/patches/tmpl-rbd-storageclass.yaml new file mode 100644 index 0000000000..bf83b92d88 --- /dev/null +++ b/addons/rook/1.15.5/cluster/patches/tmpl-rbd-storageclass.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: replicapool + namespace: rook-ceph +spec: + replicated: + size: ${CEPH_POOL_REPLICAS} + requireSafeReplicaSize: false +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "${STORAGE_CLASS:-default}" + annotations: + storageclass.kubernetes.io/is-default-class: "true" diff --git a/addons/rook/1.15.5/cluster/tmpl-rbd-storageclass.yaml b/addons/rook/1.15.5/cluster/tmpl-rbd-storageclass.yaml new file mode 100644 index 0000000000..89cf704b45 --- /dev/null +++ b/addons/rook/1.15.5/cluster/tmpl-rbd-storageclass.yaml @@ -0,0 +1,92 @@ +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: replicapool + namespace: rook-ceph # namespace:cluster +spec: + failureDomain: host + replicated: + size: 3 + # Disallow setting pool with replica 1, this could lead to data loss without recovery. + # Make sure you're *ABSOLUTELY CERTAIN* that is what you want + requireSafeReplicaSize: true + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #targetSizeRatio: .5 +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: "${STORAGE_CLASS:-default}" +provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name +parameters: + # clusterID is the namespace where the rook cluster is running + # If you change this namespace, also change the namespace below where the secret namespaces are defined + clusterID: rook-ceph # namespace:cluster + + # If you want to use erasure coded pool with RBD, you need to create + # two pools. one erasure coded and one replicated. + # You need to specify the replicated pool here in the 'pool' parameter, it is + # used for the metadata of the images. + # The erasure coded pool must be set as the 'dataPool' parameter below. + #dataPool: ec-data-pool + pool: replicapool + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. 
+ # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # unmapOptions: force + + # (optional) Set it to true to encrypt each volume with encryption keys + # from a key management system (KMS) + # encrypted: "true" + + # (optional) Use external key management system (KMS) for encryption key by + # specifying a unique ID matching a KMS ConfigMap. The ID is only used for + # correlation to configmap entry. + # encryptionKMSID: + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features + # Available for imageFormat: "2". Older releases of CSI RBD + # support only the 'layering' feature. The Linux kernel (KRBD) supports the + # full complement of features as of 5.4 + # 'layering' alone corresponds to Ceph's bitfield value of "2"; + # 'layering' + 'fast-diff' + 'object-map' + 'deep-flatten' + 'exclusive-lock' together + # correspond to Ceph's OR'd bitfield value of "63". Here we use + # a symbolic, comma-separated format: + # For 5.4 or later kernels: + #imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock + # For 5.3 or earlier kernels: + imageFeatures: layering + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will default to 'ext4'. Note that 'xfs' is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 +# uncomment the following to use rbd-nbd as mounter on supported nodes +# **IMPORTANT**: From CephCSI v3.4.0 onwards, volume healer functionality reattaches +# the PVC to the application pod if the nodeplugin pod restarts. +# It is still in Alpha support. Therefore, this option is not recommended for production use.
+#mounter: rbd-nbd +allowVolumeExpansion: true +reclaimPolicy: Delete diff --git a/addons/rook/1.15.5/crds.yaml b/addons/rook/1.15.5/crds.yaml new file mode 100644 index 0000000000..ea031aacdb --- /dev/null +++ b/addons/rook/1.15.5/crds.yaml @@ -0,0 +1,14212 @@ +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: objectbuckets.objectbucket.io + annotations: + helm.sh/resource-policy: keep +spec: + group: objectbucket.io + names: + kind: ObjectBucket + listKind: ObjectBucketList + plural: objectbuckets + singular: objectbucket + shortNames: + - ob + - obs + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + storageClassName: + type: string + endpoint: + type: object + nullable: true + properties: + bucketHost: + type: string + bucketPort: + type: integer + format: int32 + bucketName: + type: string + region: + type: string + subRegion: + type: string + additionalConfig: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + authentication: + type: object + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + additionalState: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + reclaimPolicy: + type: string + claimRef: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: objectbucketclaims.objectbucket.io + annotations: + helm.sh/resource-policy: keep +spec: + group: objectbucket.io + names: + kind: ObjectBucketClaim + listKind: ObjectBucketClaimList + plural: objectbucketclaims + singular: objectbucketclaim + shortNames: + - obc + - obcs + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + storageClassName: + type: string + bucketName: + type: string + generateBucketName: + type: string + additionalConfig: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + objectBucketName: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephrbdmirrors.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephRBDMirror + listKind: CephRBDMirrorList + plural: cephrbdmirrors + singular: cephrbdmirror + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephRBDMirror represents a Ceph RBD Mirror + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RBDMirroringSpec represents the specification of an RBD mirror daemon + properties: + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + count: + description: Count represents the number of rbd mirror instance to run + minimum: 1 + type: integer + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + placement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + 
x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + description: PriorityClassName sets priority class on the rbd mirror pods + type: string + resources: + description: The resource requirements for the rbd mirror pods + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - count + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephobjectzones.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectZone + listKind: CephObjectZoneList + plural: cephobjectzones + singular: cephobjectzone + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephObjectZone represents a Ceph Object Store Gateway Zone + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
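+# --- Illustrative note (editorial aside, not part of the rendered chart output)
+# A minimal CephRBDMirror built from the schema above. It assumes a peer
+# bootstrap Secret named "rbd-mirror-peer" already exists in the rook-ceph
+# namespace; both names are hypothetical placeholders:
+#
+#   apiVersion: ceph.rook.io/v1
+#   kind: CephRBDMirror
+#   metadata:
+#     name: rbd-mirror
+#     namespace: rook-ceph
+#   spec:
+#     count: 1
+#     peers:
+#       secretNames:
+#         - rbd-mirror-peer
+# ------------------------------------------------------------------------------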
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectZoneSpec represent the spec of an ObjectZone + properties: + customEndpoints: + description: |- + If this zone cannot be accessed from other peer Ceph clusters via the ClusterIP Service + endpoint created by Rook, you must set this to the externally reachable endpoint(s). You may + include the port in the definition. For example: "https://my-object-store.my-domain.net:443". + In many cases, you should set this to the endpoint of the ingress resource that makes the + CephObjectStore associated with this CephObjectStoreZone reachable to peer clusters. + The list can have one or more endpoints pointing to different RGW servers in the zone. + + If a CephObjectStore endpoint is omitted from this list, that object store's gateways will + not receive multisite replication data + (see CephObjectStore.spec.gateway.disableMultisiteSyncTraffic). + items: + type: string + nullable: true + type: array + dataPool: + description: The data pool settings + nullable: true + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. + type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
+ minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval 
is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + metadataPool: + description: The metadata pool settings + nullable: true + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. + type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + preservePoolsOnDelete: + default: true + description: Preserve pools on object zone deletion + type: boolean + sharedPools: + description: The pool information when configuring RADOS namespaces in existing pools. + nullable: true + properties: + dataPoolName: + description: The data pool used for creating RADOS namespaces in the object store + type: string + x-kubernetes-validations: + - message: object store shared data pool is immutable + rule: self == oldSelf + metadataPoolName: + description: The metadata pool used for creating RADOS namespaces in the object store + type: string + x-kubernetes-validations: + - message: object store shared metadata pool is immutable + rule: self == oldSelf + poolPlacements: + description: |- + PoolPlacements control which Pools are associated with a particular RGW bucket. 
+ Once PoolPlacements are defined, RGW client will be able to associate pool + with ObjectStore bucket by providing "" during s3 bucket creation + or "X-Storage-Policy" header during swift container creation. + See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets + PoolPlacement with name: "default" will be used as a default pool if no option + is provided during bucket creation. + If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools. + If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults. + items: + properties: + dataNonECPoolName: + description: |- + The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads). + If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName. + type: string + dataPoolName: + description: The data pool used to store ObjectStore objects data. + minLength: 1 + type: string + default: + description: |- + Sets given placement as default. Only one placement in the list can be marked as default. + Default is false. + type: boolean + metadataPoolName: + description: The metadata pool used to store ObjectStore bucket index. + minLength: 1 + type: string + name: + description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default. + minLength: 1 + pattern: ^[a-zA-Z0-9._/-]+$ + type: string + storageClasses: + description: |- + StorageClasses can be selected by user to override dataPoolName during object creation. + Each placement has default STANDARD StorageClass pointing to dataPoolName. + This list allows defining additional StorageClasses on top of default STANDARD storage class. + items: + properties: + dataPoolName: + description: DataPoolName is the data pool used to store ObjectStore objects data. + minLength: 1 + type: string + name: + description: |- + Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses, + however most clients/libs insist on AWS names so it is recommended to use + one of the valid x-amz-storage-class values for better compatibility: + REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE + See AWS docs: https://aws.amazon.com/de/s3/storage-classes/ + minLength: 1 + pattern: ^[a-zA-Z0-9._/-]+$ + type: string + required: + - dataPoolName + - name + type: object + type: array + required: + - dataPoolName + - metadataPoolName + - name + type: object + type: array + preserveRadosNamespaceDataOnDelete: + description: Whether the RADOS namespaces should be preserved on deletion of the object store + type: boolean + type: object + zoneGroup: + description: The display name for the ceph users + type: string + required: + - zoneGroup + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. 
+ properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephobjectzonegroups.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectZoneGroup + listKind: CephObjectZoneGroupList + plural: cephobjectzonegroups + singular: cephobjectzonegroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup + properties: + realm: + description: The display name for the ceph users + type: string + required: + - realm + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. 
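+# --- Illustrative note (editorial aside, not part of the rendered chart output)
+# A CephObjectZoneGroup and CephObjectZone pair built from the schemas above.
+# The realm, zone group, zone names, and pool settings are hypothetical
+# placeholders:
+#
+#   apiVersion: ceph.rook.io/v1
+#   kind: CephObjectZoneGroup
+#   metadata:
+#     name: zonegroup-a
+#     namespace: rook-ceph
+#   spec:
+#     realm: realm-a
+#   ---
+#   apiVersion: ceph.rook.io/v1
+#   kind: CephObjectZone
+#   metadata:
+#     name: zone-a
+#     namespace: rook-ceph
+#   spec:
+#     zoneGroup: zonegroup-a
+#     metadataPool:
+#       replicated:
+#         size: 3
+#     dataPool:
+#       erasureCoded:
+#         dataChunks: 2
+#         codingChunks: 1
+#     preservePoolsOnDelete: true
+# ------------------------------------------------------------------------------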
+ format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephobjectstoreusers.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStoreUser + listKind: CephObjectStoreUserList + plural: cephobjectstoreusers + shortNames: + - rcou + - objectuser + singular: cephobjectstoreuser + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephObjectStoreUser represents a Ceph Object Store Gateway User + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectStoreUserSpec represent the spec of an Objectstoreuser + properties: + capabilities: + description: Additional admin-level capabilities for the Ceph object store user + nullable: true + properties: + amz-cache: + description: Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/quincy/radosgw/rgw-cache/#cache-api + enum: + - '*' + - read + - write + - read, write + type: string + bilog: + description: Add capabilities for user to change bucket index logging. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + bucket: + description: Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + buckets: + description: Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + datalog: + description: Add capabilities for user to change data logging. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + info: + description: Admin capabilities to read/write information about the user. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + mdlog: + description: Add capabilities for user to change metadata logging. 
Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + metadata: + description: Admin capabilities to read/write Ceph object store metadata. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + oidc-provider: + description: Add capabilities for user to change oidc provider. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + ratelimit: + description: Add capabilities for user to set rate limiter for user and bucket. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + roles: + description: Admin capabilities to read/write roles for user. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + usage: + description: Admin capabilities to read/write Ceph object store usage. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + user: + description: Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + user-policy: + description: Add capabilities for user to change user policies. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + users: + description: Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + zone: + description: Admin capabilities to read/write Ceph object store zones. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + enum: + - '*' + - read + - write + - read, write + type: string + type: object + clusterNamespace: + description: The namespace where the parent CephCluster and CephObjectStore are found + type: string + displayName: + description: The display name for the ceph users + type: string + quotas: + description: ObjectUserQuotaSpec can be used to set quotas for the object store user to limit their usage. See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/?#quota-management) for more + nullable: true + properties: + maxBuckets: + description: Maximum bucket limit for the ceph user + nullable: true + type: integer + maxObjects: + description: Maximum number of objects across all the user's buckets + format: int64 + nullable: true + type: integer + maxSize: + anyOf: + - type: integer + - type: string + description: |- + Maximum size limit of all objects across all the user's buckets + See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity for more info. 
+ nullable: true + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + store: + description: The store the user will be created in + type: string + type: object + status: + description: ObjectStoreUserStatus represents the status Ceph Object Store Gateway User + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephobjectstores.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStore + listKind: CephObjectStoreList + plural: cephobjectstores + singular: cephobjectstore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.info.endpoint + name: Endpoint + type: string + - jsonPath: .status.info.secureEndpoint + name: SecureEndpoint + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephObjectStore represents a Ceph Object Store Gateway + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectStoreSpec represent the spec of a pool + properties: + allowUsersInNamespaces: + description: |- + The list of allowed namespaces in addition to the object store namespace + where ceph object store users may be created. Specify "*" to allow all + namespaces, otherwise list individual namespaces that are to be allowed. + This is useful for applications that need object store credentials + to be created in their own namespace, where neither OBCs nor COSI + is being used to create buckets. The default is empty. + items: + type: string + type: array + auth: + description: The authentication configuration + properties: + keystone: + description: The spec for Keystone + nullable: true + properties: + acceptedRoles: + description: The roles requires to serve requests. + items: + type: string + type: array + implicitTenants: + description: Create new users in their own tenants of the same name. Possible values are true, false, swift and s3. The latter have the effect of splitting the identity space such that only the indicated protocol will use implicit tenants. 
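+# --- Illustrative note (editorial aside, not part of the rendered chart output)
+# A CephObjectStoreUser built from the schema above. The user name, store name,
+# and quota values are hypothetical placeholders:
+#
+#   apiVersion: ceph.rook.io/v1
+#   kind: CephObjectStoreUser
+#   metadata:
+#     name: example-user
+#     namespace: rook-ceph
+#   spec:
+#     store: example-store
+#     displayName: Example user
+#     quotas:
+#       maxBuckets: 100
+#       maxSize: 10Gi
+#     capabilities:
+#       bucket: read
+# ------------------------------------------------------------------------------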
+ type: string + revocationInterval: + description: The number of seconds between token revocation checks. + nullable: true + type: integer + serviceUserSecretName: + description: The name of the secret containing the credentials for the service user account used by RGW. It has to be in the same namespace as the object store resource. + type: string + tokenCacheSize: + description: The maximum number of entries in each Keystone token cache. + nullable: true + type: integer + url: + description: The URL for the Keystone server. + type: string + required: + - acceptedRoles + - serviceUserSecretName + - url + type: object + type: object + dataPool: + description: The data pool settings + nullable: true + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. + type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
+ minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval 
is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + gateway: + description: The rgw pod info + nullable: true + properties: + additionalVolumeMounts: + description: |- + AdditionalVolumeMounts allows additional volumes to be mounted to the RGW pod. + The root directory for each additional volume mount is `/var/rgw`. + Example: for an additional mount at subPath `ldap`, mounted from a secret that has key + `bindpass.secret`, the file would reside at `/var/rgw/ldap/bindpass.secret`. + items: + description: |- + AdditionalVolumeMount represents the source from where additional files in pod containers + should come from and what subdirectory they are made available in. + properties: + subPath: + description: |- + SubPath defines the sub-path (subdirectory) of the directory root where the volumeSource will + be mounted. All files/keys in the volume source's volume will be mounted to the subdirectory. + This is not the same as the Kubernetes `subPath` volume mount option. + Each subPath definition must be unique and must not contain ':'. + minLength: 1 + pattern: ^[^:]+$ + type: string + volumeSource: + properties: + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + 
resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + type: object + required: + - subPath + - volumeSource + type: object + type: array + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + caBundleRef: + description: The name of the secret that stores custom ca-bundle with root and intermediate certificates. + nullable: true + type: string + dashboardEnabled: + description: Whether rgw dashboard is enabled for the rgw daemon. If not set, the rgw dashboard will be enabled. + nullable: true + type: boolean + x-kubernetes-preserve-unknown-fields: true + disableMultisiteSyncTraffic: + description: |- + DisableMultisiteSyncTraffic, when true, prevents this object store's gateways from + transmitting multisite replication data. Note that this value does not affect whether + gateways receive multisite replication traffic: see ObjectZone.spec.customEndpoints for that. + If false or unset, this object store's gateways will be able to transmit multisite + replication data. + type: boolean + externalRgwEndpoints: + description: |- + ExternalRgwEndpoints points to external RGW endpoint(s). Multiple endpoints can be given, but + for stability of ObjectBucketClaims, we highly recommend that users give only a single + external RGW endpoint that is a load balancer that sends requests to the multiple RGWs. + items: + description: |- + EndpointAddress is a tuple that describes a single IP address or host name. This is a subset of + Kubernetes's v1.EndpointAddress. + properties: + hostname: + description: The DNS-addressable Hostname of this endpoint. This field will be preferred over IP if both are given. + type: string + ip: + description: The IP of this endpoint. As a legacy behavior, this supports being given a DNS-addressable hostname as well. + type: string + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + hostNetwork: + description: Whether host networking is enabled for the rgw daemon. If not set, the network settings from the cluster CR will be applied. 
+ nullable: true + type: boolean + x-kubernetes-preserve-unknown-fields: true + instances: + description: The number of pods in the rgw replicaset. + format: int32 + nullable: true + type: integer + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + placement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: 
array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: 
+ key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + port: + description: The port the rgw service will be listening on (http) + format: int32 + type: integer + priorityClassName: + description: PriorityClassName sets priority classes on the rgw pods + type: string + resources: + description: The resource requirements for the rgw pods + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + securePort: + description: The port the rgw service will be listening on (https) + format: int32 + maximum: 65535 + minimum: 0 + nullable: true + type: integer + service: + description: The configuration related to add/set on each rgw service. + nullable: true + properties: + annotations: + additionalProperties: + type: string + description: |- + The annotations-related configuration to add/set on each rgw service. + nullable + optional + type: object + type: object + sslCertificateRef: + description: The name of the secret that stores the ssl certificate for secure rgw connections + nullable: true + type: string + type: object + healthCheck: + description: The RGW health probes + nullable: true + properties: + readinessProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
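# Example (illustrative only, not part of the generated crds.yaml): a minimal sketch
# of the rgw gateway settings defined by the CephObjectStore schema above. The store
# name "my-store", namespace "rook-ceph", and TLS secret "rgw-tls" are hypothetical
# placeholders.
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
  namespace: rook-ceph
spec:
  gateway:
    instances: 2                # number of pods in the rgw replicaset
    port: 80                    # http listening port
    securePort: 443             # https listening port; requires sslCertificateRef
    sslCertificateRef: rgw-tls  # secret holding the rgw TLS certificate
    resources:
      requests:
        cpu: 500m
        memory: 1Gi
      limits:
        memory: 2Gi
    placement:
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule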
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + startupProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. 
+ Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + type: object + hosting: + description: |- + Hosting settings for the object store. + A common use case for hosting configuration is to inform Rook of endpoints that support DNS + wildcards, which in turn allows virtual host-style bucket addressing. 
+ nullable: true + properties: + advertiseEndpoint: + description: |- + AdvertiseEndpoint is the default endpoint Rook will return for resources dependent on this + object store. This endpoint will be returned to CephObjectStoreUsers, Object Bucket Claims, + and COSI Buckets/Accesses. + By default, Rook returns the endpoint for the object store's Kubernetes service using HTTPS + with `gateway.securePort` if it is defined (otherwise, HTTP with `gateway.port`). + nullable: true + properties: + dnsName: + description: |- + DnsName is the DNS name (in RFC-1123 format) of the endpoint. + If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the + wildcard itself in the list of hostnames. + E.g., use "mystore.example.com" instead of "*.mystore.example.com". + minLength: 1 + type: string + port: + description: Port is the port on which S3 connections can be made for this endpoint. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + useTls: + description: UseTls defines whether the endpoint uses TLS (HTTPS) or not (HTTP). + type: boolean + required: + - dnsName + - port + - useTls + type: object + dnsNames: + description: |- + A list of DNS host names on which object store gateways will accept client S3 connections. + When specified, object store gateways will reject client S3 connections to hostnames that are + not present in this list, so include all endpoints. + The object store's advertiseEndpoint and Kubernetes service endpoint, plus CephObjectZone + `customEndpoints` are automatically added to the list but may be set here again if desired. + Each DNS name must be valid according RFC-1123. + If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the + wildcard itself in the list of hostnames. + E.g., use "mystore.example.com" instead of "*.mystore.example.com". + The feature is supported only for Ceph v18 and later versions. + items: + type: string + type: array + type: object + metadataPool: + description: The metadata pool settings + nullable: true + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. + type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. 
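# Example (illustrative only, not part of the generated crds.yaml): a hedged sketch
# of the "hosting" block described above, continuing the hypothetical "my-store"
# spec. "mystore.example.com" is a placeholder domain; as the schema notes, the
# wildcard itself is not listed even when wildcard DNS is in use.
spec:
  hosting:
    advertiseEndpoint:
      dnsName: mystore.example.com   # RFC-1123 name returned to OBCs/COSI/users
      port: 443
      useTls: true
    dnsNames:                        # additional hostnames accepted for S3 requests
      - mystore.example.com
      - s3.internal.example.net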
+ minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms 
of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + preservePoolsOnDelete: + description: Preserve pools on object store deletion + type: boolean + protocols: + description: The protocol specification + properties: + s3: + description: The spec for S3 + nullable: true + properties: + authUseKeystone: + description: Whether to use Keystone for authentication. This option maps directly to the rgw_s3_auth_use_keystone option. Enabling it allows generating S3 credentials via an OpenStack API call, see the docs. If not given, the defaults of the corresponding RGW option apply. + nullable: true + type: boolean + enabled: + description: Whether to enable S3. This defaults to true (even if protocols.s3 is not present in the CRD). This maintains backwards compatibility – by default S3 is enabled. + nullable: true + type: boolean + type: object + swift: + description: The spec for Swift + nullable: true + properties: + accountInUrl: + description: Whether or not the Swift account name should be included in the Swift API URL. If set to false (the default), then the Swift API will listen on a URL formed like http://host:port//v1. If set to true, the Swift API URL will be http://host:port//v1/AUTH_. You must set this option to true (and update the Keystone service catalog) if you want radosgw to support publicly-readable containers and temporary URLs. + nullable: true + type: boolean + urlPrefix: + description: The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is swift, which makes the Swift API available at the URL http://host:port/swift/v1 (or http://host:port/swift/v1/AUTH_%(tenant_id)s if rgw swift account in url is enabled). + nullable: true + type: string + versioningEnabled: + description: Enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned. + nullable: true + type: boolean + type: object + type: object + security: + description: Security represents security settings + nullable: true + properties: + keyRotation: + description: KeyRotation defines options for Key Rotation. + nullable: true + properties: + enabled: + default: false + description: Enabled represents whether the key rotation is enabled. + type: boolean + schedule: + description: Schedule represents the cron schedule for key rotation. 
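# Example (illustrative only, not part of the generated crds.yaml): a sketch of the
# pool and protocol settings described above for the same hypothetical "my-store":
# a replicated metadata pool, an erasure-coded data pool (spec.dataPool follows the
# same pool schema), and Swift exposed alongside S3. Replica and chunk counts are
# illustrative only.
spec:
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
      requireSafeReplicaSize: true
  dataPool:
    failureDomain: host
    erasureCoded:
      dataChunks: 2
      codingChunks: 1
  preservePoolsOnDelete: true
  protocols:
    s3:
      enabled: true          # defaults to true even when protocols.s3 is omitted
    swift:
      accountInUrl: true
      urlPrefix: swift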
+ type: string + type: object + kms: + description: KeyManagementService is the main Key Management option + nullable: true + properties: + connectionDetails: + additionalProperties: + type: string + description: ConnectionDetails contains the KMS connection details (address, port etc) + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + tokenSecretName: + description: TokenSecretName is the kubernetes secret containing the KMS token + type: string + type: object + s3: + description: The settings for supporting AWS-SSE:S3 with RGW + nullable: true + properties: + connectionDetails: + additionalProperties: + type: string + description: ConnectionDetails contains the KMS connection details (address, port etc) + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + tokenSecretName: + description: TokenSecretName is the kubernetes secret containing the KMS token + type: string + type: object + type: object + sharedPools: + description: The pool information when configuring RADOS namespaces in existing pools. + nullable: true + properties: + dataPoolName: + description: The data pool used for creating RADOS namespaces in the object store + type: string + x-kubernetes-validations: + - message: object store shared data pool is immutable + rule: self == oldSelf + metadataPoolName: + description: The metadata pool used for creating RADOS namespaces in the object store + type: string + x-kubernetes-validations: + - message: object store shared metadata pool is immutable + rule: self == oldSelf + poolPlacements: + description: |- + PoolPlacements control which Pools are associated with a particular RGW bucket. + Once PoolPlacements are defined, RGW client will be able to associate pool + with ObjectStore bucket by providing "" during s3 bucket creation + or "X-Storage-Policy" header during swift container creation. + See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets + PoolPlacement with name: "default" will be used as a default pool if no option + is provided during bucket creation. + If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools. + If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults. + items: + properties: + dataNonECPoolName: + description: |- + The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads). + If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName. + type: string + dataPoolName: + description: The data pool used to store ObjectStore objects data. + minLength: 1 + type: string + default: + description: |- + Sets given placement as default. Only one placement in the list can be marked as default. + Default is false. + type: boolean + metadataPoolName: + description: The metadata pool used to store ObjectStore bucket index. + minLength: 1 + type: string + name: + description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default. + minLength: 1 + pattern: ^[a-zA-Z0-9._/-]+$ + type: string + storageClasses: + description: |- + StorageClasses can be selected by user to override dataPoolName during object creation. + Each placement has default STANDARD StorageClass pointing to dataPoolName. + This list allows defining additional StorageClasses on top of default STANDARD storage class. 
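# Example (illustrative only, not part of the generated crds.yaml): a sketch of the
# sharedPools/poolPlacements layout described above. Pool names such as "rgw-meta",
# "rgw-data", and "rgw-data-ia" are placeholders and are assumed to refer to pools
# created elsewhere; this snippet does not create them.
spec:
  sharedPools:
    metadataPoolName: rgw-meta     # immutable once set
    dataPoolName: rgw-data         # immutable once set
    preserveRadosNamespaceDataOnDelete: false
    poolPlacements:
      - name: default              # "default" placement is used when none is requested
        default: true
        metadataPoolName: rgw-meta
        dataPoolName: rgw-data
        storageClasses:
          - name: STANDARD_IA      # one of the recommended x-amz-storage-class names
            dataPoolName: rgw-data-ia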
+ items: + properties: + dataPoolName: + description: DataPoolName is the data pool used to store ObjectStore objects data. + minLength: 1 + type: string + name: + description: |- + Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses, + however most clients/libs insist on AWS names so it is recommended to use + one of the valid x-amz-storage-class values for better compatibility: + REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE + See AWS docs: https://aws.amazon.com/de/s3/storage-classes/ + minLength: 1 + pattern: ^[a-zA-Z0-9._/-]+$ + type: string + required: + - dataPoolName + - name + type: object + type: array + required: + - dataPoolName + - metadataPoolName + - name + type: object + type: array + preserveRadosNamespaceDataOnDelete: + description: Whether the RADOS namespaces should be preserved on deletion of the object store + type: boolean + type: object + zone: + description: The multisite info + nullable: true + properties: + name: + description: RGW Zone the Object Store is in + type: string + required: + - name + type: object + type: object + status: + description: ObjectStoreStatus represents the status of a Ceph Object Store resource + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + endpoints: + properties: + insecure: + items: + type: string + nullable: true + type: array + secure: + items: + type: string + nullable: true + type: array + type: object + info: + additionalProperties: + type: string + nullable: true + type: object + message: + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephobjectrealms.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectRealm + listKind: CephObjectRealmList + plural: cephobjectrealms + singular: cephobjectrealm + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephObjectRealm represents a Ceph Object Store Gateway Realm + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectRealmSpec represent the spec of an ObjectRealm + nullable: true + properties: + pull: + description: PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm + properties: + endpoint: + pattern: ^https*:// + type: string + type: object + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephnfses.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephNFS + listKind: CephNFSList + plural: cephnfses + shortNames: + - nfs + singular: cephnfs + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephNFS represents a Ceph NFS + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NFSGaneshaSpec represents the spec of an nfs ganesha server + properties: + rados: + description: RADOS is the Ganesha RADOS specification + nullable: true + properties: + namespace: + description: |- + The namespace inside the Ceph pool (set by 'pool') where shared NFS-Ganesha config is stored. + This setting is deprecated as it is internally set to the name of the CephNFS. + type: string + pool: + description: |- + The Ceph pool used store the shared configuration for NFS-Ganesha daemons. + This setting is deprecated, as it is internally required to be ".nfs". 
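# Example (illustrative only, not part of the generated crds.yaml): a minimal sketch
# of a CephObjectRealm that pulls an existing realm from another cluster, matching
# the pull.endpoint pattern (^https*://) above. The realm name and endpoint URL are
# hypothetical; the rest of the multisite setup (zone group, zone, realm access keys)
# is out of scope here.
apiVersion: ceph.rook.io/v1
kind: CephObjectRealm
metadata:
  name: realm-a
  namespace: rook-ceph
spec:
  pull:
    endpoint: http://10.2.105.133:80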
+ type: string + type: object + security: + description: Security allows specifying security configurations for the NFS cluster + nullable: true + properties: + kerberos: + description: Kerberos configures NFS-Ganesha to secure NFS client connections with Kerberos. + nullable: true + properties: + configFiles: + description: |- + ConfigFiles defines where the Kerberos configuration should be sourced from. Config files + will be placed into the `/etc/krb5.conf.rook/` directory. + + If this is left empty, Rook will not add any files. This allows you to manage the files + yourself however you wish. For example, you may build them into your custom Ceph container + image or use the Vault agent injector to securely add the files via annotations on the + CephNFS spec (passed to the NFS server pods). + + Rook configures Kerberos to log to stderr. We suggest removing logging sections from config + files to avoid consuming unnecessary disk space from logging to files. + properties: + volumeSource: + properties: + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + 
resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + type: object + type: object + domainName: + description: DomainName should be set to the Kerberos Realm. + type: string + keytabFile: + description: |- + KeytabFile defines where the Kerberos keytab should be sourced from. The keytab file will be + placed into `/etc/krb5.keytab`. If this is left empty, Rook will not add the file. + This allows you to manage the `krb5.keytab` file yourself however you wish. For example, you + may build it into your custom Ceph container image or use the Vault agent injector to + securely add the file via annotations on the CephNFS spec (passed to the NFS server pods). + properties: + volumeSource: + properties: + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: 
string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + type: object + type: object + principalName: + default: nfs + description: |- + PrincipalName corresponds directly to NFS-Ganesha's NFS_KRB5:PrincipalName config. In + practice, this is the service prefix of the principal name. The default is "nfs". + This value is combined with (a) the namespace and name of the CephNFS (with a hyphen between) + and (b) the Realm configured in the user-provided krb5.conf to determine the full principal + name: /-@. e.g., nfs/rook-ceph-my-nfs@example.net. + See https://github.com/nfs-ganesha/nfs-ganesha/wiki/RPCSEC_GSS for more detail. + type: string + type: object + sssd: + description: |- + SSSD enables integration with System Security Services Daemon (SSSD). SSSD can be used to + provide user ID mapping from a number of sources. See https://sssd.io for more information + about the SSSD project. + nullable: true + properties: + sidecar: + description: Sidecar tells Rook to run SSSD in a sidecar alongside the NFS-Ganesha server in each NFS pod. + properties: + additionalFiles: + description: |- + AdditionalFiles defines any number of additional files that should be mounted into the SSSD + sidecar with a directory root of `/etc/sssd/rook-additional/`. + These files may be referenced by the sssd.conf config file. + items: + description: |- + AdditionalVolumeMount represents the source from where additional files in pod containers + should come from and what subdirectory they are made available in. + properties: + subPath: + description: |- + SubPath defines the sub-path (subdirectory) of the directory root where the volumeSource will + be mounted. 
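# Example (illustrative only, not part of the generated crds.yaml): a hedged sketch
# of the security.kerberos block described above. The ConfigMap "krb5-conf", keytab
# Secret "nfs-keytab", and realm "EXAMPLE.NET" are placeholders. Per the schema,
# config files are mounted under /etc/krb5.conf.rook/ and the keytab at
# /etc/krb5.keytab.
apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
  name: my-nfs
  namespace: rook-ceph
spec:
  security:
    kerberos:
      principalName: nfs      # yields a principal like nfs/rook-ceph-my-nfs@EXAMPLE.NET
      domainName: EXAMPLE.NET
      configFiles:
        volumeSource:
          configMap:
            name: krb5-conf
            optional: false
      keytabFile:
        volumeSource:
          secret:
            secretName: nfs-keytab
            defaultMode: 0600   # keep the keytab non-world-readable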
All files/keys in the volume source's volume will be mounted to the subdirectory. + This is not the same as the Kubernetes `subPath` volume mount option. + Each subPath definition must be unique and must not contain ':'. + minLength: 1 + pattern: ^[^:]+$ + type: string + volumeSource: + properties: + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: 
object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + type: object + required: + - subPath + - volumeSource + type: object + type: array + debugLevel: + description: |- + DebugLevel sets the debug level for SSSD. If unset or set to 0, Rook does nothing. Otherwise, + this may be a value between 1 and 10. See SSSD docs for more info: + https://sssd.io/troubleshooting/basics.html#sssd-debug-logs + maximum: 10 + minimum: 0 + type: integer + image: + description: Image defines the container image that should be used for the SSSD sidecar. + minLength: 1 + type: string + resources: + description: Resources allow specifying resource requests/limits on the SSSD sidecar container. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + sssdConfigFile: + description: |- + SSSDConfigFile defines where the SSSD configuration should be sourced from. The config file + will be placed into `/etc/sssd/sssd.conf`. If this is left empty, Rook will not add the file. + This allows you to manage the `sssd.conf` file yourself however you wish. For example, you + may build it into your custom Ceph container image or use the Vault agent injector to + securely add the file via annotations on the CephNFS spec (passed to the NFS server pods). 
+ properties: + volumeSource: + properties: + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + 
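# Example (illustrative only, not part of the generated crds.yaml): a sketch of the
# sssd.sidecar block described above for the hypothetical "my-nfs". The image
# reference and ConfigMap/Secret names are placeholders; sssd.conf is mounted at
# /etc/sssd/sssd.conf and each additionalFiles entry under
# /etc/sssd/rook-additional/<subPath>.
spec:
  security:
    sssd:
      sidecar:
        image: registry.example.com/sssd:latest   # required field
        debugLevel: 6                              # 1-10; unset or 0 leaves defaults
        sssdConfigFile:
          volumeSource:
            configMap:
              name: sssd-conf
              defaultMode: 0600
        additionalFiles:
          - subPath: certs                         # must be unique and contain no ':'
            volumeSource:
              secret:
                secretName: sssd-ldap-ca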
required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + type: object + type: object + required: + - image + type: object + type: object + type: object + server: + description: Server is the Ganesha Server specification + properties: + active: + description: The number of active Ganesha servers + type: integer + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + hostNetwork: + description: Whether host networking is enabled for the Ganesha server. If not set, the network settings from the cluster CR will be applied. + nullable: true + type: boolean + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + livenessProbe: + description: |- + A liveness-probe to verify that Ganesha server has valid run-time state. + If LivenessProbe.Disabled is false and LivenessProbe.Probe is nil uses default probe. + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
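+ # Illustrative comment, not part of the generated CRD: an httpHeaders entry for a
+ # custom probe would be written as
+ #   httpHeaders:
+ #     - name: Host
+ #       value: nfs.example.internal
+ # (the header value is a made-up placeholder).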
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + logLevel: + description: LogLevel set logging level + type: string + placement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: 
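+ # Illustrative comment, not part of the generated CRD: a labelSelector matchExpressions
+ # entry takes the form
+ #   - key: app
+ #     operator: In
+ #     values:
+ #       - rook-ceph-nfs
+ # (the label key and value shown are assumed example values).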
+ key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: 
array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + description: PriorityClassName sets the priority class on the pods + type: string + resources: + description: Resources set resource requests and limits + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - active + type: object + required: + - server + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephfilesystemsubvolumegroups.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystemSubVolumeGroup + listKind: CephFilesystemSubVolumeGroupList + plural: cephfilesystemsubvolumegroups + singular: cephfilesystemsubvolumegroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - description: Name of the CephFileSystem + jsonPath: .spec.filesystemName + name: Filesystem + type: string + - jsonPath: .spec.quota + name: Quota + type: string + - jsonPath: .status.info.pinning + name: Pinning + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec represents the specification of a Ceph Filesystem SubVolumeGroup + properties: + dataPoolName: + description: The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired. + type: string + filesystemName: + description: |- + FilesystemName is the name of Ceph Filesystem SubVolumeGroup volume name. Typically it's the name of + the CephFilesystem CR. 
If not coming from the CephFilesystem CR, it can be retrieved from the + list of Ceph Filesystem volumes with `ceph fs volume ls`. To learn more about Ceph Filesystem + abstractions see https://docs.ceph.com/en/latest/cephfs/fs-volumes/#fs-volumes-and-subvolumes + type: string + x-kubernetes-validations: + - message: filesystemName is immutable + rule: self == oldSelf + name: + description: The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + pinning: + description: |- + Pinning configuration of CephFilesystemSubVolumeGroup, + reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups + only one out of (export, distributed, random) can be set at a time + properties: + distributed: + maximum: 1 + minimum: 0 + nullable: true + type: integer + export: + maximum: 256 + minimum: -1 + nullable: true + type: integer + random: + maximum: 1 + minimum: 0 + nullable: true + type: number + type: object + x-kubernetes-validations: + - message: only one pinning type should be set + rule: (has(self.export) && !has(self.distributed) && !has(self.random)) || (!has(self.export) && has(self.distributed) && !has(self.random)) || (!has(self.export) && !has(self.distributed) && has(self.random)) || (!has(self.export) && !has(self.distributed) && !has(self.random)) + quota: + anyOf: + - type: integer + - type: string + description: Quota size of the Ceph Filesystem subvolume group. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - filesystemName + type: object + status: + description: Status represents the status of a CephFilesystem SubvolumeGroup + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephfilesystems.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystem + listKind: CephFilesystemList + plural: cephfilesystems + singular: cephfilesystem + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Number of desired active MDS daemons + jsonPath: .spec.metadataServer.activeCount + name: ActiveMDS + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.phase + name: Phase + type: string + name: v1 + schema: + openAPIV3Schema: + description: CephFilesystem represents a Ceph Filesystem + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FilesystemSpec represents the spec of a file system + properties: + dataPools: + description: The data pool settings, with optional predefined pool name. + items: + description: NamedPoolSpec represents the named ceph pool spec + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. + type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. 
+ minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + name: + description: Name of the pool + type: string + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + 
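+ # Illustrative comment, not part of the generated CRD: using the fields described above,
+ # a pool statusCheck could be set as
+ #   statusCheck:
+ #     mirror:
+ #       disabled: false
+ #       interval: 60s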
disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + nullable: true + type: array + metadataPool: + description: The metadata pool settings + nullable: true + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. + type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
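+ # Illustrative comment, not part of the generated CRD: a snapshot schedule entry for a
+ # mirrored pool might look like
+ #   snapshotSchedules:
+ #     - interval: 24h
+ #       startTime: 14:00:00-05:00
+ # (the interval and startTime values are example assumptions, not defaults).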
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + metadataServer: + description: The mds pod info + properties: + activeCount: + description: The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode. + format: int32 + maximum: 50 + minimum: 1 + type: integer + activeStandby: + description: |- + Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. + If false, standbys will still be available, but will not have a warm metadata cache. + type: boolean + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. 
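+ # Illustrative comment, not part of the generated CRD: a minimal metadataServer block
+ # using the fields above would be
+ #   metadataServer:
+ #     activeCount: 1
+ #     activeStandby: true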
+ nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + livenessProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + placement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: 
string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + 
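+ # Illustrative comment, not part of the generated CRD: topologyKey is a node label key,
+ # most commonly
+ #   topologyKey: kubernetes.io/hostname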
required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + description: PriorityClassName sets priority classes on components + type: string + resources: + description: The resource requirements for the mds pods + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. 
+ If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + startupProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + required: + - activeCount + type: object + mirroring: + description: The mirroring settings + nullable: true + properties: + enabled: + description: Enabled whether this filesystem is mirrored or not + type: boolean + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotRetention: + description: |- + Retention is the retention policy for a snapshot schedule + One path has exactly one retention policy. + A policy can however contain multiple count-time period pairs in order to specify complex retention policies + items: + description: SnapshotScheduleRetentionSpec is a retention policy + properties: + duration: + description: Duration represents the retention duration for a snapshot + type: string + path: + description: Path is the path to snapshot + type: string + type: object + type: array + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored filesystems + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
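+ # Illustrative comment, not part of the generated CRD: filesystem mirroring with a
+ # schedule could be declared as
+ #   mirroring:
+ #     enabled: true
+ #     snapshotSchedules:
+ #       - path: /
+ #         interval: 24h
+ # (the path and interval shown are example assumptions).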
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + preserveFilesystemOnDelete: + description: Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true. + type: boolean + preservePoolsOnDelete: + description: Preserve pools on filesystem deletion + type: boolean + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - dataPools + - metadataPool + - metadataServer + type: object + status: + description: CephFilesystemStatus represents the status of a Ceph Filesystem + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + info: + additionalProperties: + type: string + description: Use only info and put mirroringStatus in it? + nullable: true + type: object + mirroringStatus: + description: MirroringStatus is the filesystem mirroring status + properties: + daemonsStatus: + description: PoolMirroringStatus is the mirroring status of a filesystem + items: + description: FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem + properties: + daemon_id: + description: DaemonID is the cephfs-mirror name + type: integer + filesystems: + description: Filesystems is the list of filesystems managed by a given cephfs-mirror daemon + items: + description: FilesystemsSpec is spec for the mirrored filesystem + properties: + directory_count: + description: DirectoryCount is the number of directories in the filesystem + type: integer + filesystem_id: + description: FilesystemID is the filesystem identifier + type: integer + name: + description: Name is name of the filesystem + type: string + peers: + description: Peers represents the mirroring peers + items: + description: FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror + properties: + remote: + description: Remote are the remote cluster information + properties: + client_name: + description: ClientName is cephx name + type: string + cluster_name: + description: ClusterName is the name of the cluster + type: string + fs_name: + description: FsName is the filesystem name + type: string + type: object + stats: + description: Stats are the stat a peer mirror + properties: + failure_count: + description: FailureCount is the number of mirroring failure + type: integer + recovery_count: + description: RecoveryCount is the number of recovery attempted after failures + type: integer + type: object + uuid: + description: UUID is the peer unique identifier + type: string + type: object + type: array + 
type: object + type: array + type: object + nullable: true + type: array + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + type: object + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + description: ConditionType represent a resource's status + type: string + snapshotScheduleStatus: + description: FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule + properties: + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + snapshotSchedules: + description: SnapshotSchedules is the list of snapshots scheduled + items: + description: FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool + properties: + fs: + description: Fs is the name of the Ceph Filesystem + type: string + path: + description: Path is the path on the filesystem + type: string + rel_path: + type: string + retention: + description: FilesystemSnapshotScheduleStatusRetention is the retention specification for a filesystem snapshot schedule + properties: + active: + description: Active is whether the scheduled is active or not + type: boolean + created: + description: Created is when the snapshot schedule was created + type: string + created_count: + description: CreatedCount is total amount of snapshots + type: integer + first: + description: First is when the first snapshot schedule was taken + type: string + last: + description: Last is when the last snapshot schedule was taken + type: string + last_pruned: + description: LastPruned is when the last snapshot schedule was pruned + type: string + pruned_count: + description: PrunedCount is total amount of pruned snapshots + type: integer + start: + description: Start is when the snapshot schedule starts + type: string + type: object + schedule: + type: string + subvol: + description: Subvol is the name of the sub volume + type: string + type: object + nullable: true + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephfilesystemmirrors.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystemMirror + listKind: CephFilesystemMirrorList + plural: cephfilesystemmirrors + singular: cephfilesystemmirror + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephFilesystemMirror is the Ceph Filesystem Mirror object definition + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FilesystemMirroringSpec is the filesystem mirroring specification + properties: + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + placement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + 
matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + 
properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + priorityClassName: + description: PriorityClassName sets priority class on the cephfs-mirror pods + type: string + resources: + description: The resource requirements for the cephfs-mirror pods + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephcosidrivers.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephCOSIDriver + listKind: CephCOSIDriverList + plural: cephcosidrivers + shortNames: + - cephcosi + singular: cephcosidriver + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephCOSIDriver represents the CRD for the Ceph COSI Driver Deployment + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec represents the specification of a Ceph COSI Driver + properties: + deploymentStrategy: + description: DeploymentStrategy is the strategy to use to deploy the COSI driver. + enum: + - Never + - Auto + - Always + type: string + image: + description: Image is the container image to run the Ceph COSI driver + type: string + objectProvisionerImage: + description: ObjectProvisionerImage is the container image to run the COSI driver sidecar + type: string + placement: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + resources: + description: Resources is the resource requirements for the COSI driver + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephclusters.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephCluster + listKind: CephClusterList + plural: cephclusters + singular: cephcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Directory used on the K8s nodes + jsonPath: .spec.dataDirHostPath + name: DataDirHostPath + type: string + - description: Number of MONs + jsonPath: .spec.mon.count + name: MonCount + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.phase + name: Phase + type: string + - description: Message + jsonPath: .status.message + name: Message + type: string + - description: Ceph Health + jsonPath: .status.ceph.health + name: Health + type: string + - jsonPath: .spec.external.enable + name: External + type: boolean + - description: Ceph FSID + jsonPath: .status.ceph.fsid + name: FSID + type: string + name: v1 + schema: + openAPIV3Schema: + description: CephCluster is a Ceph storage cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec represents the specification of Ceph Cluster + properties: + annotations: + additionalProperties: + additionalProperties: + type: string + description: Annotations are annotations + type: object + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + cephConfig: + additionalProperties: + additionalProperties: + type: string + type: object + description: Ceph Config options + nullable: true + type: object + cephVersion: + description: The version information that instructs Rook to orchestrate a particular version of Ceph. 
+ nullable: true + properties: + allowUnsupported: + description: Whether to allow unsupported versions (do not set to true in production) + type: boolean + image: + description: |- + Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: + The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags + type: string + imagePullPolicy: + description: |- + ImagePullPolicy describes a policy for if/when to pull a container image + One of Always, Never, IfNotPresent. + enum: + - IfNotPresent + - Always + - Never + - "" + type: string + type: object + cleanupPolicy: + description: |- + Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster + deletion is not imminent. + nullable: true + properties: + allowUninstallWithVolumes: + description: AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present + type: boolean + confirmation: + description: Confirmation represents the cleanup confirmation + nullable: true + pattern: ^$|^yes-really-destroy-data$ + type: string + sanitizeDisks: + description: SanitizeDisks represents way we sanitize disks + nullable: true + properties: + dataSource: + description: DataSource is the data source to use to sanitize the disk with + enum: + - zero + - random + type: string + iteration: + description: Iteration is the number of pass to apply the sanitizing + format: int32 + type: integer + method: + description: Method is the method we use to sanitize disks + enum: + - complete + - quick + type: string + type: object + type: object + continueUpgradeAfterChecksEvenIfNotHealthy: + description: ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean + type: boolean + crashCollector: + description: A spec for the crash controller + nullable: true + properties: + daysToRetain: + description: DaysToRetain represents the number of days to retain crash until they get pruned + type: integer + disable: + description: Disable determines whether we should enable the crash collector + type: boolean + type: object + csi: + description: CSI Driver Options applied per cluster. + properties: + cephfs: + description: CephFS defines CSI Driver settings for CephFS driver. + properties: + fuseMountOptions: + description: FuseMountOptions defines the mount options for ceph fuse mounter. + type: string + kernelMountOptions: + description: KernelMountOptions defines the mount options for kernel mounter. + type: string + type: object + readAffinity: + description: ReadAffinity defines the read affinity settings for CSI driver. + properties: + crushLocationLabels: + description: |- + CrushLocationLabels defines which node labels to use + as CRUSH location. This should correspond to the values set in + the CRUSH map. + items: + type: string + type: array + enabled: + description: Enables read affinity for CSI driver. + type: boolean + type: object + type: object + dashboard: + description: Dashboard settings + nullable: true + properties: + enabled: + description: Enabled determines whether to enable the dashboard + type: boolean + port: + description: Port is the dashboard webserver port + maximum: 65535 + minimum: 0 + type: integer + prometheusEndpoint: + description: Endpoint for the Prometheus host + type: string + prometheusEndpointSSLVerify: + description: Whether to verify the ssl endpoint for prometheus. Set to false for a self-signed cert. 
+ type: boolean + ssl: + description: SSL determines whether SSL should be used + type: boolean + urlPrefix: + description: URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy + type: string + type: object + dataDirHostPath: + description: The path on the host where config and data can be persisted + pattern: ^/(\S+) + type: string + x-kubernetes-validations: + - message: DataDirHostPath is immutable + rule: self == oldSelf + disruptionManagement: + description: A spec for configuring disruption management. + nullable: true + properties: + machineDisruptionBudgetNamespace: + description: Deprecated. Namespace to look for MDBs by the machineDisruptionBudgetController + type: string + manageMachineDisruptionBudgets: + description: Deprecated. This enables management of machinedisruptionbudgets. + type: boolean + managePodBudgets: + description: This enables management of poddisruptionbudgets + type: boolean + osdMaintenanceTimeout: + description: |- + OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains + it only works if managePodBudgets is true. + the default is 30 minutes + format: int64 + type: integer + pgHealthCheckTimeout: + description: |- + PGHealthCheckTimeout is the time (in minutes) that the operator will wait for the placement groups to become + healthy (active+clean) after a drain was completed and OSDs came back up. Rook will continue with the next drain + if the timeout exceeds. It only works if managePodBudgets is true. + No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + format: int64 + type: integer + pgHealthyRegex: + description: |- + PgHealthyRegex is the regular expression that is used to determine which PG states should be considered healthy. + The default is `^(active\+clean|active\+clean\+scrubbing|active\+clean\+scrubbing\+deep)$` + type: string + type: object + external: + description: |- + Whether the Ceph Cluster is running external to this Kubernetes cluster + mon, mgr, osd, mds, and discover daemons will not be created for external clusters. 
+ nullable: true + properties: + enable: + description: Enable determines whether external mode is enabled or not + type: boolean + type: object + x-kubernetes-preserve-unknown-fields: true + healthCheck: + description: Internal daemon healthchecks and liveness probe + nullable: true + properties: + daemonHealth: + description: DaemonHealth is the health check for a given daemon + nullable: true + properties: + mon: + description: Monitor represents the health check settings for the Ceph monitor + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + osd: + description: ObjectStorageDaemon represents the health check settings for the Ceph OSDs + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + status: + description: Status represents the health check settings for the Ceph health + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + livenessProbe: + additionalProperties: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + description: LivenessProbe allows changing the livenessProbe configuration for a given daemon + type: object + startupProbe: + additionalProperties: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: |- + Probe describes a health check to be performed against a container to determine whether it is + alive or ready to receive traffic. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + description: StartupProbe allows changing the startupProbe configuration for a given daemon + type: object + type: object + labels: + additionalProperties: + additionalProperties: + type: string + description: Labels are label for a given daemons + type: object + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + logCollector: + description: Logging represents loggings settings + nullable: true + properties: + enabled: + description: Enabled represents whether the log collector is enabled + type: boolean + maxLogSize: + anyOf: + - type: integer + - type: string + description: MaxLogSize is the maximum size of the log per ceph daemons. Must be at least 1M. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodicity: + description: Periodicity is the periodicity of the log rotation. + pattern: ^$|^(hourly|daily|weekly|monthly|1h|24h|1d)$ + type: string + type: object + mgr: + description: A spec for mgr related options + nullable: true + properties: + allowMultiplePerNode: + description: AllowMultiplePerNode allows to run multiple managers on the same node (not recommended) + type: boolean + count: + description: Count is the number of manager daemons to run + maximum: 5 + minimum: 0 + type: integer + modules: + description: Modules is the list of ceph manager modules to enable/disable + items: + description: Module represents mgr modules that the user wants to enable or disable + properties: + enabled: + description: Enabled determines whether a module should be enabled or not + type: boolean + name: + description: Name is the name of the ceph manager module + type: string + settings: + description: Settings to further configure the module + properties: + balancerMode: + description: BalancerMode sets the `balancer` module with different modes like `upmap`, `crush-compact` etc + enum: + - "" + - crush-compat + - upmap + - read + - upmap-read + type: string + type: object + type: object + nullable: true + type: array + type: object + mon: + description: A spec for mon related options + nullable: true + properties: + allowMultiplePerNode: + description: AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended) + type: boolean + count: + description: Count is the number of Ceph monitors + maximum: 9 + minimum: 0 + type: integer + failureDomainLabel: + type: string + stretchCluster: + description: StretchCluster is the stretch cluster specification + properties: + failureDomainLabel: + description: 'FailureDomainLabel the failure domain name (e,g: zone)' + type: string + subFailureDomain: + description: SubFailureDomain is the failure domain within a zone + type: string + zones: + description: Zones is the list of zones + items: + description: MonZoneSpec represents the specification of a zone in a Ceph Cluster + properties: + arbiter: + description: Arbiter determines if the zone contains the arbiter used for stretch cluster mode + type: boolean + name: + description: Name is the name of the zone + type: string + volumeClaimTemplate: + description: VolumeClaimTemplate is the PVC template + properties: + metadata: + description: |- + Standard object's metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + spec defines the desired characteristics of a volume requested by a pod author. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. 
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + nullable: true + type: array + type: object + volumeClaimTemplate: + description: VolumeClaimTemplate is the PVC definition + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + spec defines the desired characteristics of a volume requested by a pod author. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + zones: + description: Zones are specified when we want to provide zonal awareness to mons + items: + description: MonZoneSpec represents the specification of a zone in a Ceph Cluster + properties: + arbiter: + description: Arbiter determines if the zone contains the arbiter used for stretch cluster mode + type: boolean + name: + description: Name is the name of the zone + type: string + volumeClaimTemplate: + description: VolumeClaimTemplate is the PVC template + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + spec defines the desired characteristics of a volume requested by a pod author. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: array + type: object + x-kubernetes-validations: + - message: zones must be less than or equal to count + rule: '!has(self.zones) || (has(self.zones) && (size(self.zones) <= self.count))' + - message: stretchCluster zones must be equal to 3 + rule: '!has(self.stretchCluster) || (has(self.stretchCluster) && (size(self.stretchCluster.zones) > 0) && (size(self.stretchCluster.zones) == 3))' + monitoring: + description: Prometheus based Monitoring settings + nullable: true + properties: + enabled: + description: |- + Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus + types must exist or the creation will fail. Default is false. + type: boolean + exporter: + description: Ceph exporter configuration + properties: + perfCountersPrioLimit: + default: 5 + description: Only performance counters greater than or equal to this option are fetched + format: int64 + type: integer + statsPeriodSeconds: + default: 5 + description: Time to wait before sending requests again to exporter server (seconds) + format: int64 + type: integer + type: object + externalMgrEndpoints: + description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint + items: + description: EndpointAddress is a tuple that describes single IP address. + properties: + hostname: + description: The Hostname of this endpoint + type: string + ip: + description: |- + The IP of this endpoint. + May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + or link-local multicast (224.0.0.0/24 or ff02::/16). + type: string + nodeName: + description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' 
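+ # --- illustrative comment, not part of the generated schema ---
+ # The monitoring block described above is consumed from a CephCluster spec.
+ # A minimal sketch, assuming Prometheus rules are wanted; the scrape interval
+ # value is hypothetical:
+ #   spec:
+ #     monitoring:
+ #       enabled: true
+ #       interval: 30s
+ # ---------------------------------------------------------------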
+ type: string + targetRef: + description: Reference to object providing the endpoint. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ip + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + externalMgrPrometheusPort: + description: ExternalMgrPrometheusPort Prometheus exporter port + maximum: 65535 + minimum: 0 + type: integer + interval: + description: Interval determines prometheus scrape interval + type: string + metricsDisabled: + description: |- + Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled. + If true, the prometheus mgr module and Ceph exporter are both disabled. Default is false. + type: boolean + port: + description: Port is the prometheus server port + maximum: 65535 + minimum: 0 + type: integer + type: object + network: + description: Network related configuration + nullable: true + properties: + addressRanges: + description: |- + AddressRanges specify a list of CIDRs that Rook will apply to Ceph's 'public_network' and/or + 'cluster_network' configurations. This config section may be used for the "host" or "multus" + network providers. + nullable: true + properties: + cluster: + description: Cluster defines a list of CIDRs to use for Ceph cluster network communication. + items: + description: |- + An IPv4 or IPv6 network CIDR. + + This naive kubebuilder regex provides immediate feedback for some typos and for a common problem + case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code. + pattern: ^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$ + type: string + type: array + public: + description: Public defines a list of CIDRs to use for Ceph public network communication. + items: + description: |- + An IPv4 or IPv6 network CIDR. 
+ + This naive kubebuilder regex provides immediate feedback for some typos and for a common problem + case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code. + pattern: ^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$ + type: string + type: array + type: object + connections: + description: |- + Settings for network connections such as compression and encryption across the + wire. + nullable: true + properties: + compression: + description: Compression settings for the network connections. + nullable: true + properties: + enabled: + description: |- + Whether to compress the data in transit across the wire. + The default is not set. + type: boolean + type: object + encryption: + description: Encryption settings for the network connections. + nullable: true + properties: + enabled: + description: |- + Whether to encrypt the data in transit across the wire to prevent eavesdropping + the data on the network. The default is not set. Even if encryption is not enabled, + clients still establish a strong initial authentication for the connection + and data integrity is still validated with a crc check. When encryption is enabled, + all communication between clients and Ceph daemons, or between Ceph daemons will + be encrypted. + type: boolean + type: object + requireMsgr2: + description: |- + Whether to require msgr2 (port 3300) even if compression or encryption are not enabled. + If true, the msgr1 port (6789) will be disabled. + Requires a kernel that supports msgr2 (kernel 5.11 or CentOS 8.4 or newer). + type: boolean + type: object + dualStack: + description: DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6 + type: boolean + hostNetwork: + description: |- + HostNetwork to enable host network. + If host networking is enabled or disabled on a running cluster, then the operator will automatically fail over all the mons to + apply the new network settings. + type: boolean + ipFamily: + description: IPFamily is the single stack IPv6 or IPv4 protocol + enum: + - IPv4 + - IPv6 + nullable: true + type: string + multiClusterService: + description: Enable multiClusterService to export the Services between peer clusters + properties: + clusterID: + description: |- + ClusterID uniquely identifies a cluster. It is used as a prefix to nslookup exported + services. For example: ...svc.clusterset.local + type: string + enabled: + description: |- + Enable multiClusterService to export the mon and OSD services to peer cluster. + Ensure that peer clusters are connected using an MCS API compatible application, + like Globalnet Submariner. + type: boolean + type: object + provider: + description: |- + Provider is what provides network connectivity to the cluster e.g. "host" or "multus". + If the Provider is updated from being empty to "host" on a running cluster, then the operator will automatically fail over all the mons to apply the "host" network settings. + enum: + - "" + - host + - multus + nullable: true + type: string + x-kubernetes-validations: + - message: network provider must be disabled (reverted to empty string) before a new provider is enabled + rule: self == '' || self == oldSelf + selectors: + additionalProperties: + type: string + description: |- + Selectors define NetworkAttachmentDefinitions to be used for Ceph public and/or cluster + networks when the "multus" network provider is used. This config section is not used for + other network providers. + + Valid keys are "public" and "cluster". 
Refer to Ceph networking documentation for more: + https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/ + + Refer to Multus network annotation documentation for help selecting values: + https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation + + Rook will make a best-effort attempt to automatically detect CIDR address ranges for given + network attachment definitions. Rook's methods are robust but may be imprecise for + sufficiently complicated networks. Rook's auto-detection process obtains a new IP address + lease for each CephCluster reconcile. If Rook fails to detect, incorrectly detects, only + partially detects, or if underlying networks do not support reusing old IP addresses, it is + best to use the 'addressRanges' config section to specify CIDR ranges for the Ceph cluster. + + As a contrived example, one can use a theoretical Kubernetes-wide network for Ceph client + traffic and a theoretical Rook-only network for Ceph replication traffic as shown: + selectors: + public: "default/cluster-fast-net" + cluster: "rook-ceph/ceph-backend-net" + nullable: true + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + x-kubernetes-validations: + - message: at least one network selector must be specified when using multus + rule: '!has(self.provider) || (self.provider != ''multus'' || (self.provider == ''multus'' && size(self.selectors) > 0))' + - message: the legacy hostNetwork setting can only be set if the network.provider is set to the empty string + rule: '!has(self.hostNetwork) || self.hostNetwork == false || !has(self.provider) || self.provider == ""' + placement: + additionalProperties: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + 
labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassNames: + additionalProperties: + type: string + description: PriorityClassNames sets priority classes on components + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + removeOSDsIfOutAndSafeToRemove: + description: Remove the OSD that is out and safe to remove only if this option is true + type: boolean + resources: + additionalProperties: + description: ResourceRequirements describes the compute resource requirements. 
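+ # --- illustrative comment, not part of the generated schema ---
+ # The resources map described above sets standard Kubernetes requests/limits
+ # per component; the key ("mgr") and the values below are hypothetical examples.
+ #   spec:
+ #     resources:
+ #       mgr:
+ #         requests:
+ #           cpu: 500m
+ #           memory: 512Mi
+ # ---------------------------------------------------------------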
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + description: Resources set resource requests and limits + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + security: + description: Security represents security settings + nullable: true + properties: + keyRotation: + description: KeyRotation defines options for Key Rotation. + nullable: true + properties: + enabled: + default: false + description: Enabled represents whether the key rotation is enabled. + type: boolean + schedule: + description: Schedule represents the cron schedule for key rotation. 
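+ # --- illustrative comment, not part of the generated schema ---
+ # keyRotation (and kms) sit under spec.security. A minimal sketch with a
+ # hypothetical cron schedule for rotation:
+ #   spec:
+ #     security:
+ #       keyRotation:
+ #         enabled: true
+ #         schedule: "0 0 1 * *"
+ # ---------------------------------------------------------------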
+ type: string + type: object + kms: + description: KeyManagementService is the main Key Management option + nullable: true + properties: + connectionDetails: + additionalProperties: + type: string + description: ConnectionDetails contains the KMS connection details (address, port etc) + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + tokenSecretName: + description: TokenSecretName is the kubernetes secret containing the KMS token + type: string + type: object + type: object + skipUpgradeChecks: + description: SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails + type: boolean + storage: + description: A spec for available storage in the cluster and how it should be used + nullable: true + properties: + allowDeviceClassUpdate: + description: Whether to allow updating the device class after the OSD is initially provisioned + type: boolean + allowOsdCrushWeightUpdate: + description: |- + Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased. + This allows cluster data to be rebalanced to make most effective use of new OSD space. + The default is false since data rebalancing can cause temporary cluster slowdown. + type: boolean + backfillFullRatio: + description: BackfillFullRatio is the ratio at which the cluster is too full for backfill. Backfill will be disabled if above this threshold. Default is 0.90. + maximum: 1 + minimum: 0 + nullable: true + type: number + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + deviceFilter: + description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster + type: string + devicePathFilter: + description: A regular expression to allow more fine-grained selection of devices with path names + type: string + devices: + description: List of devices to use as storage devices + items: + description: Device represents a disk to use in the cluster + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + fullpath: + type: string + name: + type: string + type: object + nullable: true + type: array + x-kubernetes-preserve-unknown-fields: true + flappingRestartIntervalHours: + description: |- + FlappingRestartIntervalHours defines the time for which the OSD pods, that failed with zero exit code, will sleep before restarting. + This is needed for OSD flapping where OSD daemons are marked down more than 5 times in 600 seconds by Ceph. + Preventing the OSD pods to restart immediately in such scenarios will prevent Rook from marking OSD as `up` and thus + peering of the PGs mapped to the OSD. + User needs to manually restart the OSD pod if they manage to fix the underlying OSD flapping issue before the restart interval. + The sleep will be disabled if this interval is set to 0. + type: integer + fullRatio: + description: FullRatio is the ratio at which the cluster is considered full and ceph will stop accepting writes. Default is 0.95. + maximum: 1 + minimum: 0 + nullable: true + type: number + nearFullRatio: + description: NearFullRatio is the ratio at which the cluster is considered nearly full and will raise a ceph health warning. Default is 0.85. 
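+ # --- illustrative comment, not part of the generated schema ---
+ # The full-ratio knobs above live under spec.storage. A sketch that simply
+ # spells out the defaults documented in this schema (0.85 / 0.95 / 0.90):
+ #   spec:
+ #     storage:
+ #       nearFullRatio: 0.85
+ #       fullRatio: 0.95
+ #       backfillFullRatio: 0.90
+ # ---------------------------------------------------------------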
+ maximum: 1 + minimum: 0 + nullable: true + type: number + nodes: + items: + description: Node is a storage nodes + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + deviceFilter: + description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster + type: string + devicePathFilter: + description: A regular expression to allow more fine-grained selection of devices with path names + type: string + devices: + description: List of devices to use as storage devices + items: + description: Device represents a disk to use in the cluster + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + fullpath: + type: string + name: + type: string + type: object + nullable: true + type: array + x-kubernetes-preserve-unknown-fields: true + name: + type: string + resources: + description: ResourceRequirements describes the compute resource requirements. + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + useAllDevices: + description: Whether to consume all the storage devices found on a machine + type: boolean + volumeClaimTemplates: + description: PersistentVolumeClaims to use as storage + items: + description: VolumeClaimTemplate is a simplified version of K8s corev1's PVC. It has no type meta or status. + properties: + metadata: + description: |- + Standard object's metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + spec defines the desired characteristics of a volume requested by a pod author. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. 
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. 
+ type: string + type: object + type: object + type: array + type: object + nullable: true + type: array + onlyApplyOSDPlacement: + type: boolean + scheduleAlways: + description: Whether to always schedule OSDs on a node even if the node is not currently scheduleable or ready + type: boolean + storageClassDeviceSets: + items: + description: StorageClassDeviceSet is a storage class device set + properties: + config: + additionalProperties: + type: string + description: Provider-specific device configuration + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + count: + description: Count is the number of devices in this set + minimum: 1 + type: integer + encrypted: + description: Whether to encrypt the deviceSet + type: boolean + name: + description: Name is a unique identifier for the set + type: string + placement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + 
type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: 
object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + portable: + description: Portable represents OSD portability across the hosts + type: boolean + preparePlacement: + nullable: true + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: 
+ key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + 
resources: + description: ResourceRequirements describes the compute resource requirements. + nullable: true + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + schedulerName: + description: Scheduler name for OSD pod placement + type: string + tuneDeviceClass: + description: TuneSlowDeviceClass Tune the OSD when running on a slow Device Class + type: boolean + tuneFastDeviceClass: + description: TuneFastDeviceClass Tune the OSD when running on a fast Device Class + type: boolean + volumeClaimTemplates: + description: VolumeClaimTemplates is a list of PVC templates for the underlying storage devices + items: + description: VolumeClaimTemplate is a simplified version of K8s corev1's PVC. It has no type meta or status. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + spec defines the desired characteristics of a volume requested by a pod author. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + type: object + type: array + required: + - count + - name + - volumeClaimTemplates + type: object + nullable: true + type: array + store: + description: OSDStore is the backend storage type used for creating the OSDs + properties: + type: + description: Type of backend storage to be used while creating OSDs. If empty, then bluestore will be used + enum: + - bluestore + - bluestore-rdr + type: string + updateStore: + description: |- + UpdateStore updates the backend store for existing OSDs. It destroys each OSD one at a time, cleans up the backing disk + and prepares same OSD on that disk + pattern: ^$|^yes-really-update-store$ + type: string + type: object + useAllDevices: + description: Whether to consume all the storage devices found on a machine + type: boolean + useAllNodes: + type: boolean + volumeClaimTemplates: + description: PersistentVolumeClaims to use as storage + items: + description: VolumeClaimTemplate is a simplified version of K8s corev1's PVC. It has no type meta or status. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + spec defines the desired characteristics of a volume requested by a pod author. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + type: object + type: array + type: object + upgradeOSDRequiresHealthyPGs: + description: |- + UpgradeOSDRequiresHealthyPGs defines if OSD upgrade requires PGs are clean. If set to `true` OSD upgrade process won't start until PGs are healthy. + This configuration will be ignored if `skipUpgradeChecks` is `true`. + Default is false. + type: boolean + waitTimeoutForHealthyOSDInMinutes: + description: |- + WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. + If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one + if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would + continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. + The default wait timeout is 10 minutes. 
+ format: int64 + type: integer + type: object + status: + description: ClusterStatus represents the status of a Ceph cluster + nullable: true + properties: + ceph: + description: CephStatus is the details health of a Ceph Cluster + properties: + capacity: + description: Capacity is the capacity information of a Ceph Cluster + properties: + bytesAvailable: + format: int64 + type: integer + bytesTotal: + format: int64 + type: integer + bytesUsed: + format: int64 + type: integer + lastUpdated: + type: string + type: object + details: + additionalProperties: + description: CephHealthMessage represents the health message of a Ceph Cluster + properties: + message: + type: string + severity: + type: string + required: + - message + - severity + type: object + type: object + fsid: + type: string + health: + type: string + lastChanged: + type: string + lastChecked: + type: string + previousHealth: + type: string + versions: + description: CephDaemonsVersions show the current ceph version for different ceph daemons + properties: + cephfs-mirror: + additionalProperties: + type: integer + description: CephFSMirror shows CephFSMirror Ceph version + type: object + mds: + additionalProperties: + type: integer + description: Mds shows Mds Ceph version + type: object + mgr: + additionalProperties: + type: integer + description: Mgr shows Mgr Ceph version + type: object + mon: + additionalProperties: + type: integer + description: Mon shows Mon Ceph version + type: object + osd: + additionalProperties: + type: integer + description: Osd shows Osd Ceph version + type: object + overall: + additionalProperties: + type: integer + description: Overall shows overall Ceph version + type: object + rbd-mirror: + additionalProperties: + type: integer + description: RbdMirror shows RbdMirror Ceph version + type: object + rgw: + additionalProperties: + type: integer + description: Rgw shows Rgw Ceph version + type: object + type: object + type: object + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + message: + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. 
+ format: int64 + type: integer + phase: + description: ConditionType represent a resource's status + type: string + state: + description: ClusterState represents the state of a Ceph Cluster + type: string + storage: + description: CephStorage represents flavors of Ceph Cluster Storage + properties: + deprecatedOSDs: + additionalProperties: + items: + type: integer + type: array + type: object + deviceClasses: + items: + description: DeviceClasses represents device classes of a Ceph Cluster + properties: + name: + type: string + type: object + type: array + osd: + description: OSDStatus represents OSD status of the ceph Cluster + properties: + storeType: + additionalProperties: + type: integer + description: StoreType is a mapping between the OSD backend stores and number of OSDs using these stores + type: object + type: object + type: object + version: + description: ClusterVersion represents the version of a Ceph Cluster + properties: + image: + type: string + version: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephclients.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephClient + listKind: CephClientList + plural: cephclients + singular: cephclient + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephClient represents a Ceph Client + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec represents the specification of a Ceph Client + properties: + caps: + additionalProperties: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + name: + type: string + required: + - caps + type: object + status: + description: Status represents the status of a Ceph Client + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. 
+ format: int64 + type: integer + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephbuckettopics.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBucketTopic + listKind: CephBucketTopicList + plural: cephbuckettopics + singular: cephbuckettopic + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephBucketTopic represents a Ceph Object Topic for Bucket Notifications + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketTopicSpec represent the spec of a Bucket Topic + properties: + endpoint: + description: Contains the endpoint spec of the topic + properties: + amqp: + description: Spec of AMQP endpoint + properties: + ackLevel: + default: broker + description: The ack level required for this topic (none/broker/routeable) + enum: + - none + - broker + - routeable + type: string + disableVerifySSL: + description: Indicate whether the server certificate is validated by the client or not + type: boolean + exchange: + description: Name of the exchange that is used to route messages based on topics + minLength: 1 + type: string + uri: + description: The URI of the AMQP endpoint to push notification to + minLength: 1 + type: string + required: + - exchange + - uri + type: object + http: + description: Spec of HTTP endpoint + properties: + disableVerifySSL: + description: Indicate whether the server certificate is validated by the client or not + type: boolean + sendCloudEvents: + description: 'Send the notifications with the CloudEvents header: https://github.com/cloudevents/spec/blob/main/cloudevents/adapters/aws-s3.md' + type: boolean + uri: + description: The URI of the HTTP endpoint to push notification to + minLength: 1 + type: string + required: + - uri + type: object + kafka: + description: Spec of Kafka endpoint + properties: + ackLevel: + default: broker + description: The ack level required for this topic (none/broker) + enum: + - none + - broker + type: string + disableVerifySSL: + description: Indicate whether the server certificate is validated by the client or not + type: boolean + uri: + description: The URI of the Kafka endpoint to push notification to + minLength: 1 + type: string + useSSL: + description: Indicate whether to use SSL when communicating with the 
broker + type: boolean + required: + - uri + type: object + type: object + objectStoreName: + description: The name of the object store on which to define the topic + minLength: 1 + type: string + objectStoreNamespace: + description: The namespace of the object store on which to define the topic + minLength: 1 + type: string + opaqueData: + description: Data which is sent in each event + type: string + persistent: + description: Indication whether notifications to this endpoint are persistent or not + type: boolean + required: + - endpoint + - objectStoreName + - objectStoreNamespace + type: object + status: + description: BucketTopicStatus represents the Status of a CephBucketTopic + properties: + ARN: + description: The ARN of the topic generated by the RGW + nullable: true + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. + format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephbucketnotifications.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBucketNotification + listKind: CephBucketNotificationList + plural: cephbucketnotifications + singular: cephbucketnotification + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephBucketNotification represents a Bucket Notifications + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketNotificationSpec represent the spec of a Bucket Notification + properties: + events: + description: List of events that should trigger the notification + items: + description: BucketNotificationSpec represent the event type of the bucket notification + enum: + - s3:ObjectCreated:* + - s3:ObjectCreated:Put + - s3:ObjectCreated:Post + - s3:ObjectCreated:Copy + - s3:ObjectCreated:CompleteMultipartUpload + - s3:ObjectRemoved:* + - s3:ObjectRemoved:Delete + - s3:ObjectRemoved:DeleteMarkerCreated + type: string + type: array + filter: + description: Spec of notification filter + properties: + keyFilters: + description: Filters based on the object's key + items: + description: NotificationKeyFilterRule represent a single key rule in the Notification Filter spec + properties: + name: + description: Name of the filter - prefix/suffix/regex + enum: + - prefix + - suffix + - regex + type: string + value: + description: Value to filter on + type: string + required: + - name + - value + type: object + type: array + metadataFilters: + description: Filters based on the object's metadata + items: + description: NotificationFilterRule represent a single rule in the Notification Filter spec + properties: + name: + description: Name of the metadata or tag + minLength: 1 + type: string + value: + description: Value to filter on + type: string + required: + - name + - value + type: object + type: array + tagFilters: + description: Filters based on the object's tags + items: + description: NotificationFilterRule represent a single rule in the Notification Filter spec + properties: + name: + description: Name of the metadata or tag + minLength: 1 + type: string + value: + description: Value to filter on + type: string + required: + - name + - value + type: object + type: array + type: object + topic: + description: The name of the topic associated with this notification + minLength: 1 + type: string + required: + - topic + type: object + status: + description: Status represents the status of an object + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. 
+ format: int64 + type: integer + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephblockpools.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPool + listKind: CephBlockPoolList + plural: cephblockpools + singular: cephblockpool + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.info.type + name: Type + type: string + - jsonPath: .status.info.failureDomain + name: FailureDomain + type: string + - jsonPath: .spec.replicated.size + name: Replication + priority: 1 + type: integer + - jsonPath: .spec.erasureCoded.codingChunks + name: EC-CodingChunks + priority: 1 + type: integer + - jsonPath: .spec.erasureCoded.dataChunks + name: EC-DataChunks + priority: 1 + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephBlockPool represents a Ceph Storage Pool + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NamedBlockPoolSpec allows a block pool to be created with a non-default name. + This is more specific than the NamedPoolSpec so we get schema validation on the + allowed pool names that can be specified. + properties: + application: + description: The application name to set on the pool. Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + name: + description: The desired name of the pool if different from the CephBlockPool CR name. 
+ enum: + - .rgw.root + - .nfs + - .mgr + type: string + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool + properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. 
+ properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + info: + additionalProperties: + type: string + nullable: true + type: object + mirroringInfo: + description: MirroringInfoSpec is the status of the pool mirroring + properties: + details: + type: string + lastChanged: + type: string + lastChecked: + type: string + mode: + description: Mode is the mirroring mode + type: string + peers: + description: Peers are the list of peer sites connected to that cluster + items: + description: PeersSpec contains peer details + properties: + client_name: + description: ClientName is the CephX user used to connect to the peer + type: string + direction: + description: Direction is the peer mirroring direction + type: string + mirror_uuid: + description: MirrorUUID is the mirror UUID + type: string + site_name: + description: SiteName is the current site name + type: string + uuid: + description: UUID is the peer UUID + type: string + type: object + type: array + site_name: + description: SiteName is the current site name + type: string + type: object + mirroringStatus: + description: MirroringStatusSpec is the status of the pool mirroring + properties: + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + summary: + description: Summary is the mirroring status summary + properties: + daemon_health: + description: DaemonHealth is the health of the mirroring daemon + type: string + health: + description: Health is the mirroring health + type: string + image_health: + description: ImageHealth is the health of the mirrored image + type: string + states: + description: States is the various state for all mirrored images + nullable: true + properties: + error: + description: Error is when the mirroring state is errored + type: integer + replaying: + description: Replaying is when the replay of the mirroring journal is on-going + type: integer + starting_replay: + description: StartingReplay is when the replay of the mirroring journal starts + type: integer + stopped: + description: Stopped is when the mirroring state is stopped + type: integer + stopping_replay: + description: StopReplaying is when the replay of the mirroring journal stops + type: integer + syncing: + description: Syncing is when the image is syncing + type: integer + unknown: + description: Unknown is when the mirroring state is unknown + type: integer + type: object + type: object + type: object + observedGeneration: + description: ObservedGeneration is the latest generation observed by the controller. 
+ format: int64 + type: integer + phase: + description: ConditionType represent a resource's status + type: string + snapshotScheduleStatus: + description: SnapshotScheduleStatusSpec is the status of the snapshot schedule + properties: + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + snapshotSchedules: + description: SnapshotSchedules is the list of snapshots scheduled + items: + description: SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool + properties: + image: + description: Image is the mirrored image + type: string + items: + description: Items is the list schedules times for a given snapshot + items: + description: SnapshotSchedule is a schedule + properties: + interval: + description: Interval is the interval in which snapshots will be taken + type: string + start_time: + description: StartTime is the snapshot starting time + type: string + type: object + type: array + namespace: + description: Namespace is the RADOS namespace the image is part of + type: string + pool: + description: Pool is the pool name + type: string + type: object + nullable: true + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: rook-ceph/templates/resources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + helm.sh/resource-policy: keep + name: cephblockpoolradosnamespaces.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPoolRadosNamespace + listKind: CephBlockPoolRadosNamespaceList + plural: cephblockpoolradosnamespaces + singular: cephblockpoolradosnamespace + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - description: Name of the Ceph BlockPool + jsonPath: .spec.blockPoolName + name: BlockPool + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec represents the specification of a Ceph BlockPool Rados Namespace + properties: + blockPoolName: + description: |- + BlockPoolName is the name of Ceph BlockPool. Typically it's the name of + the CephBlockPool CR. 
+ type: string + x-kubernetes-validations: + - message: blockPoolName is immutable + rule: self == oldSelf + name: + description: The name of the CephBlockPoolRadosNamespaceSpec namespace. If not set, the default is the name of the CR. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + required: + - blockPoolName + type: object + status: + description: Status represents the status of a CephBlockPool Rados Namespace + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- diff --git a/addons/rook/1.15.5/host-preflight.yaml b/addons/rook/1.15.5/host-preflight.yaml new file mode 100644 index 0000000000..3538b437c0 --- /dev/null +++ b/addons/rook/1.15.5/host-preflight.yaml @@ -0,0 +1,40 @@ +apiVersion: troubleshoot.sh/v1beta2 +kind: HostPreflight +metadata: + name: kurl-builtin +spec: + collectors: + - blockDevices: + exclude: '{{kurl .IsUpgrade }}' + - tcpPortStatus: + collectorName: "Pod csi-rbdplugin Host Port" + port: 9090 + exclude: '{{kurl .IsUpgrade }}' + + analyzers: + - blockDevices: + includeUnmountedPartitions: true + minimumAcceptableSize: 10737418240 # 1024 ^ 3 * 10, 10GiB + exclude: '{{kurl or (.IsUpgrade) (gt .Installer.Spec.Rook.MinimumNodeCount 2) }}' + outcomes: + - pass: + when: "{{kurl if (and .Installer.Spec.Rook.Version .Installer.Spec.Rook.BlockDeviceFilter) }}{{kurl .Installer.Spec.Rook.BlockDeviceFilter }}{{kurl else }}.*{{kurl end }} == 1" + message: One available block device + - pass: + when: "{{kurl if (and .Installer.Spec.Rook.Version .Installer.Spec.Rook.BlockDeviceFilter) }}{{kurl .Installer.Spec.Rook.BlockDeviceFilter }}{{kurl else }}.*{{kurl end }} > 1" + message: Multiple available block devices + - fail: + message: "No available unformatted block devices were found, and Rook requires one. For further information see: https://kurl.sh/docs/add-ons/rook#block-storage" + - tcpPortStatus: + checkName: "Pod csi-rbdplugin Host Port Status" + collectorName: "Pod csi-rbdplugin Host Port" + exclude: '{{kurl .IsUpgrade }}' + outcomes: + - pass: + when: "connected" + message: "Port 9090 is available for use." + - fail: + when: "address-in-use" + message: "Another process is listening on port 9090." + - fail: + message: "Unexpected error connecting to port 9090." 
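Note: the {{kurl ... }} expressions in the preflight above read values from the kURL installer spec (.Installer.Spec.Rook.*). As an illustrative sketch only (not part of this patch; field names are assumptions to be checked against the kURL Rook add-on documentation), an installer that pins this add-on version and sets the two options the analyzers consult might look like:

apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: rook-1-15-5-example   # hypothetical name
spec:
  kubernetes:
    version: "1.29.x"         # assumed Kubernetes version token
  rook:
    version: "1.15.5"
    blockDeviceFilter: "sdb"  # assumed device name; matched by the blockDevices analyzer above
    minimumNodeCount: 3       # values greater than 2 exclude the block device analyzer above

Setting minimumNodeCount above 2 defers OSD provisioning to multi-node handling, which is why the block device analyzer is excluded in that case.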
diff --git a/addons/rook/1.15.5/install.sh b/addons/rook/1.15.5/install.sh new file mode 100644 index 0000000000..4b5e150271 --- /dev/null +++ b/addons/rook/1.15.5/install.sh @@ -0,0 +1,1157 @@ +# shellcheck disable=SC2148 + +function rook_pre_init() { + local current_version + current_version="$(rook_version)" + + export SKIP_ROOK_INSTALL + if rook_should_skip_rook_install "$current_version" "$ROOK_VERSION" ; then + SKIP_ROOK_INSTALL=1 + + # If we do not upgrade Rook then the previous Rook version 1.0.4 is not compatible with Kubernetes 1.20+ + if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge 20 ] && [ "$current_version" = "1.0.4" ]; then + export KUBERNETES_UPGRADE=0 + export KUBERNETES_VERSION + KUBERNETES_VERSION=$(kubectl get nodes --sort-by='{.status.nodeInfo.kubeletVersion}' -o=jsonpath='{.items[0].status.nodeInfo.kubeletVersion}' | sed 's/^v*//') + parse_kubernetes_target_version + # There's no guarantee the packages from this version of Kubernetes are still available + export SKIP_KUBERNETES_HOST=1 + fi + fi + + if [ "${ROOK_BYPASS_UPGRADE_WARNING}" != "1" ]; then + if [ "$SKIP_ROOK_INSTALL" != "1" ] && [ -n "$current_version" ] && [ "$current_version" != "$ROOK_VERSION" ]; then + logWarn "WARNING: This installer will upgrade Rook to version ${ROOK_VERSION}." + logWarn "Upgrading a Rook cluster is not without risk, including data loss." + logWarn "The Rook cluster's storage may be unavailable for short periods during the upgrade process." + log "" + log "Would you like to continue? " + if ! confirmN ; then + logWarn "Will not upgrade rook-ceph cluster" + SKIP_ROOK_INSTALL=1 + fi + fi + fi + + # check Rook prerequisites + if rook_should_fail_install; then + bail "Rook ${ROOK_VERSION} will not be installed due to failed preflight checks." + fi + + rook_prompt_migrate_from_longhorn + + rook_lvm2 + + rook_modprobe_rbd +} + +function rook_post_init() { + local src="${DIR}/addons/rook/${ROOK_VERSION}" + + # apply Prometheus ServiceMonitor CR and Ceph Grafana dashboard + if [ -n "$PROMETHEUS_VERSION" ]; then + echo "Rook Post-init: Installing Prometheus ServiceMonitor and Ceph Grafana Dashboard" + kubectl -n monitoring apply -k "$src/monitoring/" + fi + + if [ "$ROOK_DID_DISABLE_EKCO_OPERATOR" = "1" ]; then + rook_enable_ekco_operator + fi +} + +ROOK_CEPH_IMAGE= +ROOK_DID_DISABLE_EKCO_OPERATOR=0 +function rook() { + local src="${DIR}/addons/rook/${ROOK_VERSION}" + export ROOK_CEPH_IMAGE="quay.io/ceph/ceph:v18.2.4" + + if [ "$SKIP_ROOK_INSTALL" = "1" ]; then + local version + version=$(rook_version) + echo "Rook $version is already installed, will not upgrade to ${ROOK_VERSION}" + rook_object_store_output + return 0 + fi + + # Disable EKCO updates + # Disallow the EKCO operator from updating Rook custom resources during a Rook upgrade + rook_disable_ekco_operator + ROOK_DID_DISABLE_EKCO_OPERATOR=1 + + # delete old clusterrolebinding + # see issue https://github.com/rook/rook/issues/6448 + kubectl delete --ignore-not-found clusterrolebinding rook-ceph-system-psp-users + + # removed flex driver in v1.8.0 + # https://github.com/rook/rook/pull/8799 + kubectl delete --ignore-not-found crd volumes.rook.io + + rook_operator_crds_deploy + rook_operator_deploy + rook_set_ceph_pool_replicas + rook_ready_spinner # creating the cluster before the operator is ready fails + + if [ -n "$ROOK_MINIMUM_NODE_COUNT" ] && [ "$ROOK_MINIMUM_NODE_COUNT" -gt "1" ]; then + # check if there is already a CephCluster - if there is, this code should manage it + if ! 
kubectl get cephcluster -n rook-ceph rook-ceph >/dev/null 2>&1; then + log "Rook minimumNodeCount parameter set to ${ROOK_MINIMUM_NODE_COUNT}. Ceph Cluster will be managed by EKCO." + return 0 # do not create a ceph cluster if it should instead be managed by ekco + fi + fi + + rook_cluster_deploy + + rook_dashboard_ready_spinner + export CEPH_DASHBOARD_URL=http://rook-ceph-mgr-dashboard.rook-ceph.svc.cluster.local:7000 + # Ceph v13+ requires login. Rook 1.0+ creates a secret in the rook-ceph namespace. + local cephDashboardPassword + cephDashboardPassword=$(kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode) + if [ -n "$cephDashboardPassword" ]; then + export CEPH_DASHBOARD_USER=admin + export CEPH_DASHBOARD_PASSWORD="$cephDashboardPassword" + fi + + if ! kubectl -n rook-ceph get pod -l app=rook-ceph-rgw -o jsonpath='{.items[0].status.phase}' 2>/dev/null | grep -q Running ; then + printf "\n\n%bRook Ceph 1.4+ requires a secondary, unformatted block device attached to the host.%b\n" "$GREEN" "$NC" + printf "%bIf you are stuck waiting at this step for more than two minutes, you are either missing the device or it is already formatted.%b\n" "$GREEN" "$NC" + printf "\t%b * If it is missing, attach it now and it will be picked up; or CTRL+C, attach, and re-start the installer%b\n" "$GREEN" "$NC" + printf "\t%b * If the disk is attached, try wiping it using the recommended zap procedure: https://rook.io/docs/rook/v1.10/Storage-Configuration/ceph-teardown/?h=zap#zapping-devices%b\n\n" "$GREEN" "$NC" + fi + + printf "checking for attached secondary block device (awaiting rook-ceph RGW pod)\n" + spinnerPodRunning rook-ceph rook-ceph-rgw-rook-ceph-store + kubectl -n rook-ceph apply -f "$src/cluster/object-user.yaml" + rook_object_store_output + + echo "Awaiting rook-ceph object store health" + if ! spinner_until 120 rook_rgw_is_healthy ; then + bail "Failed to detect healthy rook-ceph object store" + fi + + # migrate from Longhorn storage if applicable + rook_maybe_migrate_from_longhorn + + # wait for all pods in the rook-ceph namespace to rollout + log "Awaiting Rook rollout in rook-ceph namespace" + rook_maybe_wait_for_rollout +} + +function rook_join() { + rook_lvm2 +} + +function rook_already_applied() { + rook_object_store_output + export ROOK_CEPH_IMAGE="quay.io/ceph/ceph:v18.2.4" + rook_set_ceph_pool_replicas + "$DIR"/bin/kurl rook wait-for-health 120 + rook_maybe_wait_for_rollout +} + +function rook_operator_crds_deploy() { + local src="${DIR}/addons/rook/${ROOK_VERSION}" + local dst="${DIR}/kustomize/rook" + + mkdir -p "${dst}" + cp "$src/crds.yaml" "$dst/crds.yaml" + + # replace the CRDs if they already exist otherwise create them + # replace or create rather than apply to avoid the error "metadata.annotations: Too long" + if ! kubectl replace -f "$dst/crds.yaml" 2>/dev/null ; then + + if kubectl get ns rook-ceph >/dev/null 2>&1 ; then + # Rook 1.12 introduced a new CRD "cephcosidrivers.ceph.rook.io" which will cause + # `kubectl create` to fail on upgrades. The following logic will extract the new CRD yaml and create it. 
+ semverParse "$ROOK_VERSION" + local rook_major_version="$major" + local rook_minor_version="$minor" + if [ "$rook_major_version" = "1" ] && [ "$rook_minor_version" -ge "12" ]; then + get_yaml_from_multidoc_yaml "$dst/crds.yaml" "cephcosidrivers.ceph.rook.io" | kubectl create -f - + fi + + return + fi + + kubectl create -f "$dst/crds.yaml" + fi +} + +function rook_operator_deploy() { + local src="${DIR}/addons/rook/${ROOK_VERSION}/operator" + local dst="${DIR}/kustomize/rook/operator" + + mkdir -p "${DIR}/kustomize/rook" + rm -rf "$dst" + cp -r "$src" "$dst" + + if [ "$ROOK_HOSTPATH_REQUIRES_PRIVILEGED" = "1" ]; then + insert_patches_strategic_merge "$dst/kustomization.yaml" patches/deployment-privileged.yaml + fi + + if [ "$KUBERNETES_TARGET_VERSION_MINOR" -lt "17" ]; then + bail "Kubernetes versions less than 1.17 unsupported" + fi + if [ "$IPV6_ONLY" = "1" ]; then + sed -i "/\[global\].*/a\ ms bind ipv6 = true" "$dst/configmap-rook-config-override.yaml" + sed -i "/\[global\].*/a\ ms bind ipv4 = false" "$dst/configmap-rook-config-override.yaml" + fi + + # upgrade first before applying auth_allow_insecure_global_id_reclaim policy + rook_maybe_auth_allow_insecure_global_id_reclaim + + # disable bluefs_buffered_io for rook ge 1.8.x + # See: + # - https://github.com/rook/rook/issues/10160#issuecomment-1168303067 + # - https://tracker.ceph.com/issues/54019 + rook_maybe_bluefs_buffered_io + + kubectl -n rook-ceph apply -k "$dst/" +} + +function rook_cluster_deploy() { + local src="${DIR}/addons/rook/${ROOK_VERSION}/cluster" + local dst="${DIR}/kustomize/rook/cluster" + + mkdir -p "${DIR}/kustomize/rook" + rm -rf "$dst" + cp -r "$src" "$dst" + + # resources + render_yaml_file_2 "$src/tmpl-rbd-storageclass.yaml" > "$dst/rbd-storageclass.yaml" + insert_resources "$dst/kustomization.yaml" rbd-storageclass.yaml + + # conditional cephfs + if [ "${ROOK_SHARED_FILESYSTEM_DISABLED}" != "1" ]; then + mkdir -p "$dst/cephfs" + touch "$dst/cephfs/kustomization.yaml" + insert_resources "$dst/cephfs/kustomization.yaml" cephfs-storageclass.yaml + insert_resources "$dst/cephfs/kustomization.yaml" filesystem.yaml + insert_patches_strategic_merge "$dst/cephfs/kustomization.yaml" patches/cephfs-storageclass.yaml + render_yaml_file_2 "$src/cephfs/patches/tmpl-filesystem.yaml" > "$dst/cephfs/patches/filesystem.yaml" + insert_patches_strategic_merge "$dst/cephfs/kustomization.yaml" patches/filesystem.yaml + + # MDS pod anti-affinity rules prevent them from co-scheduling on single-node installations + local ready_node_count + ready_node_count="$(kubectl get nodes --no-headers 2>/dev/null | grep -c ' Ready')" + if [ "$ready_node_count" -le "1" ]; then + insert_patches_strategic_merge "$dst/cephfs/kustomization.yaml" patches/filesystem-singlenode.yaml + fi + + render_yaml_file_2 "$src/cephfs/patches/tmpl-filesystem-Json6902.yaml" > "$dst/cephfs/patches/filesystem-Json6902.yaml" + insert_patches_json_6902 "$dst/cephfs/kustomization.yaml" patches/filesystem-Json6902.yaml ceph.rook.io v1 CephFilesystem rook-shared-fs rook-ceph + + insert_bases "$dst/kustomization.yaml" cephfs + fi + + # patches + render_yaml_file "$src/patches/tmpl-cluster.yaml" > "$dst/patches/cluster.yaml" + insert_patches_strategic_merge "$dst/kustomization.yaml" patches/cluster.yaml + if [ -n "$ROOK_NODES" ]; then + rook_render_cluster_nodes_tmpl_yaml "$ROOK_NODES" "$src" "$dst" + fi + render_yaml_file "$src/patches/tmpl-object.yaml" > "$dst/patches/object.yaml" + insert_patches_strategic_merge "$dst/kustomization.yaml" patches/object.yaml + 
render_yaml_file_2 "$src/patches/tmpl-rbd-storageclass.yaml" > "$dst/patches/rbd-storageclass.yaml" + insert_patches_strategic_merge "$dst/kustomization.yaml" patches/rbd-storageclass.yaml + + # Don't redeploy cluster - ekco may have made changes based on num of nodes in cluster + # This must come after the yaml is rendered as it relies on dst. + if kubernetes_resource_exists rook-ceph cephcluster rook-ceph ; then + echo "Cluster rook-ceph already deployed" + rook_cluster_deploy_upgrade + + # if we are enabling the shared filesystem for the first time, we need to create the filesystem + if [ "${ROOK_SHARED_FILESYSTEM_DISABLED}" != "1" ] && ! kubernetes_resource_exists rook-ceph cephfilesystem rook-shared-fs ; then + kubectl -n rook-ceph apply -k "$dst/cephfs/" + fi + return 0 + fi + + kubectl -n rook-ceph apply -k "$dst/" +} + +function rook_cluster_deploy_upgrade() { + # Prior to calling this function the following steps have been taken in the upgrade process: + # 1. https://rook.io/docs/rook/v1.6/ceph-upgrade.html#1-update-common-resources-and-crds + # rook_operator_crds_deploy + # rook_operator_deploy + # 2. https://rook.io/docs/rook/v1.5/ceph-upgrade.html#2-update-ceph-csi-versions + # Not needed, using default CSI images + # 3. https://rook.io/docs/rook/v1.6/ceph-upgrade.html#3-update-the-rook-operator + # rook_operator_deploy + + local ceph_image="quay.io/ceph/ceph:v18.2.4" + local ceph_version= + ceph_version="$(echo "${ceph_image}" | awk 'BEGIN { FS=":v" } ; {print $2}')" + + if rook_ceph_version_deployed "${ceph_version}" ; then + echo "Cluster rook-ceph up to date" + rook_patch_insecure_clients + + if [ -n "$ROOK_NODES" ]; then + rook_patch_cephcluster_nodes + fi + + rook_cluster_deploy_upgrade_flexvolumes_to_csi + return 0 + fi + + if kubernetes_resource_exists rook-ceph cephfilesystem rook-shared-fs ; then + # When upgrading we need both MDS pods and anti-affinity rules prevent them from co-scheduling on single-node installations + local ready_node_count + ready_node_count="$(kubectl get nodes --no-headers 2>/dev/null | grep -c ' Ready')" + if [ "$ready_node_count" -le "1" ]; then + rook_cephfilesystem_patch_singlenode + fi + fi + + # 4. https://rook.io/docs/rook/v1.6/ceph-upgrade.html#4-wait-for-the-upgrade-to-complete + echo "Awaiting rook-ceph operator" + if ! "$DIR"/bin/kurl rook wait-for-rook-version "$ROOK_VERSION" --timeout=1200 ; then + logWarn "Timeout waiting for Rook version rolled out" + logStep "Checking Rook versions and replicas" + kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' + local rook_versions= + rook_versions="$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq)" + if [ -n "${rook_versions}" ] && [ "$(echo "${rook_versions}" | wc -l)" -gt "1" ]; then + logWarn "Detected multiple Rook versions" + logWarn "${rook_versions}" + logWarn "Failed to verify the Rook upgrade, multiple Rook versions detected" + fi + fi + + # 5. https://rook.io/docs/rook/v1.6/ceph-upgrade.html#5-verify-the-updated-cluster + echo "Awaiting Ceph healthy" + + # CRD changes makes rook to restart and it takes time to reconcile + if ! 
"$DIR"/bin/kurl rook wait-for-health 300 ; then + kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status + bail "Refusing to update cluster rook-ceph, Ceph is not healthy" + fi + + # https://rook.io/docs/rook/v1.6/ceph-upgrade.html#ceph-version-upgrades + logStep "Upgrading rook-ceph cluster" + + # https://rook.io/docs/rook/v1.6/ceph-upgrade.html#1-update-the-main-ceph-daemons + + kubectl -n rook-ceph patch cephcluster/rook-ceph --type='json' -p='[{"op": "replace", "path": "/spec/cephVersion/image", "value":"'"${ceph_image}"'"}]' + + # https://rook.io/docs/rook/v1.6/ceph-upgrade.html#2-wait-for-the-daemon-pod-updates-to-complete + if ! "$DIR"/bin/kurl rook wait-for-ceph-version "${ceph_version}-0" --timeout=1200 ; then + logWarn "Timeout waiting for Ceph version to be rolled out" + log "Checking Ceph versions and replicas" + kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \tceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' + local ceph_versions_found= + ceph_versions_found="$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq)" + # Fail when more than one version is found + if [ -n "${ceph_versions_found}" ] && [ "$(echo "${ceph_versions_found}" | wc -l)" -gt "1" ]; then + logWarn "Detected multiple Ceph versions" + logWarn "${ceph_versions_found}" + logWarn "Failed to verify the Ceph upgrade, multiple Ceph versions detected" + fi + + if [[ "$(echo "${ceph_versions_found}")" == *"${ceph_version}"* ]]; then + logWarn "Ceph version found ${ceph_versions_found}. New Ceph version ${ceph_version} failed to deploy" + fi + bail "New Ceph version failed to deploy" + fi + + rook_patch_insecure_clients + + if [ -n "$ROOK_NODES" ]; then + rook_patch_cephcluster_nodes + fi + + # https://rook.io/docs/rook/v1.6/ceph-upgrade.html#3-verify-the-updated-cluster + + echo "Awaiting Ceph healthy" + + if ! "$DIR"/bin/kurl rook wait-for-health 300 ; then + kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status + bail "Failed to verify the updated cluster, Ceph is not healthy" + fi + + kubectl -n rook-ceph delete --ignore-not-found priorityclass rook-critical + + logStep "Checking if the Rook-Ceph cluster upgrade completed successfully" + verify_rook_updated_cluster + logSuccess "Rook-Ceph cluster upgraded successfully" +} + +# Before to finish end report that the upgrade was done with success ensure that +# only on rook version is found and the ceph status +# https://rook.io/docs/rook/v1.9/ceph-upgrade.html#3-verify-the-updated-cluster +function verify_rook_updated_cluster() { + log "Verifying Rook Version Deployed" + if ! 
"$DIR"/bin/kurl rook wait-for-rook-version "$ROOK_VERSION" --timeout=1200 ; then + logWarn "Timeout awaiting Rook version" + log "Rook versions and replicas" + kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' + local rook_versions= + rook_versions="$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq)" + if [ -n "${rook_versions}" ] && [ "$(echo "${rook_versions}" | wc -l)" -gt "1" ]; then + logWarn "Detected multiple Rook versions" + logWarn "${rook_versions}" + bail "Failed to verify the Rook upgrade, multiple Rook versions detected" + fi + fi + + log "Verifying Ceph version ${ceph_version} deployed" + if ! "$DIR"/bin/kurl rook wait-for-ceph-version "${ceph_version}-0" --timeout=1200 ; then + logWarn "Timeout waiting for Ceph version to be rolled out" + log "Checking Ceph versions and replicas" + kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \tceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' + local ceph_versions_found= + ceph_versions_found="$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq)" + if [ -n "${ceph_versions_found}" ] && [ "$(echo "${ceph_versions_found}" | wc -l)" -gt "1" ]; then + logWarn "Detected multiple Ceph versions" + logWarn "${ceph_versions_found}" + bail "Failed to verify the Ceph upgrade, multiple Ceph versions detected" + fi + + if [[ "$(echo "${ceph_versions_found}")" == *"${ceph_version}"* ]]; then + bail "Ceph version found ${ceph_versions_found}. New Ceph version ${ceph_version} failed to deploy" + fi + bail "New Ceph version ${ceph_version} failed to deploy" + fi + + log "Verifying Ceph status" + if ! 
$DIR/bin/kurl rook wait-for-health 300 ; then + kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status + bail "Failed to verify the updated cluster, Ceph is not healthy" + fi +} + + +# rook_cluster_deploy_upgrade_flexvolumes_to_csi will check if the previous storageclass is using +# the flex volume provisioner (if this is an upgrade from 1.0.4) and will deploy a new storageclass +# with the CSI provisioner following this guide: +# https://rook.io/docs/rook/v1.7/flex-to-csi-migration.html +function rook_cluster_deploy_upgrade_flexvolumes_to_csi() { + local src="$DIR/addons/rook/$ROOK_VERSION/cluster" + local dst="$DIR/kustomize/rook/cluster" + + local src_sc="${STORAGE_CLASS:-default}" + local tmp_sc=rook-ceph-tmp + + local rook_did_scale_down_ekco=0 + local rook_did_scale_down_prometheus=0 + + # if the "default" storage class exists and it is still using the flex volume provisioner + if [ "$(kubectl get sc "$src_sc" --ignore-not-found -o jsonpath='{.provisioner}')" = "ceph.rook.io/block" ]; then + # patch the existing storage class to not be the default + kubectl patch storageclass "$src_sc" -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' + + # deploy a new storage class with the CSI provisioner + rook_cluster_deploy_upgrade_create_storageclass "$tmp_sc" + + if [ "$rook_did_scale_down_ekco" != "1" ]; then + rook_scale_down_ekco + fi + if [ "$rook_did_scale_down_prometheus" != "1" ]; then + rook_scale_down_prometheus + fi + + # run the actual flex volumes to csi volumes migration + rook_cluster_deploy_upgrade_pvmigrator "$src_sc" "$tmp_sc" + + kubectl delete sc "$src_sc" + fi + + # if there is still a temp storage class, it means we have not finished the migration + if kubectl get sc "$tmp_sc" >/dev/null 2>&1 ; then + # migrate a second time effectively renaming the temp storageclass back to "default" + rook_cluster_deploy_upgrade_create_storageclass "$src_sc" + + if [ "$rook_did_scale_down_ekco" != "1" ]; then + rook_scale_down_ekco + fi + if [ "$rook_did_scale_down_prometheus" != "1" ]; then + rook_scale_down_prometheus + fi + + rook_cluster_deploy_upgrade_pvmigrator "$tmp_sc" "$src_sc" + + # delete the temp storageclass + kubectl delete sc "$tmp_sc" + fi + + if [ "$rook_did_scale_down_ekco" = "1" ]; then + rook_scale_up_ekco + fi + if [ "$rook_did_scale_down_prometheus" = "1" ]; then + rook_scale_up_prometheus + fi +} + +# rook_cluster_deploy_upgrade_create_storageclass will render the necessary resources and create a +# storageclass +function rook_cluster_deploy_upgrade_create_storageclass() { + local dst_sc="$1" + + local kustomize_dir="$dst/rbd-storageclass-$dst_sc" + + mkdir -p "$kustomize_dir/patches/" + echo "" > "$kustomize_dir/kustomization.yaml" # clear the file + local o_storage_class="$STORAGE_CLASS" + export STORAGE_CLASS="$dst_sc" + render_yaml_file_2 "$src/tmpl-rbd-storageclass.yaml" > "$dst/rbd-storageclass.yaml" + render_yaml_file_2 "$src/patches/tmpl-rbd-storageclass.yaml" > "$dst/patches/rbd-storageclass.yaml" + STORAGE_CLASS="$o_storage_class" # restore the original value + cp "$dst/rbd-storageclass.yaml" "$kustomize_dir/rbd-storageclass.yaml" + cp "$dst/patches/rbd-storageclass.yaml" "$kustomize_dir/patches/rbd-storageclass.yaml" + insert_resources "$kustomize_dir/kustomization.yaml" rbd-storageclass.yaml + insert_patches_strategic_merge "$kustomize_dir/kustomization.yaml" patches/rbd-storageclass.yaml + kubectl apply -k "$kustomize_dir/" +} + +# rook_cluster_deploy_upgrade_pvmigrator will invoke the kurl rook 
flexvolume-to-csi command to run +# the actual flex volumes to csi volumes migration +function rook_cluster_deploy_upgrade_pvmigrator() { + local src_sc="$1" + local dst_sc="$2" + + logStep "Migrating Rook Flex volumes to CSI volumes" + local node_name= + node_name="$(get_local_node_name)" + local bin_path= + bin_path="$(realpath "$BIN_ROOK_PVMIGRATOR")" + + ( set -x; + "$BIN_KURL" rook flexvolume-to-csi \ + --source-sc "$src_sc" \ + --destination-sc "$dst_sc" \ + --node "$node_name" \ + --pv-migrator-bin-path "$bin_path" \ + --ceph-migrator-image "rook/ceph:v$ROOK_VERSION" ) + logSuccess "Rook Flex volumes to CSI volumes migrated successfully" +} + +# rook_scale_down_ekco will scale down ekco to 0 replicas +function rook_scale_down_ekco() { + if ! kubernetes_resource_exists kurl deployment ekc-operator ; then + return + fi + + if [ "$(kubectl -n kurl get deployments ekc-operator -o jsonpath='{.spec.replicas}')" = "0" ]; then + return + fi + + kubectl -n kurl scale deployment ekc-operator --replicas=0 + rook_did_scale_down_ekco=1 # local to caller + log "Waiting for ekco pods to be removed" + if ! spinner_until 120 ekco_pods_gone; then + logFail "Unable to scale down ekco operator" + return 1 + fi +} + +# rook_scale_up_ekco will scale up ekco to 1 replica +function rook_scale_up_ekco() { + if ! kubernetes_resource_exists kurl deployment ekc-operator ; then + return + fi + + kubectl -n kurl scale deployment ekc-operator --replicas=1 +} + +# rook_scale_down_prometheus will scale down prometheus to 0 replicas +function rook_scale_down_prometheus() { + if ! kubernetes_resource_exists monitoring prometheus k8s ; then + return + fi + + if [ "$(kubectl -n monitoring get prometheus k8s -o jsonpath='{.spec.replicas}')" = "0" ]; then + return + fi + + kubectl -n monitoring patch prometheus k8s --type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 0}]' + rook_did_scale_down_prometheus=1 # local to caller + log "Waiting for prometheus pods to be removed" + spinner_until 300 prometheus_pods_gone +} + +# rook_scale_up_prometheus will scale up prometheus replicas to 2 +function rook_scale_up_prometheus() { + if ! kubernetes_resource_exists monitoring prometheus k8s ; then + return + fi + + kubectl -n monitoring patch prometheus k8s --type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 2}]' +} + +function rook_dashboard_ready_spinner() { + echo "Awaiting rook-ceph dashboard password" + + spinner_until 300 kubernetes_resource_exists rook-ceph secret rook-ceph-dashboard-password +} + +function rook_ready_spinner() { + echo "Awaiting rook-ceph pods" + + spinner_until 60 kubernetes_resource_exists rook-ceph deployment rook-ceph-operator + spinner_until 60 kubernetes_resource_exists rook-ceph daemonset rook-discover + spinner_until 300 deployment_fully_updated rook-ceph rook-ceph-operator + spinner_until 60 daemonset_fully_updated rook-ceph rook-discover +} + +# rook_ceph_version_deployed check that there is only one ceph-version reported across the cluster +function rook_ceph_version_deployed() { + local ceph_version="$1" + # wait for our version to start reporting + if ! 
kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | grep -q "${ceph_version}" ; then
+        return 1
+    fi
+    # wait for our version to be the only one reporting
+    if [ "$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq | wc -l)" != "1" ]; then
+        return 1
+    fi
+    # sanity check
+    if ! kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | grep -q "${ceph_version}" ; then
+        return 1
+    fi
+    return 0
+}
+
+# CEPH_POOL_REPLICAS is undefined when this function is called unless set explicitly with a flag.
+# If set by flag, use that value.
+# Else, if the replicapool cephblockpool CR in the rook-ceph namespace is found, set CEPH_POOL_REPLICAS to that.
+# Then increase up to 3 based on the number of ready nodes found.
+# The ceph-pool-replicas flag will override any value set here.
+function rook_set_ceph_pool_replicas() {
+    if [ -n "$CEPH_POOL_REPLICAS" ]; then
+        return 0
+    fi
+    CEPH_POOL_REPLICAS=1
+    set +e
+    local discoveredCephPoolReplicas
+    discoveredCephPoolReplicas=$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath="{.spec.replicated.size}" 2>/dev/null)
+    if [ -n "$discoveredCephPoolReplicas" ]; then
+        CEPH_POOL_REPLICAS="$discoveredCephPoolReplicas"
+    fi
+    local readyNodeCount
+    readyNodeCount=$(kubectl get nodes 2>/dev/null | grep -c ' Ready')
+    if [ "$readyNodeCount" -gt "$CEPH_POOL_REPLICAS" ] && [ "$readyNodeCount" -le "3" ]; then
+        CEPH_POOL_REPLICAS="$readyNodeCount"
+    fi
+    set -e
+}
+
+function rook_object_store_output() {
+    # Rook operator creates this secret from the user CRD just applied
+    while ! 
kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl >/dev/null 2>&1 ; do + sleep 2 + done + + # create the docker-registry bucket through the S3 API + export OBJECT_STORE_ACCESS_KEY + OBJECT_STORE_ACCESS_KEY=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep AccessKey | head -1 | awk '{print $2}' | base64 --decode) + export OBJECT_STORE_SECRET_KEY + OBJECT_STORE_SECRET_KEY=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep SecretKey | head -1 | awk '{print $2}' | base64 --decode) + export OBJECT_STORE_CLUSTER_IP + OBJECT_STORE_CLUSTER_IP=$(kubectl -n rook-ceph get service rook-ceph-rgw-rook-ceph-store | tail -n1 | awk '{ print $3}') + export OBJECT_STORE_CLUSTER_HOST="http://rook-ceph-rgw-rook-ceph-store.rook-ceph" + # same as OBJECT_STORE_CLUSTER_IP for IPv4, wrapped in brackets for IPv6 + export OBJECT_STORE_CLUSTER_IP_BRACKETED + OBJECT_STORE_CLUSTER_IP_BRACKETED=$("$DIR"/bin/kurl netutil format-ip-address "$OBJECT_STORE_CLUSTER_IP") +} + +# deprecated, use object_store_create_bucket +function rook_create_bucket() { + local bucket=$1 + local acl="x-amz-acl:private" + local d + d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z") + local string="PUT\n\n\n${d}\n${acl}\n/${bucket}" + local sig + sig=$(echo -en "${string}" | openssl dgst -sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64) + + curl -X PUT \ + --globoff \ + --noproxy "*" \ + -H "Host: $OBJECT_STORE_CLUSTER_IP" \ + -H "Date: $d" \ + -H "$acl" \ + -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \ + "http://$OBJECT_STORE_CLUSTER_IP_BRACKETED/$bucket" >/dev/null +} + +function rook_rgw_is_healthy() { + curl --globoff --noproxy "*" --fail --silent --insecure "http://${OBJECT_STORE_CLUSTER_IP_BRACKETED}" > /dev/null +} + +function rook_version() { + kubectl -n rook-ceph get deploy rook-ceph-operator -oyaml 2>/dev/null \ + | grep ' image: ' \ + | awk -F':' 'NR==1 { print $3 }' \ + | sed 's/v\([^-]*\).*/\1/' +} + +function rook_lvm2() { + local src="${DIR}/addons/rook/${ROOK_VERSION}" + if commandExists lvm; then + return + fi + + if ! host_packages_shipped ; then + ensure_host_package lvm2 lvm2 + else + install_host_archives "$src" lvm2 + fi +} + +function rook_patch_insecure_clients { + echo "Patching allowance of insecure rook clients" + + # upgrade first before applying auth_allow_insecure_global_id_reclaim policy + if kubectl -n rook-ceph get configmap rook-config-override -ojsonpath='{.data.config}' | grep -q 'auth_allow_insecure_global_id_reclaim = true' ; then + local dst="${DIR}/kustomize/rook/operator" + sed -i 's/auth_allow_insecure_global_id_reclaim = true/auth_allow_insecure_global_id_reclaim = false/' "$dst/configmap-rook-config-override.yaml" + kubectl -n rook-ceph apply -f "$dst/configmap-rook-config-override.yaml" + fi + + # Disabling rook global_id reclaim + kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph config set mon auth_allow_insecure_global_id_reclaim false + + # restart all mons waiting for ok-to-stop + for mon in $(kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph health detail | grep 'mon\.[a-z][a-z]* has auth_allow_insecure_global_id_reclaim' | grep -o 'mon\.[a-z][a-z]*') ; do + echo "Awaiting $mon ok-to-stop" + if ! spinner_until 120 kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph mon ok-to-stop "$mon" >/dev/null 2>&1 ; then + logWarn "Failed to detect mon $mon ok-to-stop" + else + local mon_id mon_pod + mon_id="$(echo "$mon" | awk -F'.' 
'{ print $2 }')" + mon_pod="$(kubectl -n rook-ceph get pods -l ceph_daemon_type=mon -l mon="$mon_id" --no-headers | awk '{ print $1 }')" + kubectl -n rook-ceph delete pod "$mon_pod" + fi + done + + # Checking to ensure ceph status + if ! spinner_until 120 rook_clients_secure; then + logWarn "Mon is still allowing insecure clients" + fi +} + +function rook_clients_secure { + if kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph health detail | grep -q AUTH_INSECURE_GLOBAL_ID_RECLAIM ; then + return 1 + fi + return 0 +} + +# do not downgrade rook or upgrade more than one minor version at a time +function rook_should_skip_rook_install() { + local current_version="$1" + local next_version="$2" + + local current_version_minor='' current_version_patch='' + local next_version_minor='' next_version_patch='' + + semverParse "${current_version}" + current_version_minor="${minor}" + current_version_patch="${patch}" + + semverParse "${next_version}" + next_version_minor="${minor}" + next_version_patch="${patch}" + + if [ -n "${current_version}" ]; then + if [ "${current_version_minor}" != "${next_version_minor}" ]; then + if [ "${current_version_minor}" -gt "${next_version_minor}" ]; then + echo "Rook ${current_version} is already installed, will not downgrade to ${next_version}" + return 0 + # only upgrades from prior minor versions supported + elif [ "${current_version_minor}" -lt "$((next_version_minor-1))" ]; then + echo "Rook ${current_version} is already installed, will not upgrade to ${next_version}" + return 0 + fi + elif [ "${current_version_patch}" -gt "${next_version_patch}" ]; then + echo "Rook ${current_version} is already installed, will not downgrade to ${next_version}" + return 0 + fi + fi + return 1 +} + +function rook_should_fail_install() { + # Beginning with Rook 1.8, certain old kernels are not supported + local kernel_version= + kernel_version=$(uname -r) + + + # Centos 7.4.1708 Kernel is not supported: 3.10.0-693.el7.x86_64 + if [[ "$kernel_version" == *"3.10.0-693"* ]] ; then + logFail "Rook Pre-init: ${LSB_DIST}-${DIST_VERSION} Kernel $kernel_version is not supported." + return 0 + fi + + # Check compatibility with EKCO add-on + if [ -n "$EKCO_VERSION" ]; then + semverParse "$EKCO_VERSION" + local ekco_minor_version="${minor}" + if [ "$ekco_minor_version" -lt 23 ]; then + logFail "Rook Pre-init: Rook ${ROOK_VERSION} is only compatible with EKCO add-on version 0.23.0 and above." + return 0 + fi + fi + + return 1 +} + +function rook_maybe_bluefs_buffered_io() { + local dst="${DIR}/kustomize/rook/operator" + + semverParse "$ROOK_VERSION" + local rook_major_version="$major" + local rook_minor_version="$minor" + if [ "$rook_major_version" = "1" ] && [ "$rook_minor_version" -ge "8" ]; then + sed -i "/\[global\].*/a\ bluefs_buffered_io = false" "$dst/configmap-rook-config-override.yaml" + fi +} + +function rook_maybe_auth_allow_insecure_global_id_reclaim() { + local dst="${DIR}/kustomize/rook/operator" + + if ! 
kubectl -n rook-ceph get cephcluster rook-ceph >/dev/null 2>&1 ; then + # rook ceph not deployed, do not allow since not upgrading + return + fi + + local ceph_version + ceph_version="$(rook_detect_ceph_version)" + if rook_should_auth_allow_insecure_global_id_reclaim "$ceph_version" ; then + sed -i 's/auth_allow_insecure_global_id_reclaim = false/auth_allow_insecure_global_id_reclaim = true/' "$dst/configmap-rook-config-override.yaml" + return + fi +} + +function rook_should_auth_allow_insecure_global_id_reclaim() { + local ceph_version="$1" + + if [ -z "$ceph_version" ]; then + # rook ceph not deployed, do not allow since not upgrading + return 1 + fi + + # https://docs.ceph.com/en/latest/security/CVE-2021-20288/ + semverParse "$ceph_version" + local ceph_version_major="$major" + local ceph_version_patch="$patch" + + case "$ceph_version_major" in + # Pacific v16.2.1 (and later) + "16") + if [ "$ceph_version_patch" -lt "1" ]; then + return 0 + fi + ;; + # Octopus v15.2.11 (and later) + "15") + if [ "$ceph_version_patch" -lt "11" ]; then + return 0 + fi + ;; + # Nautilus v14.2.20 (and later) + "14") + if [ "$ceph_version_patch" -lt "20" ]; then + return 0 + fi + ;; + esac + + return 1 +} + +function rook_detect_ceph_version() { + local ceph_version= + ceph_version="$(kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.status.version.version}' 2>/dev/null | awk -F'-' '{ print $1 }')" + if [ -n "$ceph_version" ]; then + echo "$ceph_version" + return + fi + # if cephcluster not found, try to detect ceph version from the metadata + kubectl -n rook-ceph get deployment rook-ceph-mgr-a -o jsonpath='{.metadata.labels.ceph-version}' 2>/dev/null | awk -F'-' '{ print $1 }' +} + +# rook_cephfilesystem_patch_singlenode will change the +# requiredDuringSchedulingIgnoredDuringExecution podAntiAffinity rule to the more lenient +# preferredDuringSchedulingIgnoredDuringExecution equivalent. This will allow Rook to continue with +# the upgrade. +function rook_cephfilesystem_patch_singlenode() { + if ! kubectl -n rook-ceph get cephfilesystem rook-shared-fs -o jsonpath='{.spec.metadataServer.placement.podAntiAffinity}' | grep -q requiredDuringSchedulingIgnoredDuringExecution ; then + # already patched + return + fi + + local src="$DIR/addons/rook/$ROOK_VERSION/cluster" + rook_cephfilesystem_patch "$src/cephfs/patches/filesystem-singlenode.yaml" +} + +function rook_cephfilesystem_patch() { + local patch="$1" + + local cephfs_generation mds_observedgeneration cephfs_nextgeneration + + cephfs_generation="$(kubectl -n rook-ceph get cephfilesystem rook-shared-fs -o jsonpath='{.metadata.generation}')" + mds_observedgeneration="$(rook_mds_deployments_observedgeneration)" + + kubectl -n rook-ceph patch cephfilesystem rook-shared-fs --type merge --patch "$(cat "$patch")" + + cephfs_nextgeneration="$(kubectl -n rook-ceph get cephfilesystem rook-shared-fs -o jsonpath='{.metadata.generation}')" + if [ "$cephfs_generation" = "$cephfs_nextgeneration" ]; then + # no change + return + fi + + echo "Awaiting Rook MDS deployments to roll out" + if ! spinner_until 1200 rook_mds_deployments_updated "$mds_observedgeneration" ; then + kubectl -n rook-ceph get deploy -l app=rook-ceph-mds + bail "Refusing to update cluster rook-ceph, MDS deployments did not roll out" + fi + + echo "Awaiting Rook MDS deployments up-to-date" + if ! 
spinner_until 1200 rook_mds_deployments_uptodate ; then
+        kubectl -n rook-ceph get deploy -l app=rook-ceph-mds
+        bail "Refusing to update cluster rook-ceph, MDS deployments not up-to-date"
+    fi
+
+    # allow the mds daemon to come up
+    sleep 60
+
+    echo "Awaiting Rook MDS daemons ok-to-stop"
+    if ! spinner_until 1200 rook_mds_daemons_oktostop ; then
+        kubectl -n rook-ceph exec deployment/rook-ceph-tools -- ceph mds ok-to-stop a
+        kubectl -n rook-ceph exec deployment/rook-ceph-tools -- ceph mds ok-to-stop b
+        bail "Refusing to update cluster rook-ceph, MDS daemons not ok-to-stop"
+    fi
+
+    echo "Awaiting Ceph healthy"
+    if ! "$DIR"/bin/kurl rook wait-for-health 1200 ; then
+        kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
+        bail "Refusing to update cluster rook-ceph, Ceph is not healthy"
+    fi
+}
+
+function rook_mds_deployments_uptodate() {
+    local replicas ready_replicas updated_replicas
+    replicas="$(kubectl -n rook-ceph get deploy -l app=rook-ceph-mds -o jsonpath='{.items[*].status.replicas}')"
+    ready_replicas="$(kubectl -n rook-ceph get deploy -l app=rook-ceph-mds -o jsonpath='{.items[*].status.readyReplicas}')"
+    updated_replicas="$(kubectl -n rook-ceph get deploy -l app=rook-ceph-mds -o jsonpath='{.items[*].status.updatedReplicas}')"
+    [ -n "$replicas" ] && [ "$replicas" = "$ready_replicas" ] && [ "$replicas" = "$updated_replicas" ]
+}
+
+function rook_mds_deployments_updated() {
+    local previous="$1"
+    for line in $previous; do
+        if rook_mds_deployments_observedgeneration | grep -q "$line" ; then
+            return 1
+        fi
+    done
+    return 0
+}
+
+function rook_mds_deployments_observedgeneration() {
+    kubectl -n rook-ceph get deploy -l app=rook-ceph-mds -o jsonpath='{range .items[*]}{.metadata.name}={.status.observedGeneration}{"\n"}{end}'
+}
+
+function rook_mds_daemons_oktostop() {
+    local ids=
+    ids="$(kubectl -n rook-ceph get deploy -l app=rook-ceph-mds -oname | sed 's/.*-rook-shared-fs-\(.*\)/\1/')"
+    for id in $ids; do
+        if ! kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph mds ok-to-stop "$id" >/dev/null 2>&1 ; then
+            return 1
+        fi
+    done
+    return 0
+}
+
+# if Longhorn is installed but not specified in the kURL spec, migrate data to Rook.
+function rook_maybe_migrate_from_longhorn() {
+    if [ -z "$LONGHORN_VERSION" ]; then
+        if kubectl get ns | grep -q longhorn-system; then
+            local rook_storage_class="${STORAGE_CLASS:-default}"
+
+            # show validation errors from pvmigrate; if there are errors, rook_maybe_longhorn_migration_checks() will bail
+            rook_maybe_longhorn_migration_checks "$rook_storage_class"
+
+            longhorn_to_sc_migration "$rook_storage_class" "1"
+            migrate_minio_to_rgw
+            DID_MIGRATE_LONGHORN_PVCS=1 # used to automatically delete longhorn if object store data was also migrated
+        fi
+    fi
+}
+
+function rook_maybe_longhorn_migration_checks() {
+    echo "Running Longhorn to Rook migration checks ..."
+
+    if ! rook_is_healthy_to_upgrade; then
+        bail "Cannot upgrade from Longhorn to Rook. Rook Ceph is unhealthy."
+    fi
+
+    log "Awaiting 2 minutes to check Longhorn Pod(s) are Running"
+    if ! spinner_until 120 check_for_running_pods longhorn-system; then
+        logFail "Longhorn has unhealthy Pod(s). Check the namespace longhorn-system"
+        bail "Cannot upgrade from Longhorn to Rook. Longhorn is unhealthy."
+ fi + + local rook_storage_class="$1" + + # get the list of StorageClasses that use longhorn + local longhorn_scs + local longhorn_default_sc + longhorn_scs=$(kubectl get storageclass | grep longhorn | grep -v '(default)' | awk '{ print $1}') # any non-default longhorn StorageClasses + longhorn_default_sc=$(kubectl get storageclass | grep longhorn | grep '(default)' | awk '{ print $1}') # any default longhorn StorageClasses + + local longhorn_scs_pvmigrate_dryrun_output + local longhorn_default_sc_pvmigrate_dryrun_output + for longhorn_sc in $longhorn_scs + do + # run validation checks for non default Longhorn storage classes + if longhorn_scs_pvmigrate_dryrun_output=$($BIN_PVMIGRATE --source-sc "$longhorn_sc" --dest-sc "$rook_storage_class" --rsync-image "$KURL_UTIL_IMAGE" --preflight-validation-only 2>&1) ; then + longhorn_scs_pvmigrate_dryrun_output="" + else + break + fi + done + + if [ -n "$longhorn_default_sc" ] ; then + # run validation checks for the default Longhorn storage class + if longhorn_default_sc_pvmigrate_dryrun_output=$($BIN_PVMIGRATE --source-sc "$longhorn_default_sc" --dest-sc "$rook_storage_class" --rsync-image "$KURL_UTIL_IMAGE" --preflight-validation-only 2>&1) ; then + longhorn_default_sc_pvmigrate_dryrun_output="" + fi + fi + + if [ -n "$longhorn_scs_pvmigrate_dryrun_output" ] || [ -n "$longhorn_default_sc_pvmigrate_dryrun_output" ] ; then + log "$longhorn_scs_pvmigrate_dryrun_output" + log "$longhorn_default_sc_pvmigrate_dryrun_output" + longhorn_restore_migration_replicas + bail "Cannot upgrade from Longhorn to Rook due to previous error." + fi + + echo "Longhorn to Rook migration checks completed." +} + +# shows a prompt asking users for confirmation before starting to migrate data from Longhorn. +function rook_prompt_migrate_from_longhorn() { + # skip on new install or when Longhorn is specified in the kURL spec + if [ -z "$CURRENT_KUBERNETES_VERSION" ] || [ -n "$LONGHORN_VERSION" ]; then + return 0 + fi + + # do not proceed if Longhorn is not installed + if ! kubectl get ns | grep -q longhorn-system; then + return 0 + fi + + local rook_storage_class="${STORAGE_CLASS:-default}" + logWarn " Detected Longhorn is running in the cluster. Data migration will be initiated to move data from Longhorn to storage class $rook_storage_class." + logWarn " As part of this, all pods mounting PVCs will be stopped, taking down the application." + logWarn " It is recommended to take a snapshot or otherwise back up your data before proceeding." + + semverParse "$KUBERNETES_VERSION" + if [ "$minor" -gt 24 ] ; then + logFail " It appears that the Kubernetes version you are attempting to install ($KUBERNETES_VERSION) is incompatible with the version of Longhorn currently installed" + logFail " on your cluster. As a result, it is not possible to migrate data from Longhorn to Rook. To successfully migrate data, please choose a Kubernetes" + logFail " version that is compatible with the version of Longhorn running on your cluster (note: Longhorn is compatible with Kubernetes versions up to and" + logFail " including 1.24)." + bail "Not migrating" + fi + + log "Would you like to continue? " + if ! confirmN; then + bail "Not migrating" + fi + + local nodes=$(kubectl get nodes --no-headers | wc -l) + if [ "$nodes" -eq 1 ]; then + logFail " ERROR: Your cluster has only one node, making Rook an unsuitable choice as a storage provisioner. You must install OpenEBS instead."
+ logFail " Continuing with the Longhorn to Rook data migration under these conditions may result in unexpected errors potentially CAUSING DATA LOSS." + bail "Not migrating" + fi + + if ! longhorn_prepare_for_migration; then + bail "Not migrating" + fi +} + +function rook_ceph_cluster_ready_spinner() { + log "Awaiting CephCluster CR to report Ready" + local delay="$1" + local duration="$2" + local ready_threshold=5 + local successful_ready_status_count=0 + local spinstr='|/-\' + local start_time= + local end_time= + + # defaults + if [ -z "$delay" ]; then + delay=5 + fi + if [ -z "$duration" ]; then + duration=300 + fi + + start_time=$(date +%s) + end_time=$((start_time+duration)) + while [ "$(date +%s)" -lt $end_time ] + do + local temp=${spinstr#?} + local spinstr=$temp${spinstr%"$temp"} + local ceph_status_phase= + local ceph_status_msg= + ceph_status_phase=$(kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.status.phase}') + ceph_status_msg=$(kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.status.message}') + if [[ "$ceph_status_phase" == "Ready" ]]; then + log " Current CephCluster status is: $ceph_status_phase" + successful_ready_status_count=$((successful_ready_status_count+1)) + if [ $successful_ready_status_count -eq $ready_threshold ]; then + log "CephCluster is ready" + return 0 + fi + else + log " Current CephCluster status is $ceph_status_phase: $ceph_status_msg" + successful_ready_status_count=0 + fi + + # simulate a spinner + printf " [%c] " "$spinstr" + printf "\b\b\b\b\b\b" + sleep "$delay" + done + logWarn "Rook CephCluster is not ready" +} + +# wait for Rook deployment pods to be running/completed +function rook_maybe_wait_for_rollout() { + # wait for Rook CephCluster CR to report Ready + # probe set to 10s + # timeout set to 300s (5mins) + rook_ceph_cluster_ready_spinner 10 300 + + log "Awaiting Rook pods to transition to Running" + if ! spinner_until 120 check_for_running_pods "rook-ceph"; then + logWarn "Rook-ceph rollout did not complete within the allotted time" + fi +} + +function rook_render_cluster_nodes_tmpl_yaml() { + local rook_nodes="$1" + local src="$2" + local dst="$3" + mkdir -p "$dst/patches" + rook_nodes="$(echo "$rook_nodes" | yaml_indent " ")" + render_yaml_file_2 "$src/patches/cluster-nodes.tmpl.yaml" > "$dst/patches/cluster-nodes.yaml" + insert_patches_strategic_merge "$dst/kustomization.yaml" patches/cluster-nodes.yaml +} + +function rook_patch_cephcluster_nodes() { + kubectl -n rook-ceph patch cephcluster/rook-ceph --type=merge --patch-file="$dst/patches/cluster-nodes.yaml" +} + +function rook_modprobe_rbd() { + if ! 
lsmod | grep rbd; then + modprobe rbd + echo 'rbd' > /etc/modules-load.d/kurl-rook-rbd.conf + fi +} diff --git a/addons/rook/1.15.5/monitoring/ceph-cluster-dashboard.yaml b/addons/rook/1.15.5/monitoring/ceph-cluster-dashboard.yaml new file mode 100644 index 0000000000..896dc60be2 --- /dev/null +++ b/addons/rook/1.15.5/monitoring/ceph-cluster-dashboard.yaml @@ -0,0 +1,2076 @@ +# Grafana Ceph cluster dashboard was sourced and modified from: +# https://github.com/sysdiglabs/promcat-resources/blob/master/resources/ceph/include/ceph_grafana.json +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: monitoring + name: ceph-cluster-dashboard + annotations: + {} + labels: + grafana_dashboard: "1" +data: + ceph-cluster.json: |- + { + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "7.1.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Ceph Cluster overview.\r\n", + "editable": true, + "gnetId": 2842, + "graphTooltip": 0, + "id": null, + "iteration": 1600777591043, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 37, + "panels": [], + "repeat": null, + "title": "CLUSTER STATE", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 21, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "ceph_health_status", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "1,2", + "title": "Status", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + }, + { + "op": "=", + "text": "WARNING", + "value": "1" + }, + { + "op": "=", + "text": "HEALTHY", + "value": "0" + }, + { + "op": "=", + "text": "ERROR", + "value": "2" + } 
+ ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 4, + "y": 1 + }, + "id": 14, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_mon_quorum_status)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "2,3", + "title": "Monitors In Quorum", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 1 + }, + "id": 23, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "((ceph_cluster_total_bytes-ceph_cluster_total_used_bytes)/ceph_cluster_total_bytes)*100", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "10,30", + "title": "Available Capacity", + "type": "singlestat", + "valueFontSize": "70%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + 
"minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 1 + }, + "id": 33, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "ceph_cluster_total_bytes", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "0.025,0.1", + "title": "Cluster Capacity", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "ms", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 16, + "y": 1 + }, + "id": 32, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(ceph_osd_commit_latency_ms)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "10,50", + "title": "Average OSD Commit Latency", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 22, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + 
"prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_max_avail)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "", + "title": "Pools", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 0, + "y": 5 + }, + "id": 26, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_in)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "", + "title": "OSDs IN", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 40, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 2, + "y": 5 + }, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_osd_up) - count(ceph_osd_in)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "1,1", + "title": "OSDs OUT", + "type": 
"singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 4, + "y": 5 + }, + "id": 28, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_up)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "", + "title": "OSDs UP", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 40, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 6, + "y": 5 + }, + "id": 29, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_osd_up == 0) OR vector(0)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "1,1", + "title": "OSDs DOWN", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + 
"gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 5 + }, + "id": 34, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "ceph_cluster_total_used_bytes", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "0.025,0.1", + "title": "Used Capacity", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "ms", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 16, + "y": 5 + }, + "id": 31, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(ceph_osd_apply_latency_ms)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "10,50", + "title": "Average OSD Apply Latency", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 20, + "y": 5 + }, + "id": 30, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + 
"prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(ceph_osd_numpg)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "thresholds": "250,300", + "title": "Agerage PGs per OSD", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 39, + "panels": [], + "repeat": null, + "title": "CLUSTER", + "type": "row" + }, + { + "aliasColors": { + "Available": "#EAB839", + "Total Capacity": "#447EBC", + "Used": "#BF1B00", + "total_avail": "#6ED0E0", + "total_space": "#7EB26D", + "total_used": "#890F02" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 4, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 10 + }, + "height": "300", + "hiddenSeries": false, + "id": 1, + "interval": "$interval", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Total Capacity", + "fill": 0, + "linewidth": 3, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_cluster_total_bytes-ceph_cluster_total_used_bytes", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Available", + "refId": "A", + "step": 300 + }, + { + "expr": "ceph_cluster_total_used_bytes", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Used", + "refId": "B", + "step": 300 + }, + { + "expr": "ceph_cluster_total_bytes", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Total Capacity", + "refId": "C", + "step": 300 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Capacity", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Total Capacity": "#7EB26D", + "Used": "#BF1B00", + "total_avail": "#6ED0E0", + "total_space": "#7EB26D", + "total_used": "#890F02" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": 0, + "editable": true, + "error": false, + 
"fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 10 + }, + "height": "300", + "hiddenSeries": false, + "id": 3, + "interval": "$interval", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_osd_op_w[$interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A", + "step": 300 + }, + { + "expr": "sum(rate(ceph_osd_op_r[$interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B", + "step": 300 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IOPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 10 + }, + "height": "300", + "hiddenSeries": false, + "id": 7, + "interval": "$interval", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(ceph_osd_op_w_in_bytes[$interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A", + "step": 300 + }, + { + "expr": "sum(irate(ceph_osd_op_r_out_bytes[$interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B", + "step": 300 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Throughput", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + 
} + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 40, + "panels": [], + "repeat": null, + "title": "LATENCY", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 16, + "x": 0, + "y": 19 + }, + "hiddenSeries": false, + "id": 35, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(ceph_osd_apply_latency_ms)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 2, + "legendFormat": "apply", + "metric": "ceph_osd_perf_apply_latency_seconds", + "refId": "A", + "step": 4 + }, + { + "expr": "avg(ceph_osd_commit_latency_ms)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 2, + "legendFormat": "commit", + "metric": "ceph_osd_perf_commit_latency_seconds", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "OSD Apply + Commit Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": null, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 19 + }, + "hiddenSeries": false, + "id": 36, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(ceph_monitor_latency_seconds)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "latency", + "metric": "ceph_monitor_latency_seconds", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Monitor Latency (Currently not available with Ceph MGR)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + 
"type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 41, + "panels": [], + "repeat": null, + "title": "OBJECTS", + "type": "row" + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 42, + "panels": [], + "repeat": null, + "title": "RECOVERY", + "type": "row" + } + ], + "refresh": "1m", + "schemaVersion": 26, + "style": "dark", + "tags": [ + "ceph", + "cluster" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Sysdig", + "value": "Sysdig" + }, + "hide": 0, + "includeAll": false, + "label": "Data Source", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "auto": true, + "auto_count": 10, + "auto_min": "1m", + "current": { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + "datasource": null, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": false, + "text": "5s", + "value": "5s" + }, + { + "selected": false, + "text": "10s", + "value": "10s" + }, + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "5s,10s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Ceph - Cluster", + "uid": "vwcB0Bzmk", + "version": 5 + } diff --git a/addons/rook/1.15.5/monitoring/kustomization.yaml b/addons/rook/1.15.5/monitoring/kustomization.yaml new file mode 100644 index 0000000000..df2418976f --- /dev/null +++ b/addons/rook/1.15.5/monitoring/kustomization.yaml @@ -0,0 +1,3 @@ +resources: +- ceph-cluster-dashboard.yaml +- rook-ceph-servicemonitor.yaml \ No newline at end of file diff --git a/addons/rook/1.15.5/monitoring/rook-ceph-servicemonitor.yaml b/addons/rook/1.15.5/monitoring/rook-ceph-servicemonitor.yaml new file mode 100644 index 0000000000..86b1b643c2 --- /dev/null +++ 
b/addons/rook/1.15.5/monitoring/rook-ceph-servicemonitor.yaml @@ -0,0 +1,20 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: rook-ceph-servicemonitor + namespace: monitoring + labels: + team: rook +spec: + namespaceSelector: + matchNames: + - rook-ceph + selector: + matchLabels: + app: rook-ceph-mgr + rook_cluster: rook-ceph + ceph_daemon_id: a + endpoints: + - port: http-metrics + path: /metrics + interval: 5s diff --git a/addons/rook/1.15.5/operator/cluster-rbac.yaml b/addons/rook/1.15.5/operator/cluster-rbac.yaml new file mode 100644 index 0000000000..51d0aca8c3 --- /dev/null +++ b/addons/rook/1.15.5/operator/cluster-rbac.yaml @@ -0,0 +1,363 @@ +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the osd purge job to run in this namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-purge-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-cmd-reporter +subjects: + - kind: ServiceAccount + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the ceph mgr to access resources in the Rook operator namespace necessary for mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-system + namespace: rook-ceph # namespace:operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-system +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the ceph mgr to access resources scoped to the CephCluster namespace necessary for mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the osd pods in this namespace to work with configmaps +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the operator to create resources in this cluster's namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cluster-mgmt + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-cluster-mgmt +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Aspects of ceph osd purge job that require access to the cluster namespace 
+kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:cluster +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "delete"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "update", "delete", "list"] +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +rules: + - apiGroups: + - "" + resources: + - pods + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Aspects of ceph-mgr that operate within the cluster's namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +rules: + - apiGroups: + - "" + resources: + - pods + - services + - pods/log + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - ceph.rook.io + resources: + - cephclients + - cephclusters + - cephblockpools + - cephfilesystems + - cephnfses + - cephobjectstores + - cephobjectstoreusers + - cephobjectrealms + - cephobjectzonegroups + - cephobjectzones + - cephbuckettopics + - cephbucketnotifications + - cephrbdmirrors + - cephfilesystemmirrors + - cephfilesystemsubvolumegroups + - cephblockpoolradosnamespaces + - cephcosidrivers + verbs: + - get + - list + - watch + - create + - update + - delete + - patch + - apiGroups: + - apps + resources: + - deployments/scale + - deployments + verbs: + - patch + - delete + - apiGroups: + - '' + resources: + - persistentvolumeclaims + verbs: + - delete +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +rules: + # this is needed for rook's "key-management" CLI to fetch the vault token from the secret when + # validating the connection details and for key rotation operations. 
+ - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: ["ceph.rook.io"] + resources: ["cephclusters", "cephclusters/finalizers"] + verbs: ["get", "list", "create", "update", "delete"] +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-cluster +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Service account for other components +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-default + namespace: rook-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Service account for RGW server +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-rgw + namespace: rook-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Service account for job that purges OSDs from a Rook-Ceph cluster +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:cluster + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Service account for the job that reports the Ceph version in an image +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Service account for Ceph mgrs +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/cluster-rbac.yaml +# Service account for Ceph OSDs +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster + labels: + operator: rook + storage-backend: ceph + 
app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" + + +# imagePullSecrets: +# - name: my-registry-secret +--- diff --git a/addons/rook/1.15.5/operator/clusterrole.yaml b/addons/rook/1.15.5/operator/clusterrole.yaml new file mode 100644 index 0000000000..937ee3d585 --- /dev/null +++ b/addons/rook/1.15.5/operator/clusterrole.yaml @@ -0,0 +1,648 @@ +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: objectstorage-provisioner-role + labels: + app.kubernetes.io/part-of: container-object-storage-interface + app.kubernetes.io/component: driver-ceph + app.kubernetes.io/name: cosi-driver-ceph +rules: + - apiGroups: ["objectstorage.k8s.io"] + resources: + [ + "buckets", + "bucketaccesses", + "bucketclaims", + "bucketaccessclasses", + "buckets/status", + "bucketaccesses/status", + "bucketclaims/status", + "bucketaccessclasses/status", + ] + verbs: ["get", "list", "watch", "update", "create", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["secrets", "events"] + verbs: ["get", "delete", "update", "create"] +--- +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch", "create"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list", "watch", "patch", "update", "create"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] + - apiGroups: [""] + resources: 
["serviceaccounts/token"] + verbs: ["create"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] +--- +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts/token"] + verbs: ["create"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] +--- +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch", "create"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list", "watch", "patch", "update", "create"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts/token"] + verbs: ["create"] +--- +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + - apiGroups: [""] + resources: 
["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts/token"] + verbs: ["create"] +--- +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list +--- +# Source: rook-ceph/templates/clusterrole.yaml +# Used for provisioning ObjectBuckets (OBs) in response to ObjectBucketClaims (OBCs). +# Note: Rook runs a copy of the lib-bucket-provisioner's OBC controller. +# OBCs can be created in any Kubernetes namespace, so this must be a cluster-scoped role. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-object-bucket + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: + - apiGroups: [""] + resources: ["secrets", "configmaps"] + verbs: + # OBC controller creates secrets and configmaps containing information for users about how to + # connect to object buckets. It deletes them when an OBC is deleted. + - get + - create + - update + - delete + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: + # OBC controller gets parameters from the OBC's storageclass + # Rook gets additional parameters from the OBC's storageclass + - get + - apiGroups: ["objectbucket.io"] + resources: ["objectbucketclaims"] + verbs: + # OBC controller needs to list/watch OBCs and get latest version of a reconciled OBC + - list + - watch + - get + # Ideally, update should not be needed, but the OBC controller updates the OBC with bucket + # information outside of the status subresource + - update + # OBC controller does not delete OBCs; users do this + - apiGroups: ["objectbucket.io"] + resources: ["objectbuckets"] + verbs: + # OBC controller needs to list/watch OBs and get latest version of a reconciled OB + - list + - watch + - get + # OBC controller creates an OB when an OBC's bucket has been provisioned by Ceph, updates them + # when an OBC is updated, and deletes them when the OBC is de-provisioned. + - create + - update + - delete + - apiGroups: ["objectbucket.io"] + resources: ["objectbucketclaims/status", "objectbuckets/status"] + verbs: + # OBC controller updates OBC and OB statuses + - update + - apiGroups: ["objectbucket.io"] + # This does not strictly allow the OBC/OB controllers to update finalizers. That is handled by + # the direct "update" permissions above. Instead, this allows Rook's controller to create + # resources which are owned by OBs/OBCs and where blockOwnerDeletion is set. 
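+  # For context (not part of the upstream chart): a claim that drives this provisioning
+  # flow looks roughly like the sketch below. The name, namespace, and storageClassName
+  # are illustrative assumptions, not values shipped by this addon.
+  #   apiVersion: objectbucket.io/v1alpha1
+  #   kind: ObjectBucketClaim
+  #   metadata:
+  #     name: example-bucket
+  #     namespace: default
+  #   spec:
+  #     generateBucketName: example-bucket
+  #     storageClassName: rook-ceph-bucket   # assumed bucket StorageClass name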
+ resources: ["objectbucketclaims/finalizers", "objectbuckets/finalizers"] + verbs: + - update +--- +# Source: rook-ceph/templates/clusterrole.yaml +# Aspects of ceph-mgr that require access to the system namespace +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Source: rook-ceph/templates/clusterrole.yaml +# Aspects of ceph-mgr that require cluster-wide access +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: +- apiGroups: + - "" + resources: + - configmaps + - nodes + - nodes/proxy + - persistentvolumes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - list + - get + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +--- +# Source: rook-ceph/templates/clusterrole.yaml +# The cluster role for managing the Rook CRDs +apiVersion: rbac.authorization.k8s.io/v1 +# Rook watches for its CRDs in all namespaces, so this should be a cluster-scoped role unless the +# operator config `ROOK_CURRENT_NAMESPACE_ONLY=true`. +kind: ClusterRole +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: +- apiGroups: + - "" + resources: + # Pod access is needed for fencing + - pods + # Node access is needed for determining nodes where mons should run + - nodes + - nodes/proxy + # Rook watches secrets which it uses to configure access to external resources. + # e.g., external Ceph cluster or object store + - secrets + # Rook watches for changes to the rook-operator-config configmap + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # Rook creates events for its custom resources + - events + # Rook creates PVs and PVCs for OSDs managed by the Rook provisioner + - persistentvolumes + - persistentvolumeclaims + # Rook creates endpoints for mgr and object store access + - endpoints + - services + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - delete + - deletecollection +# The Rook operator must be able to watch all ceph.rook.io resources to reconcile them. +- apiGroups: ["ceph.rook.io"] + resources: + - cephclients + - cephclusters + - cephblockpools + - cephfilesystems + - cephnfses + - cephobjectstores + - cephobjectstoreusers + - cephobjectrealms + - cephobjectzonegroups + - cephobjectzones + - cephbuckettopics + - cephbucketnotifications + - cephrbdmirrors + - cephfilesystemmirrors + - cephfilesystemsubvolumegroups + - cephblockpoolradosnamespaces + - cephcosidrivers + verbs: + - get + - list + - watch + # Ideally the update permission is not required, but Rook needs it to add finalizers to resources. + - update +# Rook must have update access to status subresources for its custom resources. 
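+# As a rough illustration (values are hypothetical, not taken from this chart), Rook uses
+# this permission to patch only the status fields of its CRs, e.g. on a CephCluster:
+#   status:
+#     phase: Ready
+#     ceph:
+#       health: HEALTH_OK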
+- apiGroups: ["ceph.rook.io"] + resources: + - cephclients/status + - cephclusters/status + - cephblockpools/status + - cephfilesystems/status + - cephnfses/status + - cephobjectstores/status + - cephobjectstoreusers/status + - cephobjectrealms/status + - cephobjectzonegroups/status + - cephobjectzones/status + - cephbuckettopics/status + - cephbucketnotifications/status + - cephrbdmirrors/status + - cephfilesystemmirrors/status + - cephfilesystemsubvolumegroups/status + - cephblockpoolradosnamespaces/status + verbs: ["update"] +# The "*/finalizers" permission may need to be strictly given for K8s clusters where +# OwnerReferencesPermissionEnforcement is enabled so that Rook can set blockOwnerDeletion on +# resources owned by Rook CRs (e.g., a Secret owned by an OSD Deployment). See more: +# https://kubernetes.io/docs/reference/access-authn-authz/_print/#ownerreferencespermissionenforcement +- apiGroups: ["ceph.rook.io"] + resources: + - cephclients/finalizers + - cephclusters/finalizers + - cephblockpools/finalizers + - cephfilesystems/finalizers + - cephnfses/finalizers + - cephobjectstores/finalizers + - cephobjectstoreusers/finalizers + - cephobjectrealms/finalizers + - cephobjectzonegroups/finalizers + - cephobjectzones/finalizers + - cephbuckettopics/finalizers + - cephbucketnotifications/finalizers + - cephrbdmirrors/finalizers + - cephfilesystemmirrors/finalizers + - cephfilesystemsubvolumegroups/finalizers + - cephblockpoolradosnamespaces/finalizers + verbs: ["update"] +- apiGroups: + - policy + - apps + - extensions + resources: + # This is for the clusterdisruption controller + - poddisruptionbudgets + # This is for both clusterdisruption and nodedrain controllers + - deployments + - replicasets + verbs: + - get + - list + - watch + - create + - update + - delete + - deletecollection +- apiGroups: + - apps + resources: + # This is to add osd deployment owner ref on key rotation + # cron jobs. 
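+  # Illustrative sketch only (the deployment name and uid are hypothetical): the key
+  # rotation CronJob gains an ownerReference like the one below, and setting
+  # blockOwnerDeletion requires update on deployments/finalizers when the
+  # OwnerReferencesPermissionEnforcement admission plugin is enabled.
+  #   ownerReferences:
+  #   - apiVersion: apps/v1
+  #     kind: Deployment
+  #     name: rook-ceph-osd-0
+  #     uid: <osd-deployment-uid>
+  #     blockOwnerDeletion: true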
+ - deployments/finalizers + verbs: + - update +- apiGroups: + - healthchecking.openshift.io + resources: + - machinedisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - machine.openshift.io + resources: + - machines + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - get + - update +- apiGroups: + - k8s.cni.cncf.io + resources: + - network-attachment-definitions + verbs: + - get +--- +# Source: rook-ceph/templates/clusterrole.yaml +# The cluster role for managing all the cluster-specific resources in a namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rook-ceph-cluster-mgmt + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - secrets + - pods + - pods/log + - services + - configmaps + - deployments + - daemonsets + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +--- +# Source: rook-ceph/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: + # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. + # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] + # To represent this in an RBAC role, use a slash to delimit the resource and subresource. 
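+  # For example, a minimal rule granting read access to pod logs (the same shape as the
+  # pods/log rule below) uses the resource/subresource form:
+  #   - apiGroups: [""]
+  #     resources: ["pods/log"]
+  #     verbs: ["get"]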
+ # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources + - apiGroups: [""] + resources: ["pods", "pods/log"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["pods/exec"] + verbs: ["create"] + - apiGroups: ["csiaddons.openshift.io"] + resources: ["networkfences"] + verbs: ["create", "get", "update", "delete", "watch", "list", "deletecollection"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get"] + - apiGroups: ["csi.ceph.io"] + resources: ["cephconnections"] + verbs: ["create", "delete", "get", "list","update", "watch"] + - apiGroups: ["csi.ceph.io"] + resources: ["clientprofiles"] + verbs: ["create", "delete", "get", "list", "update", "watch"] + - apiGroups: ["csi.ceph.io"] + resources: ["operatorconfigs"] + verbs: ["create", "delete", "get", "list" ,"update", "watch"] + - apiGroups: ["csi.ceph.io"] + resources: ["drivers"] + verbs: ["create", "delete", "get", "list" ,"update", "watch"] +--- diff --git a/addons/rook/1.15.5/operator/clusterrolebinding.yaml b/addons/rook/1.15.5/operator/clusterrolebinding.yaml new file mode 100644 index 0000000000..9d21446a63 --- /dev/null +++ b/addons/rook/1.15.5/operator/clusterrolebinding.yaml @@ -0,0 +1,135 @@ +# Source: rook-ceph/templates/clusterrolebinding.yaml +# RBAC for ceph cosi driver service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: objectstorage-provisioner-role-binding + labels: + app.kubernetes.io/part-of: container-object-storage-interface + app.kubernetes.io/component: driver-ceph + app.kubernetes.io/name: cosi-driver-ceph +subjects: + - kind: ServiceAccount + name: objectstorage-provisioner + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: objectstorage-provisioner-role + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +# This is required by operator-sdk to map the cluster/clusterrolebindings with SA +# otherwise operator-sdk will create a individual file for these. 
+kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin-role +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +# Give Rook-Ceph Operator permissions to provision ObjectBuckets in response to ObjectBucketClaims. +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-object-bucket +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-object-bucket +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-global +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Source: rook-ceph/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-system +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- diff --git a/addons/rook/1.15.5/operator/configmap-rook-config-override.yaml b/addons/rook/1.15.5/operator/configmap-rook-config-override.yaml new file mode 100644 index 0000000000..e6776dc7ea --- /dev/null +++ b/addons/rook/1.15.5/operator/configmap-rook-config-override.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: rook-config-override + namespace: rook-ceph +data: + config: | + [global] + osd pool default size = 1 + [mon] + auth_allow_insecure_global_id_reclaim = false diff --git a/addons/rook/1.15.5/operator/configmap.yaml b/addons/rook/1.15.5/operator/configmap.yaml new file mode 100644 index 0000000000..a8e30bdc7e --- /dev/null +++ b/addons/rook/1.15.5/operator/configmap.yaml 
@@ -0,0 +1,50 @@ +# Source: rook-ceph/templates/configmap.yaml +# Operator settings that can be updated without an operator restart +# Operator settings that require an operator restart are found in the operator env vars +kind: ConfigMap +apiVersion: v1 +metadata: + name: rook-ceph-operator-config + namespace: rook-ceph # namespace:operator +data: + ROOK_LOG_LEVEL: "INFO" + ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" + ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true" + ROOK_CEPH_ALLOW_LOOP_DEVICES: "false" + ROOK_ENABLE_DISCOVERY_DAEMON: "true" + ROOK_CSI_ENABLE_RBD: "true" + ROOK_CSI_ENABLE_CEPHFS: "true" + ROOK_CSI_DISABLE_DRIVER: "false" + CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true" + CSI_ENABLE_NFS_SNAPSHOTTER: "true" + CSI_ENABLE_RBD_SNAPSHOTTER: "true" + CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false" + CSI_ENABLE_ENCRYPTION: "false" + CSI_ENABLE_OMAP_GENERATOR: "false" + CSI_ENABLE_HOST_NETWORK: "true" + CSI_DISABLE_HOLDER_PODS: "true" + CSI_ENABLE_METADATA: "false" + CSI_ENABLE_VOLUME_GROUP_SNAPSHOT: "true" + CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical" + CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical" + CSI_RBD_FSGROUPPOLICY: "File" + CSI_CEPHFS_FSGROUPPOLICY: "File" + CSI_NFS_FSGROUPPOLICY: "File" + ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.2" + ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1" + ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1" + ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1" + ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.6.1" + ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1" + ROOK_CSI_IMAGE_PULL_POLICY: "IfNotPresent" + CSI_ENABLE_CSIADDONS: "false" + ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.1" + CSI_ENABLE_TOPOLOGY: "false" + ROOK_CSI_ENABLE_NFS: "false" + CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true" + CSI_GRPC_TIMEOUT_SECONDS: "150" + CSI_PROVISIONER_REPLICAS: "2" + CSI_CEPHFS_ATTACH_REQUIRED: "true" + CSI_RBD_ATTACH_REQUIRED: "true" + CSI_NFS_ATTACH_REQUIRED: "true" +--- diff --git a/addons/rook/1.15.5/operator/deployment.yaml b/addons/rook/1.15.5/operator/deployment.yaml new file mode 100644 index 0000000000..d0c43adc03 --- /dev/null +++ b/addons/rook/1.15.5/operator/deployment.yaml @@ -0,0 +1,82 @@ +# Source: rook-ceph/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-operator + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +spec: + replicas: 1 + selector: + matchLabels: + app: rook-ceph-operator + strategy: + type: Recreate + template: + metadata: + labels: + app: rook-ceph-operator + helm.sh/chart: "rook-ceph-v1.15.5" + spec: + tolerations: + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 5 + containers: + - name: rook-ceph-operator + image: "docker.io/rook/ceph:v1.15.5" + imagePullPolicy: IfNotPresent + args: ["ceph", "operator"] + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 2016 + runAsNonRoot: true + runAsUser: 2016 + volumeMounts: + - mountPath: /var/lib/rook + name: rook-config + - mountPath: /etc/ceph + name: default-config-dir + env: + - name: ROOK_CURRENT_NAMESPACE_ONLY + value: "false" + - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED + value: "false" + - 
name: ROOK_DISABLE_DEVICE_HOTPLUG + value: "false" + - name: ROOK_DISCOVER_DEVICES_INTERVAL + value: "60m" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + limits: + memory: 512Mi + requests: + cpu: 200m + memory: 128Mi + serviceAccountName: rook-ceph-system + volumes: + - name: rook-config + emptyDir: {} + - name: default-config-dir + emptyDir: {} +--- diff --git a/addons/rook/1.15.5/operator/kustomization.yaml b/addons/rook/1.15.5/operator/kustomization.yaml new file mode 100644 index 0000000000..05509333a6 --- /dev/null +++ b/addons/rook/1.15.5/operator/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- toolbox.yaml +- serviceaccount.yaml +- configmap.yaml +- clusterrole.yaml +- clusterrolebinding.yaml +- role.yaml +- cluster-rbac.yaml +- rolebinding.yaml +- deployment.yaml +- securityContextConstraints.yaml +- namespace.yaml +- configmap-rook-config-override.yaml + +patchesStrategicMerge: +- patches/deployment-tolerations.yaml diff --git a/addons/rook/1.15.5/operator/namespace.yaml b/addons/rook/1.15.5/operator/namespace.yaml new file mode 100644 index 0000000000..1696c56ee6 --- /dev/null +++ b/addons/rook/1.15.5/operator/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: rook-ceph diff --git a/addons/rook/1.15.5/operator/patches/deployment-privileged.yaml b/addons/rook/1.15.5/operator/patches/deployment-privileged.yaml new file mode 100644 index 0000000000..c05db4c0a2 --- /dev/null +++ b/addons/rook/1.15.5/operator/patches/deployment-privileged.yaml @@ -0,0 +1,12 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-operator +spec: + template: + spec: + containers: + - name: rook-ceph-operator + env: + - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED + value: "true" diff --git a/addons/rook/1.15.5/operator/patches/deployment-tolerations.yaml b/addons/rook/1.15.5/operator/patches/deployment-tolerations.yaml new file mode 100644 index 0000000000..207ee02ca1 --- /dev/null +++ b/addons/rook/1.15.5/operator/patches/deployment-tolerations.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-operator +spec: + template: + spec: + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - name: rook-ceph-operator + env: + - name: DISCOVER_TOLERATION_KEY + value: node-role.kubernetes.io/master + - name: CSI_PROVISIONER_TOLERATIONS + value: | + - key: node-role.kubernetes.io/master + operator: Exists + - name: CSI_PLUGIN_TOLERATIONS + value: | + - key: node-role.kubernetes.io/master + operator: Exists diff --git a/addons/rook/1.15.5/operator/role.yaml b/addons/rook/1.15.5/operator/role.yaml new file mode 100644 index 0000000000..7c3c3e659b --- /dev/null +++ b/addons/rook/1.15.5/operator/role.yaml @@ -0,0 +1,89 @@ +# Source: rook-ceph/templates/role.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-external-provisioner-cfg + namespace: rook-ceph # namespace:operator +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +# Source: rook-ceph/templates/role.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-external-provisioner-cfg + namespace: rook-ceph # namespace:operator +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + 
verbs: ["get", "watch", "list", "delete", "update", "create"] +--- +# Source: rook-ceph/templates/role.yaml +# Allow the operator to manage resources in its own namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +rules: +- apiGroups: + - "" + resources: + - pods + - configmaps + - services + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - statefulsets + - deployments + verbs: + - get + - list + - watch + - create + - update + - delete + - deletecollection +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - delete +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - get + - create + - delete +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + verbs: + - get + - create +--- diff --git a/addons/rook/1.15.5/operator/rolebinding.yaml b/addons/rook/1.15.5/operator/rolebinding.yaml new file mode 100644 index 0000000000..0e8eb1dadc --- /dev/null +++ b/addons/rook/1.15.5/operator/rolebinding.yaml @@ -0,0 +1,53 @@ +# Source: rook-ceph/templates/rolebinding.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: rbd-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/rolebinding.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: cephfs-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io +--- +# Source: rook-ceph/templates/rolebinding.yaml +# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-system +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- diff --git a/addons/rook/1.15.5/operator/securityContextConstraints.yaml b/addons/rook/1.15.5/operator/securityContextConstraints.yaml new file mode 100644 index 0000000000..39309245c2 --- /dev/null +++ b/addons/rook/1.15.5/operator/securityContextConstraints.yaml @@ -0,0 +1,3 @@ +# Source: rook-ceph/templates/securityContextConstraints.yaml +# scc for the Rook and Ceph daemons +# for creating cluster in openshift diff --git a/addons/rook/1.15.5/operator/serviceaccount.yaml b/addons/rook/1.15.5/operator/serviceaccount.yaml new file mode 100644 index 0000000000..330733c84f --- /dev/null +++ 
b/addons/rook/1.15.5/operator/serviceaccount.yaml @@ -0,0 +1,83 @@ +# Source: rook-ceph/templates/serviceaccount.yaml +# Service account for Ceph COSI driver +apiVersion: v1 +kind: ServiceAccount +metadata: + name: objectstorage-provisioner + namespace: rook-ceph # namespace:operator + labels: + app.kubernetes.io/part-of: container-object-storage-interface + app.kubernetes.io/component: driver-ceph + app.kubernetes.io/name: cosi-driver-ceph + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/serviceaccount.yaml +# Service account for the RBD CSI provisioner +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/serviceaccount.yaml +# Service account for the RBD CSI driver +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/serviceaccount.yaml +# Service account for the CephFS CSI provisioner +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/serviceaccount.yaml +# Service account for the CephFS CSI driver +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator + + +# imagePullSecrets: +# - name: my-registry-secret +--- +# Source: rook-ceph/templates/serviceaccount.yaml +# Service account for the Rook-Ceph operator +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-v1.15.5" + + +# imagePullSecrets: +# - name: my-registry-secret +--- diff --git a/addons/rook/1.15.5/operator/toolbox.yaml b/addons/rook/1.15.5/operator/toolbox.yaml new file mode 100644 index 0000000000..a060b91b51 --- /dev/null +++ b/addons/rook/1.15.5/operator/toolbox.yaml @@ -0,0 +1,131 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-tools + namespace: rook-ceph # namespace:cluster + labels: + app: rook-ceph-tools +spec: + replicas: 1 + selector: + matchLabels: + app: rook-ceph-tools + template: + metadata: + labels: + app: rook-ceph-tools + spec: + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: rook-ceph-default + containers: + - name: rook-ceph-tools + image: quay.io/ceph/ceph:v18.2.4 + command: + - /bin/bash + - -c + - | + # Replicate the script from toolbox.sh inline so the ceph image + # can be run directly, instead of requiring the rook toolbox + CEPH_CONFIG="/etc/ceph/ceph.conf" + MON_CONFIG="/etc/rook/mon-endpoints" + KEYRING_FILE="/etc/ceph/keyring" + + # create a ceph config file in its default location so ceph/rados tools can be used + # without specifying any arguments + write_endpoints() { + endpoints=$(cat ${MON_CONFIG}) + + # filter out the mon names + # external cluster can have numbers or hyphens in mon names, handling them in regex + # shellcheck disable=SC2001 + mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g') + + DATE=$(date) + echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}" + cat 
<< EOF > ${CEPH_CONFIG} + [global] + mon_host = ${mon_endpoints} + + [client.admin] + keyring = ${KEYRING_FILE} + EOF + } + + # watch the endpoints config file and update if the mon endpoints ever change + watch_endpoints() { + # get the timestamp for the target of the soft link + real_path=$(realpath ${MON_CONFIG}) + initial_time=$(stat -c %Z "${real_path}") + while true; do + real_path=$(realpath ${MON_CONFIG}) + latest_time=$(stat -c %Z "${real_path}") + + if [[ "${latest_time}" != "${initial_time}" ]]; then + write_endpoints + initial_time=${latest_time} + fi + + sleep 10 + done + } + + # read the secret from an env var (for backward compatibility), or from the secret file + ceph_secret=${ROOK_CEPH_SECRET} + if [[ "$ceph_secret" == "" ]]; then + ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring) + fi + + # create the keyring file + cat << EOF > ${KEYRING_FILE} + [${ROOK_CEPH_USERNAME}] + key = ${ceph_secret} + EOF + + # write the initial config file + write_endpoints + + # continuously update the mon endpoints if they fail over + watch_endpoints + imagePullPolicy: IfNotPresent + tty: true + securityContext: + runAsNonRoot: true + runAsUser: 2016 + runAsGroup: 2016 + capabilities: + drop: ["ALL"] + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-username + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - name: mon-endpoint-volume + mountPath: /etc/rook + - name: ceph-admin-secret + mountPath: /var/lib/rook-ceph-mon + readOnly: true + volumes: + - name: ceph-admin-secret + secret: + secretName: rook-ceph-mon + optional: false + items: + - key: ceph-secret + path: secret.keyring + - name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints + - name: ceph-config + emptyDir: {} + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 5 diff --git a/hack/testdata/manifest/clean b/hack/testdata/manifest/clean index 0d1ce9607f..647190d284 100644 --- a/hack/testdata/manifest/clean +++ b/hack/testdata/manifest/clean @@ -11,7 +11,7 @@ KURL_BIN_UTILS_FILE= # STEP_VERSIONS array is generated by the server and injected at runtime based on supported k8s versions STEP_VERSIONS=(0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 1.16.4 1.17.13 1.18.20 1.19.16 1.20.15 1.21.14 1.22.17 1.23.17 1.24.17 1.25.14 1.26.15 1.27.16 1.28.15 1.29.10 1.30.6 1.31.2) # ROOK_STEP_VERSIONS array is generated by the server and injected at runtime based on supported rook versions -ROOK_STEP_VERSIONS=(1.0.4-14.2.21 0.0.0 0.0.0 0.0.0 1.4.9 1.5.12 1.6.11 1.7.11 1.8.10 1.9.12 1.10.11 1.11.8 1.12.8) +ROOK_STEP_VERSIONS=(1.0.4-14.2.21 0.0.0 0.0.0 0.0.0 1.4.9 1.5.12 1.6.11 1.7.11 1.8.10 1.9.12 1.10.11 1.11.8 1.12.8 0.0.0 0.0.0 1.15.5) # CONTAINERD_STEP_VERSIONS array is generated by the server and injected at runtime based on supported containerd versions CONTAINERD_STEP_VERSIONS=(1.2.13 1.3.9 1.4.13 1.5.11 1.6.33) INSTALLER_YAML= diff --git a/scripts/Manifest b/scripts/Manifest index 0d1ce9607f..647190d284 100644 --- a/scripts/Manifest +++ b/scripts/Manifest @@ -11,7 +11,7 @@ KURL_BIN_UTILS_FILE= # STEP_VERSIONS array is generated by the server and injected at runtime based on supported k8s versions STEP_VERSIONS=(0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 1.16.4 1.17.13 1.18.20 1.19.16 1.20.15 1.21.14 1.22.17 1.23.17 1.24.17 1.25.14 1.26.15 1.27.16 1.28.15
1.29.10 1.30.6 1.31.2) # ROOK_STEP_VERSIONS array is generated by the server and injected at runtime based on supported rook versions -ROOK_STEP_VERSIONS=(1.0.4-14.2.21 0.0.0 0.0.0 0.0.0 1.4.9 1.5.12 1.6.11 1.7.11 1.8.10 1.9.12 1.10.11 1.11.8 1.12.8) +ROOK_STEP_VERSIONS=(1.0.4-14.2.21 0.0.0 0.0.0 0.0.0 1.4.9 1.5.12 1.6.11 1.7.11 1.8.10 1.9.12 1.10.11 1.11.8 1.12.8 0.0.0 0.0.0 1.15.5) # CONTAINERD_STEP_VERSIONS array is generated by the server and injected at runtime based on supported containerd versions CONTAINERD_STEP_VERSIONS=(1.2.13 1.3.9 1.4.13 1.5.11 1.6.33) INSTALLER_YAML= diff --git a/web/src/installers/versions.js b/web/src/installers/versions.js index 9bb2197458..b864d22e5e 100644 --- a/web/src/installers/versions.js +++ b/web/src/installers/versions.js @@ -252,6 +252,7 @@ module.exports.InstallerVersions = { rook: [ "1.0.4", // cron-rook-update + "1.15.5", "1.12.8", "1.12.7", "1.12.6",