Create new Rook version
emosbaugh authored Dec 25, 2023
1 parent 978dad6 commit cd21ea2
Showing 42 changed files with 19,015 additions and 2 deletions.
13 changes: 13 additions & 0 deletions addons/rook/1.13.1/Manifest
@@ -0,0 +1,13 @@
yum lvm2
yumol lvm2
apt lvm2

image rook-ceph rook/ceph:v1.13.1
image ceph-ceph quay.io/ceph/ceph:v18.2.1
image cephcsi-cephcsi quay.io/cephcsi/cephcsi:v3.10.1
image sig-storage-csi-node-driver-registrar registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.1
image sig-storage-csi-resizer registry.k8s.io/sig-storage/csi-resizer:v1.9.2
image sig-storage-csi-provisioner registry.k8s.io/sig-storage/csi-provisioner:v3.6.2
image sig-storage-csi-snapshotter registry.k8s.io/sig-storage/csi-snapshotter:v6.3.2
image sig-storage-csi-attacher registry.k8s.io/sig-storage/csi-attacher:v4.4.2
image csiaddons-k8s-sidecar quay.io/csiaddons/k8s-sidecar:v0.8.0
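
The manifest lists the host packages (lvm2, required by Ceph for OSD provisioning) and the container images kURL bundles for this addon version. For context, a cluster opts into this version from a kURL installer spec; a minimal sketch, with surrounding fields assumed and not part of this commit:

apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: rook-example
spec:
  kubernetes:
    version: 1.27.x
  rook:
    version: 1.13.1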
36 changes: 36 additions & 0 deletions addons/rook/1.13.1/cluster/cephfs/cephfs-storageclass.yaml
@@ -0,0 +1,36 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph # namespace:cluster

  # CephFS filesystem name into which the volume shall be created
  fsName: rook-shared-fs

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: rook-shared-fs-replicated

  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster

  # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
  # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
  # or by setting the default mounter explicitly via --volumemounter command-line argument.
  # mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  # uncomment the following line for debugging
  #- debug
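
Once this StorageClass is applied, workloads can request shared CephFS-backed volumes from it. A minimal sketch of such a claim (the claim name and size are illustrative, not part of this commit):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: rook-cephfs
  resources:
    requests:
      storage: 1Gi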
158 changes: 158 additions & 0 deletions addons/rook/1.13.1/cluster/cephfs/filesystem.yaml
@@ -0,0 +1,158 @@
#################################################################################################################
# Create a filesystem with replication enabled for a production environment.
# A minimum of 3 OSDs on different nodes is required in this example.
# If one mds daemon per node is too restrictive, see the podAntiAffinity below.
# kubectl create -f filesystem.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph # namespace:cluster
spec:
  # The metadata pool spec. Must use replication.
  metadataPool:
    replicated:
      size: 3
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the data pool
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode:
        none
      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #target_size_ratio: ".5"
  # The list of data pool specs. Can use replication or erasure coding.
  dataPools:
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
        # Disallow setting pool with replica 1, this could lead to data loss without recovery.
        # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
        requireSafeReplicaSize: true
      parameters:
        # Inline compression mode for the data pool
        # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
        compression_mode:
          none
        # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
        # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
        #target_size_ratio: ".5"
  # Whether to preserve filesystem after CephFilesystem CRD deletion
  preserveFilesystemOnDelete: true
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
      #  nodeAffinity:
      #    requiredDuringSchedulingIgnoredDuringExecution:
      #      nodeSelectorTerms:
      #        - matchExpressions:
      #            - key: role
      #              operator: In
      #              values:
      #                - mds-node
      #  topologySpreadConstraints:
      #  tolerations:
      #    - key: mds-node
      #      operator: Exists
      #  podAffinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - rook-ceph-mds
              ## Add this if you want to allow mds daemons for different filesystems to run on one
              ## node. The value in "values" must match .metadata.name.
              # - key: rook_file_system
              #   operator: In
              #   values:
              #     - rook-shared-fs
            # topologyKey: kubernetes.io/hostname will place MDS across different hosts
            topologyKey: kubernetes.io/hostname
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZs
              # Use topologyKey: failure-domain.beta.kubernetes.io/zone if your Kubernetes cluster is v1.16 or lower
              # Use topologyKey: topology.kubernetes.io/zone if it is v1.17 or higher
              topologyKey: topology.kubernetes.io/zone
    # A key/value list of annotations
    # annotations:
    #   key: value
    # A key/value list of labels
    # labels:
    #   key: value
    # resources:
    # The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #   limits:
    #     cpu: "500m"
    #     memory: "1024Mi"
    #   requests:
    #     cpu: "500m"
    #     memory: "1024Mi"
    priorityClassName: system-cluster-critical
    livenessProbe:
      disabled: false
    startupProbe:
      disabled: false
  # Filesystem mirroring settings
  # mirroring:
    # enabled: true
    # list of Kubernetes Secrets containing the peer token
    # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
    # Add the secret name if it already exists; otherwise specify an empty list here.
    # peers:
      #secretNames:
        #- secondary-cluster-peer
    # specify the schedule(s) on which snapshots should be taken
    # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
    # snapshotSchedules:
    #   - path: /
    #     interval: 24h # daily snapshots
    # The startTime should be mentioned in the format YYYY-MM-DDTHH:MM:SS
    # If startTime is not specified, then by default the start time is considered as midnight UTC.
    # see usage here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#usage
    #     startTime: 2022-07-15T11:55:00
    # manage retention policies
    # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
    # snapshotRetention:
    #   - path: /
    #     duration: "h 24"
---
# create default csi subvolume group
apiVersion: ceph.rook.io/v1
kind: CephFilesystemSubVolumeGroup
metadata:
  name: rook-shared-fs-csi # let's keep the svg CRD name the same as `filesystem name + csi` for the default csi svg
  namespace: rook-ceph # namespace:cluster
spec:
  # The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR.
  name: csi
  # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created
  filesystemName: rook-shared-fs
  # reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups
  # only one out of (export, distributed, random) can be set at a time
  # by default pinning is set with value: distributed=1
  # for disabling default values set (distributed=0)
  pinning:
    distributed: 1 # distributed=<0, 1> (disabled=0)
    # export: # export=<0-256> (disabled=-1)
    # random: # random=[0.0, 1.0](disabled=0.0)
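
A pod can then mount a claim provisioned from this filesystem. A minimal sketch using the illustrative cephfs-pvc claim shown earlier (pod and container names are likewise illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: cephfs-demo
spec:
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "sleep infinity"]
      volumeMounts:
        - name: shared
          mountPath: /data
  volumes:
    - name: shared
      persistentVolumeClaim:
        claimName: cephfs-pvc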
@@ -0,0 +1,7 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
parameters:
  pool: rook-shared-fs-data0
@@ -0,0 +1,34 @@
---
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph
spec:
  metadataServer:
    placement:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution: ~
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZs
              # Use topologyKey: failure-domain.beta.kubernetes.io/zone if your Kubernetes cluster is v1.16 or lower
              # Use topologyKey: topology.kubernetes.io/zone if it is v1.17 or higher
              topologyKey: topology.kubernetes.io/zone
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: kubernetes.io/hostname will place MDS across different hosts
              topologyKey: kubernetes.io/hostname
@@ -0,0 +1,10 @@
---
- op: replace
  path: /spec/dataPools/0/name
  value: data0
- op: replace
  path: /spec/dataPools/0/replicated/size
  value: ${CEPH_POOL_REPLICAS}
- op: replace
  path: /spec/dataPools/0/replicated/requireSafeReplicaSize
  value: false
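
For illustration, applying these JSON-patch operations to the first data pool of filesystem.yaml above with CEPH_POOL_REPLICAS=1 (an assumed value for a small cluster; kURL substitutes the real value at install time) would yield:

dataPools:
  - name: data0
    failureDomain: host
    replicated:
      size: 1
      requireSafeReplicaSize: false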
19 changes: 19 additions & 0 deletions addons/rook/1.13.1/cluster/cephfs/patches/tmpl-filesystem.yaml
@@ -0,0 +1,19 @@
---
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: ${CEPH_POOL_REPLICAS}
      requireSafeReplicaSize: false
  metadataServer:
    resources:
      limits:
        cpu: "500m"
        memory: "1024Mi"
      requests:
        cpu: "500m"
        memory: "1024Mi"
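
Rendered with the same illustrative CEPH_POOL_REPLICAS=1, the metadata pool in this template becomes a single-replica pool with the safety check relaxed, while the MDS resource requests and limits stay fixed:

metadataPool:
  replicated:
    size: 1
    requireSafeReplicaSize: false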
