From e73d41de29a2202f8e2eda725cd973ca1c6d3854 Mon Sep 17 00:00:00 2001
From: Steffen Illium
Date: Wed, 24 Apr 2024 11:02:13 +0200
Subject: [PATCH] add rook

---
 .../04-rook-ceph/base/cluster-values.yaml     | 185 +++++++++++++++++++
 infrastructure/04-rook-ceph/base/ingress.yaml |  15 ++
 .../04-rook-ceph/base/operator-values.yaml    |  83 ++++++++
 .../04-rook-ceph/kustomization.yaml           |  23 +++
 4 files changed, 306 insertions(+)
 create mode 100644 infrastructure/04-rook-ceph/base/cluster-values.yaml
 create mode 100644 infrastructure/04-rook-ceph/base/ingress.yaml
 create mode 100644 infrastructure/04-rook-ceph/base/operator-values.yaml
 create mode 100644 infrastructure/04-rook-ceph/kustomization.yaml

diff --git a/infrastructure/04-rook-ceph/base/cluster-values.yaml b/infrastructure/04-rook-ceph/base/cluster-values.yaml
new file mode 100644
index 0000000..08b9a78
--- /dev/null
+++ b/infrastructure/04-rook-ceph/base/cluster-values.yaml
@@ -0,0 +1,185 @@
+# -- Namespace of the main rook operator
+operatorNamespace: rook-ceph
+
+monitoring:
+  # -- Enable Prometheus integration
+  enabled: false
+
+cephClusterSpec:
+  cephVersion:
+    image: quay.io/ceph/ceph:v18.2.2
+
+  mon:
+    count: 3
+
+  mgr:
+    count: 2
+    allowMultiplePerNode: false
+
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    ssl: false
+
+  # enable the log collector; daemons will log to files and rotate them
+  logCollector:
+    enabled: true
+    periodicity: daily # one of: hourly, daily, weekly, monthly
+    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
+
+  resources:
+    mgr:
+      limits:
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+    mon:
+      limits:
+        memory: "2Gi"
+      requests:
+        cpu: "1000m"
+        memory: "1Gi"
+    osd:
+      limits:
+        memory: "4Gi"
+      requests:
+        cpu: "1000m"
+        memory: "4Gi"
+    prepareosd:
+      # limits: It is not recommended to set limits on the OSD prepare job
+      #   since it's a one-time burst for memory that must be allowed to
+      #   complete without an OOM kill. Note however that if a k8s
+      #   limitRange guardrail is defined external to Rook, the lack of
+      #   a limit here may result in a sync failure, in which case a
+      #   limit should be added. 1200Mi may suffice for up to 15Ti
+      #   OSDs; for larger devices 2Gi may be required.
+      #   cf. https://github.com/rook/rook/pull/11103
+      requests:
+        cpu: "500m"
+        memory: "50Mi"
+    mgr-sidecar:
+      limits:
+        memory: "100Mi"
+      requests:
+        cpu: "100m"
+        memory: "40Mi"
+    crashcollector:
+      limits:
+        memory: "60Mi"
+      requests:
+        cpu: "100m"
+        memory: "60Mi"
+    logcollector:
+      limits:
+        memory: "1Gi"
+      requests:
+        cpu: "100m"
+        memory: "100Mi"
+    cleanup:
+      limits:
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "100Mi"
+    exporter:
+      limits:
+        memory: "128Mi"
+      requests:
+        cpu: "50m"
+        memory: "50Mi"
+
+  # The option to automatically remove OSDs that are out and safe to destroy.
+  removeOSDsIfOutAndSafeToRemove: false
+
+  # priority classes to apply to ceph resources
+  priorityClassNames:
+    mon: system-node-critical
+    osd: system-node-critical
+    mgr: system-cluster-critical
+
+# -- A list of CephBlockPool configurations to deploy
+# @default -- See [below](#ceph-block-pools)
+cephBlockPools:
+  - name: ceph-blockpool
+    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+    spec:
+      failureDomain: host
+      replicated:
+        size: 3
+      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
+      # enableRBDStats: true
+    storageClass:
+      enabled: true
+      name: ceph-block
+      isDefault: true
+      reclaimPolicy: Delete
+      allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
+      mountOptions: []
+      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+      allowedTopologies: []
+      #  - matchLabelExpressions:
+      #      - key: rook-ceph-role
+      #        values:
+      #          - storage-node
+      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
+      parameters:
+        # (optional) mapOptions is a comma-separated list of map options.
+        # For krbd options refer
+        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+        # For nbd options refer
+        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+        # mapOptions: lock_on_read,queue_depth=1024
+
+        # (optional) unmapOptions is a comma-separated list of unmap options.
+        # For krbd options refer
+        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+        # For nbd options refer
+        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+        # unmapOptions: force
+
+        # RBD image format. Defaults to "2".
+        imageFormat: "2"
+
+        # RBD image features, equivalent to the OR'd bitfield value 63.
+        # Available for imageFormat: "2". Older releases of CSI RBD
+        # support only the `layering` feature. The Linux kernel (KRBD) supports the
+        # full feature complement as of 5.4.
+        imageFeatures: layering
+
+        # These secrets contain Ceph admin credentials.
+        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+        # Specify the filesystem type of the volume. If not specified, csi-provisioner
+        # will default to `ext4`. Note that `xfs` is not recommended due to a potential deadlock
+        # in hyperconverged settings where the volume is mounted on the same node as the osds.
+        csi.storage.k8s.io/fstype: ext4
+
+# -- A list of CephFileSystem configurations to deploy
+# @default -- See [below](#ceph-file-systems)
+cephFileSystems:
+  - name: ceph-filesystem
+    spec:
+      # metadataPool, dataPools and metadataServer.activeCount are required by the CephFilesystem CRD
+      metadataPool:
+        replicated:
+          size: 3
+      dataPools:
+        - failureDomain: host
+          replicated:
+            size: 3
+      metadataServer:
+        activeCount: 1
+        activeStandby: true
+        resources:
+          limits:
+            memory: "4Gi"
+          requests:
+            cpu: "1000m"
+            memory: "2Gi"
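Workloads consume the `ceph-block` StorageClass defined above through an ordinary PersistentVolumeClaim. A minimal sketch for reference (the claim name and size are illustrative only, not part of this patch):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-rbd-pvc  # hypothetical name, for illustration
spec:
  accessModes:
    - ReadWriteOnce  # RBD block volumes attach read-write to a single node
  storageClassName: ceph-block
  resources:
    requests:
      storage: 5Gi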
diff --git a/infrastructure/04-rook-ceph/base/ingress.yaml b/infrastructure/04-rook-ceph/base/ingress.yaml
new file mode 100644
index 0000000..7c1c6d7
--- /dev/null
+++ b/infrastructure/04-rook-ceph/base/ingress.yaml
@@ -0,0 +1,15 @@
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+metadata:
+  name: rook-dashboard
+  namespace: rook-ceph
+spec:
+  entryPoints:
+    - web-local
+    - websecure-local
+  routes:
+    - match: Host(`rook.steffenillium.de`)
+      kind: Rule
+      services:
+        # with dashboard.ssl disabled, Rook exposes rook-ceph-mgr-dashboard on HTTP port 7000
+        - name: rook-ceph-mgr-dashboard
+          port: 7000
diff --git a/infrastructure/04-rook-ceph/base/operator-values.yaml b/infrastructure/04-rook-ceph/base/operator-values.yaml
new file mode 100644
index 0000000..9f89164
--- /dev/null
+++ b/infrastructure/04-rook-ceph/base/operator-values.yaml
@@ -0,0 +1,83 @@
+image:
+  repository: rook/ceph
+  tag: v1.14.2
+  pullPolicy: IfNotPresent
+
+crds:
+  # **WARNING** Only set this during the first deployment. If it is disabled later, the CRDs are removed and the cluster may be DESTROYED.
+  enabled: true
+
+
+# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
+logLevel: INFO
+
+# Settings for whether to disable the drivers or other daemons if they are not
+# needed
+csi:
+  # -- Enable Ceph CSI RBD driver
+  enableRbdDriver: true
+  # -- Enable Ceph CSI CephFS driver
+  enableCephfsDriver: true
+  # -- Disable the CSI driver.
+  disableCsiDriver: "false"
+
+  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+  # in some network configurations where the SDN does not provide access to an external cluster or
+  # there is a significant drop in read/write performance
+  enableCSIHostNetwork: true
+  # -- Deprecation note: Rook uses "holder" pods to allow CSI to connect to the multus public network
+  # without needing hosts to be connected to that network. Holder pods are being removed. See issue for details:
+  # https://github.com/rook/rook/issues/13055. New Rook deployments should set this to "true".
+  disableHolderPods: true
+
+  # -- Set replicas for csi provisioner deployment
+  provisionerReplicas: 2
+
+  # -- Set the logging level for the cephCSI containers.
+  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+  logLevel: 0
+
+
+  cephcsi:
+    # -- Ceph CSI image repository
+    repository: quay.io/cephcsi/cephcsi
+    # -- Ceph CSI image tag
+    tag: v3.11.0
+
+  registrar:
+    # -- Kubernetes CSI registrar image repository
+    repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+    # -- Registrar image tag
+    tag: v2.10.0
+
+  provisioner:
+    # -- Kubernetes CSI provisioner image repository
+    repository: registry.k8s.io/sig-storage/csi-provisioner
+    # -- Provisioner image tag
+    tag: v4.0.0
+
+  snapshotter:
+    # -- Kubernetes CSI snapshotter image repository
+    repository: registry.k8s.io/sig-storage/csi-snapshotter
+    # -- Snapshotter image tag
+    tag: v7.0.1
+
+  attacher:
+    # -- Kubernetes CSI Attacher image repository
+    repository: registry.k8s.io/sig-storage/csi-attacher
+    # -- Attacher image tag
+    tag: v4.5.0
+
+  resizer:
+    # -- Kubernetes CSI resizer image repository
+    repository: registry.k8s.io/sig-storage/csi-resizer
+    # -- Resizer image tag
+    tag: v1.10.0
+
+  # -- Image pull policy
+  imagePullPolicy: IfNotPresent
+
+monitoring:
+  # -- Enable monitoring. Requires Prometheus to be pre-installed.
+  # Enabling will also create RBAC rules to allow the Operator to create ServiceMonitors
+  enabled: false
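The operator values above pin the external snapshotter sidecar (csi-snapshotter v7.0.1), so RBD volumes can be snapshotted once a VolumeSnapshotClass referencing the Rook-provisioned CSI driver exists. A minimal sketch, assuming the snapshot CRDs and snapshot controller are already installed cluster-wide (nothing below is created by this patch):

apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: ceph-block-snapshot  # hypothetical name
# the driver name follows the <operator-namespace>.rbd.csi.ceph.com convention
driver: rook-ceph.rbd.csi.ceph.com
parameters:
  clusterID: rook-ceph
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete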
diff --git a/infrastructure/04-rook-ceph/kustomization.yaml b/infrastructure/04-rook-ceph/kustomization.yaml
new file mode 100644
index 0000000..f088160
--- /dev/null
+++ b/infrastructure/04-rook-ceph/kustomization.yaml
@@ -0,0 +1,23 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namespace: rook-ceph
+
+resources:
+  - base/ingress.yaml
+
+
+helmCharts:
+- name: rook-ceph
+  includeCRDs: true
+  version: 1.14.2
+  releaseName: "rook-ceph"
+  repo: https://charts.rook.io/release
+  valuesFile: base/operator-values.yaml
+- name: rook-ceph-cluster
+  includeCRDs: true
+  version: 1.14.2
+  releaseName: "rook-ceph-cluster"
+  repo: https://charts.rook.io/release
+  valuesFile: base/cluster-values.yaml
+
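The operator chart (rook-ceph) is listed before the cluster chart (rook-ceph-cluster) so the CRDs and the operator render ahead of the CephCluster resources; note that kustomize only inflates helmCharts entries when invoked with the --enable-helm flag. For ad-hoc debugging with the ceph CLI, the cluster chart can additionally deploy the Rook toolbox pod. A minimal sketch of the extra values, assuming the rook-ceph-cluster chart's toolbox toggle (not part of this patch):

# appended to base/cluster-values.yaml
toolbox:
  enabled: true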