# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

monitoring:
  # -- Enable Prometheus integration
  enabled: false
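# A minimal sketch of applying this values file with Helm, assuming the Rook
# chart repository has been added as "rook-release" and the operator chart is
# already running in the "rook-ceph" namespace referenced above:
#
#   helm repo add rook-release https://charts.rook.io/release
#   helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
#     --namespace rook-ceph -f values.yaml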
cephClusterSpec:
  cephVersion:
    image: quay.io/ceph/ceph:v18.2.2

  mon:
    count: 3
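  # An odd number of mons (3 or 5) is what Ceph needs to keep quorum; the two
  # mgr replicas below run as an active/standby pair.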
  mgr:
    count: 2
    allowMultiplePerNode: false

  # Enable the Ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    ssl: false

  # Enable the log collector; daemons will log to files and rotate them
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
  resources:
    mgr:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        memory: "4Gi"
      requests:
        cpu: "1000m"
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #         since it's a one-time burst for memory that must be allowed to
      #         complete without an OOM kill. Note however that if a k8s
      #         limitRange guardrail is defined external to Rook, the lack of
      #         a limit here may result in a sync failure, in which case a
      #         limit should be added. 1200Mi may suffice for up to 15Ti
      #         OSDs; for larger devices 2Gi may be required.
      #         cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
    mgr-sidecar:
      limits:
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"
    exporter:
      limits:
        memory: "128Mi"
      requests:
        cpu: "50m"
        memory: "50Mi"
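  # A sketch of overriding a single value here at install time instead of
  # editing this file (same release and chart names assumed as in the comment
  # near the top of the file):
  #
  #   helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
  #     --namespace rook-ceph -f values.yaml \
  #     --set cephClusterSpec.resources.osd.limits.memory=8Gi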
  # The option to automatically remove OSDs that are out and safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # Priority classes to apply to Ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        imageFormat: "2"
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/fstype: ext4
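# A minimal PVC sketch that consumes the "ceph-block" StorageClass defined
# above; the name and size are illustrative assumptions, and RBD volumes are
# normally mounted by a single node, hence ReadWriteOnce:
#
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: example-rbd-pvc        # hypothetical name
#   spec:
#     accessModes:
#       - ReadWriteOnce
#     storageClassName: ceph-block
#     resources:
#       requests:
#         storage: 10Gi            # illustrative size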
# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "2Gi"
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      # (Optional) specify a data pool to use; must be the name of one of the data pools above, 'data0' by default
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
      parameters:
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/fstype: ext4
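# A minimal PVC sketch against the "ceph-filesystem" StorageClass defined
# above; CephFS volumes can be mounted read-write by many pods, so
# ReadWriteMany is used here, and the name and size are illustrative
# assumptions:
#
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: example-cephfs-pvc     # hypothetical name
#   spec:
#     accessModes:
#       - ReadWriteMany
#     storageClassName: ceph-filesystem
#     resources:
#       requests:
#         storage: 5Gi             # illustrative size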