# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

monitoring:
  # -- Enable Prometheus integration
  enabled: false
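  # A hedged sketch: to turn the integration on and also ship the chart's
  # alerting rules, something like the following could be set (key name
  # assumed from the upstream rook-ceph-cluster chart):
  # enabled: true
  # createPrometheusRules: true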

cephClusterSpec:
  cephVersion:
    image: quay.io/ceph/ceph:v18.2.2

  mon:
    count: 3

  mgr:
    count: 2
    allowMultiplePerNode: false

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    ssl: false
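  # A hedged usage note: with ssl: false the dashboard is served over plain
  # HTTP; one common way to reach it (standard Rook service name and port
  # assumed) is:
  #   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 7000:7000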

  # enable log collector, daemons will log on files and rotate
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

  resources:
    mgr:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        memory: "4Gi"
      requests:
        cpu: "1000m"
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #   since it's a one-time burst for memory that must be allowed to
      #   complete without an OOM kill. Note however that if a k8s
      #   limitRange guardrail is defined external to Rook, the lack of
      #   a limit here may result in a sync failure, in which case a
      #   limit should be added. 1200Mi may suffice for up to 15Ti OSDs;
      #   for larger devices 2Gi may be required.
      #   cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
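      # A hedged sketch per the note above: if a limitRange in the namespace
      # does force a limit on this job, the following could be uncommented
      # (2Gi is the upper bound the note suggests):
      # limits:
      #   memory: "2Gi"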
    mgr-sidecar:
      limits:
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"
    exporter:
      limits:
        memory: "128Mi"
      requests:
        cpu: "50m"
        memory: "50Mi"

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/v1.14.2/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      # enableRBDStats: true
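      # A hedged alternative sketch (see the CRD doc linked above): an
      # erasure-coded layout in place of 3x replication would look like the
      # following; note the StorageClass side needs additional configuration
      # for EC-backed RBD:
      # erasureCoded:
      #   dataChunks: 2
      #   codingChunks: 1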
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      #   - matchLabelExpressions:
      #       - key: rook-ceph-role
      #         values:
      #           - storage-node
      # see https://github.com/rook/rook/blob/v1.14.2/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to the OR'd bitfield value 63.
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD)
        # supports the full feature complement as of 5.4.
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified,
        # csi-provisioner will default to `ext4`. Note that `xfs` is not
        # recommended due to a potential deadlock in hyperconverged settings
        # where the volume is mounted on the same node as the OSDs.
        csi.storage.k8s.io/fstype: ext4
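
# A hedged usage sketch (illustrative names, not part of the chart values):
# once the chart has created the `ceph-block` StorageClass above, a workload
# can request RBD-backed storage with an ordinary PersistentVolumeClaim:
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-rbd-pvc
# spec:
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 10Gi
#   storageClassName: ceph-block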

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    spec:
      metadataServer:
        resources:
          limits:
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "2Gi"