From a0ec34b3b5f0a1a59d659940a99ab8ad48406daa Mon Sep 17 00:00:00 2001
From: Redouane Kachach
Date: Fri, 27 Oct 2023 13:13:45 +0200
Subject: [PATCH] test: adding a test example for cluster on local pvc creation

Signed-off-by: Redouane Kachach
---
 deploy/examples/cluster-on-local-pvc-test.yaml | 175 +++++++++++++++++
 1 file changed, 175 insertions(+)
 create mode 100644 deploy/examples/cluster-on-local-pvc-test.yaml

diff --git a/deploy/examples/cluster-on-local-pvc-test.yaml b/deploy/examples/cluster-on-local-pvc-test.yaml
new file mode 100644
index 0000000000000..84e7b649107ee
--- /dev/null
+++ b/deploy/examples/cluster-on-local-pvc-test.yaml
@@ -0,0 +1,175 @@
+#################################################################################################################
+# Define the settings for a minimal rook-ceph test cluster backed by local PVs on a single node.
+# This example expects one node (e.g. minikube) with two available disks. Please modify it according to your
+# environment. See the documentation for more details on storage settings available.
+# For example, to create the cluster:
+#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+#   kubectl create -f cluster-on-local-pvc-test.yaml
+#################################################################################################################
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: local0-0
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 20Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  # PV for mon must be a filesystem volume.
+  volumeMode: Filesystem
+  local:
+    # If you want to use dm devices like logical volume, please replace `/dev/vdb` with their device names like `/dev/vg-name/lv-name`.
+    path: /dev/vdb
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+                - minikube
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: local0-1
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 20Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  # PV for OSD must be a block volume.
+  volumeMode: Block
+  local:
+    # If you want to use dm devices like logical volume, please replace `/dev/vdc` with their device names like `/dev/vg-name/lv-name`.
+    path: /dev/vdc
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+                - minikube
+---
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: my-cluster
+  namespace: rook-ceph # namespace:cluster
+spec:
+  dataDirHostPath: /var/lib/rook
+  # A single mon/mgr is enough for a single-node test cluster.
+  mon:
+    count: 1
+    allowMultiplePerNode: true
+    volumeClaimTemplate:
+      spec:
+        storageClassName: local-storage
+        resources:
+          requests:
+            storage: 20Gi
+  mgr:
+    count: 1
+    allowMultiplePerNode: true
+    modules:
+      - name: pg_autoscaler
+        enabled: true
+  dashboard:
+    enabled: true
+  # Keep the test cluster minimal: no crash collector pods.
+  crashCollector:
+    disable: true
+  cephVersion:
+    image: quay.io/ceph/ceph:v17.2.6
+    allowUnsupported: false
+  skipUpgradeChecks: false
+  continueUpgradeAfterChecksEvenIfNotHealthy: false
+  storage:
+    storageClassDeviceSets:
+      - name: set1
+        count: 1
+        portable: false
+        tuneDeviceClass: true
+        tuneFastDeviceClass: false
+        encrypted: false
+        placement:
+          topologySpreadConstraints:
+            - maxSkew: 1
+              topologyKey: kubernetes.io/hostname
+              whenUnsatisfiable: ScheduleAnyway
+              labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - rook-ceph-osd
+                      - rook-ceph-osd-prepare
+        preparePlacement:
+          podAntiAffinity:
+            preferredDuringSchedulingIgnoredDuringExecution:
+              - weight: 100
+                podAffinityTerm:
+                  labelSelector:
+                    matchExpressions:
+                      - key: app
+                        operator: In
+                        values:
+                          - rook-ceph-osd
+                      - key: app
+                        operator: In
+                        values:
+                          - rook-ceph-osd-prepare
+                  topologyKey: kubernetes.io/hostname
+        resources:
+          # These are the OSD daemon limits. For OSD prepare limits, see the separate section below for "prepareosd" resources
+          # limits:
+          #   cpu: "500m"
+          #   memory: "4Gi"
+          # requests:
+          #   cpu: "500m"
+          #   memory: "4Gi"
+        volumeClaimTemplates:
+          - metadata:
+              name: data
+              # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph
+              # annotations:
+              #   crushDeviceClass: hybrid
+            spec:
+              resources:
+                requests:
+                  storage: 20Gi
+              # IMPORTANT: Change the storage class depending on your environment
+              storageClassName: local-storage
+              volumeMode: Block
+              accessModes:
+                - ReadWriteOnce
+    # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement
+    onlyApplyOSDPlacement: false
+  resources:
+    # prepareosd:
+    #   limits:
+    #     cpu: "200m"
+    #     memory: "200Mi"
+    #   requests:
+    #     cpu: "200m"
+    #     memory: "200Mi"
+  priorityClassNames:
+    mon: system-node-critical
+    osd: system-node-critical
+    mgr: system-cluster-critical
+  disruptionManagement:
+    managePodBudgets: true
+    osdMaintenanceTimeout: 30
+    pgHealthCheckTimeout: 0