diff --git a/Documentation/Helm-Charts/ceph-cluster-chart.md b/Documentation/Helm-Charts/ceph-cluster-chart.md index 3d7e323f7262..0f17dbd81ef0 100644 --- a/Documentation/Helm-Charts/ceph-cluster-chart.md +++ b/Documentation/Helm-Charts/ceph-cluster-chart.md @@ -69,6 +69,7 @@ The following table lists the configurable parameters of the rook-operator chart | `cephObjectStores` | A list of CephObjectStore configurations to deploy | See [below](#ceph-object-stores) | | `clusterName` | The metadata.name of the CephCluster CR | The same as the namespace | | `configOverride` | Cluster ceph.conf override | `nil` | +| `csiDriverNamePrefix` | CSI driver name prefix for cephfs, rbd and nfs. | `namespace name where rook-ceph operator is deployed` | | `ingress.dashboard` | Enable an ingress for the ceph-dashboard | `{}` | | `kubeVersion` | Optional override of the target kubernetes version | `nil` | | `monitoring.createPrometheusRules` | Whether to create the Prometheus rules for Ceph alerts | `false` | diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md index 5c81aa879306..c9f9439f8810 100644 --- a/Documentation/Helm-Charts/operator-chart.md +++ b/Documentation/Helm-Charts/operator-chart.md @@ -70,6 +70,7 @@ The following table lists the configurable parameters of the rook-operator chart | `csi.csiCephFSPluginVolume` | The volume of the CephCSI CephFS plugin DaemonSet | `nil` | | `csi.csiCephFSPluginVolumeMount` | The volume mounts of the CephCSI CephFS plugin DaemonSet | `nil` | | `csi.csiCephFSProvisionerResource` | CEPH CSI CephFS provisioner resource requirement list | see values.yaml | +| `csi.csiDriverNamePrefix` | CSI driver name prefix for cephfs, rbd and nfs. | `namespace name where rook-ceph operator is deployed` | | `csi.csiLeaderElectionLeaseDuration` | Duration in seconds that non-leader candidates will wait to force acquire leadership. 
| `137s` | | `csi.csiLeaderElectionRenewDeadline` | Deadline in seconds that the acting leader will retry refreshing leadership before giving up. | `107s` | | `csi.csiLeaderElectionRetryPeriod` | Retry period in seconds the LeaderElector clients should wait between tries of actions. | `26s` | diff --git a/Documentation/Storage-Configuration/Advanced/ceph-configuration.md b/Documentation/Storage-Configuration/Advanced/ceph-configuration.md index 92a0db0a2cd6..86a0d4048d9c 100644 --- a/Documentation/Storage-Configuration/Advanced/ceph-configuration.md +++ b/Documentation/Storage-Configuration/Advanced/ceph-configuration.md @@ -45,7 +45,6 @@ sed -i.bak \ -e "s/\(.*\):.*# namespace:cluster/\1: $ROOK_CLUSTER_NAMESPACE # namespace:cluster/g" \ -e "s/\(.*serviceaccount\):.*:\(.*\) # serviceaccount:namespace:operator/\1:$ROOK_OPERATOR_NAMESPACE:\2 # serviceaccount:namespace:operator/g" \ -e "s/\(.*serviceaccount\):.*:\(.*\) # serviceaccount:namespace:cluster/\1:$ROOK_CLUSTER_NAMESPACE:\2 # serviceaccount:namespace:cluster/g" \ - -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # driver:namespace:operator/\1: $ROOK_OPERATOR_NAMESPACE.\2 # driver:namespace:operator/g" \ -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # driver:namespace:cluster/\1: $ROOK_CLUSTER_NAMESPACE.\2 # driver:namespace:cluster/g" \ common.yaml operator.yaml cluster.yaml # add other files or change these as desired for your config @@ -53,6 +52,10 @@ sed -i.bak \ kubectl apply -f common.yaml -f operator.yaml -f cluster.yaml # add other files as desired for yourconfig ``` +Also see the CSI driver +[documentation](../Ceph-CSI/ceph-csi-drivers.md#configure-csi-drivers-in-non-default-namespace) +to update the csi provisioner names in the storageclass and volumesnapshotclass. 
+ ## Deploying a second cluster If you wish to create a new CephCluster in a separate namespace, you can easily do so diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md index fd8a79f25014..89b24a4bd14f 100644 --- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md +++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md @@ -35,6 +35,59 @@ example, if the Rook operator is running in the namespace `my-namespace` the provisioner value should be `my-namespace.rbd.csi.ceph.com`. The same provisioner name must be set in both the storageclass and snapshotclass. +To find the provisioner name in the example storageclasses and +volumesnapshotclass, search for: `# csi-provisioner-name` + +### Configure custom Driver name prefix for CSI Drivers + +To use a custom prefix for the CSI drivers instead of the namespace prefix, set +the `CSI_DRIVER_NAME_PREFIX` environment variable in the operator configmap. +For instance, to use the prefix `my-prefix` for the CSI drivers, set +the following in the operator configmap: + +```console +kubectl patch cm rook-ceph-operator-config -n rook-ceph -p $'data:\n "CSI_DRIVER_NAME_PREFIX": "my-prefix"' +``` + +Once the configmap is updated, the CSI drivers will be deployed with the +`my-prefix` prefix. The same prefix must be set in both the storageclass and +snapshotclass. For example, to use the prefix `my-prefix` for the +CSI drivers, update the provisioner in the storageclass: + +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-block-sc +provisioner: my-prefix.rbd.csi.ceph.com +... +``` + +The same prefix must be set in the volumesnapshotclass as well: + +```yaml +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: rook-ceph-block-vsc +driver: my-prefix.rbd.csi.ceph.com +... 
+``` + +When the prefix is set, the driver names will be: + +* RBD: `my-prefix.rbd.csi.ceph.com` +* CephFS: `my-prefix.cephfs.csi.ceph.com` +* NFS: `my-prefix.nfs.csi.ceph.com` + +!!! note + Please be careful when setting the `CSI_DRIVER_NAME_PREFIX` + environment variable. It should be done only in fresh deployments because + changing the prefix in an existing cluster will result in unexpected behavior. + +To find the provisioner name in the example storageclasses and +volumesnapshotclass, search for: `# csi-provisioner-name` + ## Liveness Sidecar All CSI pods are deployed with a sidecar container that provides a Prometheus @@ -116,7 +169,6 @@ Refer to the [ephemeral-doc](https://kubernetes.io/docs/concepts/storage/ephemer See example manifests for an [RBD ephemeral volume](https://github.com/rook/rook/tree/master/deploy/examples/csi/rbd/pod-ephemeral.yaml) and a [CephFS ephemeral volume](https://github.com/rook/rook/tree/master/deploy/examples/csi/cephfs/pod-ephemeral.yaml). - ## CSI-Addons Controller The CSI-Addons Controller handles requests from users. 
Users create a CR diff --git a/deploy/charts/rook-ceph-cluster/templates/cephblockpool.yaml b/deploy/charts/rook-ceph-cluster/templates/cephblockpool.yaml index 0efb69cdffb8..23008111d8a0 100644 --- a/deploy/charts/rook-ceph-cluster/templates/cephblockpool.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/cephblockpool.yaml @@ -16,7 +16,11 @@ metadata: name: {{ $blockpool.storageClass.name }} annotations: storageclass.kubernetes.io/is-default-class: "{{ if default false $blockpool.storageClass.isDefault }}true{{ else }}false{{ end }}" +{{- if $root.Values.csiDriverNamePrefix }} +provisioner: {{ $root.Values.csiDriverNamePrefix }}.rbd.csi.ceph.com +{{- else }} provisioner: {{ $root.Values.operatorNamespace }}.rbd.csi.ceph.com +{{- end }} parameters: pool: {{ $blockpool.name }} clusterID: {{ $root.Release.Namespace }} diff --git a/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml b/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml index 97ae8d1d48a6..85581f5cd632 100644 --- a/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml @@ -16,7 +16,11 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: {{ $cephEcStorage.name }} -provisioner: {{ $cephEcStorage.provisioner }} +{{- if $root.Values.csiDriverNamePrefix }} +provisioner: {{ $root.Values.csiDriverNamePrefix }}.rbd.csi.ceph.com +{{- else }} +provisioner: {{ $root.Values.operatorNamespace }}.rbd.csi.ceph.com +{{- end }} parameters: clusterID: {{ $cephEcStorage.parameters.clusterID }} dataPool: {{ $cephEcStorage.parameters.dataPool }} diff --git a/deploy/charts/rook-ceph-cluster/templates/cephfilesystem.yaml b/deploy/charts/rook-ceph-cluster/templates/cephfilesystem.yaml index adbf45ddada0..35c6e72003d6 100644 --- a/deploy/charts/rook-ceph-cluster/templates/cephfilesystem.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/cephfilesystem.yaml @@ -35,7 +35,11 @@ metadata: name: {{ 
$filesystem.storageClass.name }} annotations: storageclass.kubernetes.io/is-default-class: "{{ if default false $filesystem.storageClass.isDefault }}true{{ else }}false{{ end }}" +{{- if $root.Values.csiDriverNamePrefix }} +provisioner: {{ $root.Values.csiDriverNamePrefix }}.cephfs.csi.ceph.com +{{- else }} provisioner: {{ $root.Values.operatorNamespace }}.cephfs.csi.ceph.com +{{- end }} parameters: fsName: {{ $filesystem.name }} pool: {{ $filesystem.name }}-{{ default "data0" $filesystem.storageClass.pool }} diff --git a/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml b/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml index 5f15c94005b6..45b1ba3906ea 100644 --- a/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml +++ b/deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml @@ -1,3 +1,4 @@ +{{- $root := . -}} {{- $filesystemvsc := .Values.cephFileSystemVolumeSnapshotClass -}} {{- $blockpoolvsc := .Values.cephBlockPoolsVolumeSnapshotClass -}} @@ -16,7 +17,11 @@ metadata: {{- if $filesystemvsc.annotations }} {{ toYaml $filesystemvsc.annotations | indent 4 }} {{- end }} -driver: {{ .Values.operatorNamespace }}.cephfs.csi.ceph.com +{{- if $root.Values.csiDriverNamePrefix }} +driver: {{ $root.Values.csiDriverNamePrefix }}.cephfs.csi.ceph.com +{{- else }} +driver: {{ $root.Values.operatorNamespace }}.cephfs.csi.ceph.com +{{- end }} parameters: clusterID: {{ .Release.Namespace }} csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner @@ -42,7 +47,11 @@ metadata: {{- if $blockpoolvsc.annotations }} {{ toYaml $blockpoolvsc.annotations | indent 4 }} {{- end }} -driver: {{ .Values.operatorNamespace }}.rbd.csi.ceph.com +{{- if $root.Values.csiDriverNamePrefix }} +driver: {{ $root.Values.csiDriverNamePrefix }}.rbd.csi.ceph.com +{{- else }} +driver: {{ $root.Values.operatorNamespace }}.rbd.csi.ceph.com +{{- end }} parameters: clusterID: {{ .Release.Namespace }} 
csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner diff --git a/deploy/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml index addf9ddb1579..3953b65b447c 100644 --- a/deploy/charts/rook-ceph-cluster/values.yaml +++ b/deploy/charts/rook-ceph-cluster/values.yaml @@ -647,8 +647,6 @@ cephObjectStores: # if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well #cephECStorageClass: # name: rook-ceph-block -# # Change "rook-ceph" provisioner prefix to match the operator namespace if needed -# provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator # parameters: # # clusterID is the namespace where the rook cluster is running # # If you change this namespace, also change the namespace below where the secret namespaces are defined @@ -687,3 +685,7 @@ cephObjectStores: # imageFeatures: layering # allowVolumeExpansion: true # reclaimPolicy: Delete + +# -- CSI driver name prefix for cephfs, rbd and nfs. 
+# @default -- `namespace name where rook-ceph operator is deployed` +csiDriverNamePrefix: diff --git a/deploy/charts/rook-ceph/templates/configmap.yaml b/deploy/charts/rook-ceph/templates/configmap.yaml index a5e37e98489a..ee7e4814250b 100644 --- a/deploy/charts/rook-ceph/templates/configmap.yaml +++ b/deploy/charts/rook-ceph/templates/configmap.yaml @@ -25,6 +25,9 @@ data: CSI_ENABLE_OMAP_GENERATOR: {{ .Values.csi.enableOMAPGenerator | quote }} CSI_ENABLE_HOST_NETWORK: {{ .Values.csi.enableCSIHostNetwork | quote }} CSI_ENABLE_METADATA: {{ .Values.csi.enableMetadata | quote }} +{{- if .Values.csi.csiDriverNamePrefix }} + CSI_DRIVER_NAME_PREFIX: {{ .Values.csi.csiDriverNamePrefix | quote }} +{{- end }} {{- if .Values.csi.pluginPriorityClassName }} CSI_PLUGIN_PRIORITY_CLASSNAME: {{ .Values.csi.pluginPriorityClassName | quote }} {{- end }} diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml index 770d9c4580c1..4ca32198bf72 100644 --- a/deploy/charts/rook-ceph/values.yaml +++ b/deploy/charts/rook-ceph/values.yaml @@ -147,6 +147,10 @@ csi: # @default -- `0` sidecarLogLevel: + # -- CSI driver name prefix for cephfs, rbd and nfs. + # @default -- `namespace name where rook-ceph operator is deployed` + csiDriverNamePrefix: + # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate # @default -- `RollingUpdate` rbdPluginUpdateStrategy: diff --git a/deploy/examples/csi/cephfs/snapshotclass.yaml b/deploy/examples/csi/cephfs/snapshotclass.yaml index 5a8cdcece7a2..bc1905accc77 100644 --- a/deploy/examples/csi/cephfs/snapshotclass.yaml +++ b/deploy/examples/csi/cephfs/snapshotclass.yaml @@ -3,7 +3,7 @@ apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass metadata: name: csi-cephfsplugin-snapclass -driver: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator +driver: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name parameters: # Specify a string that identifies your cluster. 
Ceph CSI supports any # unique string. When Ceph CSI is deployed by Rook use the Rook namespace, diff --git a/deploy/examples/csi/cephfs/storageclass-ec.yaml b/deploy/examples/csi/cephfs/storageclass-ec.yaml index 1a3559a22726..62e658bf469b 100644 --- a/deploy/examples/csi/cephfs/storageclass-ec.yaml +++ b/deploy/examples/csi/cephfs/storageclass-ec.yaml @@ -2,8 +2,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-cephfs -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator +provisioner: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name parameters: # clusterID is the namespace where the rook cluster is running # If you change this namespace, also change the namespace below where the secret namespaces are defined diff --git a/deploy/examples/csi/cephfs/storageclass.yaml b/deploy/examples/csi/cephfs/storageclass.yaml index c9f599a8366b..0d0bea832a4e 100644 --- a/deploy/examples/csi/cephfs/storageclass.yaml +++ b/deploy/examples/csi/cephfs/storageclass.yaml @@ -2,8 +2,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-cephfs -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator +provisioner: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name parameters: # clusterID is the namespace where the rook cluster is running # If you change this namespace, also change the namespace below where the secret namespaces are defined diff --git a/deploy/examples/csi/nfs/snapshotclass.yaml b/deploy/examples/csi/nfs/snapshotclass.yaml index b11cefd35759..82cd870bffb6 100644 --- a/deploy/examples/csi/nfs/snapshotclass.yaml +++ b/deploy/examples/csi/nfs/snapshotclass.yaml @@ -3,7 +3,7 @@ apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass metadata: name: csi-nfsplugin-snapclass -driver: rook-ceph.nfs.csi.ceph.com # 
driver:namespace:operator +driver: rook-ceph.nfs.csi.ceph.com # csi-provisioner-name parameters: # Specify a string that identifies your cluster. Ceph CSI supports any # unique string. When Ceph CSI is deployed by Rook use the Rook namespace, diff --git a/deploy/examples/csi/nfs/storageclass.yaml b/deploy/examples/csi/nfs/storageclass.yaml index 85e60f75458b..a7fa31ee33d1 100644 --- a/deploy/examples/csi/nfs/storageclass.yaml +++ b/deploy/examples/csi/nfs/storageclass.yaml @@ -2,8 +2,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-nfs -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.nfs.csi.ceph.com +provisioner: rook-ceph.nfs.csi.ceph.com # csi-provisioner-name parameters: # nfsCluster is the name of the NFS cluster as managed by Ceph (sometimes called the NFS cluster ID). # With Rook, this should get the name of the CephNFS resource. diff --git a/deploy/examples/csi/rbd/snapshotclass.yaml b/deploy/examples/csi/rbd/snapshotclass.yaml index 79ed5522d0ab..11d489769d7d 100644 --- a/deploy/examples/csi/rbd/snapshotclass.yaml +++ b/deploy/examples/csi/rbd/snapshotclass.yaml @@ -3,7 +3,7 @@ apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass metadata: name: csi-rbdplugin-snapclass -driver: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator +driver: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name parameters: # Specify a string that identifies your cluster. Ceph CSI supports any # unique string. 
When Ceph CSI is deployed by Rook use the Rook namespace, diff --git a/deploy/examples/csi/rbd/storageclass-ec.yaml b/deploy/examples/csi/rbd/storageclass-ec.yaml index 6804b0b237b3..e611f4a7a506 100644 --- a/deploy/examples/csi/rbd/storageclass-ec.yaml +++ b/deploy/examples/csi/rbd/storageclass-ec.yaml @@ -30,8 +30,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator +provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name parameters: # clusterID is the namespace where the rook cluster is running # If you change this namespace, also change the namespace below where the secret namespaces are defined diff --git a/deploy/examples/csi/rbd/storageclass-test.yaml b/deploy/examples/csi/rbd/storageclass-test.yaml index f5cf3b45c9d1..4ed16cec812d 100644 --- a/deploy/examples/csi/rbd/storageclass-test.yaml +++ b/deploy/examples/csi/rbd/storageclass-test.yaml @@ -18,8 +18,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator +provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name parameters: # clusterID is the namespace where the rook cluster is running # If you change this namespace, also change the namespace below where the secret namespaces are defined diff --git a/deploy/examples/csi/rbd/storageclass.yaml b/deploy/examples/csi/rbd/storageclass.yaml index c1218982c143..09672960cb65 100644 --- a/deploy/examples/csi/rbd/storageclass.yaml +++ b/deploy/examples/csi/rbd/storageclass.yaml @@ -18,8 +18,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-ceph-block -# Change "rook-ceph" provisioner prefix to match the operator namespace if needed -provisioner: 
rook-ceph.rbd.csi.ceph.com +provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name parameters: # clusterID is the namespace where the rook cluster is running # If you change this namespace, also change the namespace below where the secret namespaces are defined @@ -47,14 +46,14 @@ parameters: # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options # unmapOptions: force - # (optional) Set it to true to encrypt each volume with encryption keys - # from a key management system (KMS) - # encrypted: "true" + # (optional) Set it to true to encrypt each volume with encryption keys + # from a key management system (KMS) + # encrypted: "true" - # (optional) Use external key management system (KMS) for encryption key by - # specifying a unique ID matching a KMS ConfigMap. The ID is only used for - # correlation to configmap entry. - # encryptionKMSID: + # (optional) Use external key management system (KMS) for encryption key by + # specifying a unique ID matching a KMS ConfigMap. The ID is only used for + # correlation to configmap entry. + # encryptionKMSID: # RBD image format. Defaults to "2". 
imageFormat: "2" diff --git a/deploy/examples/import-external-cluster.sh b/deploy/examples/import-external-cluster.sh index c5faa06069a2..241f53333244 100644 --- a/deploy/examples/import-external-cluster.sh +++ b/deploy/examples/import-external-cluster.sh @@ -22,8 +22,9 @@ RBD_STORAGE_CLASS_NAME=ceph-rbd CEPHFS_STORAGE_CLASS_NAME=cephfs ROOK_EXTERNAL_MONITOR_SECRET=mon-secret OPERATOR_NAMESPACE=rook-ceph # default set to rook-ceph -RBD_PROVISIONER=$OPERATOR_NAMESPACE".rbd.csi.ceph.com" # driver:namespace:operator -CEPHFS_PROVISIONER=$OPERATOR_NAMESPACE".cephfs.csi.ceph.com" # driver:namespace:operator +CSI_DRIVER_NAME_PREFIX=${CSI_DRIVER_NAME_PREFIX:-$OPERATOR_NAMESPACE} +RBD_PROVISIONER=$CSI_DRIVER_NAME_PREFIX".rbd.csi.ceph.com" # csi-provisioner-name +CEPHFS_PROVISIONER=$CSI_DRIVER_NAME_PREFIX".cephfs.csi.ceph.com" # csi-provisioner-name CLUSTER_ID_RBD=$NAMESPACE CLUSTER_ID_CEPHFS=$NAMESPACE : "${ROOK_EXTERNAL_ADMIN_SECRET:=admin-secret}" diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index d19a6d83b47c..35611ed2c896 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -614,6 +614,12 @@ data: # (Optional) Retry period in seconds the LeaderElector clients should wait between tries of actions. Defaults to 26 seconds. # CSI_LEADER_ELECTION_RETRY_PERIOD: "26s" + # csi driver name prefix for cephfs, rbd and nfs. if not specified, default + # will be the namespace name where rook-ceph operator is deployed. + # search for `# csi-provisioner-name` in the storageclass and + # volumesnapshotclass and update the name accordingly. + # CSI_DRIVER_NAME_PREFIX: "rook-ceph" + # Rook Discover toleration. Will tolerate all taints with all keys. # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format. 
# DISCOVER_TOLERATIONS: | diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index 02671da35088..a41ffe5ef994 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -61,6 +61,12 @@ data: # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity. # CSI_SIDECAR_LOG_LEVEL: "0" + # csi driver name prefix for cephfs, rbd and nfs. if not specified, default + # will be the namespace name where rook-ceph operator is deployed. + # search for `# csi-provisioner-name` in the storageclass and + # volumesnapshotclass and update the name accordingly. + # CSI_DRIVER_NAME_PREFIX: "rook-ceph" + # Set replicas for csi provisioner deployment. CSI_PROVISIONER_REPLICAS: "2" diff --git a/pkg/operator/ceph/csi/csi.go b/pkg/operator/ceph/csi/csi.go index f4aba9cdd047..5415ed626a14 100644 --- a/pkg/operator/ceph/csi/csi.go +++ b/pkg/operator/ceph/csi/csi.go @@ -320,5 +320,8 @@ func (r *ReconcileCSI) setParams(ver *version.Info) error { if strings.EqualFold(k8sutil.GetValue(r.opConfig.Parameters, "CSI_NFS_ATTACH_REQUIRED", "true"), "false") { CSIParam.NFSAttachRequired = false } + + CSIParam.DriverNamePrefix = k8sutil.GetValue(r.opConfig.Parameters, "CSI_DRIVER_NAME_PREFIX", r.opConfig.OperatorNamespace) + return nil } diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go index 9190f031f7e8..f4ef7986598f 100644 --- a/pkg/operator/ceph/csi/spec.go +++ b/pkg/operator/ceph/csi/spec.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "strings" "time" "github.com/rook/rook/pkg/operator/ceph/cluster/telemetry" @@ -36,6 +37,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes" cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes" ) @@ -263,11 +265,17 @@ const ( csiCephFSProvisioner = "csi-cephfsplugin-provisioner" csiNFSProvisioner = "csi-nfsplugin-provisioner" + // cephcsi 
container names + csiRBDContainerName = "csi-rbdplugin" + csiCephFSContainerName = "csi-cephfsplugin" + csiNFSContainerName = "csi-nfsplugin" + RBDDriverShortName = "rbd" CephFSDriverShortName = "cephfs" NFSDriverShortName = "nfs" rbdDriverSuffix = "rbd.csi.ceph.com" cephFSDriverSuffix = "cephfs.csi.ceph.com" + nfsDriverSuffix = "nfs.csi.ceph.com" ) func CSIEnabled() bool { @@ -307,11 +315,25 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI Namespace: r.opConfig.OperatorNamespace, } - tp.DriverNamePrefix = fmt.Sprintf("%s.", r.opConfig.OperatorNamespace) + if strings.HasSuffix(tp.DriverNamePrefix, ".") { + // As the operator appends a dot to the prefix, we must not + // allow the user to add a trailing dot as well, since that would + // result in two dots in the driver name, which causes the CSI + // driver name creation to fail + return errors.Errorf("driver name prefix %q should not end with a dot", tp.DriverNamePrefix) + } + + err = validateCSIDriverNamePrefix(r.opManagerContext, r.context.Clientset, r.opConfig.OperatorNamespace, tp.DriverNamePrefix) + if err != nil { + return err + } + // Add a dot at the end of the prefix for having the driver name prefix + // with format `<prefix>.` 
+ tp.DriverNamePrefix = fmt.Sprintf("%s.", tp.DriverNamePrefix) CephFSDriverName = tp.DriverNamePrefix + cephFSDriverSuffix RBDDriverName = tp.DriverNamePrefix + rbdDriverSuffix - NFSDriverName = tp.DriverNamePrefix + "nfs.csi.ceph.com" + NFSDriverName = tp.DriverNamePrefix + nfsDriverSuffix tp.Param.MountCustomCephConf = CustomCSICephConfigExists @@ -935,3 +957,83 @@ func GenerateNetNamespaceFilePath(ctx context.Context, client client.Client, clu func generateNetNamespaceFilePath(kubeletDirPath, driverFullName, clusterNamespace string) string { return fmt.Sprintf("%s/plugins/%s/%s.net.ns", kubeletDirPath, driverFullName, clusterNamespace) } + +func validateCSIDriverNamePrefix(ctx context.Context, clientset kubernetes.Interface, namespace, driverNamePrefix string) error { + if EnableRBD { + rbdDriverNamePrefix, err := getCSIDriverNamePrefixFromDeployment(ctx, clientset, namespace, csiRBDProvisioner, "csi-rbdplugin") + if err != nil { + return err + } + if rbdDriverNamePrefix != "" { + if rbdDriverNamePrefix != driverNamePrefix { + return errors.Errorf("rbd driver already exists with prefix %q, cannot use prefix %q", rbdDriverNamePrefix, driverNamePrefix) + } + return nil + } + } + + if EnableCephFS { + cephFSDriverNamePrefix, err := getCSIDriverNamePrefixFromDeployment(ctx, clientset, namespace, csiCephFSProvisioner, "csi-cephfsplugin") + if err != nil { + return err + } + if cephFSDriverNamePrefix != "" { + if cephFSDriverNamePrefix != driverNamePrefix { + return errors.Errorf("cephFS driver already exists with prefix %q, cannot use prefix %q", cephFSDriverNamePrefix, driverNamePrefix) + } + return nil + + } + } + + if EnableNFS { + nfsDriverNamePrefix, err := getCSIDriverNamePrefixFromDeployment(ctx, clientset, namespace, csiNFSProvisioner, "csi-nfsplugin") + if err != nil { + return err + } + if nfsDriverNamePrefix != "" { + if nfsDriverNamePrefix != driverNamePrefix { + return errors.Errorf("nfs driver already exists with prefix %q, cannot use prefix %q", 
nfsDriverNamePrefix, driverNamePrefix) + } + return nil + } + } + + return nil +} + +func getCSIDriverNamePrefixFromDeployment(ctx context.Context, clientset kubernetes.Interface, namespace, deploymentName, containerName string) (string, error) { + deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) + if kerrors.IsNotFound(err) { + return "", nil + } + if err != nil { + return "", errors.Wrapf(err, "failed to get deployment %q", deploymentName) + } + + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == containerName { + for _, arg := range container.Args { + if prefix, ok := getPrefixFromArg(arg); ok { + return prefix, nil + } + } + } + } + + return "", errors.Errorf("failed to get CSI driver name from deployment %q", deploymentName) +} + +func getPrefixFromArg(arg string) (string, bool) { + if strings.Contains(arg, "--drivername=") { + driverName := strings.Split(arg, "=")[1] + + for _, suffix := range []string{rbdDriverSuffix, cephFSDriverSuffix, nfsDriverSuffix} { + // Add a dot as we are adding it to the Prefix + if prefix, ok := strings.CutSuffix(driverName, "."+suffix); ok { + return prefix, true + } + } + } + return "", false +} diff --git a/pkg/operator/ceph/csi/spec_test.go b/pkg/operator/ceph/csi/spec_test.go index 9f911785ddfa..5e75fdfbf4d7 100644 --- a/pkg/operator/ceph/csi/spec_test.go +++ b/pkg/operator/ceph/csi/spec_test.go @@ -29,9 +29,11 @@ import ( "github.com/rook/rook/pkg/operator/k8sutil" testop "github.com/rook/rook/pkg/operator/test" "github.com/stretchr/testify/assert" + apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kfake "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -129,3 +131,91 @@ func TestGenerateNetNamespaceFilePath(t *testing.T) { assert.Equal(t, "/foo/plugins/rook-ceph.cephfs.csi.ceph.com/rook-ceph.net.ns", 
netNsFilePath) }) } + +func Test_getCSIDriverNamePrefixFromDeployment(t *testing.T) { + namespace := "test" + deployment := func(name, containerName, drivernameSuffix string) *apps.Deployment { + return &apps.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Spec: apps.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: containerName, + Args: []string{ + "--drivername=test-prefix." + drivernameSuffix, + }, + }, + }, + }, + }, + }, + } + } + clientset := kfake.NewSimpleClientset() + + ctx := context.TODO() + csidrivers := []struct { + testCaseName string + deploymentName string + containerName string + driverNameSuffix string + expectedPrefix string + }{ + { + "get csi driver name prefix for rbd when deployment exists", + csiRBDProvisioner, + csiRBDContainerName, + rbdDriverSuffix, + "test-prefix", + }, + { + "get csi driver name prefix for rbd when deployment does not exist", + "", + "csi-rbdplugin", + "", + "", + }, + { + "get csi driver name prefix for cephfs when deployment exists", + csiCephFSProvisioner, + csiCephFSContainerName, + cephFSDriverSuffix, + "test-prefix", + }, + { + "get csi driver name prefix for cephfs when deployment does not exist", + "", + "csi-cephfsplugin", + "", + "", + }, + { + "get csi driver name prefix for nfs when deployment exists", + csiNFSProvisioner, + csiNFSContainerName, + nfsDriverSuffix, + "test-prefix", + }, + { + "get csi driver name prefix for nfs when deployment does not exist", + "", + "csi-nfsplugin", + "", + "", + }, + } + + for _, c := range csidrivers { + t.Run(c.testCaseName, func(t *testing.T) { + if c.deploymentName != "" { + _, err := clientset.AppsV1().Deployments(namespace).Create(ctx, deployment(c.deploymentName, c.containerName, c.driverNameSuffix), metav1.CreateOptions{}) + assert.NoError(t, err) + } + prefix, err := getCSIDriverNamePrefixFromDeployment(ctx, clientset, namespace, c.deploymentName, c.containerName) + 
assert.NoError(t, err) + assert.Equal(t, c.expectedPrefix, prefix) + }) + } +} diff --git a/tests/framework/installer/ceph_settings.go b/tests/framework/installer/ceph_settings.go index 1b1a27c7db29..41d1d01dcb76 100644 --- a/tests/framework/installer/ceph_settings.go +++ b/tests/framework/installer/ceph_settings.go @@ -110,8 +110,8 @@ func replaceNamespaces(name, manifest, operatorNamespace, clusterNamespace strin manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-cephfs-provisioner-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-cephfs-provisioner-sa") // CSI Drivers - manifest = strings.ReplaceAll(manifest, "rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator", operatorNamespace+".cephfs.csi.ceph.com") - manifest = strings.ReplaceAll(manifest, "rook-ceph.rbd.csi.ceph.com # driver:namespace:operator", operatorNamespace+".rbd.csi.ceph.com") + manifest = strings.ReplaceAll(manifest, "rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name", operatorNamespace+".cephfs.csi.ceph.com") + manifest = strings.ReplaceAll(manifest, "rook-ceph.rbd.csi.ceph.com # csi-provisioner-name", operatorNamespace+".rbd.csi.ceph.com") // Bucket storage class manifest = strings.ReplaceAll(manifest, "rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster", clusterNamespace+".ceph.rook.io/bucket") diff --git a/tests/scripts/create-dev-cluster.sh b/tests/scripts/create-dev-cluster.sh index 674acf4b7f51..3809c84b3ea9 100755 --- a/tests/scripts/create-dev-cluster.sh +++ b/tests/scripts/create-dev-cluster.sh @@ -35,7 +35,7 @@ update_namespaces() { -e "s/\(.*\):.*# namespace:cluster/\1: $ROOK_CLUSTER_NS # namespace:cluster/g" \ -e "s/\(.*serviceaccount\):.*:\(.*\) # serviceaccount:namespace:operator/\1:$ROOK_OPERATOR_NS:\2 # serviceaccount:namespace:operator/g" \ -e "s/\(.*serviceaccount\):.*:\(.*\) # serviceaccount:namespace:cluster/\1:$ROOK_CLUSTER_NS:\2 # serviceaccount:namespace:cluster/g" \ - -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # 
driver:namespace:operator/\1: $ROOK_OPERATOR_NS.\2 # driver:namespace:operator/g" \ + -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # csi-provisioner-name/\1: $ROOK_OPERATOR_NS.\2 # csi-provisioner-name/g" \ -e "s/\(.*\): [-_A-Za-z0-9]*\.\(.*\) # driver:namespace:cluster/\1: $ROOK_CLUSTER_NS.\2 # driver:namespace:cluster/g" \ "$file" done