diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index 352c5498e785..45f930cbf8fa 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -146,6 +146,7 @@ The following table lists the configurable parameters of the rook-operator chart
 | `logLevel` | Global log level for the operator. Options: `ERROR`, `WARNING`, `INFO`, `DEBUG` | `"INFO"` |
 | `monitoring.enabled` | Enable monitoring. Requires Prometheus to be pre-installed. Enabling will also create RBAC rules to allow Operator to create ServiceMonitors | `false` |
 | `nodeSelector` | Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment. | `{}` |
+| `obcProvisionerNamePrefix` | Specify a custom prefix for the OBC provisioner name in place of the cluster namespace | `ceph cluster namespace` |
 | `priorityClassName` | Set the priority class for the rook operator deployment if desired | `nil` |
 | `pspEnable` | If true, create & use PSP resources | `false` |
 | `rbacAggregate.enableOBCs` | If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims | `false` |
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md
index f95664a3bd41..4e80e6eeb782 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md
@@ -10,6 +10,11 @@ Rook supports the creation of new buckets and access to existing buckets via two
 
 An OBC references a storage class which is created by an administrator. The storage class defines whether the bucket requested is a new bucket or an existing bucket. It also defines the bucket retention policy. Users request a new or existing bucket by creating an OBC which is shown below. The ceph provisioner detects the OBC and creates a new bucket or grants access to an existing bucket, depending the storage class referenced in the OBC. It also generates a Secret which provides credentials to access the bucket, and a ConfigMap which contains the bucket's endpoint. Application pods consume the information in the Secret and ConfigMap to access the bucket. Please note that to make provisioner watch the cluster namespace only you need to set `ROOK_OBC_WATCH_OPERATOR_NAMESPACE` to `true` in the operator manifest, otherwise it watches all namespaces.
 
+By default, the OBC provisioner name found in the storage class includes the cluster namespace as a prefix. A custom prefix can be applied by setting `ROOK_OBC_PROVISIONER_NAME_PREFIX` in the `rook-ceph-operator-config` configmap.
+
+!!! Note
+    Changing the prefix is not supported on existing clusters, as it may break existing OBCs.
+
 ## Example
 
 ### OBC Custom Resource
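For illustration (not part of this change set, and all names below are example values): assuming the base provisioner name `ceph.rook.io/bucket`, the prefix replaces the namespace portion of the provisioner string that a bucket StorageClass must reference. A minimal sketch:

```yaml
# Sketch only: "my-prefix" and "my-store" are hypothetical values.
apiVersion: v1
kind: ConfigMap
metadata:
  name: rook-ceph-operator-config
  namespace: rook-ceph # assumes the operator runs in rook-ceph
data:
  ROOK_OBC_PROVISIONER_NAME_PREFIX: "my-prefix"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-bucket
# Without a prefix (and with ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"),
# this would be "rook-ceph.ceph.rook.io/bucket".
provisioner: my-prefix.ceph.rook.io/bucket
reclaimPolicy: Delete
parameters:
  objectStoreName: my-store
  objectStoreNamespace: rook-ceph
```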
diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md
index 838182e0c9f7..0b46e4ecb2dc 100644
--- a/PendingReleaseNotes.md
+++ b/PendingReleaseNotes.md
@@ -16,3 +16,4 @@ read affinity setting in cephCluster CR (CSIDriverOptions section) in [PR](https
 - Create object stores with shared metadata and data pools. Isolation between object stores is enabled via RADOS namespaces.
 - The feature support for VolumeSnapshotGroup has been added to the RBD and CephFS CSI driver.
 - Support for virtual style hosting for s3 buckets in the CephObjectStore.
+- Add an option to specify a prefix for the OBC provisioner name.
diff --git a/deploy/charts/rook-ceph/templates/configmap.yaml b/deploy/charts/rook-ceph/templates/configmap.yaml
index 4ce7b75dc278..60a143010418 100644
--- a/deploy/charts/rook-ceph/templates/configmap.yaml
+++ b/deploy/charts/rook-ceph/templates/configmap.yaml
@@ -9,6 +9,9 @@ data:
   ROOK_LOG_LEVEL: {{ .Values.logLevel | quote }}
   ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: {{ .Values.cephCommandsTimeoutSeconds | quote }}
   ROOK_OBC_WATCH_OPERATOR_NAMESPACE: {{ .Values.enableOBCWatchOperatorNamespace | quote }}
+{{- if .Values.obcProvisionerNamePrefix }}
+  ROOK_OBC_PROVISIONER_NAME_PREFIX: {{ .Values.obcProvisionerNamePrefix | quote }}
+{{- end }}
   ROOK_CEPH_ALLOW_LOOP_DEVICES: {{ .Values.allowLoopDevices | quote }}
   ROOK_ENABLE_DISCOVERY_DAEMON: {{ .Values.enableDiscoveryDaemon | quote }}
 {{- if .Values.discoverDaemonUdev }}
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index c01b4016dc3d..66c4b1687ec1 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -616,6 +616,10 @@ imagePullSecrets:
 # -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
 enableOBCWatchOperatorNamespace: true
 
+# -- Specify a custom prefix for the OBC provisioner name in place of the cluster namespace
+# @default -- `ceph cluster namespace`
+obcProvisionerNamePrefix:
+
 monitoring:
   # -- Enable monitoring. Requires Prometheus to be pre-installed.
   # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index d269206da14f..0a2128edb12e 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -539,6 +539,9 @@ data:
   # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
   ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
 
+  # Custom prefix for the OBC provisioner name instead of the ceph cluster namespace. Do not set this on an existing cluster.
+  # ROOK_OBC_PROVISIONER_NAME_PREFIX: "custom-prefix"
+
   # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
   # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
   ROOK_ENABLE_DISCOVERY_DAEMON: "false"
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 7bb91ac1f065..96eb0fc8c57e 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -481,9 +481,12 @@ data:
   # (Optional) Retry Period in seconds the LeaderElector clients should wait between tries of actions. Defaults to 26 seconds.
   # CSI_LEADER_ELECTION_RETRY_PERIOD: "26s"
 
-  # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
+  # Whether the OBC provisioner should watch the ceph cluster namespace only. If false, all namespaces are watched and the default provisioner name is used.
  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
 
+  # Custom prefix for the OBC provisioner name instead of the ceph cluster namespace. Do not set this on an existing cluster.
+  # ROOK_OBC_PROVISIONER_NAME_PREFIX: "custom-prefix"
+
   # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
   # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
   ROOK_ENABLE_DISCOVERY_DAEMON: "false"
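On the Helm side, the template change above only renders the variable when `obcProvisionerNamePrefix` is set, so existing installations are unaffected by default. A minimal values override, reusing the illustrative `my-prefix`:

```yaml
# values.yaml override (sketch); renders into the operator configmap as
#   ROOK_OBC_PROVISIONER_NAME_PREFIX: "my-prefix"
obcProvisionerNamePrefix: my-prefix
```

The same result can be had at install time with `--set obcProvisionerNamePrefix=my-prefix`.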
diff --git a/pkg/operator/ceph/object/bucket/util.go b/pkg/operator/ceph/object/bucket/util.go
index 32c07308ff6a..56afa0910db8 100644
--- a/pkg/operator/ceph/object/bucket/util.go
+++ b/pkg/operator/ceph/object/bucket/util.go
@@ -43,7 +43,10 @@ const (
 func NewBucketController(cfg *rest.Config, p *Provisioner, data map[string]string) (*provisioner.Provisioner, error) {
 	const allNamespaces = ""
-	provName := cephObject.GetObjectBucketProvisioner(data, p.clusterInfo.Namespace)
+	provName, err := cephObject.GetObjectBucketProvisioner(data, p.clusterInfo.Namespace)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get provisioner name")
+	}
 
 	logger.Infof("ceph bucket provisioner launched watching for provisioner %q", provName)
 	return provisioner.NewProvisioner(cfg, provName, p, allNamespaces)
diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go
index 722313e12a06..7c65e77f9592 100644
--- a/pkg/operator/ceph/object/objectstore.go
+++ b/pkg/operator/ceph/object/objectstore.go
@@ -42,6 +42,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
 )
 
 const (
@@ -1074,13 +1075,20 @@ func poolName(poolPrefix, poolName string) string {
 }
 
-// GetObjectBucketProvisioner returns the bucket provisioner name appended with operator namespace if OBC is watching on it
+// GetObjectBucketProvisioner returns the bucket provisioner name, prefixed with the configured custom prefix or with the operator namespace if OBC is watching on it
-func GetObjectBucketProvisioner(data map[string]string, namespace string) string {
+func GetObjectBucketProvisioner(data map[string]string, namespace string) (string, error) {
 	provName := bucketProvisionerName
 	obcWatchOnNamespace := k8sutil.GetValue(data, "ROOK_OBC_WATCH_OPERATOR_NAMESPACE", "false")
-	if strings.EqualFold(obcWatchOnNamespace, "true") {
+	obcProvisionerNamePrefix := k8sutil.GetValue(data, "ROOK_OBC_PROVISIONER_NAME_PREFIX", "")
+	if obcProvisionerNamePrefix != "" {
+		// the prefix becomes the leading DNS label of the provisioner name, so validate it as one
+		errList := validation.IsDNS1123Label(obcProvisionerNamePrefix)
+		if len(errList) > 0 {
+			return "", errors.Errorf("invalid OBC provisioner name prefix %q: %v", obcProvisionerNamePrefix, errList)
+		}
+		provName = fmt.Sprintf("%s.%s", obcProvisionerNamePrefix, bucketProvisionerName)
+	} else if strings.EqualFold(obcWatchOnNamespace, "true") {
 		provName = fmt.Sprintf("%s.%s", namespace, bucketProvisionerName)
 	}
-	return provName
+	return provName, nil
 }
 
 // CheckDashboardUser returns true if the dashboard user exists and has the same credentials as the given user, else return false
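Because the prefix becomes the leading DNS label of the provisioner name, it must pass Kubernetes' DNS-1123 label validation. A standalone sketch of that check (the sample prefixes are illustrative, not from this change set):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// A DNS-1123 label is at most 63 characters of lowercase alphanumerics
	// or '-', and must start and end with an alphanumeric character.
	for _, prefix := range []string{"my-prefix", "my-prefix.", "My_Prefix"} {
		if errs := validation.IsDNS1123Label(prefix); len(errs) > 0 {
			fmt.Printf("%q rejected: %v\n", prefix, errs)
			continue
		}
		fmt.Printf("%q accepted -> %s.ceph.rook.io/bucket\n", prefix, prefix)
	}
}
```

This is why the `"my-prefix."` case in the tests below returns an error: the trailing dot fails label validation.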
%v", obcProvisionerNamePrefix, errList) + } + provName = fmt.Sprintf("%s.%s", obcProvisionerNamePrefix, bucketProvisionerName) + } else if obcWatchOnNamespace == "true" { provName = fmt.Sprintf("%s.%s", namespace, bucketProvisionerName) } - return provName + return provName, nil } // CheckDashboardUser returns true if the dashboard user exists and has the same credentials as the given user, else return false diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go index 96e7d63d58db..517560cd4036 100644 --- a/pkg/operator/ceph/object/objectstore_test.go +++ b/pkg/operator/ceph/object/objectstore_test.go @@ -502,17 +502,40 @@ func TestGetObjectBucketProvisioner(t *testing.T) { testNamespace := "test-namespace" t.Setenv(k8sutil.PodNamespaceEnvVar, testNamespace) - t.Run("watch single namespace", func(t *testing.T) { + t.Run("watch ceph cluster namespace", func(t *testing.T) { data := map[string]string{"ROOK_OBC_WATCH_OPERATOR_NAMESPACE": "true"} - bktprovisioner := GetObjectBucketProvisioner(data, testNamespace) + bktprovisioner, err := GetObjectBucketProvisioner(data, testNamespace) assert.Equal(t, fmt.Sprintf("%s.%s", testNamespace, bucketProvisionerName), bktprovisioner) + assert.NoError(t, err) }) t.Run("watch all namespaces", func(t *testing.T) { data := map[string]string{"ROOK_OBC_WATCH_OPERATOR_NAMESPACE": "false"} - bktprovisioner := GetObjectBucketProvisioner(data, testNamespace) + bktprovisioner, err := GetObjectBucketProvisioner(data, testNamespace) assert.Equal(t, bucketProvisionerName, bktprovisioner) + assert.NoError(t, err) + }) + + t.Run("prefix object provisioner", func(t *testing.T) { + data := map[string]string{"ROOK_OBC_PROVISIONER_NAME_PREFIX": "my-prefix"} + bktprovisioner, err := GetObjectBucketProvisioner(data, testNamespace) + assert.Equal(t, "my-prefix."+bucketProvisionerName, bktprovisioner) + assert.NoError(t, err) + }) + + t.Run("watch ceph cluster namespace and prefix object provisioner", func(t *testing.T) { + data := map[string]string{"ROOK_OBC_WATCH_OPERATOR_NAMESPACE": "true", "ROOK_OBC_PROVISIONER_NAME_PREFIX": "my-prefix"} + bktprovisioner, err := GetObjectBucketProvisioner(data, testNamespace) + assert.Equal(t, "my-prefix."+bucketProvisionerName, bktprovisioner) + assert.NoError(t, err) + }) + + t.Run("invalid prefix value for object provisioner", func(t *testing.T) { + data := map[string]string{"ROOK_OBC_PROVISIONER_NAME_PREFIX": "my-prefix."} + _, err := GetObjectBucketProvisioner(data, testNamespace) + assert.Error(t, err) }) + } func TestRGWPGNumVersion(t *testing.T) {