From c78bd1cbc61b885ae4a2385ec6432b121dd21f1c Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Fri, 22 Sep 2023 16:04:30 +0530
Subject: [PATCH] external: update healthchecker caps for rbd command

When creating a NetworkFence CR, the IPs to block are obtained by running
the `rbd status ...` command. However, the client.healthchecker user did
not have the caps required to run it, so the command failed. Add the
required cap `profile rbd-read-only` to the osd caps so that the rbd
command can be executed.

Signed-off-by: subhamkrai
---
 .../CRDs/Cluster/external-cluster.md          | 70 ++++++++--------
 ...create-external-cluster-resources-tests.py |  3 +-
 .../create-external-cluster-resources.py      | 81 ++++++++++++-------
 3 files changed, 92 insertions(+), 62 deletions(-)

diff --git a/Documentation/CRDs/Cluster/external-cluster.md b/Documentation/CRDs/Cluster/external-cluster.md
index a2e97ae3878b..c53e988a037b 100644
--- a/Documentation/CRDs/Cluster/external-cluster.md
+++ b/Documentation/CRDs/Cluster/external-cluster.md
@@ -16,8 +16,8 @@ In external mode, Rook will provide the configuration for the CSI driver and oth
 
 Create the desired types of storage in the provider Ceph cluster:
 
-- [RBD pools](https://docs.ceph.com/en/latest/rados/operations/pools/#create-a-pool)
-- [CephFS filesystem](https://docs.ceph.com/en/quincy/cephfs/createfs/)
+* [RBD pools](https://docs.ceph.com/en/latest/rados/operations/pools/#create-a-pool)
+* [CephFS filesystem](https://docs.ceph.com/en/quincy/cephfs/createfs/)
 
 ## Commands on the source Ceph cluster
 
@@ -31,35 +31,35 @@ Run the python script [create-external-cluster-resources.py](https://github.com/
 python3 create-external-cluster-resources.py --rbd-data-pool-name --cephfs-filesystem-name --rgw-endpoint --namespace --format bash
 ```
 
-- `--namespace`: Namespace where CephCluster will run, for example `rook-ceph-external`
-- `--format bash`: The format of the output
-- `--rbd-data-pool-name`: The name of the RBD data pool
-- `--alias-rbd-data-pool-name`: Provides an alias for the RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore
-- `--rgw-endpoint`: (optional) The RADOS Gateway endpoint in the format `:` or `:`.
-- `--rgw-pool-prefix`: (optional) The prefix of the RGW pools. If not specified, the default prefix is `default`
-- `--rgw-tls-cert-path`: (optional) RADOS Gateway endpoint TLS certificate file path
-- `--rgw-skip-tls`: (optional) Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED)
-- `--rbd-metadata-ec-pool-name`: (optional) Provides the name of erasure coded RBD metadata pool, used for creating ECRBDStorageClass.
-- `--monitoring-endpoint`: (optional) Ceph Manager prometheus exporter endpoints (comma separated list of entries of active and standby mgrs)
-- `--monitoring-endpoint-port`: (optional) Ceph Manager prometheus exporter port
-- `--skip-monitoring-endpoint`: (optional) Skip prometheus exporter endpoints, even if they are available. Useful if the prometheus module is not enabled
-- `--ceph-conf`: (optional) Provide a Ceph conf file
-- `--keyring`: (optional) Path to Ceph keyring file, to be used with `--ceph-conf`
-- `--k8s-cluster-name`: (optional) Kubernetes cluster name
-- `--output`: (optional) Output will be stored into the provided file
-- `--dry-run`: (optional) Prints the executed commands without running them
-- `--run-as-user`: (optional) Provides a user name to check the cluster's health status, must be prefixed by `client`.
-- `--cephfs-metadata-pool-name`: (optional) Provides the name of the cephfs metadata pool
-- `--cephfs-filesystem-name`: (optional) The name of the filesystem, used for creating CephFS StorageClass
-- `--cephfs-data-pool-name`: (optional) Provides the name of the CephFS data pool, used for creating CephFS StorageClass
-- `--rados-namespace`: (optional) Divides a pool into separate logical namespaces, used for creating RBD PVC in a RadosNamespaces
-- `--subvolume-group`: (optional) Provides the name of the subvolume group, used for creating CephFS PVC in a subvolumeGroup
-- `--rgw-realm-name`: (optional) Provides the name of the rgw-realm
-- `--rgw-zone-name`: (optional) Provides the name of the rgw-zone
-- `--rgw-zonegroup-name`: (optional) Provides the name of the rgw-zone-group
-- `--upgrade`: (optional) Upgrades the 'Ceph CSI keyrings (For example: client.csi-cephfs-provisioner) with new permissions needed for the new cluster version and older permission will still be applied.
-- `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--k8s-cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
-- `--v2-port-enable`: (optional) Enables the v2 mon port (3300) for mons.
+* `--namespace`: Namespace where CephCluster will run, for example `rook-ceph-external`
+* `--format bash`: The format of the output
+* `--rbd-data-pool-name`: The name of the RBD data pool
+* `--alias-rbd-data-pool-name`: Provides an alias for the RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore
+* `--rgw-endpoint`: (optional) The RADOS Gateway endpoint in the format `:` or `:`.
+* `--rgw-pool-prefix`: (optional) The prefix of the RGW pools. If not specified, the default prefix is `default`
+* `--rgw-tls-cert-path`: (optional) RADOS Gateway endpoint TLS certificate file path
+* `--rgw-skip-tls`: (optional) Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED)
+* `--rbd-metadata-ec-pool-name`: (optional) Provides the name of erasure coded RBD metadata pool, used for creating ECRBDStorageClass.
+* `--monitoring-endpoint`: (optional) Ceph Manager prometheus exporter endpoints (comma separated list of entries of active and standby mgrs)
+* `--monitoring-endpoint-port`: (optional) Ceph Manager prometheus exporter port
+* `--skip-monitoring-endpoint`: (optional) Skip prometheus exporter endpoints, even if they are available. Useful if the prometheus module is not enabled
+* `--ceph-conf`: (optional) Provide a Ceph conf file
+* `--keyring`: (optional) Path to Ceph keyring file, to be used with `--ceph-conf`
+* `--k8s-cluster-name`: (optional) Kubernetes cluster name
+* `--output`: (optional) Output will be stored into the provided file
+* `--dry-run`: (optional) Prints the executed commands without running them
+* `--run-as-user`: (optional) Provides a user name to check the cluster's health status, must be prefixed by `client`.
+* `--cephfs-metadata-pool-name`: (optional) Provides the name of the cephfs metadata pool
+* `--cephfs-filesystem-name`: (optional) The name of the filesystem, used for creating CephFS StorageClass
+* `--cephfs-data-pool-name`: (optional) Provides the name of the CephFS data pool, used for creating CephFS StorageClass
+* `--rados-namespace`: (optional) Divides a pool into separate logical namespaces, used for creating RBD PVC in a RadosNamespaces
+* `--subvolume-group`: (optional) Provides the name of the subvolume group, used for creating CephFS PVC in a subvolumeGroup
+* `--rgw-realm-name`: (optional) Provides the name of the rgw-realm
+* `--rgw-zone-name`: (optional) Provides the name of the rgw-zone
+* `--rgw-zonegroup-name`: (optional) Provides the name of the rgw-zone-group
+* `--upgrade`: (optional) Upgrades the cephCSIKeyrings (for example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version. Older permissions will still be applied.
+* `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--k8s-cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
+* `--v2-port-enable`: (optional) Enables the v2 mon port (3300) for mons.
 
 ### Multi-tenancy
 
@@ -87,6 +87,7 @@ python3 create-external-cluster-resources.py --rbd-data-pool-name --
 ### Upgrade Example
 
 1) If consumer cluster doesn't have restricted caps, this will upgrade all the default csi-users (non-restricted):
+
 ```console
 python3 create-external-cluster-resources.py --upgrade
 ```
@@ -170,8 +171,9 @@ If not installing with Helm, here are the steps to install with manifests.
    rook-ceph-external   /var/lib/rook   162m   Connected   HEALTH_OK
    ```
 
-2. Verify the creation of the storage class depending on the rbd pools and filesystem provided.
+2. Verify the creation of the storage class depending on the rbd pools and filesystem provided.
    `ceph-rbd` and `cephfs` would be the respective names for the RBD and CephFS storage classes.
+
    ```console
    kubectl -n rook-ceph-external get sc
    ```
@@ -203,16 +205,18 @@ Create the object store resources:
 If encryption or compression on the wire is needed, specify the `--v2-port-enable` flag.
 If the v2 address type is present in the `ceph quorum_status`, then the output of 'ceph mon data' i.e, `ROOK_EXTERNAL_CEPH_MON_DATA` will use the v2 port(`3300`).
 
-## Exporting Rook to another cluster
+## Exporting Rook to another cluster
 
 If you have multiple K8s clusters running, and want to use the local `rook-ceph` cluster as the central storage, you can export the settings from this cluster with the following steps.
 
 1) Copy create-external-cluster-resources.py into the directory `/etc/ceph/` of the toolbox.
+
    ```console
   toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
   kubectl -n rook-ceph cp deploy/examples/create-external-cluster-resources.py $toolbox:/etc/ceph
    ```
+
 2) Exec to the toolbox pod and execute create-external-cluster-resources.py with needed options to create required [users and keys](#supported-features).
 
 !!!
important diff --git a/deploy/examples/create-external-cluster-resources-tests.py b/deploy/examples/create-external-cluster-resources-tests.py index 327581a65b52..df37f0d0ca4b 100644 --- a/deploy/examples/create-external-cluster-resources-tests.py +++ b/deploy/examples/create-external-cluster-resources-tests.py @@ -85,7 +85,7 @@ def test_non_zero_return_and_error(self): self.rjObj.cluster.return_val = 1 self.rjObj.cluster.err_message = "Dummy Error" try: - self.rjObj.create_checkerKey() + self.rjObj.create_checkerKey("client.healthchecker") self.fail("Failed to raise an exception, 'ext.ExecutionFailureException'") except ext.ExecutionFailureException as err: print(f"Successfully thrown error.\nError: {err}") @@ -175,6 +175,7 @@ def test_upgrade_user_permissions(self): ) # for testing, we are using 'DummyRados' object self.rjObj.cluster = ext.DummyRados.Rados() + self.rjObj._arg_parser.rgw_pool_prefix = "default" self.rjObj.main() def test_monitoring_endpoint_validation(self): diff --git a/deploy/examples/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py index 5e6cd896ba25..3d5f8b8cdbdc 100644 --- a/deploy/examples/create-external-cluster-resources.py +++ b/deploy/examples/create-external-cluster-resources.py @@ -114,8 +114,8 @@ def _init_cmd_output_map(self): self.cmd_names["mgr services"] ] = """{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}""" self.cmd_output_map[ - """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}""" - ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]""" + """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}""" + ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]""" self.cmd_output_map[ """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}""" ] = """[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]""" @@ -135,8 +135,11 @@ def _init_cmd_output_map(self): """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}""" ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd 
blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]""" self.cmd_output_map[ - """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}""" - ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" + """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}""" + ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" + self.cmd_output_map[ + """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}""" + ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSRKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" self.cmd_output_map[ """{"format": "json", "prefix": "mgr services"}""" ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}""" @@ -145,7 +148,7 @@ def _init_cmd_output_map(self): ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}""" self.cmd_output_map[ """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}""" - ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" + ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile 
rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]""" self.cmd_output_map[ """{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}""" ] = """[]""" @@ -471,7 +474,7 @@ def gen_arg_parser(cls, args_to_parse=None): "--upgrade", action="store_true", default=False, - help="Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) with new permissions needed for the new cluster version and older permission will still be applied." + help="Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied." + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default csi users(non-restricted)" + "For restricted users(For example: client.csi-cephfs-provisioner-openshift-storage-myfs), users created using --restricted-auth-permission flag need to pass mandatory flags" + "mandatory flags: '--rbd-data-pool-name, --k8s-cluster-name and --run-as-user' flags while upgrading" @@ -629,15 +632,6 @@ def __init__(self, arg_list=None): self.output_file = self._arg_parser.output self.ceph_conf = self._arg_parser.ceph_conf self.ceph_keyring = self._arg_parser.keyring - self.MIN_USER_CAP_PERMISSIONS = { - "mgr": "allow command config", - "mon": "allow r, allow command quorum_status, allow command version", - "osd": "allow rwx pool={0}.rgw.meta, " - + "allow r pool=.rgw.root, " - + "allow rw pool={0}.rgw.control, " - + "allow rx pool={0}.rgw.log, " - + "allow x pool={0}.rgw.buckets.index", - } # if user not provided, give a default user if not self.run_as_user and not self._arg_parser.upgrade: self.run_as_user = self.EXTERNAL_USER_NAME @@ -969,6 +963,16 @@ def get_rbd_node_caps_and_entity(self): return caps, entity + def get_healthchecker_caps_and_entity(self): + entity = "client.healthchecker" + caps = { + "mon": "allow r, allow command quorum_status, allow command version", + "mgr": "allow command config", + "osd": f"profile rbd-read-only, allow rwx pool={self._arg_parser.rgw_pool_prefix}.rgw.meta, allow r pool=.rgw.root, allow rw pool={self._arg_parser.rgw_pool_prefix}.rgw.control, allow rx pool={self._arg_parser.rgw_pool_prefix}.rgw.log, allow x pool={self._arg_parser.rgw_pool_prefix}.rgw.buckets.index", + } + + return caps, entity + def get_caps_and_entity(self, user_name): if "client.csi-cephfs-provisioner" in user_name: if "client.csi-cephfs-provisioner" != user_name: @@ -986,6 +990,10 @@ def get_caps_and_entity(self, user_name): if "client.csi-rbd-node" != user_name: self._arg_parser.restricted_auth_permission = True return self.get_rbd_node_caps_and_entity() + if "client.healthchecker" in user_name: + if "client.healthchecker" != user_name: + self._arg_parser.restricted_auth_permission = True + return self.get_healthchecker_caps_and_entity() raise ExecutionFailureException( f"no user found with user_name: {user_name}, " @@ -1131,22 +1139,15 @@ def get_cephfs_data_pool_details(self): f"Using the data-pool: '{self._arg_parser.cephfs_data_pool_name}'\n" ) - def create_checkerKey(self): + def create_checkerKey(self, user): + caps, entity = self.get_caps_and_entity(user) cmd_json = { "prefix": "auth get-or-create", - "entity": self.run_as_user, - "caps": [ - "mon", - self.MIN_USER_CAP_PERMISSIONS["mon"], - "mgr", - 
self.MIN_USER_CAP_PERMISSIONS["mgr"], - "osd", - self.MIN_USER_CAP_PERMISSIONS["osd"].format( - self._arg_parser.rgw_pool_prefix - ), - ], + "entity": entity, + "caps": [cap for cap_list in list(caps.items()) for cap in cap_list], "format": "json", } + if self._arg_parser.dry_run: return self.dry_run( "ceph " @@ -1156,6 +1157,11 @@ def create_checkerKey(self): + " " + " ".join(cmd_json["caps"]) ) + # check if user already exist + user_key = self.check_user_exist(entity) + if user_key != "": + return user_key, f"{entity.split('.', 1)[1]}" + ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json) # if there is an unsuccessful attempt, if ret_val != 0 or len(json_out) == 0: @@ -1467,7 +1473,9 @@ def _gen_output_map(self): self.out_map["ROOK_EXTERNAL_FSID"] = self.get_fsid() self.out_map["ROOK_EXTERNAL_USERNAME"] = self.run_as_user self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"] = self.get_ceph_external_mon_data() - self.out_map["ROOK_EXTERNAL_USER_SECRET"] = self.create_checkerKey() + self.out_map["ROOK_EXTERNAL_USER_SECRET"] = self.create_checkerKey( + "client.healthchecker" + ) self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"] = self.get_ceph_dashboard_link() ( self.out_map["CSI_RBD_NODE_SECRET"], @@ -1793,12 +1801,28 @@ def upgrade_users_permissions(self): "client.csi-cephfs-provisioner", "client.csi-rbd-node", "client.csi-rbd-provisioner", + "client.healthchecker", ] if self.run_as_user != "" and self.run_as_user not in users: users.append(self.run_as_user) for user in users: self.upgrade_user_permissions(user) + def get_rgw_pool_name_during_upgrade(self, user, caps): + if user == "client.healthchecker": + # when admin has not provided rgw pool name during upgrade, + # get the rgw pool name from client.healthchecker user which was used during connection + if not self._arg_parser.rgw_pool_prefix: + # To get value 'default' which is rgw pool name from 'allow rwx pool=default.rgw.meta' + pattern = r"pool=(.*?)\.rgw\.meta" + match = re.search(pattern, caps) + if match: + self._arg_parser.rgw_pool_prefix = match.group(1) + else: + raise ExecutionFailureException( + "failed to get rgw pool name for upgrade" + ) + def upgrade_user_permissions(self, user): # check whether the given user exists or not cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"} @@ -1807,6 +1831,7 @@ def upgrade_user_permissions(self, user): print(f"user {user} not found for upgrading.") return existing_caps = json_out[0]["caps"] + self.get_rgw_pool_name_during_upgrade(user, str(existing_caps)) new_cap, _ = self.get_caps_and_entity(user) cap_keys = ["mon", "mgr", "osd", "mds"] caps = []
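
For illustration (not part of the patch above): a minimal sketch of how the updated `client.healthchecker` caps could be exercised on an existing external cluster. The pool and image names are placeholders, and `--rgw-pool-prefix` is passed explicitly here even though the upgrade path can also derive it from the user's existing caps.

```console
# Re-run the script on the provider cluster to upgrade the existing users,
# including client.healthchecker, with the new caps.
python3 create-external-cluster-resources.py --upgrade --rgw-pool-prefix default

# Confirm that the osd caps of the healthchecker user now include 'profile rbd-read-only'.
ceph auth get client.healthchecker

# With the new cap, read-only rbd commands such as 'rbd status' succeed for this
# user; this is what supplies the client IPs to block in a NetworkFence CR.
# 'replicapool/csi-vol-dummy' is a placeholder pool/image.
rbd status replicapool/csi-vol-dummy --id healthchecker
```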