@@ -101,9 +107,11 @@ export const ObjectServiceDetailsCard: React.FC<{}> = () => {
}
error={systemLoadError || dashboardLinkLoadError}
>
-
- {t('Multicloud Object Gateway')}
-
+ {hasMCG && (
+
+ {t('Multicloud Object Gateway')}
+
+ )}
{hasRGW && (
{
return providersList;
};
-const ResourceProviders: React.FC<{}> = () => {
- const { t } = useCustomTranslation();
-
+const ResourceProvidersBody_: React.FC = () => {
const [providersTypesQueryResult, providersTypesQueryResultError] =
useCustomPrometheusPoll({
query: RESOURCE_PROVIDERS_QUERY.PROVIDERS_TYPES,
@@ -79,6 +81,35 @@ const ResourceProviders: React.FC<{}> = () => {
const providerTypes = filterProviders(allProviders);
+ return (
+
+ {providerTypes.map((provider) => (
+
+ ))}
+
+ );
+};
+
+const ResourceProviders: React.FC<{}> = () => {
+ const { t } = useCustomTranslation();
+
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const hasMCG = systemFlags[clusterNs]?.isNoobaaAvailable;
+
return (
@@ -90,24 +121,7 @@ const ResourceProviders: React.FC<{}> = () => {
-
- {providerTypes.map((provider) => (
-
- ))}
-
+ {hasMCG ? : }
);
diff --git a/packages/ocs/dashboards/object-service/status-card/status-card.tsx b/packages/ocs/dashboards/object-service/status-card/status-card.tsx
index d7bfd94bd..6d72977e1 100644
--- a/packages/ocs/dashboards/object-service/status-card/status-card.tsx
+++ b/packages/ocs/dashboards/object-service/status-card/status-card.tsx
@@ -1,9 +1,9 @@
import * as React from 'react';
-import { RGW_FLAG } from '@odf/core/features';
-import { useSafeK8sWatchResource } from '@odf/core/hooks';
import { CephObjectStoreModel } from '@odf/core/models';
import { NooBaaSystemModel } from '@odf/core/models';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import { secretResource } from '@odf/core/resources';
+import { getResourceInNs } from '@odf/core/utils';
import {
useCustomPrometheusPoll,
usePrometheusBasePath,
@@ -17,10 +17,7 @@ import {
filterRGWAlerts,
} from '@odf/shared/utils';
import { referenceForModel } from '@odf/shared/utils';
-import {
- useK8sWatchResource,
- useFlag,
-} from '@openshift-console/dynamic-plugin-sdk';
+import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import {
AlertsBody,
AlertItem,
@@ -28,6 +25,7 @@ import {
} from '@openshift-console/dynamic-plugin-sdk-internal';
import { SubsystemHealth } from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';
import * as _ from 'lodash-es';
+import { useParams } from 'react-router-dom-v5-compat';
import {
Gallery,
GalleryItem,
@@ -42,6 +40,7 @@ import {
dataResiliencyQueryMap,
ObjectServiceDashboardQuery,
} from '../../../queries';
+import { ODFSystemParams } from '../../../types';
import { decodeRGWPrefix } from '../../../utils';
import { ObjectServiceStatus } from './object-service-health';
import { getNooBaaState, getRGWHealthState } from './statuses';
@@ -59,6 +58,8 @@ const cephObjectStoreResource = {
const ObjectStorageAlerts = () => {
const [alerts, loaded, loadError] = useAlerts();
+ // ToDo (epic 4422): Get StorageCluster name and namespace from the Alert object
+ // and filter Alerts based on that for a particular cluster.
const filteredAlerts =
loaded && !loadError && !_.isEmpty(alerts)
? [...filterNooBaaAlerts(alerts), ...filterRGWAlerts(alerts)]
@@ -79,16 +80,24 @@ const ObjectStorageAlerts = () => {
};
const StatusCard: React.FC<{}> = () => {
- const isRGWSupported = useFlag(RGW_FLAG);
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const isRGWSupported = systemFlags[clusterNs]?.isRGWAvailable;
+ const isMCGSupported = systemFlags[clusterNs]?.isNoobaaAvailable;
+ const managedByOCS = systemFlags[clusterNs]?.ocsClusterName;
+
const { t } = useCustomTranslation();
const [secretData, secretLoaded, secretLoadError] =
- useSafeK8sWatchResource(secretResource);
- const [noobaa, noobaaLoaded, noobaaLoadError] =
+ useK8sWatchResource(secretResource(clusterNs));
+ const [noobaas, noobaaLoaded, noobaaLoadError] =
useK8sWatchResource(noobaaResource);
- const [rgw, rgwLoaded, rgwLoadError] = useK8sWatchResource(
- cephObjectStoreResource
- );
+ const [rgws, rgwLoaded, rgwLoadError] = useK8sWatchResource<
+ K8sResourceKind[]
+ >(cephObjectStoreResource);
+
+ const noobaa = getResourceInNs(noobaas, clusterNs);
+ const rgw = getResourceInNs(rgws, clusterNs);
const rgwPrefix = React.useMemo(
() =>
@@ -98,10 +107,9 @@ const StatusCard: React.FC<{}> = () => {
[secretData, secretLoaded, secretLoadError, isRGWSupported]
);
- const rgwResiliencyQuery =
- dataResiliencyQueryMap[
- ObjectServiceDashboardQuery.RGW_REBUILD_PROGRESS_QUERY
- ](rgwPrefix);
+ const rgwResiliencyQuery = dataResiliencyQueryMap[
+ ObjectServiceDashboardQuery.RGW_REBUILD_PROGRESS_QUERY
+ ](rgwPrefix, managedByOCS);
const [healthStatusResult, healthStatusError] = useCustomPrometheusPoll({
query: StatusCardQueries.HEALTH_QUERY,
@@ -130,7 +138,7 @@ const StatusCard: React.FC<{}> = () => {
);
const RGWState =
- !rgwLoadError && rgwLoaded ? getRGWHealthState(rgw[0]) : undefined;
+ !rgwLoadError && rgwLoaded ? getRGWHealthState(rgw) : undefined;
const dataResiliencyState: SubsystemHealth = getDataResiliencyState(
[{ response: progressResult, error: progressError }],
@@ -152,14 +160,14 @@ const StatusCard: React.FC<{}> = () => {
diff --git a/packages/ocs/dashboards/object-service/storage-efficiency-card/storage-efficiency-card.tsx b/packages/ocs/dashboards/object-service/storage-efficiency-card/storage-efficiency-card.tsx
index c24982965..eb1c973f4 100644
--- a/packages/ocs/dashboards/object-service/storage-efficiency-card/storage-efficiency-card.tsx
+++ b/packages/ocs/dashboards/object-service/storage-efficiency-card/storage-efficiency-card.tsx
@@ -1,6 +1,8 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import { ObjectStorageEfficiencyQueries } from '@odf/ocs/queries';
import { EfficiencyItemBody } from '@odf/shared/dashboards/storage-efficiency/storage-efficiency-card-item';
+import { DataUnavailableError } from '@odf/shared/generic';
import {
useCustomPrometheusPoll,
usePrometheusBasePath,
@@ -8,9 +10,11 @@ import {
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { humanizeBinaryBytes, humanizePercentage } from '@odf/shared/utils';
import { getGaugeValue } from '@odf/shared/utils';
+import { useParams } from 'react-router-dom-v5-compat';
import { Card, CardBody, CardHeader, CardTitle } from '@patternfly/react-core';
+import { ODFSystemParams } from '../../../types';
-const StorageEfficiencyCard: React.FC<{}> = () => {
+const EfficiencyItemBody_: React.FC = () => {
const { t } = useCustomTranslation();
const [compressionQueryResult, compressionQueryResultError] =
@@ -84,14 +88,28 @@ const StorageEfficiencyCard: React.FC<{}> = () => {
getStats: savingStats,
};
+ return (
+ <>
+
+
+ >
+ );
+};
+
+const StorageEfficiencyCard: React.FC<{}> = () => {
+ const { t } = useCustomTranslation();
+
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const hasMCG = systemFlags[clusterNs]?.isNoobaaAvailable;
+
return (
{t('Storage efficiency')}
-
-
+ {hasMCG ? : }
);
diff --git a/packages/ocs/dashboards/ocs-system-dashboard.tsx b/packages/ocs/dashboards/ocs-system-dashboard.tsx
index cac7fd0bd..708f18683 100644
--- a/packages/ocs/dashboards/ocs-system-dashboard.tsx
+++ b/packages/ocs/dashboards/ocs-system-dashboard.tsx
@@ -6,12 +6,7 @@
*/
import * as React from 'react';
-import {
- OCS_INDEPENDENT_FLAG,
- MCG_FLAG,
- CEPH_FLAG,
- OCS_NFS_ENABLED,
-} from '@odf/core/features';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import { LoadingBox } from '@odf/shared/generic/status-box';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import Tabs, { TabPage } from '@odf/shared/utils/Tabs';
@@ -19,9 +14,10 @@ import {
Overview,
OverviewGrid,
OverviewGridCard,
- useFlag,
} from '@openshift-console/dynamic-plugin-sdk';
import { TFunction } from 'i18next';
+import { useParams } from 'react-router-dom-v5-compat';
+import { ODFSystemParams } from '../types';
import { StatusCard as NFSStatusCard } from './network-file-system/status-card/status-card';
import { ThroughputCard } from './network-file-system/throughput-card/throughput-card';
import { TopClientsCard } from './network-file-system/top-clients-card/top-clients-card';
@@ -184,10 +180,15 @@ const nfsPage = (t: TFunction): TabPage => {
const OCSSystemDashboard: React.FC<{}> = () => {
const { t } = useCustomTranslation();
- const isIndependent = useFlag(OCS_INDEPENDENT_FLAG);
- const isObjectServiceAvailable = useFlag(MCG_FLAG);
- const isCephAvailable = useFlag(CEPH_FLAG);
- const isNFSEnabled = useFlag(OCS_NFS_ENABLED);
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+
+ const isIndependent = systemFlags[clusterNs]?.isExternalMode;
+ const isMCGAvailable = systemFlags[clusterNs]?.isNoobaaAvailable;
+ const isRGWAvailable = systemFlags[clusterNs]?.isRGWAvailable;
+ const isObjectServiceAvailable = isMCGAvailable || isRGWAvailable;
+ const isCephAvailable = systemFlags[clusterNs]?.isCephAvailable;
+ const isNFSEnabled = systemFlags[clusterNs]?.isNFSEnabled;
const showInternalDashboard = !isIndependent && isCephAvailable;
const showNFSDashboard = !isIndependent && isNFSEnabled;
diff --git a/packages/ocs/dashboards/odf-system-dashboard.tsx b/packages/ocs/dashboards/odf-system-dashboard.tsx
index a83407274..4db0d3c1b 100644
--- a/packages/ocs/dashboards/odf-system-dashboard.tsx
+++ b/packages/ocs/dashboards/odf-system-dashboard.tsx
@@ -1,25 +1,28 @@
import * as React from 'react';
import NamespaceSafetyBox from '@odf/core/components/utils/safety-box';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import PageHeading from '@odf/shared/heading/page-heading';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { referenceForModel } from '@odf/shared/utils';
import Tabs, { TabPage } from '@odf/shared/utils/Tabs';
import { useFlag } from '@openshift-console/dynamic-plugin-sdk';
import { useParams } from 'react-router-dom-v5-compat';
-import {
- CEPH_FLAG,
- OCS_INDEPENDENT_FLAG,
- PROVIDER_MODE,
-} from '../../odf/features';
+import { PROVIDER_MODE } from '../../odf/features';
import { BlockPoolListPage } from '../block-pool/BlockPoolListPage';
import { CephBlockPoolModel } from '../models';
+import { ODFSystemParams } from '../types';
import OCSSystemDashboard from './ocs-system-dashboard';
const blockPoolHref = referenceForModel(CephBlockPoolModel);
const ODFSystemDashboard: React.FC<{}> = ({}) => {
- const { systemName: title } = useParams();
const { t } = useCustomTranslation();
+
+ const { systemName: title, namespace: clusterNs } =
+ useParams();
+ const { systemFlags, areFlagsLoaded, flagsLoadError } =
+ useODFSystemFlagsSelector();
+
const breadcrumbs = [
{
name: t('StorageSystems'),
@@ -38,9 +41,9 @@ const ODFSystemDashboard: React.FC<{}> = ({}) => {
component: OCSSystemDashboard,
},
]);
- const isCephAvailable = useFlag(CEPH_FLAG);
- const isExternal = useFlag(OCS_INDEPENDENT_FLAG);
const isProviderMode = useFlag(PROVIDER_MODE);
+ const isCephAvailable = systemFlags[clusterNs]?.isCephAvailable;
+ const isExternal = systemFlags[clusterNs]?.isExternalMode;
React.useEffect(() => {
const isBlockPoolAdded = pages.find((page) => page.href === blockPoolHref);
@@ -64,12 +67,15 @@ const ODFSystemDashboard: React.FC<{}> = ({}) => {
}
}, [isExternal, isCephAvailable, isProviderMode, pages, setPages, t]);
- const arePagesLoaded = pages.length > 0;
+ const loaded = pages.length > 0 && areFlagsLoaded;
return (
<>
-
+
>
diff --git a/packages/ocs/dashboards/persistent-external/breakdown-card.tsx b/packages/ocs/dashboards/persistent-external/breakdown-card.tsx
index c2c4fe1b5..421a56781 100644
--- a/packages/ocs/dashboards/persistent-external/breakdown-card.tsx
+++ b/packages/ocs/dashboards/persistent-external/breakdown-card.tsx
@@ -1,5 +1,8 @@
import * as React from 'react';
-import { useODFNamespaceSelector } from '@odf/core/redux';
+import {
+ useODFNamespaceSelector,
+ useODFSystemFlagsSelector,
+} from '@odf/core/redux';
import { BreakdownCardBody } from '@odf/shared/dashboards/breakdown-card/breakdown-body';
import { getSelectOptions } from '@odf/shared/dashboards/breakdown-card/breakdown-dropdown';
import {
@@ -16,6 +19,7 @@ import {
getInstantVectorStats,
sortInstantVectorStats,
} from '@odf/shared/utils';
+import { useParams } from 'react-router-dom-v5-compat';
import {
Select,
SelectProps,
@@ -25,6 +29,7 @@ import {
CardTitle,
} from '@patternfly/react-core';
import { getBreakdownMetricsQuery } from '../../queries';
+import { ODFSystemParams } from '../../types';
import { getStackChartStats } from '../../utils/metrics';
import {
NamespaceDropdown,
@@ -41,10 +46,16 @@ export const BreakdownCard: React.FC = () => {
const [isOpenBreakdownSelect, setBreakdownSelect] = React.useState(false);
const [pvcNamespace, setPVCNamespace] = React.useState('');
+ const { namespace: clusterNs } = useParams();
const { odfNamespace } = useODFNamespaceSelector();
+ const { systemFlags } = useODFSystemFlagsSelector();
+
+ // names of created StorageClasses are prefixed by the StorageCluster name
+ const storageClassNamePrefix = systemFlags[clusterNs]?.ocsClusterName;
const { queries, model, metric } = getBreakdownMetricsQuery(
metricType,
+ storageClassNamePrefix,
pvcNamespace,
true
);
diff --git a/packages/ocs/dashboards/persistent-external/details-card.tsx b/packages/ocs/dashboards/persistent-external/details-card.tsx
index 4ee8de349..58b1f3652 100644
--- a/packages/ocs/dashboards/persistent-external/details-card.tsx
+++ b/packages/ocs/dashboards/persistent-external/details-card.tsx
@@ -1,23 +1,32 @@
import * as React from 'react';
import { CEPH_BRAND_NAME, OCS_OPERATOR } from '@odf/core/constants';
import { ODF_MODEL_FLAG } from '@odf/core/features';
-import { useSafeK8sWatchResources } from '@odf/core/hooks';
import { useODFNamespaceSelector } from '@odf/core/redux';
import { getOperatorVersion } from '@odf/core/utils';
+import { getStorageClusterInNs } from '@odf/core/utils';
import { ODF_OPERATOR } from '@odf/shared/constants';
import { useFetchCsv } from '@odf/shared/hooks/use-fetch-csv';
import { SecretModel } from '@odf/shared/models';
import { getName } from '@odf/shared/selectors';
-import { SecretKind, K8sResourceKind } from '@odf/shared/types';
+import {
+ SecretKind,
+ K8sResourceKind,
+ StorageClusterKind,
+} from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { referenceForModel } from '@odf/shared/utils';
import { ExternalLink } from '@odf/shared/utils/link';
-import { useFlag } from '@openshift-console/dynamic-plugin-sdk';
+import {
+ useFlag,
+ useK8sWatchResources,
+} from '@openshift-console/dynamic-plugin-sdk';
import { DetailsBody } from '@openshift-console/dynamic-plugin-sdk-internal';
import { OverviewDetailItem as DetailItem } from '@openshift-console/plugin-shared';
import { Base64 } from 'js-base64';
+import { useParams } from 'react-router-dom-v5-compat';
import { Card, CardBody, CardHeader, CardTitle } from '@patternfly/react-core';
import { StorageClusterModel } from '../../models';
+import { ODFSystemParams } from '../../types';
import { getNetworkEncryption } from '../../utils';
const getCephLink = (secret: SecretKind): string => {
@@ -33,16 +42,14 @@ type ResourcesObject = {
};
};
-const k8sResources = (ns: string) => ({
+const k8sResources = (clusterNs: string) => ({
ocs: {
kind: referenceForModel(StorageClusterModel),
- namespaced: true,
isList: true,
- namespace: ns,
},
secret: {
kind: SecretModel.kind,
- namespace: ns,
+ namespace: clusterNs,
name: 'rook-ceph-dashboard-link',
},
});
@@ -52,15 +59,19 @@ export const DetailsCard: React.FC = () => {
const isODF = useFlag(ODF_MODEL_FLAG);
const { odfNamespace, isNsSafe } = useODFNamespaceSelector();
+ const { namespace: clusterNs } = useParams();
- const resourcesObj: ResourcesObject = useSafeK8sWatchResources(k8sResources);
- const inTransitEncryptionStatus = getNetworkEncryption(
- resourcesObj['ocs'].data?.[0]
- )
+ const resourcesObj: ResourcesObject = useK8sWatchResources(
+ k8sResources(clusterNs)
+ );
+ const ocsCluster = getStorageClusterInNs(
+ resourcesObj['ocs'].data as StorageClusterKind[],
+ clusterNs
+ );
+ const inTransitEncryptionStatus = getNetworkEncryption(ocsCluster)
? t('Enabled')
: t('Disabled');
-
- const ocsName = getName(resourcesObj['ocs'].data?.[0]);
+ const ocsName = getName(ocsCluster);
const [csv, csvLoaded, csvError] = useFetchCsv({
specName: !isODF ? OCS_OPERATOR : ODF_OPERATOR,
diff --git a/packages/ocs/dashboards/persistent-external/status-card.tsx b/packages/ocs/dashboards/persistent-external/status-card.tsx
index 24ca11c3f..5530cac32 100644
--- a/packages/ocs/dashboards/persistent-external/status-card.tsx
+++ b/packages/ocs/dashboards/persistent-external/status-card.tsx
@@ -1,12 +1,15 @@
import * as React from 'react';
import { cephClusterResource } from '@odf/core/resources';
-import { K8sResourceKind } from '@odf/shared/types';
+import { getResourceInNs as getCephClusterInNs } from '@odf/core/utils';
+import { getCephHealthState } from '@odf/ocs/utils';
+import { K8sResourceKind, CephClusterKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import {
HealthBody,
HealthItem,
} from '@openshift-console/dynamic-plugin-sdk-internal';
+import { useParams } from 'react-router-dom-v5-compat';
import {
GalleryItem,
Gallery,
@@ -14,15 +17,23 @@ import {
CardHeader,
CardTitle,
} from '@patternfly/react-core';
-import { getCephHealthState } from '../persistent-internal/status-card/utils';
+import { ODFSystemParams } from '../../types';
export const StatusCard: React.FC = () => {
const { t } = useCustomTranslation();
const [data, loaded, loadError] =
useK8sWatchResource(cephClusterResource);
+ const { namespace: clusterNs } = useParams();
+
const cephHealth = getCephHealthState(
- { ceph: { data, loaded, loadError } },
+ {
+ ceph: {
+ data: getCephClusterInNs(data as CephClusterKind[], clusterNs),
+ loaded,
+ loadError,
+ },
+ },
t
);
diff --git a/packages/ocs/dashboards/persistent-external/utilization-card.tsx b/packages/ocs/dashboards/persistent-external/utilization-card.tsx
index 508761468..efef3cf5e 100644
--- a/packages/ocs/dashboards/persistent-external/utilization-card.tsx
+++ b/packages/ocs/dashboards/persistent-external/utilization-card.tsx
@@ -1,4 +1,5 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import { PrometheusUtilizationItem } from '@odf/shared/dashboards/utilization-card/prometheus-utilization-item';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { humanizeBinaryBytes } from '@odf/shared/utils';
@@ -7,6 +8,7 @@ import {
UtilizationBody,
} from '@openshift-console/dynamic-plugin-sdk-internal';
import { ByteDataTypes } from '@openshift-console/dynamic-plugin-sdk/lib/api/internal-types';
+import { useParams } from 'react-router-dom-v5-compat';
import {
Card,
CardActions,
@@ -18,16 +20,25 @@ import {
StorageDashboardQuery,
INDEPENDENT_UTILIZATION_QUERIES,
} from '../../queries';
+import { ODFSystemParams } from '../../types';
export const UtilizationContent: React.FC = () => {
const { t } = useCustomTranslation();
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+
+ // names of created StorageClasses are prefixed by the StorageCluster name
+ const storageClassNamePrefix = systemFlags[clusterNs]?.ocsClusterName;
+
return (
{
{
- const { odfNamespace } = useODFNamespaceSelector();
+ const { namespace: clusterNs } = useParams();
const [pvcs, pvcLoaded] =
useK8sWatchResource(pvcResource);
@@ -81,8 +85,8 @@ const RecentEvent: React.FC = () => {
const memoizedPVCNames = useDeepCompareMemoize(validPVC, true);
const ocsEventsFilter = React.useCallback(
- () => isPersistentStorageEvent(memoizedPVCNames, odfNamespace),
- [memoizedPVCNames, odfNamespace]
+ () => isPersistentStorageEvent(memoizedPVCNames, clusterNs),
+ [memoizedPVCNames, clusterNs]
);
const eventObject = {
@@ -98,29 +102,39 @@ const RecentEvent: React.FC = () => {
export const subscriptionResource = {
isList: true,
kind: referenceForModel(SubscriptionModel),
- namespaced: false,
};
export const storageClusterResource = {
isList: true,
kind: referenceForModel(StorageClusterModel),
- namespaced: false,
};
const OngoingActivity = () => {
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const managedByOCS = systemFlags[clusterNs]?.ocsClusterName;
+
const [subscriptions, subLoaded] =
useK8sWatchResource(subscriptionResource);
- const [cluster, clusterLoaded] = useK8sWatchResource(storageClusterResource);
+ const [clusters, clusterLoaded] = useK8sWatchResource(
+ storageClusterResource
+ );
const [resiliencyMetric, , metricsLoading] = useCustomPrometheusPoll({
- query: DATA_RESILIENCY_QUERY[StorageDashboardQuery.RESILIENCY_PROGRESS],
+ query:
+ DATA_RESILIENCY_QUERY(managedByOCS)[
+ StorageDashboardQuery.RESILIENCY_PROGRESS
+ ],
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
});
const ocsSubscription: SubscriptionKind = getOCSSubscription(subscriptions);
- const ocsCluster: K8sResourceKind = cluster?.[0];
+ const ocsCluster: K8sResourceKind = getStorageClusterInNs(
+ clusters,
+ clusterNs
+ );
const prometheusActivities = [];
const resourceActivities = [];
diff --git a/packages/ocs/dashboards/persistent-internal/capacity-breakdown-card/capacity-breakdown-card.tsx b/packages/ocs/dashboards/persistent-internal/capacity-breakdown-card/capacity-breakdown-card.tsx
index 28652125a..8df4bb16e 100644
--- a/packages/ocs/dashboards/persistent-internal/capacity-breakdown-card/capacity-breakdown-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/capacity-breakdown-card/capacity-breakdown-card.tsx
@@ -1,5 +1,8 @@
import * as React from 'react';
-import { useODFNamespaceSelector } from '@odf/core/redux';
+import {
+ useODFNamespaceSelector,
+ useODFSystemFlagsSelector,
+} from '@odf/core/redux';
import { namespaceResource } from '@odf/core/resources';
import { BreakdownCardBody } from '@odf/shared/dashboards/breakdown-card/breakdown-body';
import { getSelectOptions } from '@odf/shared/dashboards/breakdown-card/breakdown-dropdown';
@@ -22,6 +25,7 @@ import {
sortInstantVectorStats,
} from '@odf/shared/utils';
import { K8sResourceCommon } from '@openshift-console/dynamic-plugin-sdk';
+import { useParams } from 'react-router-dom-v5-compat';
import {
Select,
SelectProps,
@@ -35,6 +39,7 @@ import {
CEPH_CAPACITY_BREAKDOWN_QUERIES,
StorageDashboardQuery,
} from '../../../queries/ceph-storage';
+import { ODFSystemParams } from '../../../types';
import { getStackChartStats } from '../../../utils/metrics';
import './capacity-breakdown-card.scss';
@@ -130,9 +135,19 @@ const BreakdownCard: React.FC = () => {
const [isOpenBreakdownSelect, setBreakdownSelect] = React.useState(false);
const [pvcNamespace, setPVCNamespace] = React.useState('');
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+
+ // names of the created StorageClasses are prefixed by the StorageCluster name,
+ // which is also the value of the "managedBy" label in the metrics.
+ const ocsCluster = systemFlags[clusterNs]?.ocsClusterName;
+
const { queries, model, metric } = getBreakdownMetricsQuery(
metricType,
- pvcNamespace
+ ocsCluster,
+ pvcNamespace,
+ false,
+ ocsCluster
);
const { odfNamespace } = useODFNamespaceSelector();
@@ -150,8 +165,9 @@ const BreakdownCard: React.FC = () => {
basePath: usePrometheusBasePath(),
});
const [cephUsedMetric, cephError, cephLoading] = useCustomPrometheusPoll({
- query:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.CEPH_CAPACITY_USED],
+ query: CEPH_CAPACITY_BREAKDOWN_QUERIES(ocsCluster, ocsCluster)[
+ StorageDashboardQuery.CEPH_CAPACITY_USED
+ ],
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
});
diff --git a/packages/ocs/dashboards/persistent-internal/details-card.tsx b/packages/ocs/dashboards/persistent-internal/details-card.tsx
index 79a6b2cc1..59f6d9ca0 100644
--- a/packages/ocs/dashboards/persistent-internal/details-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/details-card.tsx
@@ -1,7 +1,10 @@
import * as React from 'react';
-import { useSafeK8sList } from '@odf/core/hooks';
import { useODFNamespaceSelector } from '@odf/core/redux';
import { getOperatorVersion } from '@odf/core/utils';
+import {
+ getStorageClusterInNs,
+ getResourceInNs as getCephClusterInNs,
+} from '@odf/core/utils';
import { OSDMigrationDetails } from '@odf/ocs/modals/osd-migration/osd-migration-details';
import { ODF_OPERATOR } from '@odf/shared/constants';
import { useK8sGet } from '@odf/shared/hooks/k8s-get-hook';
@@ -21,42 +24,57 @@ import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import {
getInfrastructurePlatform,
resourcePathFromModel,
+ referenceForModel,
} from '@odf/shared/utils';
+import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import { DetailsBody } from '@openshift-console/dynamic-plugin-sdk-internal';
import { OverviewDetailItem as DetailItem } from '@openshift-console/plugin-shared';
-import { Link } from 'react-router-dom-v5-compat';
+import { Link, useParams } from 'react-router-dom-v5-compat';
import { Card, CardBody, CardHeader, CardTitle } from '@patternfly/react-core';
import { StorageClusterModel } from '../../models';
+import { ODFSystemParams } from '../../types';
import { getNetworkEncryption } from '../../utils';
+const storageClusterResource = {
+ kind: referenceForModel(StorageClusterModel),
+ isList: true,
+};
+
+const cephClusterResource = {
+ kind: referenceForModel(CephClusterModel),
+ isList: true,
+};
+
const DetailsCard: React.FC = () => {
const { t } = useCustomTranslation();
+ const { namespace: ocsNs } = useParams();
const { odfNamespace, isNsSafe } = useODFNamespaceSelector();
const [infrastructure, infrastructureLoaded, infrastructureError] =
useK8sGet(InfrastructureModel, 'cluster');
- const [ocsData, ocsLoaded, ocsError] = useSafeK8sList(
- StorageClusterModel,
- odfNamespace
- );
- const [cephData, cephLoaded, cephLoadError] = useSafeK8sList(
- CephClusterModel,
- odfNamespace
- );
+ const [cephData, cephLoaded, cephLoadError] =
+ useK8sWatchResource(cephClusterResource);
+
+ const [ocsData, ocsLoaded, ocsError] = useK8sWatchResource<
+ StorageClusterKind[]
+ >(storageClusterResource);
const [csv, csvLoaded, csvError] = useFetchCsv({
specName: ODF_OPERATOR,
namespace: odfNamespace,
startPollingInstantly: isNsSafe,
});
+
const infrastructurePlatform = getInfrastructurePlatform(infrastructure);
- const cluster: StorageClusterKind = ocsData?.find(
- (item: StorageClusterKind) => item.status.phase !== 'Ignored'
+ const storageCluster: StorageClusterKind = getStorageClusterInNs(
+ ocsData,
+ ocsNs
);
- const ocsName = getName(cluster);
- const inTransitEncryptionStatus = getNetworkEncryption(cluster)
+ const cephCluster: CephClusterKind = getCephClusterInNs(cephData, ocsNs);
+ const ocsName = getName(storageCluster);
+ const inTransitEncryptionStatus = getNetworkEncryption(storageCluster)
? t('Enabled')
: t('Disabled');
@@ -123,10 +141,10 @@ const DetailsCard: React.FC = () => {
error={cephLoadError as any}
>
diff --git a/packages/ocs/dashboards/persistent-internal/inventory-card.tsx b/packages/ocs/dashboards/persistent-internal/inventory-card.tsx
index aa8d8882e..7534b1183 100644
--- a/packages/ocs/dashboards/persistent-internal/inventory-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/inventory-card.tsx
@@ -1,6 +1,5 @@
import * as React from 'react';
import { cephStorageLabel } from '@odf/core/constants';
-import { useODFNamespaceSelector } from '@odf/core/redux';
import {
NodeModel,
PersistentVolumeClaimModel,
@@ -18,7 +17,9 @@ import { NodeKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { useK8sWatchResources } from '@openshift-console/dynamic-plugin-sdk';
import { ResourceInventoryItem } from '@openshift-console/dynamic-plugin-sdk-internal';
+import { useParams } from 'react-router-dom-v5-compat';
import { Card, CardBody, CardHeader, CardTitle } from '@patternfly/react-core';
+import { ODFSystemParams } from '../../types';
import {
getCephNodes,
getCephPVCs,
@@ -47,7 +48,7 @@ const watchResources = {
const InventoryCard: React.FC = () => {
const { t } = useCustomTranslation();
- const { odfNamespace } = useODFNamespaceSelector();
+ const { namespace: clusterNs } = useParams();
const resources = useK8sWatchResources(watchResources);
@@ -79,7 +80,7 @@ const InventoryCard: React.FC = () => {
isLoading={!nodesLoaded}
error={!!nodesLoadError}
kind={NodeModel as any}
- resources={getCephNodes(nodesData, odfNamespace)}
+ resources={getCephNodes(nodesData, clusterNs)}
mapper={getNodeStatusGroups}
basePath={ocsNodesHref}
/>
diff --git a/packages/ocs/dashboards/persistent-internal/raw-capacity-card/raw-capacity-card.tsx b/packages/ocs/dashboards/persistent-internal/raw-capacity-card/raw-capacity-card.tsx
index 02e753da5..141c9d0d1 100644
--- a/packages/ocs/dashboards/persistent-internal/raw-capacity-card/raw-capacity-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/raw-capacity-card/raw-capacity-card.tsx
@@ -1,15 +1,18 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import {
useCustomPrometheusPoll,
usePrometheusBasePath,
} from '@odf/shared/hooks/custom-prometheus-poll';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { getInstantVectorStats } from '@odf/shared/utils';
+import { useParams } from 'react-router-dom-v5-compat';
import { compose } from 'redux';
import {
CAPACITY_INFO_QUERIES,
StorageDashboardQuery,
} from '../../../queries/ceph-storage';
+import { ODFSystemParams } from '../../../types';
import {
CapacityCard,
CapacityCardProps,
@@ -21,13 +24,23 @@ const parser = compose((val) => val?.[0]?.y, getInstantVectorStats);
const RawCapacityCard: React.FC = () => {
const { t } = useCustomTranslation();
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const managedByOCS = systemFlags[clusterNs]?.ocsClusterName;
+
const [totalCapacity, totalError, totalLoading] = useCustomPrometheusPoll({
- query: CAPACITY_INFO_QUERIES[StorageDashboardQuery.RAW_CAPACITY_TOTAL],
+ query:
+ CAPACITY_INFO_QUERIES(managedByOCS)[
+ StorageDashboardQuery.RAW_CAPACITY_TOTAL
+ ],
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
});
const [usedCapacity, usedError, usedLoading] = useCustomPrometheusPoll({
- query: CAPACITY_INFO_QUERIES[StorageDashboardQuery.RAW_CAPACITY_USED],
+ query:
+ CAPACITY_INFO_QUERIES(managedByOCS)[
+ StorageDashboardQuery.RAW_CAPACITY_USED
+ ],
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
});
diff --git a/packages/ocs/dashboards/persistent-internal/status-card/status-card.tsx b/packages/ocs/dashboards/persistent-internal/status-card/status-card.tsx
index 4f2533583..c9c4e573a 100644
--- a/packages/ocs/dashboards/persistent-internal/status-card/status-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/status-card/status-card.tsx
@@ -1,4 +1,7 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
+import { getResourceInNs as getCephClusterInNs } from '@odf/core/utils';
+import { getCephHealthState } from '@odf/ocs/utils';
import { healthStateMapping } from '@odf/shared/dashboards/status-card/states';
import {
useCustomPrometheusPoll,
@@ -22,6 +25,7 @@ import {
} from '@openshift-console/dynamic-plugin-sdk-internal';
import { SubsystemHealth } from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';
import * as _ from 'lodash-es';
+import { useParams } from 'react-router-dom-v5-compat';
import {
Gallery,
GalleryItem,
@@ -33,13 +37,16 @@ import {
} from '@patternfly/react-core';
import { CephClusterModel } from '../../../models';
import { DATA_RESILIENCY_QUERY, StorageDashboardQuery } from '../../../queries';
+import { ODFSystemParams } from '../../../types';
import { OSDMigrationProgress } from './osd-migration/osd-migration-progress';
-import { getCephHealthState, getDataResiliencyState } from './utils';
+import { getDataResiliencyState } from './utils';
import { whitelistedHealthChecksRef } from './whitelisted-health-checks';
import './healthchecks.scss';
-const resiliencyProgressQuery =
- DATA_RESILIENCY_QUERY[StorageDashboardQuery.RESILIENCY_PROGRESS];
+const resiliencyProgressQuery = (managedByOCS: string) =>
+ DATA_RESILIENCY_QUERY(managedByOCS)[
+ StorageDashboardQuery.RESILIENCY_PROGRESS
+ ];
const generateDocumentationLink = (alert: Alert): string => {
return `https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html-single/troubleshooting_openshift_data_foundation/index#${_.toLower(
@@ -60,6 +67,8 @@ const getDocumentationLink = (alert: Alert): string => {
export const CephAlerts: React.FC = () => {
const [alerts, loaded, error] = useAlerts();
+ // ToDo (epic 4422): Get StorageCluster name and namespace from the Alert object
+ // and filter Alerts based on that for a particular cluster.
const filteredAlerts =
loaded && !error && !_.isEmpty(alerts) ? filterCephAlerts(alerts) : [];
@@ -110,9 +119,8 @@ const CephHealthCheck: React.FC = ({
);
};
-export const cephClusterResource = {
+const cephClusterResource = {
kind: referenceForModel(CephClusterModel),
- namespaced: false,
isList: true,
};
@@ -121,16 +129,22 @@ export const StatusCard: React.FC = () => {
const [data, loaded, loadError] =
useK8sWatchResource(cephClusterResource);
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const managedByOCS = systemFlags[clusterNs]?.ocsClusterName;
+
const [resiliencyProgress, resiliencyProgressError] = useCustomPrometheusPoll(
{
- query: resiliencyProgressQuery,
+ query: resiliencyProgressQuery(managedByOCS),
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
}
);
+ const cephCluster = getCephClusterInNs(data, clusterNs);
+
const cephHealthState = getCephHealthState(
- { ceph: { data, loaded, loadError } },
+ { ceph: { data: cephCluster, loaded, loadError } },
t
);
const dataResiliencyState = getDataResiliencyState(
@@ -140,7 +154,7 @@ export const StatusCard: React.FC = () => {
const pattern = /[A-Z]+_*|error/g;
const healthChecks: CephHealthCheckType[] = [];
- const cephDetails = data?.[0]?.status?.ceph?.details;
+ const cephDetails = cephCluster?.status?.ceph?.details;
for (const key in cephDetails) {
if (pattern.test(key)) {
const healthCheckObject: CephHealthCheckType = {
@@ -185,7 +199,7 @@ export const StatusCard: React.FC = () => {
diff --git a/packages/ocs/dashboards/persistent-internal/status-card/utils.ts b/packages/ocs/dashboards/persistent-internal/status-card/utils.ts
index 31df14ed1..9718d0bca 100644
--- a/packages/ocs/dashboards/persistent-internal/status-card/utils.ts
+++ b/packages/ocs/dashboards/persistent-internal/status-card/utils.ts
@@ -1,52 +1,6 @@
import { getResiliencyProgress } from '@odf/shared/utils';
import { HealthState } from '@openshift-console/dynamic-plugin-sdk';
-import {
- PrometheusHealthHandler,
- ResourceHealthHandler,
- SubsystemHealth,
-} from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';
-import { TFunction } from 'i18next';
-import { WatchCephResource } from '../../../utils';
-
-const CephHealthStatus = (status: string, t: TFunction): SubsystemHealth => {
- switch (status) {
- case 'HEALTH_OK':
- return {
- state: HealthState.OK,
- };
- case 'HEALTH_WARN':
- return {
- state: HealthState.WARNING,
- message: t('Warning'),
- };
- case 'HEALTH_ERR':
- return {
- state: HealthState.ERROR,
- message: t('Error'),
- };
- default:
- return { state: HealthState.UNKNOWN };
- }
-};
-
-export const getCephHealthState: ResourceHealthHandler = (
- { ceph },
- t
-) => {
- const { data, loaded, loadError } = ceph;
- const status = data?.[0]?.status?.ceph?.health;
-
- if (loadError) {
- return { state: HealthState.NOT_AVAILABLE };
- }
- if (!loaded) {
- return { state: HealthState.LOADING };
- }
- if (data.length === 0) {
- return { state: HealthState.NOT_AVAILABLE };
- }
- return CephHealthStatus(status, t);
-};
+import { PrometheusHealthHandler } from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';
export const getDataResiliencyState: PrometheusHealthHandler = (
responses,
diff --git a/packages/ocs/dashboards/persistent-internal/storage-efficiency-card/storage-efficiency-card.tsx b/packages/ocs/dashboards/persistent-internal/storage-efficiency-card/storage-efficiency-card.tsx
index 3e53794cd..063af8492 100644
--- a/packages/ocs/dashboards/persistent-internal/storage-efficiency-card/storage-efficiency-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/storage-efficiency-card/storage-efficiency-card.tsx
@@ -1,4 +1,5 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import { EfficiencyItemBody } from '@odf/shared/dashboards/storage-efficiency/storage-efficiency-card-item';
import {
useCustomPrometheusPoll,
@@ -6,19 +7,25 @@ import {
} from '@odf/shared/hooks/custom-prometheus-poll';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { getGaugeValue, humanizeBinaryBytes } from '@odf/shared/utils';
+import { useParams } from 'react-router-dom-v5-compat';
import { Card, CardBody, CardHeader, CardTitle } from '@patternfly/react-core';
import {
POOL_STORAGE_EFFICIENCY_QUERIES,
StorageDashboardQuery,
} from '../../../queries';
+import { ODFSystemParams } from '../../../types';
export const StorageEfficiencyContent: React.FC = () => {
const { t } = useCustomTranslation();
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const managedByOCS = systemFlags[clusterNs]?.ocsClusterName;
+
const [poolCapacityRatioResult, poolCapacityRatioResultError] =
useCustomPrometheusPoll({
query:
- POOL_STORAGE_EFFICIENCY_QUERIES[
+ POOL_STORAGE_EFFICIENCY_QUERIES(managedByOCS)[
StorageDashboardQuery.POOL_CAPACITY_RATIO
],
endpoint: 'api/v1/query' as any,
@@ -27,7 +34,7 @@ export const StorageEfficiencyContent: React.FC = () => {
const [poolSavedResult, poolSavedResultError] = useCustomPrometheusPoll({
query:
- POOL_STORAGE_EFFICIENCY_QUERIES[
+ POOL_STORAGE_EFFICIENCY_QUERIES(managedByOCS)[
StorageDashboardQuery.POOL_SAVED_CAPACITY
],
endpoint: 'api/v1/query' as any,
diff --git a/packages/ocs/dashboards/persistent-internal/utilization-card/utilization-card.tsx b/packages/ocs/dashboards/persistent-internal/utilization-card/utilization-card.tsx
index e61acbde0..b1373ebd6 100644
--- a/packages/ocs/dashboards/persistent-internal/utilization-card/utilization-card.tsx
+++ b/packages/ocs/dashboards/persistent-internal/utilization-card/utilization-card.tsx
@@ -1,5 +1,6 @@
// TODO (@rexagod): https://github.com/openshift/console/pull/10470#discussion_r766453369
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import ConsumerPopover from '@odf/shared/dashboards/utilization-card/ConsumerPopover';
import { PrometheusMultilineUtilizationItem } from '@odf/shared/dashboards/utilization-card/prometheus-multi-utilization-item';
import { PrometheusUtilizationItem } from '@odf/shared/dashboards/utilization-card/prometheus-utilization-item';
@@ -11,6 +12,7 @@ import {
} from '@odf/shared/utils';
import { UtilizationDurationDropdown } from '@openshift-console/dynamic-plugin-sdk-internal';
import { ByteDataTypes } from '@openshift-console/dynamic-plugin-sdk/lib/api/internal-types';
+import { useParams } from 'react-router-dom-v5-compat';
import {
Card,
CardActions,
@@ -23,21 +25,29 @@ import {
utilizationPopoverQueryMap,
UTILIZATION_QUERY,
} from '../../../queries';
+import { ODFSystemParams } from '../../../types';
import { humanizeIOPS, humanizeLatency } from './utils';
import '@odf/shared/dashboards/utilization-card/utilization-card.scss';
export const UtilizationContent: React.FC = () => {
const { t } = useCustomTranslation();
+
+ const { namespace: clusterNs } = useParams();
+ const { systemFlags } = useODFSystemFlagsSelector();
+ // names of the created StorageClasses are prefixed by the StorageCluster name,
+ // it is also the value of the "managedBy" label in the metrics.
+ const ocsCluster = systemFlags[clusterNs]?.ocsClusterName;
+
const storagePopover = React.useCallback(
({ current }) => (
),
- [t]
+ [t, ocsCluster]
);
return (
@@ -45,7 +55,9 @@ export const UtilizationContent: React.FC = () => {
{
{
{
{
{
+export const useGetOCSHealth: UseGetOCSHealth = (systems) => {
const { t } = useCustomTranslation();
+
const [cephData, cephLoaded, cephLoadError] =
useK8sWatchResource(cephClusterResource);
const [cephObjData, cephObjLoaded, cephObjLoadError] = useK8sWatchResource<
@@ -65,78 +68,139 @@ export const useGetOCSHealth = () => {
basePath: usePrometheusBasePath(),
});
- const cephHealthState = getCephHealthState(
- { ceph: { data: cephData, loaded: cephLoaded, loadError: cephLoadError } },
- t
- ).state;
- const interimRGWState =
- !cephObjLoadError && cephObjLoaded
- ? getRGWHealthState(cephObjData[0]).state
- : NA;
-
- const interimMCGState = getNooBaaState(
- [
- {
- response: noobaaHealthStatus,
- error: noobaaQueryLoadError,
- },
- ],
+ return React.useMemo(() => {
+ let unifiedHealthStates: UnifiedHealthStates = {};
+ systems?.forEach((system: StorageSystemKind) => {
+ if (isOCSStorageSystem(system)) {
+ const systemName = getName(system);
+ const systemNamespace = getNamespace(system);
+
+ const cephCluster = cephData?.find(
+ (ceph) => getNamespace(ceph) === systemNamespace
+ );
+ const cephObjectStore = cephObjData?.find(
+ (cephObj) => getNamespace(cephObj) === systemNamespace
+ );
+ const noobaaCluster = noobaaData?.find(
+ (noobaa) => getNamespace(noobaa) === systemNamespace
+ );
+
+ const cephHealthState = getCephHealthState(
+ {
+ ceph: {
+ data: cephCluster,
+ loaded: cephLoaded,
+ loadError: cephLoadError,
+ },
+ },
+ t
+ ).state;
+
+ const interimRGWState =
+ !cephObjLoadError && cephObjLoaded
+ ? getRGWHealthState(cephObjectStore).state
+ : NA;
+
+ // there will only be a single NooBaa instance (even for multiple StorageSystems)
+ // and its status should only be linked with the corresponding StorageSystem/StorageCluster.
+ const interimMCGState = !_.isEmpty(noobaaCluster)
+ ? getNooBaaState(
+ [
+ {
+ response: noobaaHealthStatus,
+ error: noobaaQueryLoadError,
+ },
+ ],
+ t,
+ {
+ loaded: noobaaLoaded,
+ loadError: noobaaLoadError,
+ data: noobaaData,
+ }
+ ).state
+ : NA;
+
+ const mcgState = AcceptableHealthStates.includes(interimMCGState)
+ ? HealthState.OK
+ : HealthState.ERROR;
+
+ const rgwState = AcceptableHealthStates.includes(interimRGWState)
+ ? HealthState.OK
+ : HealthState.ERROR;
+
+ const cephStorageHealthStatus = AcceptableHealthStates.includes(
+ cephHealthState
+ )
+ ? HealthState.OK
+ : HealthState.ERROR;
+
+ const unifiedObjectHealth =
+ mcgState === HealthState.ERROR || rgwState === HealthState.ERROR
+ ? HealthState.ERROR
+ : HealthState.OK;
+
+ let unifiedHealthState: UnifiedHealthState;
+ if (
+ unifiedObjectHealth === HealthState.ERROR &&
+ cephStorageHealthStatus === HealthState.ERROR
+ ) {
+ unifiedHealthState = {
+ rawHealthState: '2',
+ errorMessages: [
+ t('Block and File service is unhealthy'),
+ t('Object service is unhealthy'),
+ ],
+ };
+ } else if (unifiedObjectHealth === HealthState.ERROR) {
+ unifiedHealthState = {
+ rawHealthState: '1',
+ errorMessages: [t('Object service is unhealthy')],
+ errorComponent:
+ rgwState !== HealthState.OK ? 'block-file' : 'object',
+ };
+ } else if (cephStorageHealthStatus === HealthState.ERROR) {
+ unifiedHealthState = {
+ rawHealthState: '1',
+ errorMessages: [t('Block and File service is unhealthy')],
+ errorComponent: 'block-file',
+ };
+ } else {
+ unifiedHealthState = {
+ rawHealthState: '0',
+ };
+ }
+
+ unifiedHealthStates[`${systemName}${systemNamespace}`] =
+ unifiedHealthState;
+ }
+ });
+
+ return unifiedHealthStates;
+ }, [
+ systems,
+ cephData,
+ cephLoaded,
+ cephLoadError,
+ cephObjData,
+ cephObjLoaded,
+ cephObjLoadError,
+ noobaaData,
+ noobaaLoaded,
+ noobaaLoadError,
+ noobaaHealthStatus,
+ noobaaQueryLoadError,
t,
- {
- loaded: noobaaLoaded,
- loadError: noobaaLoadError,
- data: noobaaData,
- }
- ).state;
-
- const mcgState = AcceptableHealthStates.includes(interimMCGState)
- ? HealthState.OK
- : HealthState.ERROR;
-
- const rgwState = AcceptableHealthStates.includes(interimRGWState)
- ? HealthState.OK
- : HealthState.ERROR;
-
- const cephStorageHealthStatus = AcceptableHealthStates.includes(
- cephHealthState
- )
- ? HealthState.OK
- : HealthState.ERROR;
-
- const unifiedObjectHealth =
- mcgState === HealthState.ERROR || rgwState === HealthState.ERROR
- ? HealthState.ERROR
- : HealthState.OK;
-
- const unifiedHealthState = React.useMemo(() => {
- if (
- unifiedObjectHealth === HealthState.ERROR &&
- cephStorageHealthStatus === HealthState.ERROR
- ) {
- return {
- rawHealthState: '2',
- errorMessages: [
- t('Block and File service is unhealthy'),
- t('Object service is unhealthy'),
- ],
- };
- } else if (unifiedObjectHealth === HealthState.ERROR) {
- return {
- rawHealthState: '1',
- errorMessages: [t('Object service is unhealthy')],
- errorComponent: rgwState !== HealthState.OK ? 'block-file' : 'object',
- };
- } else if (cephStorageHealthStatus === HealthState.ERROR) {
- return {
- rawHealthState: '1',
- errorMessages: [t('Block and File service is unhealthy')],
- errorComponent: 'block-file',
- };
- }
- return {
- rawHealthState: '0',
- };
- }, [unifiedObjectHealth, cephStorageHealthStatus, rgwState, t]);
-
- return unifiedHealthState;
+ ]);
+};
+
+type UnifiedHealthState = {
+ rawHealthState: string;
+ errorMessages?: string[];
+ errorComponent?: string;
+};
+
+type UnifiedHealthStates = {
+ [systemNameAndNamespace: string]: UnifiedHealthState;
};
+
+type UseGetOCSHealth = (systems: StorageSystemKind[]) => UnifiedHealthStates;
diff --git a/packages/ocs/modals/block-pool/create-block-pool-modal.tsx b/packages/ocs/modals/block-pool/create-block-pool-modal.tsx
index 627623274..073e0c1ef 100644
--- a/packages/ocs/modals/block-pool/create-block-pool-modal.tsx
+++ b/packages/ocs/modals/block-pool/create-block-pool-modal.tsx
@@ -1,7 +1,4 @@
import * as React from 'react';
-import { useSafeK8sWatchResource } from '@odf/core/hooks';
-import { useODFNamespaceSelector } from '@odf/core/redux';
-import { K8sResourceObj } from '@odf/core/types';
import { ONE_SECOND } from '@odf/shared/constants';
import { ModalTitle, ModalFooter } from '@odf/shared/generic/ModalTitle';
import {
@@ -9,10 +6,18 @@ import {
withHandlePromise,
} from '@odf/shared/generic/promise-component';
import { ModalBody } from '@odf/shared/modals/Modal';
+import { getNamespace } from '@odf/shared/selectors';
import { CephClusterKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
-import { referenceForModel } from '@odf/shared/utils';
-import { k8sCreate } from '@openshift-console/dynamic-plugin-sdk';
+import {
+ referenceForModel,
+ getValidWatchK8sResourceObj,
+} from '@odf/shared/utils';
+import {
+ k8sCreate,
+ useK8sWatchResource,
+ WatchK8sResource,
+} from '@openshift-console/dynamic-plugin-sdk';
import { ModalComponent } from '@openshift-console/dynamic-plugin-sdk/lib/app/modal-support/ModalProvider';
import { Modal } from '@patternfly/react-core';
import { BlockPoolStatus, BlockPoolBody } from '../../block-pool/body';
@@ -28,12 +33,20 @@ import { StoragePoolKind } from '../../types';
import { BlockPoolModalFooter, FooterPrimaryActions } from './modal-footer';
import './create-block-pool-modal.scss';
+const poolResource = (poolName: string, ns: string): WatchK8sResource => ({
+ kind: referenceForModel(CephBlockPoolModel),
+ namespaced: true,
+ isList: false,
+ name: poolName,
+ namespace: ns,
+});
+
export const CreateBlockPoolModal = withHandlePromise(
(props: CreateBlockPoolModalProps) => {
- const { cephClusters, onPoolCreation, handlePromise, errorMessage } = props;
+ const { cephCluster, onPoolCreation, handlePromise, errorMessage } = props;
const { t } = useCustomTranslation();
- const { odfNamespace } = useODFNamespaceSelector();
+ const poolNs = getNamespace(cephCluster);
const [state, dispatch] = React.useReducer(
blockPoolReducer,
@@ -42,25 +55,21 @@ export const CreateBlockPoolModal = withHandlePromise(
const [isSubmit, setIsSubmit] = React.useState(false);
const [timer, setTimer] = React.useState(null);
+ const poolName = state.poolName;
+
const MODAL_DESC = t(
'A BlockPool is a logical entity providing elastic capacity to applications and workloads. Pools provide a means of supporting policies for access data resilience and storage efficiency.'
);
const MODAL_TITLE = t('Create BlockPool');
// Watch newly created pool after submit
- const poolResource: K8sResourceObj = React.useCallback(
- (ns) => ({
- kind: referenceForModel(CephBlockPoolModel),
- namespaced: true,
- isList: false,
- name: state.poolName,
- namespace: ns,
- }),
- [state.poolName]
- );
-
const [newPool, newPoolLoaded, newPoolLoadError] =
- useSafeK8sWatchResource(poolResource);
+ useK8sWatchResource(
+ getValidWatchK8sResourceObj(
+ poolResource(poolName, poolNs),
+ isSubmit && !!poolName
+ )
+ );
React.useEffect(() => {
if (isSubmit) {
@@ -75,7 +84,7 @@ export const CreateBlockPoolModal = withHandlePromise(
});
setIsSubmit(false);
clearTimeout(timer);
- onPoolCreation(state.poolName);
+ onPoolCreation(poolName);
} else if (
newPoolLoaded &&
(newPool?.status?.phase === POOL_STATE.RECONCILE_FAILED ||
@@ -106,7 +115,7 @@ export const CreateBlockPoolModal = withHandlePromise(
newPoolLoadError,
newPoolLoaded,
onPoolCreation,
- state.poolName,
+ poolName,
timer,
]);
@@ -117,7 +126,7 @@ export const CreateBlockPoolModal = withHandlePromise(
type: BlockPoolActionType.SET_POOL_STATUS,
payload: POOL_PROGRESS.PROGRESS,
});
- const poolObj: StoragePoolKind = getPoolKindObj(state, odfNamespace);
+ const poolObj: StoragePoolKind = getPoolKindObj(state, poolNs);
handlePromise(
k8sCreate({ model: CephBlockPoolModel, data: poolObj }),
@@ -157,13 +166,13 @@ export const CreateBlockPoolModal = withHandlePromise(
) : (
void;
} & React.ComponentProps &
HandlePromiseProps;
diff --git a/packages/ocs/modals/block-pool/delete-block-pool-modal.tsx b/packages/ocs/modals/block-pool/delete-block-pool-modal.tsx
index f5d50c365..8481da223 100644
--- a/packages/ocs/modals/block-pool/delete-block-pool-modal.tsx
+++ b/packages/ocs/modals/block-pool/delete-block-pool-modal.tsx
@@ -1,7 +1,7 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import { ModalFooter } from '@odf/shared/generic/ModalTitle';
import { StatusBox } from '@odf/shared/generic/status-box';
-import { useDeepCompareMemoize } from '@odf/shared/hooks/deep-compare-memoize';
import { useK8sGet } from '@odf/shared/hooks/k8s-get-hook';
import {
CommonModalProps,
@@ -12,18 +12,16 @@ import {
PersistentVolumeClaimModel,
StorageClassModel,
} from '@odf/shared/models';
+import { getName, getNamespace } from '@odf/shared/selectors';
import {
ListKind,
PersistentVolumeClaimKind,
StorageClassResourceKind,
- CephClusterKind,
} from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
-import { referenceForModel } from '@odf/shared/utils';
import {
k8sDelete,
K8sKind,
- useK8sWatchResource,
YellowExclamationTriangleIcon,
} from '@openshift-console/dynamic-plugin-sdk';
import { Trans } from 'react-i18next';
@@ -36,18 +34,12 @@ import {
blockPoolInitialState,
blockPoolReducer,
} from '../../block-pool/reducer';
-import { CEPH_EXTERNAL_CR_NAME, POOL_PROGRESS } from '../../constants';
-import { CephBlockPoolModel, CephClusterModel } from '../../models';
+import { POOL_PROGRESS } from '../../constants';
+import { CephBlockPoolModel } from '../../models';
import { StoragePoolKind } from '../../types';
import { getStorageClassName } from '../../utils/common';
import { BlockPoolModalFooter, FooterPrimaryActions } from './modal-footer';
-const cephClusterResource = {
- kind: referenceForModel(CephClusterModel),
- namespaced: false,
- isList: true,
-};
-
export const toList = (text: string[]): React.ReactNode => (
= (props) => {
isOpen,
closeModal,
} = props;
- const poolName = resource?.metadata.name;
+ const poolName = getName(resource);
+ const poolNamespace = getNamespace(resource);
const [state, dispatch] = React.useReducer(
blockPoolReducer,
@@ -77,22 +70,18 @@ const DeleteBlockPoolModal: React.FC
= (props) => {
const [scNames, setScNames] = React.useState();
const [inProgress, setProgress] = React.useState(false);
- const [cephClusters, isLoaded, loadError] =
- useK8sWatchResource(cephClusterResource);
const [scResources, scLoaded, scLoadError] =
useK8sGet>(StorageClassModel);
const [pvcResources, pvcLoaded, pvcLoadError] = useK8sGet<
ListKind
>(PersistentVolumeClaimModel);
- const cephCluster: CephClusterKind = useDeepCompareMemoize(
- cephClusters[0],
- true
- );
+
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const isExternalSC = systemFlags[poolNamespace]?.isExternalMode;
React.useEffect(() => {
// restrict pool management for default pool and external cluster
- cephCluster?.metadata.name === CEPH_EXTERNAL_CR_NAME ||
- isDefaultPool(resource)
+ isExternalSC || isDefaultPool(resource)
? dispatch({
type: BlockPoolActionType.SET_POOL_STATUS,
payload: POOL_PROGRESS.NOTALLOWED,
@@ -101,7 +90,7 @@ const DeleteBlockPoolModal: React.FC = (props) => {
type: BlockPoolActionType.SET_POOL_NAME,
payload: poolName,
});
- }, [resource, cephCluster, isLoaded, loadError, poolName]);
+ }, [resource, isExternalSC, poolName]);
React.useEffect(() => {
if (
@@ -172,10 +161,7 @@ const DeleteBlockPoolModal: React.FC = (props) => {
variant={ModalVariant.small}
onClose={closeModal}
>
- {isLoaded &&
- pvcLoaded &&
- scLoaded &&
- !(loadError && pvcLoadError && scLoadError) ? (
+ {pvcLoaded && scLoaded && !(pvcLoadError && scLoadError) ? (
<>
{state.poolStatus === POOL_PROGRESS.NOTALLOWED ? (
@@ -221,8 +207,8 @@ const DeleteBlockPoolModal: React.FC = (props) => {
>
) : (
)}
diff --git a/packages/ocs/modals/block-pool/update-block-pool-modal.tsx b/packages/ocs/modals/block-pool/update-block-pool-modal.tsx
index c3803ef75..fd4d91a83 100644
--- a/packages/ocs/modals/block-pool/update-block-pool-modal.tsx
+++ b/packages/ocs/modals/block-pool/update-block-pool-modal.tsx
@@ -1,8 +1,10 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
+import { getResourceInNs as getCephClusterInNs } from '@odf/core/utils';
import { ModalFooter } from '@odf/shared/generic/ModalTitle';
import { StatusBox } from '@odf/shared/generic/status-box';
-import { useDeepCompareMemoize } from '@odf/shared/hooks/deep-compare-memoize';
import { CommonModalProps, ModalBody } from '@odf/shared/modals/Modal';
+import { getNamespace } from '@odf/shared/selectors';
import { CephClusterKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { referenceForModel } from '@odf/shared/utils';
@@ -18,11 +20,7 @@ import {
blockPoolInitialState,
blockPoolReducer,
} from '../../block-pool/reducer';
-import {
- CEPH_EXTERNAL_CR_NAME,
- COMPRESSION_ON,
- POOL_PROGRESS,
-} from '../../constants';
+import { COMPRESSION_ON, POOL_PROGRESS } from '../../constants';
import { CephBlockPoolModel, CephClusterModel } from '../../models';
import { StoragePoolKind } from '../../types';
import { isDefaultPool } from '../../utils';
@@ -35,7 +33,6 @@ type UpdateBlockPoolModalProps = CommonModalProps<{
const cephClusterResource = {
kind: referenceForModel(CephClusterModel),
- namespaced: false,
isList: true,
};
@@ -46,6 +43,7 @@ const UpdateBlockPoolModal: React.FC = (props) => {
closeModal,
isOpen,
} = props;
+ const poolNamespace = getNamespace(resource);
const [state, dispatch] = React.useReducer(
blockPoolReducer,
@@ -54,10 +52,12 @@ const UpdateBlockPoolModal: React.FC = (props) => {
const [inProgress, setProgress] = React.useState(false);
const [cephClusters, isLoaded, loadError] =
useK8sWatchResource(cephClusterResource);
- const cephCluster: CephClusterKind = useDeepCompareMemoize(
- cephClusters[0],
- true
- );
+
+ // only a single CephCluster per namespace
+ const cephCluster = getCephClusterInNs(
+ cephClusters,
+ poolNamespace
+ ) as CephClusterKind;
const MODAL_TITLE = t('Edit BlockPool');
const MODAL_DESC = t(
@@ -82,16 +82,18 @@ const UpdateBlockPoolModal: React.FC = (props) => {
[dispatch]
);
+ const { systemFlags } = useODFSystemFlagsSelector();
+ const isExternalSC = systemFlags[poolNamespace]?.isExternalMode;
+
React.useEffect(() => {
// restrict pool management for default pool and external cluster
- cephCluster?.metadata.name === CEPH_EXTERNAL_CR_NAME ||
- isDefaultPool(resource)
+ isExternalSC || isDefaultPool(resource)
? dispatch({
type: BlockPoolActionType.SET_POOL_STATUS,
payload: POOL_PROGRESS.NOTALLOWED,
})
: populateBlockPoolData(resource);
- }, [resource, cephCluster, populateBlockPoolData]);
+ }, [resource, isExternalSC, populateBlockPoolData]);
// Update block pool
const updatePool = () => {
diff --git a/packages/ocs/queries/ceph-storage.spec.ts b/packages/ocs/queries/ceph-storage.spec.ts
index ee995b486..f46e729f5 100644
--- a/packages/ocs/queries/ceph-storage.spec.ts
+++ b/packages/ocs/queries/ceph-storage.spec.ts
@@ -14,41 +14,47 @@ describe('tests for get breakdown metrics query', () => {
it('should return correct query for PVCS', () => {
const metricType = BreakdownCardFieldsWithParams.PVCS;
const namespace = 'example-namespace';
- const result = getBreakdownMetricsQuery(metricType, namespace);
+ const scPrefix = 'test-prefix';
+ const result = getBreakdownMetricsQuery(metricType, scPrefix, namespace);
expect(result).toEqual({
model: PersistentVolumeClaimModel,
metric: 'persistentvolumeclaim',
- queries: getPVCNamespaceQuery(namespace),
+ queries: getPVCNamespaceQuery(namespace, scPrefix),
});
});
it('should return correct query for other metric types', () => {
const metricType = BreakdownCardFields.PODS;
- const result = getBreakdownMetricsQuery(metricType);
+ const scPrefix = 'test-prefix';
+ const result = getBreakdownMetricsQuery(metricType, scPrefix);
- expect(result).toEqual(breakdownQueryMapCEPH[metricType]);
+ expect(result).toEqual(
+ breakdownQueryMapCEPH(scPrefix, scPrefix)[metricType]
+ );
});
});
describe('tests for get pvc namespace query method', () => {
it('should return queries with an empty namespace', () => {
const namespace = '';
- const result = getPVCNamespaceQuery(namespace);
+ const scPrefix = 'test-prefix';
+ const result = getPVCNamespaceQuery(namespace, scPrefix);
expect(result).toEqual({
- [StorageDashboardQuery.PVC_NAMESPACES_BY_USED]: `sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))`,
- [StorageDashboardQuery.PVC_NAMESPACES_TOTAL_USED]: `sum(sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})))`,
+ [StorageDashboardQuery.PVC_NAMESPACES_BY_USED]: `sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(.*ceph.rook.io/block)",storageclass=~"${scPrefix}.*"}))`,
+ [StorageDashboardQuery.PVC_NAMESPACES_TOTAL_USED]: `sum(sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(.*ceph.rook.io/block)",storageclass=~"${scPrefix}.*"})))`,
});
});
it('should return queries with a non-empty namespace', () => {
const namespace = 'example-namespace';
- const result = getPVCNamespaceQuery(namespace);
+ const scPrefix = 'test-prefix';
+ const result = getPVCNamespaceQuery(namespace, scPrefix);
expect(result).toEqual({
- [StorageDashboardQuery.PVC_NAMESPACES_BY_USED]: `sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))`,
- [StorageDashboardQuery.PVC_NAMESPACES_TOTAL_USED]: `sum(sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})))`,
+ [StorageDashboardQuery.PVC_NAMESPACES_BY_USED]: `sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(.*ceph.rook.io/block)",storageclass=~"${scPrefix}.*"}))`,
+ [StorageDashboardQuery.PVC_NAMESPACES_TOTAL_USED]: `sum(sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(.*ceph.rook.io/block)",storageclass=~"${scPrefix}.*"})))`,
});
});
});
diff --git a/packages/ocs/queries/ceph-storage.ts b/packages/ocs/queries/ceph-storage.ts
index f3a466585..da7c39c84 100644
--- a/packages/ocs/queries/ceph-storage.ts
+++ b/packages/ocs/queries/ceph-storage.ts
@@ -50,45 +50,48 @@ export enum StorageDashboardQuery {
POOL_UTILIZATION_THROUGHPUT_QUERY = 'POOL_UTILIZATION_THROUGHPUT_QUERY',
}
-export const DATA_RESILIENCY_QUERY = {
- [StorageDashboardQuery.RESILIENCY_PROGRESS]:
- '(ceph_pg_clean and ceph_pg_active)/ceph_pg_total',
-};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
+export const DATA_RESILIENCY_QUERY = (managedByOCS: string) => ({
+ [StorageDashboardQuery.RESILIENCY_PROGRESS]: `(ceph_pg_clean{managedBy="${managedByOCS}"} and ceph_pg_active{managedBy="${managedByOCS}"})/ceph_pg_total{managedBy="${managedByOCS}"}`,
+});
-export const CEPH_CAPACITY_BREAKDOWN_QUERIES = {
- [StorageDashboardQuery.PROJECTS_TOTAL_USED]:
- 'sum(sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})) by (namespace))',
- [StorageDashboardQuery.PROJECTS_BY_USED]:
- 'sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})) by (namespace)',
- [StorageDashboardQuery.STORAGE_CLASSES_TOTAL_USED]:
- 'sum(sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})) by (storageclass, provisioner))',
- [StorageDashboardQuery.STORAGE_CLASSES_BY_USED]:
- 'sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})) by (storageclass, provisioner)',
- [StorageDashboardQuery.PODS_TOTAL_USED]:
- 'sum (((max by(namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes)) * on (namespace,persistentvolumeclaim) group_right() ((kube_running_pod_ready*0+1) * on(namespace, pod) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info)) * on(namespace,persistentvolumeclaim) group_left(provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))',
- [StorageDashboardQuery.PODS_BY_USED]:
- 'sum by(namespace,pod) (((max by(namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes)) * on (namespace,persistentvolumeclaim) group_right() ((kube_running_pod_ready*0+1) * on(namespace, pod) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info)) * on(namespace,persistentvolumeclaim) group_left(provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))',
- [StorageDashboardQuery.CEPH_CAPACITY_TOTAL]: 'ceph_cluster_total_bytes',
- [StorageDashboardQuery.CEPH_CAPACITY_USED]:
- 'sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"}))',
- [StorageDashboardQuery.CEPH_CAPACITY_AVAILABLE]:
- 'max(ceph_pool_max_avail * on (pool_id) group_left(name)ceph_pool_metadata{name=~"(.*file.*)|(.*block.*)"})',
-};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
+export const CEPH_CAPACITY_BREAKDOWN_QUERIES = (
+ storageClassNamePrefix: string,
+ managedByOCS?: string
+) => ({
+ [StorageDashboardQuery.PROJECTS_TOTAL_USED]: `sum(sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"})) by (namespace))`,
+ [StorageDashboardQuery.PROJECTS_BY_USED]: `sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"})) by (namespace)`,
+ [StorageDashboardQuery.STORAGE_CLASSES_TOTAL_USED]: `sum(sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"})) by (storageclass, provisioner))`,
+ [StorageDashboardQuery.STORAGE_CLASSES_BY_USED]: `sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"})) by (storageclass, provisioner)`,
+ [StorageDashboardQuery.PODS_TOTAL_USED]: `sum (((max by(namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes)) * on (namespace,persistentvolumeclaim) group_right() ((kube_running_pod_ready*0+1) * on(namespace, pod) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info)) * on(namespace,persistentvolumeclaim) group_left(provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"}))`,
+ [StorageDashboardQuery.PODS_BY_USED]: `sum by(namespace,pod) (((max by(namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes)) * on (namespace,persistentvolumeclaim) group_right() ((kube_running_pod_ready*0+1) * on(namespace, pod) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info)) * on(namespace,persistentvolumeclaim) group_left(provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"}))`,
+ [StorageDashboardQuery.CEPH_CAPACITY_TOTAL]: `ceph_cluster_total_bytes{managedBy="${managedByOCS}"}`,
+ [StorageDashboardQuery.CEPH_CAPACITY_USED]: `sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)",storageclass=~"${storageClassNamePrefix}.*"}))`,
+ [StorageDashboardQuery.CEPH_CAPACITY_AVAILABLE]: `max(ceph_pool_max_avail{managedBy="${managedByOCS}"} * on (pool_id) group_left(name)ceph_pool_metadata{name=~"(.*file.*)|(.*block.*)",managedBy="${managedByOCS}"})`,
+});
-export const breakdownQueryMapCEPH: BreakdownCardQueryMap = {
+export const breakdownQueryMapCEPH = (
+ storageClassNamePrefix: string,
+  managedByOCS?: string
+): BreakdownCardQueryMap => ({
[BreakdownCardFields.PROJECTS]: {
model: ProjectModel,
metric: 'namespace',
queries: {
[StorageDashboardQuery.PROJECTS_BY_USED]: `(topk(6,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PROJECTS_BY_USED]
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
+ StorageDashboardQuery.PROJECTS_BY_USED
+ ]
})))`,
[StorageDashboardQuery.PROJECTS_TOTAL_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
StorageDashboardQuery.PROJECTS_TOTAL_USED
],
[StorageDashboardQuery.CEPH_CAPACITY_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
StorageDashboardQuery.CEPH_CAPACITY_USED
],
},
@@ -98,16 +101,16 @@ export const breakdownQueryMapCEPH: BreakdownCardQueryMap = {
metric: 'storageclass',
queries: {
[StorageDashboardQuery.STORAGE_CLASSES_BY_USED]: `(topk(6,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
StorageDashboardQuery.STORAGE_CLASSES_BY_USED
]
})))`,
[StorageDashboardQuery.STORAGE_CLASSES_TOTAL_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
StorageDashboardQuery.STORAGE_CLASSES_TOTAL_USED
],
[StorageDashboardQuery.CEPH_CAPACITY_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
StorageDashboardQuery.CEPH_CAPACITY_USED
],
},
@@ -117,92 +120,104 @@ export const breakdownQueryMapCEPH: BreakdownCardQueryMap = {
metric: 'pod',
queries: {
[StorageDashboardQuery.PODS_BY_USED]: `(topk(6,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PODS_BY_USED]
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
+ StorageDashboardQuery.PODS_BY_USED
+ ]
})))`,
- [StorageDashboardQuery.PODS_TOTAL_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PODS_TOTAL_USED],
+ [StorageDashboardQuery.PODS_TOTAL_USED]: CEPH_CAPACITY_BREAKDOWN_QUERIES(
+ storageClassNamePrefix,
+ managedByOCS
+ )[StorageDashboardQuery.PODS_TOTAL_USED],
[StorageDashboardQuery.CEPH_CAPACITY_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix, managedByOCS)[
StorageDashboardQuery.CEPH_CAPACITY_USED
],
},
},
-};
+});
export const getBreakdownMetricsQuery = (
metricType: BreakdownCardFieldsWithParams | BreakdownCardFields,
+ storageClassNamePrefix: string,
namespace?: string,
- isExternal?: boolean
+ isExternal?: boolean,
+ managedByOCS?: string
): BreakdownCardFieldsValues => {
if (metricType === BreakdownCardFieldsWithParams.PVCS) {
return {
model: PersistentVolumeClaimModel,
metric: 'persistentvolumeclaim',
- queries: getPVCNamespaceQuery(namespace),
+ queries: getPVCNamespaceQuery(namespace, storageClassNamePrefix),
};
}
return !isExternal
- ? breakdownQueryMapCEPH[metricType]
- : breakdownIndependentQueryMap[metricType];
+ ? breakdownQueryMapCEPH(storageClassNamePrefix, managedByOCS)[metricType]
+ : breakdownIndependentQueryMap(storageClassNamePrefix)[metricType];
};
-export const CAPACITY_INFO_QUERIES = {
- [StorageDashboardQuery.RAW_CAPACITY_TOTAL]: 'ceph_cluster_total_bytes',
- [StorageDashboardQuery.RAW_CAPACITY_USED]:
- 'ceph_cluster_total_used_raw_bytes',
-};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
+export const CAPACITY_INFO_QUERIES = (managedByOCS: string) => ({
+ [StorageDashboardQuery.RAW_CAPACITY_TOTAL]: `ceph_cluster_total_bytes{managedBy="${managedByOCS}"}`,
+ [StorageDashboardQuery.RAW_CAPACITY_USED]: `ceph_cluster_total_used_raw_bytes{managedBy="${managedByOCS}"}`,
+});
-export const POOL_STORAGE_EFFICIENCY_QUERIES = {
- [StorageDashboardQuery.POOL_CAPACITY_RATIO]:
- 'sum(ceph_bluestore_bluestore_compressed_original) / clamp_min(sum(ceph_bluestore_bluestore_compressed_allocated),1)',
- [StorageDashboardQuery.POOL_SAVED_CAPACITY]:
- '(sum(ceph_bluestore_bluestore_compressed_original) - sum(ceph_bluestore_bluestore_compressed_allocated))',
-};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
+export const POOL_STORAGE_EFFICIENCY_QUERIES = (managedByOCS: string) => ({
+ [StorageDashboardQuery.POOL_CAPACITY_RATIO]: `sum(ceph_bluestore_bluestore_compressed_original{managedBy="${managedByOCS}"}) / clamp_min(sum(ceph_bluestore_bluestore_compressed_allocated{managedBy="${managedByOCS}"}),1)`,
+ [StorageDashboardQuery.POOL_SAVED_CAPACITY]: `(sum(ceph_bluestore_bluestore_compressed_original{managedBy="${managedByOCS}"}) - sum(ceph_bluestore_bluestore_compressed_allocated{managedBy="${managedByOCS}"}))`,
+});
-export const UTILIZATION_QUERY = {
- [StorageDashboardQuery.CEPH_CAPACITY_USED]:
- 'sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"}))',
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
+export const UTILIZATION_QUERY = (
+ managedByOCS: string,
+  storageClassNamePrefix: string = ''
+) => ({
+ [StorageDashboardQuery.CEPH_CAPACITY_USED]: `sum(topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)",storageclass=~"${storageClassNamePrefix}.*"}))`,
[StorageDashboardQuery.UTILIZATION_IOPS_READ_QUERY]: {
- query: 'sum(rate(ceph_pool_rd[1m]))',
+ query: `sum(rate(ceph_pool_rd{managedBy="${managedByOCS}"}[1m]))`,
desc: 'Reads',
},
[StorageDashboardQuery.UTILIZATION_IOPS_WRITE_QUERY]: {
- query: 'sum(rate(ceph_pool_wr[1m]))',
+ query: `sum(rate(ceph_pool_wr{managedBy="${managedByOCS}"}[1m]))`,
desc: 'Writes',
},
[StorageDashboardQuery.UTILIZATION_LATENCY_READ_QUERY]: {
- query: 'cluster:ceph_disk_latency_read:join_ceph_node_disk_rate1m',
+ query: `cluster:ceph_disk_latency_read:join_ceph_node_disk_rate1m{managedBy="${managedByOCS}"}`,
desc: 'Reads',
},
[StorageDashboardQuery.UTILIZATION_LATENCY_WRITE_QUERY]: {
- query: 'cluster:ceph_disk_latency_write:join_ceph_node_disk_rate1m',
+ query: `cluster:ceph_disk_latency_write:join_ceph_node_disk_rate1m{managedBy="${managedByOCS}"}`,
desc: 'Writes',
},
[StorageDashboardQuery.UTILIZATION_THROUGHPUT_READ_QUERY]: {
- query: 'sum(rate(ceph_pool_rd_bytes[1m]))',
+ query: `sum(rate(ceph_pool_rd_bytes{managedBy="${managedByOCS}"}[1m]))`,
desc: 'Reads',
},
[StorageDashboardQuery.UTILIZATION_THROUGHPUT_WRITE_QUERY]: {
- query: 'sum(rate(ceph_pool_wr_bytes[1m]))',
+ query: `sum(rate(ceph_pool_wr_bytes{managedBy="${managedByOCS}"}[1m]))`,
desc: 'Writes',
},
- [StorageDashboardQuery.UTILIZATION_RECOVERY_RATE_QUERY]:
- '(sum(ceph_pool_recovering_bytes_per_sec))',
-};
+ [StorageDashboardQuery.UTILIZATION_RECOVERY_RATE_QUERY]: `(sum(ceph_pool_recovering_bytes_per_sec{managedBy="${managedByOCS}"}))`,
+});
-export const utilizationPopoverQueryMap = [
+export const utilizationPopoverQueryMap = (storageClassNamePrefix: string) => [
{
model: ProjectModel,
metric: 'namespace',
query: `(sort_desc(topk(25,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PROJECTS_BY_USED]
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
+ StorageDashboardQuery.PROJECTS_BY_USED
+ ]
}))))`,
},
{
model: StorageClassModel,
metric: 'storageclass',
query: `(sort_desc(topk(25,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
StorageDashboardQuery.STORAGE_CLASSES_BY_USED
]
}))))`,
@@ -211,53 +226,65 @@ export const utilizationPopoverQueryMap = [
model: PodModel,
metric: 'pod',
query: `(sort_desc(topk(25, (${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PODS_BY_USED]
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
+ StorageDashboardQuery.PODS_BY_USED
+ ]
}))))`,
},
];
-export const getPVCNamespaceQuery = (namespace: string = '') => {
+export const getPVCNamespaceQuery = (
+ namespace: string = '',
+ storageClassNamePrefix: string
+) => {
const queries = {
- [StorageDashboardQuery.PVC_NAMESPACES_BY_USED]: `sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))`,
- [StorageDashboardQuery.PVC_NAMESPACES_TOTAL_USED]: `sum(sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})))`,
+ [StorageDashboardQuery.PVC_NAMESPACES_BY_USED]: `sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(.*ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"}))`,
+ [StorageDashboardQuery.PVC_NAMESPACES_TOTAL_USED]: `sum(sum by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace='${namespace}'} * on (namespace, persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(.*ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"})))`,
};
return queries;
};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
export const getPoolQuery = (
poolNames: string[],
- queryName: StorageDashboardQuery
+ queryName: StorageDashboardQuery,
+ managedByOCS: string
) => {
const names = poolNames.join('|');
const queries = {
- [StorageDashboardQuery.POOL_RAW_CAPACITY_USED]: `ceph_pool_bytes_used * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
- [StorageDashboardQuery.POOL_MAX_CAPACITY_AVAILABLE]: `ceph_pool_max_avail * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
- [StorageDashboardQuery.POOL_UTILIZATION_IOPS_QUERY]: `(rate(ceph_pool_wr[1m]) + rate(ceph_pool_rd[1m])) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
- [StorageDashboardQuery.POOL_UTILIZATION_THROUGHPUT_QUERY]: `(rate(ceph_pool_wr_bytes[1m]) + rate(ceph_pool_rd_bytes[1m])) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
- [StorageDashboardQuery.POOL_COMPRESSION_SAVINGS]: `(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
- [StorageDashboardQuery.POOL_COMPRESSION_ELIGIBILITY]: `(((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
- [StorageDashboardQuery.POOL_COMPRESSION_RATIO]: `((ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}'}`,
+ [StorageDashboardQuery.POOL_RAW_CAPACITY_USED]: `ceph_pool_bytes_used{managedBy='${managedByOCS}'} * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
+ [StorageDashboardQuery.POOL_MAX_CAPACITY_AVAILABLE]: `ceph_pool_max_avail{managedBy='${managedByOCS}'} * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
+ [StorageDashboardQuery.POOL_UTILIZATION_IOPS_QUERY]: `(rate(ceph_pool_wr{managedBy='${managedByOCS}'}[1m]) + rate(ceph_pool_rd{managedBy='${managedByOCS}'}[1m])) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
+ [StorageDashboardQuery.POOL_UTILIZATION_THROUGHPUT_QUERY]: `(rate(ceph_pool_wr_bytes{managedBy='${managedByOCS}'}[1m]) + rate(ceph_pool_rd_bytes{managedBy='${managedByOCS}'}[1m])) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
+ [StorageDashboardQuery.POOL_COMPRESSION_SAVINGS]: `(ceph_pool_compress_under_bytes{managedBy='${managedByOCS}'} - ceph_pool_compress_bytes_used{managedBy='${managedByOCS}'}) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
+ [StorageDashboardQuery.POOL_COMPRESSION_ELIGIBILITY]: `(((ceph_pool_compress_under_bytes{managedBy='${managedByOCS}'} > 0) / ceph_pool_stored_raw{managedBy='${managedByOCS}'}) * 100) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
+ [StorageDashboardQuery.POOL_COMPRESSION_RATIO]: `((ceph_pool_compress_under_bytes{managedBy='${managedByOCS}'} / ceph_pool_compress_bytes_used{managedBy='${managedByOCS}'} > 0) and on(pool_id) (((ceph_pool_compress_under_bytes{managedBy='${managedByOCS}'} > 0) / ceph_pool_stored_raw{managedBy='${managedByOCS}'}) * 100 > 0.5)) * on (pool_id) group_left(name)ceph_pool_metadata{name=~'${names}',managedBy='${managedByOCS}'}`,
};
return queries[queryName];
};
-export const INDEPENDENT_UTILIZATION_QUERIES = {
- [StorageDashboardQuery.REQUESTED_CAPACITY]:
- 'sum((kube_persistentvolumeclaim_resource_requests_storage_bytes * on (namespace,persistentvolumeclaim) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))',
- [StorageDashboardQuery.USED_CAPACITY]:
- 'sum((topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"}))',
-};
+export const INDEPENDENT_UTILIZATION_QUERIES = (
+ storageClassNamePrefix: string
+) => ({
+ [StorageDashboardQuery.REQUESTED_CAPACITY]: `sum((kube_persistentvolumeclaim_resource_requests_storage_bytes * on (namespace,persistentvolumeclaim) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"}))`,
+ [StorageDashboardQuery.USED_CAPACITY]: `sum((topk by (namespace,persistentvolumeclaim) (1, kubelet_volume_stats_used_bytes) * on (namespace,persistentvolumeclaim) group_right() kube_pod_spec_volumes_persistentvolumeclaims_info) * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)",storageclass=~"${storageClassNamePrefix}.*"}))`,
+});
-export const breakdownIndependentQueryMap: BreakdownCardQueryMap = {
+export const breakdownIndependentQueryMap = (
+ storageClassNamePrefix: string
+): BreakdownCardQueryMap => ({
[BreakdownCardFields.PROJECTS]: {
model: ProjectModel,
metric: 'namespace',
queries: {
[StorageDashboardQuery.PROJECTS_BY_USED]: `(topk(6,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PROJECTS_BY_USED]
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
+ StorageDashboardQuery.PROJECTS_BY_USED
+ ]
})))`,
[StorageDashboardQuery.PROJECTS_TOTAL_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
StorageDashboardQuery.PROJECTS_TOTAL_USED
],
},
@@ -267,12 +294,12 @@ export const breakdownIndependentQueryMap: BreakdownCardQueryMap = {
metric: 'storageclass',
queries: {
[StorageDashboardQuery.STORAGE_CLASSES_BY_USED]: `(topk(6,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
StorageDashboardQuery.STORAGE_CLASSES_BY_USED
]
})))`,
[StorageDashboardQuery.STORAGE_CLASSES_TOTAL_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
StorageDashboardQuery.STORAGE_CLASSES_TOTAL_USED
],
},
@@ -282,10 +309,13 @@ export const breakdownIndependentQueryMap: BreakdownCardQueryMap = {
metric: 'pod',
queries: {
[StorageDashboardQuery.PODS_BY_USED]: `(topk(6,(${
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PODS_BY_USED]
+ CEPH_CAPACITY_BREAKDOWN_QUERIES(storageClassNamePrefix)[
+ StorageDashboardQuery.PODS_BY_USED
+ ]
})))`,
- [StorageDashboardQuery.PODS_TOTAL_USED]:
- CEPH_CAPACITY_BREAKDOWN_QUERIES[StorageDashboardQuery.PODS_TOTAL_USED],
+ [StorageDashboardQuery.PODS_TOTAL_USED]: CEPH_CAPACITY_BREAKDOWN_QUERIES(
+ storageClassNamePrefix
+ )[StorageDashboardQuery.PODS_TOTAL_USED],
},
},
-};
+});
diff --git a/packages/ocs/queries/object-storage.ts b/packages/ocs/queries/object-storage.ts
index 3f1aaa43a..f7a6369cf 100644
--- a/packages/ocs/queries/object-storage.ts
+++ b/packages/ocs/queries/object-storage.ts
@@ -37,22 +37,28 @@ export enum ObjectServiceDashboardQuery {
RGW_REBUILD_PROGRESS_QUERY = 'RGW_REBUILD_PROGRESS_QUERY',
}
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
export const dataResiliencyQueryMap = {
[ObjectServiceDashboardQuery.MCG_REBUILD_PROGRESS_QUERY]:
'NooBaa_rebuild_progress/100',
[ObjectServiceDashboardQuery.MCG_REBUILD_TIME_QUERY]: 'NooBaa_rebuild_time',
[ObjectServiceDashboardQuery.RGW_REBUILD_PROGRESS_QUERY]: (
- rgwPrefix: string = ''
+ rgwPrefix: string = '',
+ managedByOCS: string
) =>
_.template(
- 'sum(ceph_pool_metadata{name=~"<%= name %>"}*on (job, namespace, pool_id) group_right(name) (ceph_pg_active and ceph_pg_clean)) / sum(ceph_pool_metadata{name=~"<%= name %>"} *on (job, namespace, pool_id) group_right(name) ceph_pg_total)'
+ 'sum(ceph_pool_metadata{name=~"<%= name %>",managedBy="<%= managedByOCS %>"}*on (job, namespace, pool_id) group_right(name) (ceph_pg_active{managedBy="<%= managedByOCS %>"} and ceph_pg_clean{managedBy="<%= managedByOCS %>"})) / sum(ceph_pool_metadata{name=~"<%= name %>",managedBy="<%= managedByOCS %>"} *on (job, namespace, pool_id) group_right(name) ceph_pg_total{managedBy="<%= managedByOCS %>"})'
)({
name: rgwPrefix
? `${rgwPrefix}.rgw.*`
- : '(ocs-storagecluster-cephblockpool)|(ocs-storagecluster-cephfilesystem-data0)',
+ : `(${managedByOCS}-cephblockpool)|(${managedByOCS}-cephfilesystem-data0)`,
+ managedByOCS: managedByOCS,
}),
};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
export const MCG_CAPACITY_BREAKDOWN_QUERIES = {
[ObjectServiceDashboardQuery.PROJECTS_BY_USED]:
'NooBaa_projects_capacity_usage',
@@ -60,17 +66,22 @@ export const MCG_CAPACITY_BREAKDOWN_QUERIES = {
'NooBaa_bucket_class_capacity_usage',
[ObjectServiceDashboardQuery.NOOBAA_TOTAL_USED]:
'sum(NooBaa_providers_physical_size)',
- [ObjectServiceDashboardQuery.RGW_TOTAL_USED]: (rgwPrefix: string = '') =>
+ [ObjectServiceDashboardQuery.RGW_TOTAL_USED]: (
+ rgwPrefix: string = '',
+ managedByOCS: string
+ ) =>
_.template(
- 'sum(ceph_pool_metadata{name=~"<%= name %>rgw.buckets.data"} *on (job, namespace, pool_id) group_right(name) ceph_pool_stored) - max(NooBaa_providers_physical_size{type="S3_COMPATIBLE"} or vector(0))'
- )({ name: rgwPrefix ? `${rgwPrefix}.` : '.*' }),
+ 'sum(ceph_pool_metadata{name=~"<%= name %>rgw.buckets.data",managedBy="<%= managedByOCS %>"} *on (job, namespace, pool_id) group_right(name) ceph_pool_stored{managedBy="<%= managedByOCS %>"}) - max(NooBaa_providers_physical_size{type="S3_COMPATIBLE"} or vector(0))'
+ )({ name: rgwPrefix ? `${rgwPrefix}.` : '.*', managedByOCS: managedByOCS }),
[ObjectServiceDashboardQuery.OBJECT_STORAGE_TOTAL_USED]: (
- rgwPrefix: string = ''
+ rgwPrefix: string = '',
+ managedByOCS: string
) =>
_.template(
- 'sum(ceph_pool_metadata{name=~"<%= name %>rgw.buckets.data"} *on (job, namespace, pool_id) group_right(name) ceph_pool_stored) + max(sum(NooBaa_providers_physical_size{type!="S3_COMPATIBLE"}) or vector(0))'
+ 'sum(ceph_pool_metadata{name=~"<%= name %>rgw.buckets.data",managedBy="<%= managedByOCS %>"} *on (job, namespace, pool_id) group_right(name) ceph_pool_stored{managedBy="<%= managedByOCS %>"}) + max(sum(NooBaa_providers_physical_size{type!="S3_COMPATIBLE"}) or vector(0))'
)({
name: rgwPrefix ? `${rgwPrefix}.` : '.*',
+ managedByOCS: managedByOCS,
}),
};
@@ -79,11 +90,11 @@ export const breakdownQueryMapMCG = {
[CapacityBreakdown.Metrics.TOTAL]: {
model: null,
metric: '',
- queries: (rgwPrefix: string = '') => ({
+ queries: (rgwPrefix: string = '', managedByOCS: string) => ({
[ObjectServiceDashboardQuery.RGW_TOTAL_USED]: (() =>
MCG_CAPACITY_BREAKDOWN_QUERIES[
ObjectServiceDashboardQuery.RGW_TOTAL_USED
- ](rgwPrefix))(),
+ ](rgwPrefix, managedByOCS))(),
[ObjectServiceDashboardQuery.NOOBAA_TOTAL_USED]:
MCG_CAPACITY_BREAKDOWN_QUERIES[
ObjectServiceDashboardQuery.NOOBAA_TOTAL_USED
@@ -91,7 +102,7 @@ export const breakdownQueryMapMCG = {
[ObjectServiceDashboardQuery.OBJECT_STORAGE_TOTAL_USED]: (() =>
MCG_CAPACITY_BREAKDOWN_QUERIES[
ObjectServiceDashboardQuery.OBJECT_STORAGE_TOTAL_USED
- ](rgwPrefix))(),
+ ](rgwPrefix, managedByOCS))(),
}),
},
},
@@ -149,20 +160,22 @@ export const breakdownQueryMapMCG = {
[CapacityBreakdown.Metrics.TOTAL]: {
model: null,
metric: '',
- queries: (rgwPrefix: string = '') => ({
+ queries: (rgwPrefix: string = '', managedByOCS: string) => ({
[ObjectServiceDashboardQuery.RGW_TOTAL_USED]: (() =>
MCG_CAPACITY_BREAKDOWN_QUERIES[
ObjectServiceDashboardQuery.RGW_TOTAL_USED
- ](rgwPrefix))(),
+ ](rgwPrefix, managedByOCS))(),
[ObjectServiceDashboardQuery.RGW_USED]: (() =>
MCG_CAPACITY_BREAKDOWN_QUERIES[
ObjectServiceDashboardQuery.RGW_TOTAL_USED
- ](rgwPrefix))(),
+ ](rgwPrefix, managedByOCS))(),
}),
},
},
};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
export const DATA_CONSUMPTION_QUERIES = {
[ServiceType.MCG]: {
[Breakdown.ACCOUNTS]: {
@@ -196,18 +209,16 @@ export const DATA_CONSUMPTION_QUERIES = {
},
},
},
- [ServiceType.RGW]: {
+ [ServiceType.RGW]: (managedByOCS: string) => ({
[Metrics.LATENCY]: {
- latencyGet:
- 'avg(rate(ceph_rgw_get_initial_lat_sum[1m])) /avg(rate(ceph_rgw_get_initial_lat_count[1m])>0)',
- latencyPut:
- 'avg(rate(ceph_rgw_put_initial_lat_sum[1m])) /avg(rate(ceph_rgw_put_initial_lat_count[1m])>0)',
+ latencyGet: `avg(rate(ceph_rgw_get_initial_lat_sum{managedBy="${managedByOCS}"}[1m])) /avg(rate(ceph_rgw_get_initial_lat_count{managedBy="${managedByOCS}"}[1m])>0)`,
+ latencyPut: `avg(rate(ceph_rgw_put_initial_lat_sum{managedBy="${managedByOCS}"}[1m])) /avg(rate(ceph_rgw_put_initial_lat_count{managedBy="${managedByOCS}"}[1m])>0)`,
},
[Metrics.BANDWIDTH]: {
- bandwidthGet: 'sum(rate(ceph_rgw_get_b[1m]))',
- bandwidthPut: 'sum(rate(ceph_rgw_put_b[1m]))',
+ bandwidthGet: `sum(rate(ceph_rgw_get_b{managedBy="${managedByOCS}"}[1m]))`,
+ bandwidthPut: `sum(rate(ceph_rgw_put_b{managedBy="${managedByOCS}"}[1m]))`,
},
- },
+ }),
};
export enum ObjectStorageEfficiencyQueries {
diff --git a/packages/ocs/storage-class/sc-form.tsx b/packages/ocs/storage-class/sc-form.tsx
index 1c9084e44..dc28ff2bd 100644
--- a/packages/ocs/storage-class/sc-form.tsx
+++ b/packages/ocs/storage-class/sc-form.tsx
@@ -9,50 +9,55 @@ import {
createCsiKmsResources,
} from '@odf/core/components/kms-config/utils';
import {
- OCS_INTERNAL_CR_NAME,
- OCS_EXTERNAL_CR_NAME,
KMS_PROVIDER,
KMSConfigMapCSIName,
SupportedProviders,
DescriptionKey,
} from '@odf/core/constants';
-import { OCS_INDEPENDENT_FLAG } from '@odf/core/features';
import { useSafeK8sWatchResource } from '@odf/core/hooks';
-import { useODFNamespaceSelector } from '@odf/core/redux';
import {
- cephBlockPoolResource,
- cephClusterResource,
-} from '@odf/core/resources';
+ useODFNamespaceSelector,
+ useODFSystemFlagsSelector,
+} from '@odf/core/redux';
+import { cephClusterResource } from '@odf/core/resources';
import {
ProviderNames,
KmsCsiConfigKeysMapping,
KMSConfigMap,
K8sResourceObj,
} from '@odf/core/types';
+import { getResourceInNs } from '@odf/core/utils';
+import { CephBlockPoolModel } from '@odf/ocs/models';
+import ResourceDropdown from '@odf/shared/dropdown/ResourceDropdown';
import { ButtonBar } from '@odf/shared/generic/ButtonBar';
import { StatusBox } from '@odf/shared/generic/status-box';
import { useDeepCompareMemoize } from '@odf/shared/hooks/deep-compare-memoize';
import { useK8sGet } from '@odf/shared/hooks/k8s-get-hook';
+import { useK8sList } from '@odf/shared/hooks/useK8sList';
import {
ConfigMapModel,
InfrastructureModel,
StorageClassModel,
SecretModel,
+ ODFStorageSystem,
} from '@odf/shared/models';
+import { getName, getNamespace } from '@odf/shared/selectors';
import {
CephClusterKind,
ConfigMapKind,
K8sResourceKind,
StorageClassResourceKind,
SecretKind,
+ StorageSystemKind,
} from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
+import { isOCSStorageSystem, referenceForModel } from '@odf/shared/utils';
import { getInfrastructurePlatform } from '@odf/shared/utils';
import {
ProvisionerProps,
- useFlag,
useK8sWatchResource,
useModal,
+ WatchK8sResource,
} from '@openshift-console/dynamic-plugin-sdk';
import * as _ from 'lodash-es';
import {
@@ -71,8 +76,6 @@ import {
} from '@patternfly/react-core';
import { CaretDownIcon } from '@patternfly/react-icons';
import {
- CEPH_EXTERNAL_CR_NAME,
- CEPH_INTERNAL_CR_NAME,
CLUSTER_STATUS,
POOL_STATE,
CEPH_NS_SESSION_STORAGE,
@@ -83,63 +86,199 @@ import './sc-form.scss';
type OnParamChange = (id: string, paramName: string, checkbox: boolean) => void;
+const storageSystemResource: WatchK8sResource = {
+ kind: referenceForModel(ODFStorageSystem),
+ isList: true,
+};
+
+const cephBlockPoolResource: WatchK8sResource = {
+ kind: referenceForModel(CephBlockPoolModel),
+ isList: true,
+};
+
+const filterOCSStorageSystems = (resource) =>
+ isOCSStorageSystem(resource as StorageSystemKind);
+
+const setSessionValueAgain = (systemNamespace: string) => {
+ // session value will get removed after clicking "Create" (check "mutators.ts"),
+ // so in case of any error (after clicking "Create") form persists but session gets removed.
+ const sessionNsValue = sessionStorage.getItem(CEPH_NS_SESSION_STORAGE);
+ if (!sessionNsValue && !!systemNamespace)
+ sessionStorage.setItem(CEPH_NS_SESSION_STORAGE, systemNamespace);
+};
+
+const getPoolDropdownItems = (
+ poolData,
+ cephCluster,
+ handleDropdownChange,
+ onPoolCreation,
+ launchModal,
+ t
+) =>
+ _.reduce(
+ poolData,
+ (res, pool: StoragePoolKind) => {
+ const compressionText =
+ pool?.spec?.compressionMode === 'none' ||
+ pool?.spec?.compressionMode === ''
+ ? t('no compression')
+ : t('with compression');
+ if (
+ pool?.status?.phase === POOL_STATE.READY &&
+ cephCluster?.status?.phase === CLUSTER_STATUS.READY
+ ) {
+ res.push(
+
+ {pool?.metadata?.name}
+
+ );
+ }
+ return res;
+ },
+ [
+
+ launchModal(CreateBlockPoolModal, {
+ cephCluster,
+ onPoolCreation,
+ })
+ }
+ >
+ {t('Create New Pool')}
+ ,
+ ,
+ ]
+ );
+
+const StorageSystemDropdown: React.FC<{
+ onSelect: (resource: K8sResourceKind) => void;
+ systemNamespace: string;
+}> = ({ onSelect, systemNamespace }) => {
+ const { t } = useCustomTranslation();
+
+ const initialSSSelection = React.useCallback(
+ (resources) => {
+ return !systemNamespace
+ ? resources?.[0]
+ : resources?.find((system) => getNamespace(system) === systemNamespace);
+ },
+ [systemNamespace]
+ );
+
+ return (
+
+
+
+ className="pf-c-dropdown dropdown--full-width"
+ onSelect={onSelect}
+ initialSelection={initialSSSelection}
+ filterResource={filterOCSStorageSystems}
+ id="system-name"
+ data-test="storage-system-dropdown"
+ resource={storageSystemResource}
+ resourceModel={ODFStorageSystem}
+ />
+
+ {t('StorageSystem which will be used for storage needs')}
+
+
+ );
+};
+
export const CephFsNameComponent: React.FC = ({
parameterKey,
parameterValue,
onParamChange,
}) => {
const { t } = useCustomTranslation();
+ const [systemNamespace, setSystemNamespace] = React.useState();
const onParamChangeRef = React.useRef();
+ // reference of "onParamChange" changes on each re-render, hence storing in a "useRef"
onParamChangeRef.current = onParamChange;
- const isExternal = useFlag(OCS_INDEPENDENT_FLAG);
- const scName = `${
- isExternal ? OCS_EXTERNAL_CR_NAME : OCS_INTERNAL_CR_NAME
- }-cephfs`;
- const [sc, scLoaded, scLoadError] = useK8sGet(
- StorageClassModel,
- scName
- );
+ const { systemFlags, areFlagsLoaded, flagsLoadError } =
+ useODFSystemFlagsSelector();
+ const isExternal = systemFlags[systemNamespace]?.isExternalMode;
+ const ocsName = systemFlags[systemNamespace]?.ocsClusterName;
+ const scName = `${ocsName}-cephfs`;
+
+ const [sces, scLoaded, scLoadError] =
+ useK8sList(StorageClassModel);
+ const sc = sces?.find((item) => getName(item) === scName);
React.useEffect(() => {
- if (scLoaded && !scLoadError) {
+ if (!!sc && scLoaded && !scLoadError) {
const fsName = sc?.parameters?.fsName;
if (fsName) {
onParamChangeRef.current(parameterKey, fsName, false);
}
}
+
+ return () => onParamChangeRef.current(parameterKey, '', false);
}, [sc, scLoaded, scLoadError, parameterKey]);
- // ToDo (epic 4422): Need to pass the namespace where ceph cluster is deployed (remove from here, add dropdown)
- React.useEffect(() => {
- sessionStorage.setItem(CEPH_NS_SESSION_STORAGE, 'openshift-storage');
- }, []);
+ const onSelect = React.useCallback(
+ (resource: K8sResourceKind) => {
+ const ns = getNamespace(resource);
+ sessionStorage.setItem(CEPH_NS_SESSION_STORAGE, ns);
+ setSystemNamespace(ns);
+ },
+ [setSystemNamespace]
+ );
- if (scLoaded && !scLoadError) {
+ setSessionValueAgain(systemNamespace);
+
+ if (scLoaded && areFlagsLoaded && !scLoadError && !flagsLoadError) {
return (
-
-
-
- onParamChange(parameterKey, e.currentTarget.value, false)
- }
- placeholder={t('Enter filesystem name')}
- id="filesystem-name"
- required
+ <>
+
-
- {t('CephFS filesystem name into which the volume shall be created')}
-
-
+
+
+
+ onParamChange(parameterKey, e.currentTarget.value, false)
+ }
+ placeholder={t('Enter filesystem name')}
+ id="filesystem-name"
+ required
+ />
+
+ {t('CephFS filesystem name into which the volume shall be created')}
+
+
+ >
);
}
- return ;
+ return (
+
+ );
};
export const PoolResourceComponent: React.FC = ({
@@ -147,12 +286,13 @@ export const PoolResourceComponent: React.FC = ({
onParamChange,
}) => {
const { t } = useCustomTranslation();
+ const onParamChangeRef = React.useRef();
+ // reference of "onParamChange" changes on each re-render, hence storing in a "useRef"
+ onParamChangeRef.current = onParamChange;
const launchModal = useModal();
- const { isODFNsLoaded, odfNsLoadError, isNsSafe } = useODFNamespaceSelector();
-
- const [poolData, poolDataLoaded, poolDataLoadError] = useSafeK8sWatchResource<
+ const [poolsData, poolDataLoaded, poolDataLoadError] = useK8sWatchResource<
StoragePoolKind[]
>(cephBlockPoolResource);
@@ -161,6 +301,16 @@ export const PoolResourceComponent: React.FC = ({
const [isOpen, setOpen] = React.useState(false);
const [poolName, setPoolName] = React.useState('');
+ const [systemNamespace, setSystemNamespace] = React.useState();
+
+ const poolData = poolsData.filter(
+ (pool) => getNamespace(pool) === systemNamespace
+ );
+ const cephCluster = getResourceInNs(cephClusters, systemNamespace);
+
+ const { systemFlags, areFlagsLoaded, flagsLoadError, areFlagsSafe } =
+ useODFSystemFlagsSelector();
+ const isExternal = systemFlags[systemNamespace]?.isExternalMode;
const handleDropdownChange = (e: React.KeyboardEvent) => {
setPoolName(e.currentTarget.id);
@@ -177,90 +327,63 @@ export const PoolResourceComponent: React.FC = ({
onParamChange(parameterKey, e.currentTarget.value, false);
};
- const poolDropdownItems = _.reduce(
- poolData,
- (res, pool: StoragePoolKind) => {
- const compressionText =
- pool?.spec?.compressionMode === 'none' ||
- pool?.spec?.compressionMode === ''
- ? t('no compression')
- : t('with compression');
- if (
- pool?.status?.phase === POOL_STATE.READY &&
- cephClusters[0]?.status?.phase === CLUSTER_STATUS.READY
- ) {
- res.push(
-
- {pool?.metadata?.name}
-
- );
- }
- return res;
+ const onSelect = React.useCallback(
+ (resource: K8sResourceKind) => {
+ const ns = getNamespace(resource);
+ sessionStorage.setItem(CEPH_NS_SESSION_STORAGE, ns);
+ setSystemNamespace(ns);
+ setPoolName('');
+ onParamChangeRef.current(parameterKey, '', false);
},
- [
-
- launchModal(CreateBlockPoolModal, {
- cephClusters,
- onPoolCreation,
- })
- }
- >
- {t('Create New Pool')}
- ,
- ,
- ]
+ [setSystemNamespace, setPoolName, parameterKey]
);
- // ToDo (epic 4422): Need to pass the namespace where ceph cluster is deployed (remove from here, add dropdown)
- React.useEffect(() => {
- sessionStorage.setItem(CEPH_NS_SESSION_STORAGE, 'openshift-storage');
- }, []);
+ setSessionValueAgain(systemNamespace);
- if (isNsSafe && cephClusters[0]?.metadata.name === CEPH_INTERNAL_CR_NAME) {
+ if (areFlagsSafe && !isExternal) {
return (
<>
- {!poolDataLoadError && !odfNsLoadError && cephClusters && (
-
-
- setOpen(!isOpen)}
- toggleIndicator={CaretDownIcon}
- >
- {poolName || t('Select a Pool')}
-
- }
- isOpen={isOpen}
- dropdownItems={poolDropdownItems}
- onSelect={() => setOpen(false)}
- id="ocs-storage-pool"
+ {!poolDataLoadError && !flagsLoadError && (
+ <>
+
-
- {t('Storage pool into which volume data shall be stored')}
-
-
+
+
+ setOpen(!isOpen)}
+ toggleIndicator={CaretDownIcon}
+ >
+ {poolName || t('Select a Pool')}
+
+ }
+ isOpen={isOpen}
+ dropdownItems={getPoolDropdownItems(
+ poolData,
+ cephCluster,
+ handleDropdownChange,
+ onPoolCreation,
+ launchModal,
+ t
+ )}
+ onSelect={() => setOpen(false)}
+ id="ocs-storage-pool"
+ />
+
+ {t('Storage pool into which volume data shall be stored')}
+
+
+ >
)}
- {(poolDataLoadError || cephClusterLoadError || odfNsLoadError) && (
+ {(poolDataLoadError || cephClusterLoadError || flagsLoadError) && (
= ({
>
);
}
- if (isNsSafe && cephClusters[0]?.metadata.name === CEPH_EXTERNAL_CR_NAME) {
+ if (areFlagsSafe && isExternal) {
return (
-
-
-
+
-
- {t('Storage pool into which volume data shall be stored')}
-
-
+
+
+
+
+ {t('Storage pool into which volume data shall be stored')}
+
+
+ >
);
}
return (
);
};
@@ -524,6 +654,7 @@ export const StorageClassEncryptionKMSID: React.FC = ({
}) => {
const { t } = useCustomTranslation();
const onParamChangeRef = React.useRef();
+ // reference of "onParamChange" changes on each re-render, hence storing in a "useRef"
onParamChangeRef.current = onParamChange;
const { odfNamespace, isODFNsLoaded, odfNsLoadError } =
@@ -568,20 +699,9 @@ export const StorageClassEncryptionKMSID: React.FC = ({
[parameterKey]
);
- // ToDo (Sanjal): "StorageClassForm" got refactored to a FC (https://github.com/openshift/console/pull/13036).
- // If any "parameter" specific "Component" in un-mounting, it do not have access to latest "onParamChange" (having latest "newStorageClass" object).
- // Talk to OCP team, maybe we can pass "onParamChange" as a "useRef" object, which can resolve this issue.
-
- // When user selects a connection from the dropdown, but, then un-checks the encryption checkbox,
- // and checks it back again. Component will be re-mounted, still Redux state will still
- // have previously selected parameterValue. This useEffect is to clean that up.
- /* React.useEffect(() => {
- return () => setEncryptionId('');
- }, [setEncryptionId]); */
-
/** When csiConfigMap is deleted from another tab, "csiConfigMapLoadError" == true (404 Not Found), but,
* "csiConfigMap" still contains same old object that was present before the deletion of the configMap.
- * Hence, dropdown was not updating dynamically. Used csiKmsDetails to handle that.
+ * Hence, dropdown was not updating dynamically. Used "csiKmsDetails" to handle that.
*/
const [csiKmsDetails, setCsiKmsDetails] = React.useState(null);
React.useEffect(() => {
@@ -695,6 +815,7 @@ export const StorageClassEncryptionKMSID: React.FC = ({
state={state.securityAndNetwork}
dispatch={dispatch}
infraType={infraType}
+ systemNamespace={odfNamespace}
className="ocs-storage-class-encryption"
/>
diff --git a/packages/ocs/types.ts b/packages/ocs/types.ts
index 9a3cf77aa..75527a30b 100644
--- a/packages/ocs/types.ts
+++ b/packages/ocs/types.ts
@@ -36,3 +36,5 @@ export type StoragePoolKind = K8sResourceCommon & {
};
};
};
+
+export type ODFSystemParams = { namespace: string; systemName: string };
diff --git a/packages/ocs/utils/block-pool.tsx b/packages/ocs/utils/block-pool.tsx
index 4d2c7d67c..e1abd88f1 100644
--- a/packages/ocs/utils/block-pool.tsx
+++ b/packages/ocs/utils/block-pool.tsx
@@ -1,6 +1,6 @@
import * as React from 'react';
import { ModalKeys } from '@odf/shared/modals/types';
-import { StorageClassResourceKind, CephClusterKind } from '@odf/shared/types';
+import { StorageClassResourceKind } from '@odf/shared/types';
import { getLastLanguage } from '@odf/shared/utils';
import { TFunction } from 'i18next';
import {
@@ -9,7 +9,7 @@ import {
ExclamationCircleIcon,
LockIcon,
} from '@patternfly/react-icons';
-import { CEPH_EXTERNAL_CR_NAME, POOL_PROGRESS, ROOK_MODEL } from '../constants';
+import { POOL_PROGRESS, ROOK_MODEL } from '../constants';
import { StorageClusterModel } from '../models';
import { StoragePoolKind } from '../types';
import { LoadingComponent } from './CustomLoading';
@@ -50,11 +50,11 @@ export const isDefaultPool = (blockPoolConfig: StoragePoolKind): boolean =>
export const disableMenuAction = (
blockPoolConfig: StoragePoolKind,
- cephCluster: CephClusterKind
+ isExternal: boolean
) =>
!!(
blockPoolConfig?.metadata?.deletionTimestamp ||
- cephCluster?.metadata?.name === CEPH_EXTERNAL_CR_NAME ||
+ isExternal ||
isDefaultPool(blockPoolConfig)
);
diff --git a/packages/ocs/utils/ceph-health.ts b/packages/ocs/utils/ceph-health.ts
index 80c1ac209..c70f576ad 100644
--- a/packages/ocs/utils/ceph-health.ts
+++ b/packages/ocs/utils/ceph-health.ts
@@ -1,12 +1,18 @@
+import { STATE_PRIORITY } from '@odf/shared/dashboards/status-card/states';
import { K8sResourceKind } from '@odf/shared/types';
import { HealthState } from '@openshift-console/dynamic-plugin-sdk';
import {
- ResourceHealthHandler,
SubsystemHealth,
+ ResourceHealthHandler,
} from '@openshift-console/dynamic-plugin-sdk/lib/extensions/dashboard-types';
import { TFunction } from 'i18next';
+import * as _ from 'lodash-es';
export type WatchCephResource = {
+ ceph: K8sResourceKind;
+};
+
+export type WatchCephResources = {
ceph: K8sResourceKind[];
};
@@ -39,7 +45,7 @@ export const getCephHealthState: ResourceHealthHandler
= (
t
) => {
const { data, loaded, loadError } = ceph;
- const status = data?.[0]?.status?.ceph?.health;
+ const status = data?.status?.ceph?.health;
if (loadError) {
return { state: HealthState.NOT_AVAILABLE };
@@ -47,12 +53,32 @@ export const getCephHealthState: ResourceHealthHandler = (
if (!loaded) {
return { state: HealthState.LOADING };
}
- if (data.length === 0) {
+ if (_.isEmpty(data)) {
return { state: HealthState.NOT_AVAILABLE };
}
return parseCephHealthStatus(status, t);
};
+export const getCephsHealthState: ResourceHealthHandler = (
+ { ceph },
+ t
+) => {
+ const { data, loaded, loadError } = ceph;
+ const cephHealthStates = data?.map((cephCluster: K8sResourceKind) =>
+ getCephHealthState({ ceph: { data: cephCluster, loaded, loadError } }, t)
+ );
+
+ let worstCephHealthState: SubsystemHealth;
+ STATE_PRIORITY.some((state) => {
+ worstCephHealthState = cephHealthStates?.find(
+ (cephHealthState) => cephHealthState.state === state
+ );
+ return !!worstCephHealthState ? true : false;
+ });
+
+ return worstCephHealthState || { state: HealthState.UNKNOWN };
+};
+
export enum Phase {
CONNECTED = 'Connected',
PROGRESSING = 'Progressing',
diff --git a/packages/ocs/utils/common.ts b/packages/ocs/utils/common.ts
index 5e764a3af..3d09d8360 100644
--- a/packages/ocs/utils/common.ts
+++ b/packages/ocs/utils/common.ts
@@ -33,28 +33,40 @@ export const isCephProvisioner = (scProvisioner: string): boolean => {
);
};
-export const isObjectStorageEvent = (event: EventKind): boolean => {
- const eventKind: string = event?.involvedObject?.kind;
- const objectStorageResources = [
- NooBaaBackingStoreModel.kind,
- NooBaaBucketClassModel.kind,
- NooBaaObjectBucketClaimModel.kind,
- CephObjectStoreModel.kind,
- ];
- if (
- eventKind !== PersistentVolumeClaimModel.kind &&
- eventKind !== PersistentVolumeModel.kind
- ) {
- const eventName: string = event?.involvedObject?.name;
- return _.startsWith(eventName, 'noobaa') || eventName?.includes('rgw');
- }
- return objectStorageResources.includes(eventKind);
-};
+export const isObjectStorageEvent =
+ (isRGW: boolean, isMCG: boolean) =>
+ (event: EventKind): boolean => {
+ const eventKind: string = event?.involvedObject?.kind;
+ const objectStorageResources = [
+ ...(isMCG
+ ? [
+ NooBaaBackingStoreModel.kind,
+ NooBaaBucketClassModel.kind,
+ NooBaaObjectBucketClaimModel.kind,
+ ]
+ : []),
+ ...(isRGW ? [CephObjectStoreModel.kind] : []),
+ ];
+ if (
+ ![PersistentVolumeClaimModel.kind, PersistentVolumeModel.kind].includes(
+ eventKind
+ )
+ ) {
+ const eventName: string = event?.involvedObject?.name;
+
+ if (isRGW && isMCG)
+ return _.startsWith(eventName, 'noobaa') || eventName?.includes('rgw');
+ if (isRGW) return eventName?.includes('rgw');
+ if (isMCG) return _.startsWith(eventName, 'noobaa');
+ }
+
+ return objectStorageResources.includes(eventKind);
+ };
export const isPersistentStorageEvent =
(pvcs: string[], ns: string) =>
(event: EventKind): boolean => {
- if (isObjectStorageEvent(event)) return false;
+ if (isObjectStorageEvent(true, true)(event)) return false;
const eventKind = event?.involvedObject?.kind;
const eventNamespace = getNamespace(event);
const eventObjectName = event?.involvedObject?.name;
diff --git a/packages/ocs/utils/noobaa-health.ts b/packages/ocs/utils/noobaa-health.ts
index 3462c2e8a..46f129156 100644
--- a/packages/ocs/utils/noobaa-health.ts
+++ b/packages/ocs/utils/noobaa-health.ts
@@ -1,5 +1,8 @@
import { getGaugeValue } from '@odf/shared/utils';
-import { HealthState } from '@openshift-console/dynamic-plugin-sdk';
+import {
+ HealthState,
+ K8sResourceCommon,
+} from '@openshift-console/dynamic-plugin-sdk';
import {
PrometheusHealthHandler,
SubsystemHealth,
@@ -43,6 +46,7 @@ export const getNooBaaState: PrometheusHealthHandler = (
const { response, error } = responses[0];
const noobaaLoaded = noobaa?.loaded;
const noobaaLoadError = noobaa?.loadError;
+ const noobaaData = noobaa?.data as K8sResourceCommon[];
const statusIndex: string = getGaugeValue(response);
if (error || noobaaLoadError) {
@@ -51,7 +55,7 @@ export const getNooBaaState: PrometheusHealthHandler = (
if (!noobaaLoaded || !response) {
return { state: HealthState.LOADING };
}
- if (!statusIndex) {
+ if (!statusIndex || !noobaaData?.length) {
return { state: HealthState.NOT_AVAILABLE };
}
return parseNoobaaStatus(statusIndex, t);
diff --git a/packages/odf/components/alerts/alert-action-path.tsx b/packages/odf/components/alerts/alert-action-path.tsx
index 389ce00a4..f265690a3 100644
--- a/packages/odf/components/alerts/alert-action-path.tsx
+++ b/packages/odf/components/alerts/alert-action-path.tsx
@@ -1,22 +1,35 @@
+import { getStorageClusterInNs } from '@odf/core/utils';
import { StorageClusterModel } from '@odf/ocs/models';
import { CEPH_STORAGE_NAMESPACE } from '@odf/shared/constants';
-import { k8sGet } from '@openshift-console/dynamic-plugin-sdk';
-import { OCS_INTERNAL_CR_NAME } from '../../constants';
+import { StorageClusterKind } from '@odf/shared/types';
+import { k8sList } from '@openshift-console/dynamic-plugin-sdk';
import { AddCapacityModal } from '../../modals/add-capacity/add-capacity-modal';
export const getDiskAlertActionPath = () =>
window.open('https://access.redhat.com/solutions/5194851');
+// ToDo (epic 4422): Get StorageCluster name and namespace from the Alert object and then use "k8sGet".
export const launchClusterExpansionModal = async (alert, launchModal) => {
try {
+ /*
const storageCluster = await k8sGet({
model: StorageClusterModel,
- name: OCS_INTERNAL_CR_NAME,
- // ToDo (epic 4422): Get StorageCluster name and namespace from the alert object
- // else add a wrapper around "AddCapacityModal" and poll for revelant SC there.
- ns: CEPH_STORAGE_NAMESPACE,
+ name: alert?.annotations?.target_name,
+ ns: alert?.annotations?.target_namespace,
});
launchModal(AddCapacityModal, { isOpen: true, storageCluster });
+ */
+ const storageCluster = (await k8sList({
+ model: StorageClusterModel,
+ queryParams: { ns: CEPH_STORAGE_NAMESPACE },
+ })) as StorageClusterKind[];
+ launchModal(AddCapacityModal, {
+ isOpen: true,
+ storageCluster: getStorageClusterInNs(
+ storageCluster,
+ CEPH_STORAGE_NAMESPACE
+ ),
+ });
} catch (e) {
// eslint-disable-next-line no-console
console.error('Error launching modal', e);
diff --git a/packages/odf/components/cluster-overview-extensions/HealthOverview.tsx b/packages/odf/components/cluster-overview-extensions/HealthOverview.tsx
index 80bdb3bb5..74b371ff3 100644
--- a/packages/odf/components/cluster-overview-extensions/HealthOverview.tsx
+++ b/packages/odf/components/cluster-overview-extensions/HealthOverview.tsx
@@ -1,5 +1,5 @@
import * as React from 'react';
-import { getCephHealthState, WatchCephResource } from '@odf/ocs/utils';
+import { getCephsHealthState, WatchCephResources } from '@odf/ocs/utils';
import { healthStateMessage } from '@odf/shared/dashboards/status-card/states';
import { CephClusterModel } from '@odf/shared/models';
import { Status } from '@odf/shared/status/Status';
@@ -8,6 +8,7 @@ import { referenceForModel } from '@odf/shared/utils';
import {
WatchK8sResults,
StatusPopupSection,
+ HealthState,
} from '@openshift-console/dynamic-plugin-sdk';
import { Link } from 'react-router-dom-v5-compat';
import { Stack, StackItem } from '@patternfly/react-core';
@@ -16,7 +17,7 @@ import '@odf/shared/popup/status-popup.scss';
export const StoragePopover: React.FC = ({ ceph }) => {
const { t } = useCustomTranslation();
- const health = getCephHealthState({ ceph }, t);
+ const health = getCephsHealthState({ ceph }, t);
const value = health.message || healthStateMessage(health.state, t);
const operatorName = t('Data Foundation');
@@ -34,7 +35,12 @@ export const StoragePopover: React.FC = ({ ceph }) => {
>
{operatorName}
-
+
@@ -42,7 +48,7 @@ export const StoragePopover: React.FC = ({ ceph }) => {
);
};
-export { getCephHealthState as healthHandler };
+export { getCephsHealthState as healthHandler };
export const healthResource = {
ceph: {
@@ -52,4 +58,4 @@ export const healthResource = {
},
};
-type StoragePopoverProps = WatchK8sResults;
+type StoragePopoverProps = WatchK8sResults;
diff --git a/packages/odf/components/create-storage-system/create-steps.tsx b/packages/odf/components/create-storage-system/create-steps.tsx
index ddaadf36e..df2d42f03 100644
--- a/packages/odf/components/create-storage-system/create-steps.tsx
+++ b/packages/odf/components/create-storage-system/create-steps.tsx
@@ -22,7 +22,8 @@ export const createSteps = (
dispatch: WizardDispatch,
infraType: string,
hasOCS: boolean,
- supportedExternalStorage: ExternalStorage[]
+ supportedExternalStorage: ExternalStorage[],
+ hasMultipleClusters: boolean
): WizardStep[] => {
const {
backingStorage,
@@ -36,7 +37,7 @@ export const createSteps = (
connectionDetails,
dataProtection,
} = state;
- const { externalStorage, deployment } = backingStorage;
+ const { systemNamespace, externalStorage, deployment } = backingStorage;
const { encryption, kms } = securityAndNetwork;
const isMCG = deployment === DeploymentType.MCG;
@@ -51,7 +52,7 @@ export const createSteps = (
storageClass={storageClass}
volumeSetName={createLocalVolumeSet.volumeSetName}
nodes={nodes}
- resourceProfile={capacityAndNodes.resourceProfile}
+ systemNamespace={systemNamespace}
/>
),
},
@@ -62,6 +63,7 @@ export const createSteps = (
securityAndNetworkState={securityAndNetwork}
dispatch={dispatch}
infraType={infraType}
+ systemNamespace={systemNamespace}
/>
),
},
@@ -73,7 +75,8 @@ export const createSteps = (
encryption={encryption}
kms={kms}
dispatch={dispatch}
- isMCG
+ isMCG={isMCG}
+ systemNamespace={systemNamespace}
/>
),
},
@@ -83,6 +86,7 @@ export const createSteps = (
),
@@ -103,6 +107,7 @@ export const createSteps = (
connectionDetailState={connectionDetails}
externalStorage={externalStorage}
supportedExternalStorage={supportedExternalStorage}
+ systemNamespace={systemNamespace}
/>
),
},
@@ -141,6 +146,7 @@ export const createSteps = (
nodes={nodes}
stepIdReached={stepIdReached}
isMCG={isMCG}
+ systemNamespace={systemNamespace}
/>
),
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.scss b/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.scss
index d8d718fbf..e76a22b03 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.scss
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.scss
@@ -27,6 +27,3 @@
.odf-backing-store__tls--margin-top {
margin-top: var(--pf-global--spacer--md);
}
-
-
-
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.tsx
index 6053d9873..8eb547fc0 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.tsx
@@ -15,13 +15,13 @@ import { useK8sGet } from '@odf/shared/hooks/k8s-get-hook';
import {
ClusterServiceVersionModel,
StorageClassModel,
+ OCSStorageClusterModel,
} from '@odf/shared/models';
import { getName } from '@odf/shared/selectors';
import {
ListKind,
StorageClassResourceKind,
ClusterServiceVersionKind,
- StorageSystemKind,
} from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { isDefaultClass, getODFCsv, getGVKLabel } from '@odf/shared/utils';
@@ -54,6 +54,9 @@ const RHCS_SUPPORTED_INFRA = [
'IBMCloud',
];
+// ODF watches only 2 namespaces (other one is operator install namespace)
+const OCS_MULTIPLE_CLUSTER_NS = 'openshift-storage-extended';
+
const ExternalSystemSelection: React.FC = ({
dispatch,
stepIdReached,
@@ -164,21 +167,14 @@ type StorageClassSelectionProps = {
selected: WizardState['storageClass'];
};
-const formatStorageSystemList = (
- storageSystems: StorageSystemKind[] = []
-): StorageSystemSet =>
- storageSystems.reduce(
- (kinds: StorageSystemSet, ss: StorageSystemKind) => kinds.add(ss.spec.kind),
- new Set()
- );
-
-type StorageSystemSet = Set;
-
export const BackingStorage: React.FC = ({
state,
storageClass,
dispatch,
- storageSystems,
+ hasOCS,
+ hasExternal,
+ hasInternal,
+ hasMultipleClusters,
infraType,
error,
loaded,
@@ -206,18 +202,19 @@ export const BackingStorage: React.FC = ({
ListKind
>(ClusterServiceVersionModel, null, odfNamespace);
- const formattedSS: StorageSystemSet = formatStorageSystemList(storageSystems);
- const hasOCS: boolean = formattedSS.has(STORAGE_CLUSTER_SYSTEM_KIND);
-
- const odfCsv = getODFCsv(csvList?.items);
- const supportedODFVendors = getSupportedVendors(odfCsv);
-
const isFullDeployment = deployment === DeploymentType.FULL;
- const enableRhcs =
- RHCS_SUPPORTED_INFRA.includes(infraType) && isFullDeployment;
+ const isNonRHCSExternalType =
+ type === BackingStorageType.EXTERNAL &&
+ externalStorage !== OCSStorageClusterModel.kind;
- const allowedExternalStorage: ExternalStorage[] =
- !enableRhcs || hasOCS
+ const allowedExternalStorage: ExternalStorage[] = React.useMemo(() => {
+ const odfCsv = getODFCsv(csvList?.items);
+ const supportedODFVendors = getSupportedVendors(odfCsv);
+ const enableRhcs =
+ RHCS_SUPPORTED_INFRA.includes(infraType) && isFullDeployment;
+
+ // Only single external RHCS is allowed
+ return !enableRhcs || hasExternal
? supportedExternalStorage.filter(({ model }) => {
const kind = getGVKLabel(model);
return (
@@ -226,11 +223,41 @@ export const BackingStorage: React.FC = ({
);
})
: supportedExternalStorage;
+ }, [
+ isFullDeployment,
+ infraType,
+ csvList,
+ hasExternal,
+ supportedExternalStorage,
+ ]);
+
+ React.useEffect(() => {
+ /*
+ * Set the namespace where the StorageSystem will be created.
+ * First cluster should only be created in ODF install namespace.
+ */
+ const setODFInstallNsAsDefault = !hasOCS || isNonRHCSExternalType;
+ if (isODFNsLoaded && !odfNsLoadError) {
+ dispatch({
+ type: 'backingStorage/setSystemNamespace',
+ payload: setODFInstallNsAsDefault
+ ? odfNamespace
+ : OCS_MULTIPLE_CLUSTER_NS,
+ });
+ }
+ }, [
+ dispatch,
+ odfNamespace,
+ isODFNsLoaded,
+ odfNsLoadError,
+ hasOCS,
+ isNonRHCSExternalType,
+ ]);
React.useEffect(() => {
/*
* Allow pre selecting the "external connection" option instead of the "existing" option
- * if an OCS Storage System is already created and no external system is created.
+ * if an OCS Storage System is already created.
*/
if (hasOCS && allowedExternalStorage.length) {
dispatch({
@@ -252,7 +279,11 @@ export const BackingStorage: React.FC = ({
* Allow pre selecting the "create new storage class" option instead of the "existing" option
* if no storage classes present. This is true for a baremetal platform.
*/
- if (sc?.items?.length === 0 && type !== BackingStorageType.EXTERNAL) {
+ if (
+ sc?.items?.length === 0 &&
+ type !== BackingStorageType.EXTERNAL &&
+ !hasInternal
+ ) {
dispatch({
type: 'backingStorage/setType',
payload: BackingStorageType.LOCAL_DEVICES,
@@ -265,10 +296,11 @@ export const BackingStorage: React.FC = ({
},
});
}
- }, [deployment, dispatch, sc, type]);
+ }, [deployment, dispatch, sc, type, hasInternal]);
const showExternalStorageSelection =
type === BackingStorageType.EXTERNAL && allowedExternalStorage.length;
+ // Only a single internal cluster is allowed; it should be created before the external cluster
const showStorageClassSelection =
!hasOCS && type === BackingStorageType.EXISTING;
@@ -283,6 +315,9 @@ export const BackingStorage: React.FC = ({
isDefaultClass(item)
);
+ // Internal cluster should be created (or should already exist) before external mode cluster creation
+ // Block creation of more than two OCS clusters
+ // Block internal cluster creation after external cluster already created
return (
= ({
className="odf-backing-store__radio--margin-bottom"
/>
- {isFullDeployment && !hasOCS && (
+ {/* Should be visible for both external and internal mode (even if one cluster already exists) */}
+ {isFullDeployment && !hasMultipleClusters && (
<>
= ({
/>
>
)}
-
- dispatch({
- type: 'backingStorage/useExternalPostgres',
- payload: !useExternalPostgres,
- })
- }
- className="odf-backing-store__radio--margin-bottom"
- />
- {useExternalPostgres && (
+ {/* Should be visible for both external and internal mode (but only single NooBaa is allowed, so should be hidden if any cluster already exists) */}
+ {!hasOCS && (
+
+ dispatch({
+ type: 'backingStorage/useExternalPostgres',
+ payload: !useExternalPostgres,
+ })
+ }
+ className="odf-backing-store__radio--margin-bottom"
+ />
+ )}
+ {useExternalPostgres && !hasOCS && (
= ({
type BackingStorageProps = {
dispatch: WizardDispatch;
state: WizardState['backingStorage'];
- storageSystems: StorageSystemKind[];
+ hasOCS: boolean;
+ hasExternal: boolean;
+ hasInternal: boolean;
+ hasMultipleClusters: boolean;
storageClass: WizardState['storageClass'];
stepIdReached: WizardState['stepIdReached'];
infraType: string;
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/capacity-and-nodes-step.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/capacity-and-nodes-step.tsx
index 7ff0838cd..89f0a79b6 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/capacity-and-nodes-step.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/capacity-and-nodes-step.tsx
@@ -43,7 +43,6 @@ import {
TextContent,
TextInput,
} from '@patternfly/react-core';
-import { useODFNamespaceSelector } from '../../../../redux';
import { ValidationMessage } from '../../../utils/common-odf-install-el';
import { ErrorHandler } from '../../error-handler';
import { WizardDispatch, WizardNodeState, WizardState } from '../../reducer';
@@ -66,12 +65,10 @@ const onResourceProfileChange = _.curry(
);
const SelectNodesText: React.FC = React.memo(
- ({ text }) => {
+ ({ text, systemNamespace }) => {
const { t } = useCustomTranslation();
- const { odfNamespace } = useODFNamespaceSelector();
-
- const label = `cluster.ocs.openshift.io/${odfNamespace}=""`;
+ const label = `cluster.ocs.openshift.io/${systemNamespace}=""`;
return (
{text}
@@ -89,7 +86,10 @@ const SelectNodesText: React.FC = React.memo(
);
SelectNodesText.displayName = 'SelectNodesText';
-type SelectNodesTextProps = { text: JSX.Element };
+type SelectNodesTextProps = {
+ text: JSX.Element;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
+};
const EnableTaintNodes: React.FC = ({
dispatch,
@@ -157,6 +157,7 @@ type SelectCapacityAndNodesProps = {
enableTaint: WizardState['capacityAndNodes']['enableTaint'];
enableSingleReplicaPool: WizardState['capacityAndNodes']['enableSingleReplicaPool'];
resourceProfile: WizardState['capacityAndNodes']['resourceProfile'];
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
const SelectCapacityAndNodes: React.FC = ({
@@ -166,6 +167,7 @@ const SelectCapacityAndNodes: React.FC = ({
enableTaint,
enableSingleReplicaPool,
resourceProfile,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
@@ -233,10 +235,15 @@ const SelectCapacityAndNodes: React.FC = ({
text={t(
'Select at least 3 nodes preferably in 3 different zones. It is recommended to start with at least 14 CPUs and 34 GiB per node.'
)}
+ systemNamespace={systemNamespace}
/>
-
+
= ({
nodes,
enableSingleReplicaPool,
resourceProfile,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
const [pv, pvLoaded, pvLoadError] =
@@ -407,6 +415,7 @@ const SelectedCapacityAndNodes: React.FC = ({
? attachDevicesWithArbiter(t, storageClassName)
: attachDevices(t, storageClassName)
}
+ systemNamespace={systemNamespace}
/>
@@ -440,6 +449,7 @@ type SelectedCapacityAndNodesProps = {
dispatch: WizardDispatch;
nodes: WizardNodeState[];
resourceProfile: WizardState['capacityAndNodes']['resourceProfile'];
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
export const CapacityAndNodes: React.FC = ({
@@ -448,7 +458,7 @@ export const CapacityAndNodes: React.FC = ({
storageClass,
volumeSetName,
nodes,
- resourceProfile,
+ systemNamespace,
}) => {
const {
capacity,
@@ -456,6 +466,7 @@ export const CapacityAndNodes: React.FC = ({
enableTaint,
arbiterLocation,
enableSingleReplicaPool,
+ resourceProfile,
} = state;
const isNoProvisioner = storageClass.provisioner === NO_PROVISIONER;
@@ -479,6 +490,7 @@ export const CapacityAndNodes: React.FC = ({
capacity={capacity}
enableSingleReplicaPool={enableSingleReplicaPool}
resourceProfile={resourceProfile}
+ systemNamespace={systemNamespace}
/>
) : (
= ({
nodes={nodes}
enableSingleReplicaPool={enableSingleReplicaPool}
resourceProfile={resourceProfile}
+ systemNamespace={systemNamespace}
/>
)}
{!!validations.length &&
@@ -503,7 +516,7 @@ type CapacityAndNodesProps = {
state: WizardState['capacityAndNodes'];
storageClass: WizardState['storageClass'];
nodes: WizardState['nodes'];
- resourceProfile: WizardState['capacityAndNodes']['resourceProfile'];
volumeSetName: WizardState['createLocalVolumeSet']['volumeSetName'];
dispatch: WizardDispatch;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/body.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/body.tsx
index af97c27bd..784c02547 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/body.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/body.tsx
@@ -99,6 +99,7 @@ export const LocalVolumeSetBody: React.FC = ({
nodes,
allNodes,
defaultVolumeMode,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
const [radio, setRadio] = React.useState(FilterDiskBy.ALL_NODES);
@@ -311,7 +312,11 @@ export const LocalVolumeSetBody: React.FC = ({
id="create-lvs-radio-select-nodes"
/>
{radio === FilterDiskBy.SELECTED_NODES && (
-
+
)}
= ({
nodes,
stepIdReached,
isMCG,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
const allNodes = React.useRef([]);
@@ -379,6 +380,7 @@ export const CreateLocalVolumeSet: React.FC = ({
? diskModeDropdownItems.FILESYSTEM
: diskModeDropdownItems.BLOCK
}
+ systemNamespace={systemNamespace}
/>
@@ -434,4 +436,5 @@ type CreateLocalVolumeSetProps = {
stepIdReached: WizardState['stepIdReached'];
dispatch: WizardDispatch;
isMCG: boolean;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/create-storage-class-step/create-storage-class-step.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/create-storage-class-step/create-storage-class-step.tsx
index ff7d7f1ed..40bf6b83d 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/create-storage-class-step/create-storage-class-step.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/create-storage-class-step/create-storage-class-step.tsx
@@ -66,6 +66,7 @@ export const CreateStorageClass: React.FC = ({
const [scData, scLoaded, scLoadError] =
useK8sList(StorageClassModel);
+ // Non-RHCS StorageSystems are only created in ODF install namespace
const [secretData, secretLoaded, secretLoadError] =
useSafeK8sList(SecretModel, odfNamespace);
const [flashSystemData, flashSystemLoaded, flashSystemLoadError] =
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/review-and-create-step/review-and-create-step.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/review-and-create-step/review-and-create-step.tsx
index 74edf71cc..b6833f912 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/review-and-create-step/review-and-create-step.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/review-and-create-step/review-and-create-step.tsx
@@ -36,7 +36,10 @@ export const ReviewItem = ({ children, title }) => (
export const ReviewAndCreate: React.FC = ({
state,
+ // any internal or external exists
hasOCS,
+ // both internal and external exist
+ hasMultipleClusters,
supportedExternalStorage,
}) => {
const { t } = useCustomTranslation();
@@ -126,7 +129,7 @@ export const ReviewAndCreate: React.FC = ({
})}
)}
- {deployment === DeploymentType.FULL && !hasOCS && (
+ {deployment === DeploymentType.FULL && !hasMultipleClusters && (
{t(
'Set Ceph RBD as the default StorageClass: {{isCephRBDSetAsDefault}}',
@@ -259,5 +262,6 @@ export const ReviewAndCreate: React.FC = ({
type ReviewAndCreateProps = {
state: WizardState;
hasOCS: boolean;
+ hasMultipleClusters: boolean;
supportedExternalStorage: ExternalStorage[];
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/configure.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/configure.tsx
index 59a1ec0eb..687d6c858 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/configure.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/configure.tsx
@@ -1,5 +1,4 @@
import * as React from 'react';
-import { useSafeK8sWatchResources } from '@odf/core/hooks';
import { NetworkAttachmentDefinitionModel } from '@odf/core/models';
import TechPreviewBadge from '@odf/shared/badges/TechPreviewBadge';
import { SingleSelectDropdown } from '@odf/shared/dropdown/singleselectdropdown';
@@ -12,11 +11,13 @@ import { referenceForModel } from '@odf/shared/utils';
import {
ResourceIcon,
WatchK8sResults,
+ useK8sWatchResources,
} from '@openshift-console/dynamic-plugin-sdk';
import { K8sResourceCommon } from '@openshift-console/dynamic-plugin-sdk-internal/lib/extensions/console-types';
import * as _ from 'lodash-es';
import { FormGroup, Radio, SelectOption } from '@patternfly/react-core';
import { NetworkType, NADSelectorType } from '../../../../types';
+import { WizardState } from '../../reducer';
import './configure.scss';
const resources = (ns: string) => ({
@@ -79,13 +80,14 @@ export const MultusDropdown: React.FC = ({
setNetwork,
clusterNetwork,
publicNetwork,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
const clusterNetworkUID = getUID(clusterNetwork);
const publicNetworkUID = getUID(publicNetwork);
- const networkResources = useSafeK8sWatchResources(resources);
+ const networkResources = useK8sWatchResources(resources(systemNamespace));
const networkDevices: K8sResourceCommon[] = React.useMemo(() => {
const { loaded: resourcesLoaded, error: resourcesLoadError } =
@@ -190,6 +192,7 @@ type MultusDropdownProps = {
setNetwork: (type: NADSelectorType, resource: K8sResourceCommon) => void;
clusterNetwork: NetworkAttachmentDefinitionKind;
publicNetwork: NetworkAttachmentDefinitionKind;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
export const NetworkFormGroup: React.FC = ({
@@ -198,6 +201,7 @@ export const NetworkFormGroup: React.FC = ({
setNetwork,
clusterNetwork,
publicNetwork,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
@@ -251,6 +255,7 @@ export const NetworkFormGroup: React.FC = ({
setNetwork={setNetwork}
clusterNetwork={clusterNetwork}
publicNetwork={publicNetwork}
+ systemNamespace={systemNamespace}
/>
)}
>
@@ -263,4 +268,5 @@ type NetworkFormGroupProps = {
setNetwork: (type: NADSelectorType, resource: K8sResourceCommon) => void;
clusterNetwork: NetworkAttachmentDefinitionKind;
publicNetwork: NetworkAttachmentDefinitionKind;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/encryption.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/encryption.tsx
index e8eab0e56..deb7c706c 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/encryption.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/encryption.tsx
@@ -91,6 +91,7 @@ const KMSConnection: React.FC = ({
dispatch,
infraType,
isMCG,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
@@ -136,6 +137,7 @@ const KMSConnection: React.FC = ({
infraType={infraType}
className="odf-security-kms-connection"
isMCG={isMCG}
+ systemNamespace={systemNamespace}
isWizardFlow
/>
)
@@ -152,6 +154,7 @@ export const Encryption: React.FC = ({
infraType,
isMCG,
isExternal,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
const [encryptionChecked, setEncryptionChecked] = React.useState(
@@ -260,6 +263,7 @@ export const Encryption: React.FC = ({
dispatch={dispatch}
infraType={infraType}
isMCG={!!isMCG}
+ systemNamespace={systemNamespace}
/>
>
)
@@ -291,7 +295,8 @@ export const EncryptionForm: React.FC = ({
kms,
dispatch,
infraType,
- isMCG, // eslint-disable-line @typescript-eslint/no-unused-vars
+ isMCG,
+ systemNamespace,
}) => {
// enclosed in a "Form" so that child components can use default pf classes
return (
@@ -301,7 +306,8 @@ export const EncryptionForm: React.FC = ({
encryption={encryption}
kms={kms}
dispatch={dispatch}
- isMCG
+ isMCG={isMCG}
+ systemNamespace={systemNamespace}
/>
);
@@ -313,4 +319,5 @@ type EncryptionProps = {
infraType: string;
isMCG?: boolean;
isExternal?: boolean;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/security-and-network-step.tsx b/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/security-and-network-step.tsx
index ae56519ae..85b71757e 100644
--- a/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/security-and-network-step.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system-steps/security-and-network-step/security-and-network-step.tsx
@@ -16,6 +16,7 @@ export const SecurityAndNetwork: React.FC = ({
connectionDetailState,
externalStorage,
supportedExternalStorage,
+ systemNamespace,
}) => {
const {
networkType: nwType,
@@ -56,6 +57,7 @@ export const SecurityAndNetwork: React.FC = ({
dispatch={dispatch}
infraType={infraType}
isExternal={isExternal}
+ systemNamespace={systemNamespace}
/>
{!isExternal && (
= ({
setNetwork={setNetwork}
clusterNetwork={clusterNetwork}
publicNetwork={publicNetwork}
+ systemNamespace={systemNamespace}
/>
)}
{isExternal && (
@@ -86,4 +89,5 @@ type SecurityAndNetworkProps = {
connectionDetailState?: WizardState['connectionDetails'];
externalStorage?: WizardState['backingStorage']['externalStorage'];
supportedExternalStorage?: ExternalStorage[];
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/create-storage-system/create-storage-system.tsx b/packages/odf/components/create-storage-system/create-storage-system.tsx
index b605a6c99..944efd7df 100644
--- a/packages/odf/components/create-storage-system/create-storage-system.tsx
+++ b/packages/odf/components/create-storage-system/create-storage-system.tsx
@@ -1,18 +1,18 @@
import * as React from 'react';
+import { useODFSystemFlagsSelector } from '@odf/core/redux';
import {
StorageClassWizardStepExtensionProps as ExternalStorage,
isStorageClassWizardStep,
} from '@odf/odf-plugin-sdk/extensions';
import { useK8sGet } from '@odf/shared/hooks/k8s-get-hook';
import { InfrastructureModel } from '@odf/shared/models';
-import { ODFStorageSystem } from '@odf/shared/models';
-import { ListKind, StorageSystemKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { getInfrastructurePlatform } from '@odf/shared/utils';
import { useResolvedExtensions } from '@openshift-console/dynamic-plugin-sdk';
import { useLocation } from 'react-router-dom-v5-compat';
import { Wizard, WizardStep } from '@patternfly/react-core';
-import { Steps, StepsName, STORAGE_CLUSTER_SYSTEM_KIND } from '../../constants';
+import { Steps, StepsName } from '../../constants';
+import { hasAnyExternalOCS, hasAnyInternalOCS } from '../../utils';
import { createSteps } from './create-steps';
import { BackingStorage } from './create-storage-system-steps';
import { EXTERNAL_CEPH_STORAGE } from './external-ceph-storage/system-connection-details';
@@ -27,21 +27,26 @@ const CreateStorageSystem: React.FC<{}> = () => {
reducer,
initialState
);
- const [ssList, ssLoaded, ssLoadError] =
- useK8sGet>(ODFStorageSystem);
+
const [infra, infraLoaded, infraLoadError] = useK8sGet(
InfrastructureModel,
'cluster'
);
+
const [extensions, extensionsResolved] = useResolvedExtensions(
isStorageClassWizardStep
);
+ const { systemFlags, areFlagsLoaded, flagsLoadError } =
+ useODFSystemFlagsSelector();
const infraType = getInfrastructurePlatform(infra);
const { pathname: url } = useLocation();
let wizardSteps: WizardStep[] = [];
let hasOCS: boolean = false;
+ let hasExternal: boolean = false;
+ let hasInternal: boolean = false;
+ let hasMultipleClusters: boolean = false;
const supportedExternalStorage: ExternalStorage[] = React.useMemo(() => {
if (extensionsResolved) {
@@ -55,17 +60,20 @@ const CreateStorageSystem: React.FC<{}> = () => {
return EXTERNAL_CEPH_STORAGE;
}, [extensions, extensionsResolved]);
- if (ssLoaded && !ssLoadError && infraLoaded && !infraLoadError) {
- hasOCS = ssList?.items?.some(
- (ss) => ss.spec.kind === STORAGE_CLUSTER_SYSTEM_KIND
- );
+ if (areFlagsLoaded && !flagsLoadError && infraLoaded && !infraLoadError) {
+ hasExternal = hasAnyExternalOCS(systemFlags);
+ hasInternal = hasAnyInternalOCS(systemFlags);
+ hasOCS = hasExternal || hasInternal;
+ hasMultipleClusters = hasExternal && hasInternal;
+
wizardSteps = createSteps(
t,
state,
dispatch,
infraType,
hasOCS,
- supportedExternalStorage
+ supportedExternalStorage,
+ hasMultipleClusters
);
}
@@ -78,11 +86,14 @@ const CreateStorageSystem: React.FC<{}> = () => {
state={state.backingStorage}
storageClass={state.storageClass}
dispatch={dispatch}
- storageSystems={ssList?.items || []}
+ hasOCS={hasOCS}
+ hasExternal={hasExternal}
+ hasInternal={hasInternal}
+ hasMultipleClusters={hasMultipleClusters}
stepIdReached={state.stepIdReached}
infraType={infraType}
- error={ssLoadError || infraLoadError}
- loaded={ssLoaded && infraLoaded}
+ error={infraLoadError || flagsLoadError}
+ loaded={infraLoaded && areFlagsLoaded}
supportedExternalStorage={supportedExternalStorage}
/>
),
@@ -102,7 +113,10 @@ const CreateStorageSystem: React.FC<{}> = () => {
hasOCS={hasOCS}
dispatch={dispatch}
disableNext={
- !ssLoaded || !!ssLoadError || !infraLoaded || !!infraLoadError
+ !areFlagsLoaded ||
+ !!flagsLoadError ||
+ !infraLoaded ||
+ !!infraLoadError
}
supportedExternalStorage={supportedExternalStorage}
/>
diff --git a/packages/odf/components/create-storage-system/footer.tsx b/packages/odf/components/create-storage-system/footer.tsx
index d351dc66f..1e7ddd0e5 100644
--- a/packages/odf/components/create-storage-system/footer.tsx
+++ b/packages/odf/components/create-storage-system/footer.tsx
@@ -16,11 +16,7 @@ import {
} from '@patternfly/react-core';
import { Steps, StepsName, STORAGE_CLUSTER_SYSTEM_KIND } from '../../constants';
import './create-storage-system.scss';
-import {
- MINIMUM_NODES,
- OCS_EXTERNAL_CR_NAME,
- OCS_INTERNAL_CR_NAME,
-} from '../../constants';
+import { MINIMUM_NODES } from '../../constants';
import { NetworkType, BackingStorageType, DeploymentType } from '../../types';
import {
labelOCSNamespace,
@@ -36,9 +32,13 @@ import {
createStorageSystem,
labelNodes,
taintNodes,
+ createMultiClusterNs,
} from './payloads';
import { WizardCommonProps, WizardState } from './reducer';
+const OCS_INTERNAL_CR_NAME = 'ocs-storagecluster';
+const OCS_EXTERNAL_CR_NAME = 'ocs-external-storagecluster';
+
const validateBackingStorageStep = (
backingStorage: WizardState['backingStorage'],
sc: WizardState['storageClass']
@@ -183,6 +183,7 @@ const handleReviewAndCreateNext = async (
capacityAndNodes,
} = state;
const {
+ systemNamespace,
externalStorage,
deployment,
type,
@@ -194,19 +195,7 @@ const handleReviewAndCreateNext = async (
const isRhcs: boolean = externalStorage === OCSStorageClusterModel.kind;
const isMCG: boolean = deployment === DeploymentType.MCG;
- const createAdditionalFeatureResources = async () => {
- if (capacityAndNodes.enableTaint && !isMCG) await taintNodes(nodes);
-
- if (encryption.advanced)
- await Promise.all(
- createClusterKmsResources(
- kms.providerState,
- odfNamespace,
- kms.provider,
- isMCG
- )
- );
-
+ const createNooBaaResources = async () => {
if (useExternalPostgres) {
let keyTexts = { private: '', public: '' };
if (externalPostgres.tls.enableClientSideCerts) {
@@ -217,7 +206,7 @@ const handleReviewAndCreateNext = async (
}
await Promise.all(
createNoobaaExternalPostgresResources(
- odfNamespace,
+ systemNamespace,
externalPostgres,
keyTexts
)
@@ -225,23 +214,51 @@ const handleReviewAndCreateNext = async (
}
};
+ const createAdditionalFeatureResources = async () => {
+ if (capacityAndNodes.enableTaint && !isMCG) await taintNodes(nodes);
+
+ /**
+ * CSI KMS ConfigMap and Secrets always needs to be created in ODF install namespace (that is, where Rook is deployed),
+ * whereas OCS KMS ConfigMap and Secrets needs to be created in the namespace where Ceph is being deployed (StorageSystem namespace).
+ * ToDo: External mode do not support KMS and only single Internal mode is allowed, so it should work for now,
+ * but in future, if need arises, first check whether CSI ConfigMap already exists or not before creating KMS related resources for multiple clusters.
+ * Also, change name of "ceph-csi-kms-token" token Secret (if need to create multiple in Rook namespace).
+ */
+ if (encryption.advanced)
+ // as currently only one internal cluster is allowed, "systemNamespace" (where the internal cluster is being created) and "odfNamespace" (where ODF is installed) will be the same
+ await Promise.all(
+ createClusterKmsResources(
+ kms.providerState,
+ systemNamespace,
+ odfNamespace,
+ kms.provider,
+ isMCG
+ )
+ );
+
+ await createNooBaaResources();
+ };
+
try {
- await labelOCSNamespace(odfNamespace);
+ systemNamespace === odfNamespace
+ ? await labelOCSNamespace(systemNamespace)
+ : await createMultiClusterNs(systemNamespace);
+
if (isMCG) {
await createAdditionalFeatureResources();
- await createStorageCluster(state, odfNamespace);
+ await createStorageCluster(state, systemNamespace, OCS_INTERNAL_CR_NAME);
} else if (
type === BackingStorageType.EXISTING ||
type === BackingStorageType.LOCAL_DEVICES
) {
- await labelNodes(nodes, odfNamespace);
+ await labelNodes(nodes, systemNamespace);
await createAdditionalFeatureResources();
await createStorageSystem(
OCS_INTERNAL_CR_NAME,
STORAGE_CLUSTER_SYSTEM_KIND,
- odfNamespace
+ systemNamespace
);
- await createStorageCluster(state, odfNamespace);
+ await createStorageCluster(state, systemNamespace, OCS_INTERNAL_CR_NAME);
} else if (type === BackingStorageType.EXTERNAL) {
const { createPayload, model, displayName, waitToCreate } =
getExternalStorage(externalStorage, supportedExternalStorage) || {};
@@ -259,17 +276,24 @@ const handleReviewAndCreateNext = async (
systemName: subSystemName,
state: subSystemState,
model,
- namespace: odfNamespace,
+ namespace: systemNamespace,
storageClassName: storageClass.name,
inTransitStatus: inTransitChecked,
});
- await createStorageSystem(subSystemName, subSystemKind, odfNamespace);
+ await createStorageSystem(subSystemName, subSystemKind, systemNamespace);
+ // create an internal mode cluster along with the Non-RHCS StorageSystem (if no Ceph cluster exists yet)
if (!hasOCS && !isRhcs) {
- await labelNodes(nodes, odfNamespace);
+ await labelNodes(nodes, systemNamespace);
await createAdditionalFeatureResources();
- await createStorageCluster(state, odfNamespace);
+ await createStorageCluster(
+ state,
+ systemNamespace,
+ OCS_INTERNAL_CR_NAME
+ );
}
+ // create additional NooBaa resources for external RHCS cluster (if opted via checkbox)
+ if (!hasOCS && isRhcs) await createNooBaaResources();
if (!isRhcs && !!waitToCreate) await waitToCreate(model);
await createExternalSubSystem(subSystemPayloads);
}
diff --git a/packages/odf/components/create-storage-system/payloads.ts b/packages/odf/components/create-storage-system/payloads.ts
index f9c70dc21..87a514ab7 100644
--- a/packages/odf/components/create-storage-system/payloads.ts
+++ b/packages/odf/components/create-storage-system/payloads.ts
@@ -17,6 +17,7 @@ import {
OCSStorageClusterModel,
ODFStorageSystem,
NodeModel,
+ NamespaceModel,
} from '@odf/shared/models';
import { Patch, StorageSystemKind } from '@odf/shared/types';
import { getAPIVersionForModel, k8sPatchByName } from '@odf/shared/utils';
@@ -37,19 +38,19 @@ import { WizardNodeState, WizardState } from './reducer';
export const createStorageSystem = async (
subSystemName: string,
subSystemKind: string,
- odfNamespace: string
+ systemNamespace: string
) => {
const payload: StorageSystemKind = {
apiVersion: getAPIVersionForModel(ODFStorageSystem),
kind: ODFStorageSystem.kind,
metadata: {
name: `${subSystemName}-storagesystem`,
- namespace: odfNamespace,
+ namespace: systemNamespace,
},
spec: {
name: subSystemName,
kind: subSystemKind,
- namespace: odfNamespace,
+ namespace: systemNamespace,
},
};
return k8sCreate({ model: ODFStorageSystem, data: payload });
@@ -142,7 +143,8 @@ export const createNoobaaExternalPostgresResources = (
export const createStorageCluster = async (
state: WizardState,
- odfNamespace: string
+ storageClusterNamespace: string,
+ storageClusterName: string
) => {
const {
storageClass,
@@ -214,11 +216,12 @@ export const createStorageCluster = async (
shouldSetCephRBDAsDefault,
isSingleReplicaPoolEnabled: enableSingleReplicaPool,
enableRDRPreparation,
- odfNamespace,
+ storageClusterNamespace,
enableNoobaaClientSideCerts: externalPostgres.tls.enableClientSideCerts,
useExternalPostgres: useExternalPostgres,
allowNoobaaPostgresSelfSignedCerts:
externalPostgres.tls.allowSelfSignedCerts,
+ storageClusterName,
});
return k8sCreate({ model: OCSStorageClusterModel, data: payload });
@@ -226,10 +229,10 @@ export const createStorageCluster = async (
export const labelNodes = async (
nodes: WizardNodeState[],
- odfNamespace: string
+ namespace: string
) => {
- const labelPath = `/metadata/labels/cluster.ocs.openshift.io~1${odfNamespace}`;
- const storageLabel = cephStorageLabel(odfNamespace);
+ const labelPath = `/metadata/labels/cluster.ocs.openshift.io~1${namespace}`;
+ const storageLabel = cephStorageLabel(namespace);
const patch: Patch[] = [
{
op: 'add',
@@ -280,3 +283,14 @@ export const createExternalSubSystem = async (subSystemPayloads: Payload[]) => {
throw err;
}
};
+
+export const createMultiClusterNs = async (systemNamespace: string) =>
+ k8sCreate({
+ model: NamespaceModel,
+ data: {
+ metadata: {
+ name: systemNamespace,
+ labels: { 'openshift.io/cluster-monitoring': 'true' },
+ },
+ },
+ });
diff --git a/packages/odf/components/create-storage-system/reducer.ts b/packages/odf/components/create-storage-system/reducer.ts
index 7cef401c1..cfdef351c 100644
--- a/packages/odf/components/create-storage-system/reducer.ts
+++ b/packages/odf/components/create-storage-system/reducer.ts
@@ -39,6 +39,7 @@ export const initialState: CreateStorageSystemState = {
nodes: [],
backingStorage: {
type: BackingStorageType.EXISTING,
+ systemNamespace: '',
enableNFS: false,
isRBDStorageClassDefault: false,
externalStorage: '',
@@ -111,6 +112,7 @@ type CreateStorageSystemState = {
nodes: WizardNodeState[];
backingStorage: {
type: BackingStorageType;
+ systemNamespace: string;
enableNFS: boolean;
isRBDStorageClassDefault: boolean;
externalStorage: string;
@@ -301,6 +303,9 @@ export const reducer: WizardReducer = (prevState, action) => {
break;
case 'backingStorage/setType':
return setBackingStorageType(newState, action.payload);
+ case 'backingStorage/setSystemNamespace':
+ newState.backingStorage.systemNamespace = action.payload;
+ break;
case 'backingStorage/enableNFS':
newState.backingStorage.enableNFS = action.payload;
break;
@@ -437,6 +442,10 @@ export type CreateStorageSystemAction =
type: 'backingStorage/setType';
payload: WizardState['backingStorage']['type'];
}
+ | {
+ type: 'backingStorage/setSystemNamespace';
+ payload: WizardState['backingStorage']['systemNamespace'];
+ }
| {
type: 'backingStorage/enableNFS';
payload: WizardState['backingStorage']['enableNFS'];
diff --git a/packages/odf/components/create-storage-system/select-nodes-table/select-nodes-table.tsx b/packages/odf/components/create-storage-system/select-nodes-table/select-nodes-table.tsx
index 794c216f6..7d12dfb4e 100644
--- a/packages/odf/components/create-storage-system/select-nodes-table/select-nodes-table.tsx
+++ b/packages/odf/components/create-storage-system/select-nodes-table/select-nodes-table.tsx
@@ -1,6 +1,5 @@
import * as React from 'react';
import { cephStorageLabel } from '@odf/core/constants';
-import { useODFNamespaceSelector } from '@odf/core/redux';
import {
getZone,
nodesWithoutTaints,
@@ -31,7 +30,7 @@ import classNames from 'classnames';
import * as _ from 'lodash-es';
import { Table, TableHeader, TableBody } from '@patternfly/react-table';
import { IRow, sortable } from '@patternfly/react-table';
-import { WizardNodeState } from '../reducer';
+import { WizardNodeState, WizardState } from '../reducer';
import { SelectNodesTableFooter } from './select-nodes-table-footer';
import './select-nodes-table.scss';
@@ -122,11 +121,10 @@ const InternalNodeTable: React.FC = ({
onRowSelected,
nodesData,
disableLabeledNodes,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
- const { odfNamespace } = useODFNamespaceSelector();
-
const getColumns = React.useMemo(
() => [
{
@@ -184,7 +182,7 @@ const InternalNodeTable: React.FC = ({
setVisibleRows,
selectedNodes,
setSelectedNodes,
- odfNamespace,
+ systemNamespace,
disableLabeledNodes
)}
cells={getColumns}
@@ -205,12 +203,14 @@ type NodeTableProps = {
onRowSelected: (selectedNodes: NodeKind[]) => void;
nodesData: NodeKind[];
disableLabeledNodes: boolean;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
export const SelectNodesTable: React.FC = ({
nodes,
onRowSelected,
disableLabeledNodes = false,
+ systemNamespace,
}) => {
const [nodesData, nodesLoaded, nodesLoadError] = useK8sWatchResource<
NodeKind[]
@@ -241,6 +241,7 @@ export const SelectNodesTable: React.FC = ({
onRowSelected={onRowSelected}
nodesData={filteredData as NodeKind[]}
disableLabeledNodes={disableLabeledNodes}
+ systemNamespace={systemNamespace}
/>
@@ -253,4 +254,5 @@ type NodeSelectTableProps = {
nodes: WizardNodeState[];
onRowSelected: (selectedNodes: NodeKind[]) => void;
disableLabeledNodes?: boolean;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/kms-config/kms-config.tsx b/packages/odf/components/kms-config/kms-config.tsx
index bf3650e22..018d87921 100644
--- a/packages/odf/components/kms-config/kms-config.tsx
+++ b/packages/odf/components/kms-config/kms-config.tsx
@@ -41,6 +41,7 @@ export const KMSConfigure: React.FC = ({
infraType,
isWizardFlow,
isMCG,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
@@ -104,6 +105,7 @@ export const KMSConfigure: React.FC = ({
dispatch={dispatch}
className={className}
isWizardFlow={isWizardFlow}
+ systemNamespace={systemNamespace}
isMCG={isMCG}
/>
diff --git a/packages/odf/components/kms-config/providers.ts b/packages/odf/components/kms-config/providers.ts
index 04151a60a..687488981 100644
--- a/packages/odf/components/kms-config/providers.ts
+++ b/packages/odf/components/kms-config/providers.ts
@@ -13,6 +13,7 @@ export type KMSConfigureProps = {
infraType?: string;
mode?: string;
isWizardFlow?: boolean;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
isMCG?: boolean;
};
@@ -21,4 +22,5 @@ export type AdvancedKMSModalProps = {
dispatch: EncryptionDispatch;
mode?: string;
isWizardFlow?: boolean;
+ systemNamespace: WizardState['backingStorage']['systemNamespace'];
};
diff --git a/packages/odf/components/kms-config/utils.tsx b/packages/odf/components/kms-config/utils.tsx
index 0bb1a95a9..9713c5cd9 100644
--- a/packages/odf/components/kms-config/utils.tsx
+++ b/packages/odf/components/kms-config/utils.tsx
@@ -3,6 +3,7 @@ import { getName } from '@odf/shared/selectors';
import { K8sResourceKind, ConfigMapKind, SecretKind } from '@odf/shared/types';
import { getRandomChars, isValidIP } from '@odf/shared/utils';
import { k8sCreate, k8sPatch } from '@openshift-console/dynamic-plugin-sdk';
+import * as _ from 'lodash-es';
import {
KMSConfigMapName,
KMSVaultTokenSecretName,
@@ -52,11 +53,18 @@ export const isValidName = (name: string) => /^[a-zA-Z0-9_.-]*$/.test(name);
export const isLengthUnity = (items) => items.length === 1;
+const getCASecretName = (isCSISecret = false) =>
+ `${isCSISecret ? 'csi' : 'ocs'}-kms-ca-secret-${getRandomChars()}`;
+const getClientSecretName = (isCSISecret = false) =>
+ `${isCSISecret ? 'csi' : 'ocs'}-kms-client-cert-${getRandomChars()}`;
+const getClientKeySecretName = (isCSISecret = false) =>
+ `${isCSISecret ? 'csi' : 'ocs'}-kms-client-key-${getRandomChars()}`;
+
export const generateCASecret = (caCertificate: string, ns: string) => ({
apiVersion: SecretModel.apiVersion,
kind: SecretModel.kind,
metadata: {
- name: `ocs-kms-ca-secret-${getRandomChars()}`,
+ name: getCASecretName(),
namespace: ns,
},
stringData: {
@@ -71,7 +79,7 @@ export const generateClientSecret = (
apiVersion: SecretModel.apiVersion,
kind: SecretModel.kind,
metadata: {
- name: `ocs-kms-client-cert-${getRandomChars()}`,
+ name: getClientSecretName(),
namespace: ns,
},
stringData: {
@@ -83,7 +91,7 @@ export const generateClientKeySecret = (clientKey: string, ns: string) => ({
apiVersion: SecretModel.apiVersion,
kind: SecretModel.kind,
metadata: {
- name: `ocs-kms-client-key-${getRandomChars()}`,
+ name: getClientKeySecretName(),
namespace: ns,
},
stringData: {
@@ -229,36 +237,54 @@ const getCsiVaultResources = (
kms: VaultConfig,
update: boolean,
ns: string,
+ updateNameAndNamespace: boolean = false,
createAdvancedVaultResource: boolean = true
) => {
const csiKmsResources: Promise[] = [];
+ const kmsObj = _.cloneDeep(kms);
+ // needed during initial deployment, to make sure CSI and OCS resources are decoupled
+ if (updateNameAndNamespace) {
+ if (kmsObj.caCert) {
+ kmsObj.caCert.metadata.name = getCASecretName(true);
+ kmsObj.caCert.metadata.namespace = ns;
+ }
+ if (kmsObj.clientCert) {
+ kmsObj.clientCert.metadata.name = getClientSecretName(true);
+ kmsObj.clientCert.metadata.namespace = ns;
+ }
+ if (kmsObj.clientKey) {
+ kmsObj.clientKey.metadata.name = getClientKeySecretName(true);
+ kmsObj.clientKey.metadata.namespace = ns;
+ }
+ }
+
let csiConfigData: VaultConfigMap = {
KMS_PROVIDER: KmsImplementations.VAULT_TOKENS,
- KMS_SERVICE_NAME: kms.name.value,
- VAULT_ADDR: getKmsEndpoint(kms.address.value, kms.port.value),
- VAULT_BACKEND_PATH: kms.backend,
- VAULT_CACERT: kms.caCert?.metadata.name,
- VAULT_TLS_SERVER_NAME: kms.tls,
- VAULT_CLIENT_CERT: kms.clientCert?.metadata.name,
- VAULT_CLIENT_KEY: kms.clientKey?.metadata.name,
- VAULT_CACERT_FILE: kms.caCertFile,
- VAULT_CLIENT_CERT_FILE: kms.clientCertFile,
- VAULT_CLIENT_KEY_FILE: kms.clientKeyFile,
- VAULT_AUTH_METHOD: kms.authMethod,
+ KMS_SERVICE_NAME: kmsObj.name.value,
+ VAULT_ADDR: getKmsEndpoint(kmsObj.address.value, kmsObj.port.value),
+ VAULT_BACKEND_PATH: kmsObj.backend,
+ VAULT_CACERT: kmsObj.caCert?.metadata.name,
+ VAULT_TLS_SERVER_NAME: kmsObj.tls,
+ VAULT_CLIENT_CERT: kmsObj.clientCert?.metadata.name,
+ VAULT_CLIENT_KEY: kmsObj.clientKey?.metadata.name,
+ VAULT_CACERT_FILE: kmsObj.caCertFile,
+ VAULT_CLIENT_CERT_FILE: kmsObj.clientCertFile,
+ VAULT_CLIENT_KEY_FILE: kmsObj.clientKeyFile,
+ VAULT_AUTH_METHOD: kmsObj.authMethod,
};
- switch (kms.authMethod) {
+ switch (kmsObj.authMethod) {
case VaultAuthMethods.TOKEN:
csiConfigData = {
...csiConfigData,
VAULT_TOKEN_NAME: KMSVaultCSISecretName,
- VAULT_NAMESPACE: kms.providerNamespace,
+ VAULT_NAMESPACE: kmsObj.providerNamespace,
};
// token creation on ceph-csi deployment namespace from installation flow
- if (kms.authValue.value) {
+ if (kmsObj.authValue.value) {
const tokenSecret: SecretKind = getKmsVaultSecret(
- kms.authValue.value,
+ kmsObj.authValue.value,
KMSVaultCSISecretName,
ns
);
@@ -272,26 +298,25 @@ const getCsiVaultResources = (
csiConfigData.KMS_PROVIDER = KmsImplementations.VAULT_TENANT_SA;
csiConfigData = {
...csiConfigData,
- VAULT_AUTH_PATH: kms.providerAuthPath,
- VAULT_AUTH_NAMESPACE: kms.providerAuthNamespace,
- VAULT_NAMESPACE: kms.providerNamespace,
+ VAULT_AUTH_PATH: kmsObj.providerAuthPath,
+ VAULT_AUTH_NAMESPACE: kmsObj.providerAuthNamespace,
+ VAULT_NAMESPACE: kmsObj.providerNamespace,
};
break;
default:
}
const csiConfigObj: ConfigMapKind = generateCsiKmsConfigMap(
- kms.name.value,
+ kmsObj.name.value,
csiConfigData,
ns
);
- // skip if cluster-wide already taken care
if (createAdvancedVaultResource) {
- csiKmsResources.push(...createAdvancedVaultResources(kms));
+ csiKmsResources.push(...createAdvancedVaultResources(kmsObj));
}
if (update) {
- const cmPatch = [generateConfigMapPatch(kms.name.value, csiConfigData)];
+ const cmPatch = [generateConfigMapPatch(kmsObj.name.value, csiConfigData)];
csiKmsResources.push(
k8sPatch({ model: ConfigMapModel, resource: csiConfigObj, data: cmPatch })
);
@@ -314,8 +339,6 @@ const getCsiHpcsResources = (
let keySecret: SecretKind;
if (!secretName) {
- // not required, while setting up storage cluster.
- // required, while creating new storage class.
keySecret = generateHpcsSecret(kms, ns);
csiKmsResources.push(k8sCreate({ model: SecretModel, data: keySecret }));
}
@@ -446,7 +469,7 @@ const getClusterVaultResources = (kms: VaultConfig, ns: string) => {
const getClusterHpcsResources = (
kms: HpcsConfig,
ns: string
-): [string, Promise[]] => {
+): Promise[] => {
const clusterKmsResources: Promise[] = [];
const keySecret: SecretKind = generateHpcsSecret(kms, ns);
@@ -467,7 +490,7 @@ const getClusterHpcsResources = (
k8sCreate({ model: ConfigMapModel, data: configMapObj })
);
- return [secretName, clusterKmsResources];
+ return clusterKmsResources;
};
const getClusterThalesResources = (
@@ -501,28 +524,32 @@ export const getPort = (url: URL) => {
return url.port;
};
+// CSI KMS ConfigMap and Secrets always need to be created in the ODF install namespace (that is, where Rook is deployed)
export const createCsiKmsResources = (
kms: KMSConfiguration,
update: boolean,
- ns: string,
+ odfNamespace: string,
provider = ProviderNames.VAULT
): Promise[] => {
switch (provider) {
case ProviderNames.VAULT: {
- return getCsiVaultResources(kms as VaultConfig, update, ns);
+ return getCsiVaultResources(kms as VaultConfig, update, odfNamespace);
}
case ProviderNames.HPCS: {
- return getCsiHpcsResources(kms as HpcsConfig, update, ns);
+ return getCsiHpcsResources(kms as HpcsConfig, update, odfNamespace);
}
case ProviderNames.THALES: {
- return getCsiThalesResources(kms as ThalesConfig, update, ns);
+ return getCsiThalesResources(kms as ThalesConfig, update, odfNamespace);
}
}
};
+// CSI KMS ConfigMap and Secrets always need to be created in the ODF install namespace (that is, where Rook is deployed)
+// OCS KMS ConfigMap and Secrets need to be created in the namespace where Ceph is being deployed (StorageSystem namespace)
export const createClusterKmsResources = (
kms: KMSConfiguration,
- ns: string,
+ storageClusterNs: string,
+ odfNamespace: string,
provider = ProviderNames.VAULT,
isMCGStandalone = false
): Promise[] => {
@@ -537,27 +564,22 @@ export const createClusterKmsResources = (
].supportedEncryptionType.includes(KmsEncryptionLevel.STORAGE_CLASS);
const clusterKmsResources = clusterWideSupported
- ? getClusterVaultResources(kms as VaultConfig, ns)
+ ? getClusterVaultResources(kms as VaultConfig, storageClusterNs)
: [];
const csiKmsResources =
!isMCGStandalone && storageClassSupported
- ? getCsiVaultResources(
- kms as VaultConfig,
- false,
- ns,
- !clusterWideSupported
- )
+ ? getCsiVaultResources(kms as VaultConfig, false, odfNamespace, true)
: [];
return [...clusterKmsResources, ...csiKmsResources];
}
case ProviderNames.HPCS: {
- const [secretName, clusterKmsResources] = getClusterHpcsResources(
+ const clusterKmsResources = getClusterHpcsResources(
kms as HpcsConfig,
- ns
+ storageClusterNs
);
const csiKmsResources = !isMCGStandalone
- ? getCsiHpcsResources(kms as HpcsConfig, false, ns, secretName)
+ ? getCsiHpcsResources(kms as HpcsConfig, false, odfNamespace)
: [];
return [...clusterKmsResources, ...csiKmsResources];
@@ -565,10 +587,10 @@ export const createClusterKmsResources = (
case ProviderNames.THALES: {
const clusterKmsResources = getClusterThalesResources(
kms as ThalesConfig,
- ns
+ storageClusterNs
);
const csiKmsResources = !isMCGStandalone
- ? getCsiThalesResources(kms as ThalesConfig, false, ns)
+ ? getCsiThalesResources(kms as ThalesConfig, false, odfNamespace)
: [];
return [...clusterKmsResources, ...csiKmsResources];
}
diff --git a/packages/odf/components/kms-config/vault-config.tsx b/packages/odf/components/kms-config/vault-config.tsx
index 0765f6801..91ea97c90 100644
--- a/packages/odf/components/kms-config/vault-config.tsx
+++ b/packages/odf/components/kms-config/vault-config.tsx
@@ -40,6 +40,7 @@ export const VaultConfigure: React.FC = ({
className,
isWizardFlow,
isMCG,
+ systemNamespace,
}) => {
const { t } = useCustomTranslation();
@@ -62,6 +63,7 @@ export const VaultConfigure: React.FC = ({
state,
dispatch,
isWizardFlow,
+ systemNamespace,
});
const updateVaultState = React.useCallback(
diff --git a/packages/odf/components/mcg/CreateObjectBucketClaim.tsx b/packages/odf/components/mcg/CreateObjectBucketClaim.tsx
index 125309033..7526331ae 100644
--- a/packages/odf/components/mcg/CreateObjectBucketClaim.tsx
+++ b/packages/odf/components/mcg/CreateObjectBucketClaim.tsx
@@ -414,9 +414,7 @@ export const CreateOBCPage: React.FC<{}> = () => {
isAllProjectsInitially.current = false;
} else if (initialNamespace.current !== namespace) {
navigate(
- `/odf/object-storage/resource/${referenceForModel(
- NooBaaObjectBucketClaimModel
- )}`
+ `/odf/object-storage/${referenceForModel(NooBaaObjectBucketClaimModel)}`
);
}
}, [navigate, namespace, setNamespace]);
@@ -480,9 +478,10 @@ export const CreateOBCPage: React.FC<{}> = () => {
});
};
- // Operator install namespace is determined using Subscriptions, which non-admin can not access (yet).
+ // Operator install namespace is determined using Subscriptions, which non-admin can not access.
// Using "allowFallback" in "NamespaceSafetyBox" so that they can default to "openshift-storage" (if case of access issues),
// which is current use case as well (as we do not officially support UI if ODF is installed in any other Namespace).
+ // ToDo (Sanjal): Update the non-admin "Role" to a "ClusterRole", then read list of NooBaa/BucketClasses across all namespaces.
return (
<>
diff --git a/packages/odf/components/namespace-store/namespace-store-dropdown.tsx b/packages/odf/components/namespace-store/namespace-store-dropdown.tsx
index e4f4fb45f..e244f95da 100644
--- a/packages/odf/components/namespace-store/namespace-store-dropdown.tsx
+++ b/packages/odf/components/namespace-store/namespace-store-dropdown.tsx
@@ -29,9 +29,10 @@ export const NamespaceStoreDropdown: React.FC = ({
const [isOpen, setOpen] = React.useState(false);
const [dropdownItems, setDropdownItems] = React.useState([]);
- // Operator install namespace is determined using Subscriptions, which non-admin can not access (yet).
+ // Operator install namespace is determined using Subscriptions, which non-admin can not access.
// Using "true" in "useSafeK8sWatchResource" so that they can default to "openshift-storage" (if case of access issues),
// which is current use case as well (as we do not officially support UI if ODF is installed in any other Namespace).
+ // ToDo (Sanjal): Update the non-admin "Role" to a "ClusterRole", then read list of NamespaceStores across all namespaces.
const [nnsData, , nnsLoadErr] = useSafeK8sWatchResource(
namespaceStoreResource,
true
diff --git a/packages/odf/components/odf-dashboard/dashboard.tsx b/packages/odf/components/odf-dashboard/dashboard.tsx
index de09b2530..006ced5db 100644
--- a/packages/odf/components/odf-dashboard/dashboard.tsx
+++ b/packages/odf/components/odf-dashboard/dashboard.tsx
@@ -1,5 +1,8 @@
import * as React from 'react';
-import { useODFNamespaceSelector } from '@odf/core/redux';
+import {
+ useODFNamespaceSelector,
+ useODFSystemFlagsSelector,
+} from '@odf/core/redux';
import {
HorizontalNavTab,
isHorizontalNavTab,
@@ -91,6 +94,7 @@ const ODFDashboardPage: React.FC<{}> = () => {
);
const { isODFNsLoaded, odfNsLoadError } = useODFNamespaceSelector();
+ const { areFlagsLoaded, flagsLoadError } = useODFSystemFlagsSelector();
const [extensions, isLoaded, error] = useResolvedExtensions(
isDashboardTab as ExtensionTypeGuard
@@ -122,8 +126,8 @@ const ODFDashboardPage: React.FC<{}> = () => {
{/** Todo(bipuladh): Move to usage of common PF Tabs component */}
diff --git a/packages/odf/components/odf-dashboard/performance-card/performance-card.tsx b/packages/odf/components/odf-dashboard/performance-card/performance-card.tsx
index 0ac7975db..ee069d1cf 100644
--- a/packages/odf/components/odf-dashboard/performance-card/performance-card.tsx
+++ b/packages/odf/components/odf-dashboard/performance-card/performance-card.tsx
@@ -1,6 +1,4 @@
import * as React from 'react';
-import { useSafeK8sWatchResource } from '@odf/core/hooks';
-import { K8sResourceObj } from '@odf/core/types';
import LineGraph, {
LineGraphProps,
} from '@odf/shared/dashboards/line-graph/line-graph';
@@ -21,6 +19,7 @@ import {
referenceFor,
referenceForModel,
} from '@odf/shared/utils';
+import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import {
UtilizationDurationDropdown,
useUtilizationDuration,
@@ -40,6 +39,7 @@ import { generateDataFrames } from './utils';
type RowProps = {
systemName: string;
+ systemNamespace: string;
managedSystemKind: string;
managedSystemName: string;
currentLocation: string;
@@ -57,6 +57,7 @@ type GetRow = (
const getRow: GetRow = ({
managedSystemKind,
systemName,
+ systemNamespace,
iopsData,
throughputData,
latencyData,
@@ -67,7 +68,7 @@ const getRow: GetRow = ({
return [
,
@@ -94,11 +95,10 @@ const getRow: GetRow = ({
];
};
-const storageSystemResource: K8sResourceObj = (ns) => ({
+const storageSystemResource = {
kind: referenceForModel(ODFStorageSystem),
- namespace: ns,
isList: true,
-});
+};
const nameSort = (a: RowProps, b: RowProps, c: SortByDirection) => {
const negation = c !== SortByDirection.asc;
@@ -149,7 +149,7 @@ const PerformanceCard: React.FC = () => {
[t]
);
- const [systems, systemLoaded, systemLoadError] = useSafeK8sWatchResource<
+ const [systems, systemLoaded, systemLoadError] = useK8sWatchResource<
StorageSystemKind[]
>(storageSystemResource);
const { duration } = useUtilizationDuration();
diff --git a/packages/odf/components/odf-dashboard/performance-card/utils.ts b/packages/odf/components/odf-dashboard/performance-card/utils.ts
index 1aad2c308..d1d03af87 100644
--- a/packages/odf/components/odf-dashboard/performance-card/utils.ts
+++ b/packages/odf/components/odf-dashboard/performance-card/utils.ts
@@ -10,6 +10,7 @@ import * as _ from 'lodash-es';
type DataFrame = {
systemName: string;
+ systemNamespace: string;
managedSystemKind: string;
managedSystemName: string;
currentLocation: string;
@@ -25,6 +26,9 @@ const getDatForSystem = (
humanizer: Function
) => {
const systemName = system.spec.name;
+ // ToDo (epic 4422): This equality check should work (for now) as "managedBy" will be unique,
+ // but moving forward add a label to metric for StorageSystem namespace as well and use that instead (update query as well).
+ // Equality check should be updated as well with "&&" condition on StorageSystem namespace.
const relatedMetrics = promData?.data?.result?.find(
(value) => value.metric.managedBy === systemName
);
@@ -51,6 +55,7 @@ export const generateDataFrames = (
managedSystemKind: curr.spec.kind,
managedSystemName: curr.spec.name,
systemName: curr.metadata.name,
+ systemNamespace: curr.metadata.namespace,
currentLocation: '/',
iopsData: {
data: getDatForSystem(id, curr, humanizeIOPS),
diff --git a/packages/odf/components/odf-dashboard/queries.ts b/packages/odf/components/odf-dashboard/queries.ts
index 584e844c9..042a11dca 100644
--- a/packages/odf/components/odf-dashboard/queries.ts
+++ b/packages/odf/components/odf-dashboard/queries.ts
@@ -27,7 +27,8 @@ export const UTILIZATION_QUERY = {
[StorageDashboard.THROUGHPUT]: 'odf_system_throughput_total_bytes',
};
-// ToDo (epic 4422): Need to update as per updates in the metrics
+// ToDo (epic 4422): Need to update as per updates in the metrics (if needed/once confirmed).
+// Assuming "namespace" in "odf_system.*" metrics (except "odf_system_map", which is pushed by the ODF operator and already has "target_namespace") is where the system is deployed.
export const STATUS_QUERIES = {
- [StorageDashboard.HEALTH]: `(label_replace(odf_system_map{target_namespace="openshift-storage"} , "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy) group_right(storage_system) ${SYSTEM_HEALTH_METRIC}`,
+ [StorageDashboard.HEALTH]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (target_namespace, managedBy) group_right(storage_system) (label_replace(${SYSTEM_HEALTH_METRIC}, "target_namespace", "$1", "namespace", "(.*)"))`,
};
diff --git a/packages/odf/components/odf-dashboard/status-card/status-card-popover.tsx b/packages/odf/components/odf-dashboard/status-card/status-card-popover.tsx
index 645554426..91568e7af 100644
--- a/packages/odf/components/odf-dashboard/status-card/status-card-popover.tsx
+++ b/packages/odf/components/odf-dashboard/status-card/status-card-popover.tsx
@@ -11,7 +11,7 @@ import {
import './status-card-popover.scss';
export type ResourceHealthMap = {
- itemName: string;
+ systemName: string;
healthState: HealthState;
link?: string;
extraTexts?: string[];
@@ -47,15 +47,15 @@ const StatusCardPopover: React.FC = ({
>
{resourceHealthMap.map((resource) => (
{resource.link ? (
- {resource.itemName}
+ {resource.systemName}
) : (
- <>{resource.itemName}>
+ <>{resource.systemName}>
)}
{!!resource.extraTexts && (
diff --git a/packages/odf/components/odf-dashboard/status-card/status-card.tsx b/packages/odf/components/odf-dashboard/status-card/status-card.tsx
index be80a9a23..a33a919ca 100644
--- a/packages/odf/components/odf-dashboard/status-card/status-card.tsx
+++ b/packages/odf/components/odf-dashboard/status-card/status-card.tsx
@@ -11,6 +11,7 @@ import {
usePrometheusBasePath,
} from '@odf/shared/hooks/custom-prometheus-poll';
import { OCSStorageClusterModel, ODFStorageSystem } from '@odf/shared/models';
+import { getName, getNamespace } from '@odf/shared/selectors';
import {
ClusterServiceVersionKind,
StorageSystemKind,
@@ -28,7 +29,7 @@ import {
useK8sWatchResource,
} from '@openshift-console/dynamic-plugin-sdk';
import { HealthBody } from '@openshift-console/dynamic-plugin-sdk-internal';
-import { useHistory } from 'react-router';
+import { useNavigate } from 'react-router-dom-v5-compat';
import {
Gallery,
GalleryItem,
@@ -52,17 +53,16 @@ const operatorResource: K8sResourceObj = (ns) => ({
isList: true,
});
-const storageSystemResource: K8sResourceObj = (ns) => ({
+const storageSystemResource = {
kind: referenceForModel(ODFStorageSystem),
- namespace: ns,
isList: true,
-});
+};
export const StatusCard: React.FC = () => {
const { t } = useCustomTranslation();
const [csvData, csvLoaded, csvLoadError] =
useSafeK8sWatchResource(operatorResource);
- const [systems, systemsLoaded, systemsLoadError] = useSafeK8sWatchResource<
+ const [systems, systemsLoaded, systemsLoadError] = useK8sWatchResource<
StorageSystemKind[]
>(storageSystemResource);
@@ -78,7 +78,7 @@ export const StatusCard: React.FC = () => {
const operatorStatus = operator?.status?.phase;
// Todo(bipuladh): In 4.11 this should come in from an extension point
- const ocsHealthStatus = useGetOCSHealth();
+ const ocsHealthStatuses = useGetOCSHealth(systems);
const parsedHealthData =
!healthError &&
@@ -88,9 +88,15 @@ export const StatusCard: React.FC = () => {
healthData
? healthData?.data?.result?.reduce((acc, curr) => {
const systemName = curr.metric.storage_system;
+ // ToDo (epic 4422): This equality check should work (for now) as "storage_system" will be unique,
+ // but moving forward add a label to metric for StorageSystem namespace as well and use that instead (update query as well).
+ // Equality check should be updated as well with "&&" condition on StorageSystem namespace.
const storageSystem = systems.find(
- (system) => system.metadata.name === systemName
+ (system) => getName(system) === systemName
);
+ const systemNamespace = getNamespace(storageSystem);
+ const ocsHealthStatus =
+ ocsHealthStatuses[`${systemName}${systemNamespace}`];
const { apiGroup, apiVersion, kind } = getGVK(
storageSystem?.spec.kind
);
@@ -105,6 +111,7 @@ export const StatusCard: React.FC = () => {
link: getVendorDashboardLinkFromMetrics(
systemKind,
systemName,
+ systemNamespace,
ocsHealthStatus.errorComponent
),
@@ -118,7 +125,8 @@ export const StatusCard: React.FC = () => {
healthState: healthStateMap(curr.value[1]),
link: getVendorDashboardLinkFromMetrics(
systemKind,
- systemName
+ systemName,
+ systemNamespace
),
};
return [...acc, systemData];
@@ -149,11 +157,11 @@ export const StatusCard: React.FC = () => {
const clientAggregateHealth = getAggregateClientHealthState(clients);
- const history = useHistory();
+ const navigate = useNavigate();
const redirectToListPage = React.useCallback(() => {
- history.push('/odf/storage-clients');
- }, [history]);
+ navigate('/odf/storage-clients');
+ }, [navigate]);
return (
diff --git a/packages/odf/components/odf-dashboard/system-capacity-card/capacity-card.tsx b/packages/odf/components/odf-dashboard/system-capacity-card/capacity-card.tsx
index f5e861b76..0de41090e 100644
--- a/packages/odf/components/odf-dashboard/system-capacity-card/capacity-card.tsx
+++ b/packages/odf/components/odf-dashboard/system-capacity-card/capacity-card.tsx
@@ -1,6 +1,5 @@
import * as React from 'react';
-import { useSafeK8sWatchResource } from '@odf/core/hooks';
-import { K8sResourceObj } from '@odf/core/types';
+import { isMCGStandaloneCluster, isExternalCluster } from '@odf/core/utils';
import CapacityCard, {
CapacityMetricDatum,
} from '@odf/shared/dashboards/capacity-card/capacity-card';
@@ -18,40 +17,44 @@ import {
referenceFor,
referenceForModel,
} from '@odf/shared/utils';
-import { PrometheusResponse } from '@openshift-console/dynamic-plugin-sdk';
+import {
+ PrometheusResponse,
+ useK8sWatchResource,
+} from '@openshift-console/dynamic-plugin-sdk';
import * as _ from 'lodash-es';
import { Card, CardBody, CardHeader, CardTitle } from '@patternfly/react-core';
import { storageCapacityTooltip } from '../../../constants';
import { StorageDashboard, CAPACITY_QUERIES } from '../queries';
-const storageSystemResource: K8sResourceObj = (ns) => ({
+const storageSystemResource = {
kind: referenceForModel(ODFStorageSystem),
- namespace: ns,
isList: true,
-});
+};
-const storageClusterResource: K8sResourceObj = (ns) => ({
+const storageClusterResource = {
kind: referenceForModel(OCSStorageClusterModel),
- namespace: ns,
isList: true,
-});
+};
const getMetricForSystem = (
metric: PrometheusResponse,
system: StorageSystemKind
) =>
+ // ToDo (epic 4422): This equality check should work (for now) as "managedBy" will be unique,
+ // but moving forward add a label to metric for StorageSystem namespace as well and use that,
+ // equality check should be updated with "&&" condition on StorageSystem namespace.
metric?.data?.result?.find(
(value) => value.metric.managedBy === system.spec.name
);
const SystemCapacityCard: React.FC = () => {
const { t } = useCustomTranslation();
- const [systems, systemsLoaded, systemsLoadError] = useSafeK8sWatchResource<
+ const [systems, systemsLoaded, systemsLoadError] = useK8sWatchResource<
StorageSystemKind[]
>(storageSystemResource);
const [storageClusters, storageClustersLoaded, storageClustersLoadError] =
- useSafeK8sWatchResource(storageClusterResource);
+ useK8sWatchResource(storageClusterResource);
const [usedCapacity, errorUsedCapacity, loadingUsedCapacity] =
useCustomPrometheusPoll({
@@ -67,16 +70,6 @@ const SystemCapacityCard: React.FC = () => {
basePath: usePrometheusBasePath(),
});
- const isMCGCluster = (storageCluster: StorageClusterKind) => {
- return (
- storageCluster.spec?.multiCloudGateway?.reconcileStrategy === 'standalone'
- );
- };
-
- const isExternalCluster = (storageCluster: StorageClusterKind) => {
- return !_.isEmpty(storageCluster.spec?.externalStorage);
- };
-
// We are filtering internal only storagesystems as the metrics are not applicable for MCG standalone and external only StorageSystems.
// https://bugzilla.redhat.com/show_bug.cgi?id=2185042
const internalOnlySystems: StorageSystemKind[] = systems.filter((sys) => {
@@ -86,7 +79,8 @@ const SystemCapacityCard: React.FC = () => {
storageClusters.find((sc) => sc.metadata.name === sys.spec.name);
if (
!!storageCluster &&
- (isMCGCluster(storageCluster) || isExternalCluster(storageCluster))
+ (isMCGStandaloneCluster(storageCluster) ||
+ isExternalCluster(storageCluster))
) {
return false;
}
@@ -104,6 +98,7 @@ const SystemCapacityCard: React.FC = () => {
const totalMetric = getMetricForSystem(totalCapacity, system);
const datum = {
name: system.metadata.name,
+ namespace: system.metadata.namespace,
managedSystemName: system.spec.name,
managedSystemKind: referenceFor(apiGroup)(apiVersion)(kind),
usedValue: usedMetric
diff --git a/packages/odf/components/system-list/odf-system-list.tsx b/packages/odf/components/system-list/odf-system-list.tsx
index 30ce4ea6d..432823b80 100644
--- a/packages/odf/components/system-list/odf-system-list.tsx
+++ b/packages/odf/components/system-list/odf-system-list.tsx
@@ -6,6 +6,7 @@ import {
import { Kebab } from '@odf/shared/kebab/kebab';
import { ClusterServiceVersionModel } from '@odf/shared/models';
import { ODFStorageSystem } from '@odf/shared/models';
+import { getName, getNamespace } from '@odf/shared/selectors';
import { Status } from '@odf/shared/status/Status';
import {
ClusterServiceVersionKind,
@@ -46,7 +47,7 @@ import { OperandStatus } from '../utils';
import ODFSystemLink from './system-link';
type SystemMetrics = {
- [systeName: string]: {
+ [systeNameAndNamespace: string]: {
rawCapacity: HumanizeResult;
usedCapacity: HumanizeResult;
iops: HumanizeResult;
@@ -82,8 +83,11 @@ export const normalizeMetrics: MetricNormalize = (
) {
return {};
}
+ // ToDo (epic 4422): This equality check should work (for now) as "managedBy" will be unique,
+ // but moving forward add a label to metric for StorageSystem namespace as well and use that,
+ // equality check should be updated with "&&" condition on StorageSystem namespace.
return systems.reduce((acc, curr) => {
- acc[curr.metadata.name] = {
+ acc[`${getName(curr)}${getNamespace(curr)}`] = {
rawCapacity: humanizeBinaryBytes(
rawCapacity.data.result.find(
(item) => item?.metric?.managedBy === curr.spec.name
@@ -246,10 +250,12 @@ const StorageSystemRow: React.FC> = ({
const { t } = useCustomTranslation();
const { apiGroup, apiVersion, kind } = getGVK(obj.spec.kind);
const systemKind = referenceForGroupVersionKind(apiGroup)(apiVersion)(kind);
- const systemName = obj?.metadata?.name;
+ const systemName = getName(obj);
+ const systemNamespace = getNamespace(obj);
const { normalizedMetrics } = rowData;
- const metrics = normalizedMetrics?.normalizedMetrics?.[systemName];
+ const metrics =
+ normalizedMetrics?.normalizedMetrics?.[`${systemName}${systemNamespace}`];
const { rawCapacity, usedCapacity, iops, throughput, latency } =
metrics || {};
@@ -260,6 +266,7 @@ const StorageSystemRow: React.FC> = ({
kind={systemKind}
systemName={systemName}
providerName={systemName}
+ systemNamespace={systemNamespace}
/>
diff --git a/packages/odf/components/system-list/system-link.tsx b/packages/odf/components/system-list/system-link.tsx
index c8e71b72e..3cf825eb5 100644
--- a/packages/odf/components/system-list/system-link.tsx
+++ b/packages/odf/components/system-list/system-link.tsx
@@ -7,14 +7,16 @@ type ODFSystemLinkProps = {
kind: string;
providerName: string;
systemName: string;
+ systemNamespace: string;
};
const ODFSystemLink: React.FC = ({
kind,
systemName,
providerName,
+ systemNamespace,
}) => {
- const path = `/odf/system/${kind}/${providerName}/overview`;
+ const path = `/odf/system/ns/${systemNamespace}/${kind}/${providerName}/overview`;
return (
diff --git a/packages/odf/components/topology/Topology.tsx b/packages/odf/components/topology/Topology.tsx
index 92c544033..928d72384 100644
--- a/packages/odf/components/topology/Topology.tsx
+++ b/packages/odf/components/topology/Topology.tsx
@@ -1,6 +1,10 @@
import * as React from 'react';
import { useSafeK8sWatchResource } from '@odf/core/hooks';
-import { useODFNamespaceSelector } from '@odf/core/redux';
+import {
+ useODFNamespaceSelector,
+ useODFSystemFlagsSelector,
+} from '@odf/core/redux';
+import { getStorageClusterInNs } from '@odf/core/utils';
import HandleErrorAndLoading from '@odf/shared/error-handler/ErrorStateHandler';
import { useDeepCompareMemoize } from '@odf/shared/hooks/deep-compare-memoize';
import {
@@ -9,6 +13,7 @@ import {
NodeModel,
} from '@odf/shared/models';
import { getName, getUID } from '@odf/shared/selectors';
+import { BlueInfoCircleIcon } from '@odf/shared/status';
import {
createNode,
defaultLayoutFactory,
@@ -28,7 +33,6 @@ import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { referenceForModel } from '@odf/shared/utils';
import {
K8sResourceCommon,
- useFlag,
useK8sWatchResource,
} from '@openshift-console/dynamic-plugin-sdk';
import * as _ from 'lodash-es';
@@ -55,11 +59,6 @@ import {
GraphElement,
} from '@patternfly/react-topology';
import { cephStorageLabel } from '../../constants';
-import {
- CEPH_FLAG,
- MCG_STANDALONE,
- OCS_INDEPENDENT_FLAG,
-} from '../../features';
import {
nodeResource,
odfDaemonSetResource,
@@ -69,6 +68,12 @@ import {
odfStatefulSetResource,
storageClusterResource,
} from '../../resources';
+import {
+ hasAnyExternalOCS,
+ hasAnyInternalOCS,
+ hasAnyCeph,
+ hasAnyNoobaaStandalone,
+} from '../../utils';
import {
STEP_INTO_EVENT,
STEP_TO_CLUSTER,
@@ -112,6 +117,22 @@ type BackButtonProps = {
onClick: () => void;
};
+const MessageButton: React.FC = () => {
+ const { t } = useCustomTranslation();
+ const [showMessage, setShowMessage] = React.useState(false);
+
+ return (
+
+ );
+};
+
const BackButton: React.FC = ({ onClick }) => {
const { t } = useCustomTranslation();
return (
@@ -426,6 +447,7 @@ const TopologyViewComponent: React.FC = () => {
{currentView === TopologyViewLevel.DEPLOYMENTS && (
)}
+
);
@@ -449,7 +471,7 @@ const Topology: React.FC = () => {
const [nodes, nodesLoaded, nodesError] =
useK8sWatchResource(nodeResource);
- const [storageCluster, storageClusterLoaded, storageClusterError] =
+ const [storageClusters, storageClustersLoaded, storageClustersError] =
useK8sWatchResource(storageClusterResource);
const [deployments, deploymentsLoaded, deploymentsError] =
@@ -467,6 +489,13 @@ const Topology: React.FC = () => {
const [daemonSets, daemonSetsLoaded, daemonSetError] =
useSafeK8sWatchResource(odfDaemonSetResource);
+ // ToDo (epic 4422): This will work as Internal mode cluster will only be created in ODF install namespace.
+ // Still, make this generic so that this works even if it gets created in a different namespace.
+ const storageCluster: StorageClusterKind = getStorageClusterInNs(
+ storageClusters,
+ odfNamespace
+ );
+
const storageLabel = cephStorageLabel(odfNamespace);
const odfNodes = nodes.filter((node) =>
_.has(node.metadata.labels, storageLabel)
@@ -527,7 +556,7 @@ const Topology: React.FC = () => {
const loading =
!nodesLoaded ||
- !storageClusterLoaded ||
+ !storageClustersLoaded ||
!deploymentsLoaded ||
!podsLoaded ||
!statefulSetLoaded ||
@@ -541,7 +570,7 @@ const Topology: React.FC = () => {
{
{
type TopologyViewErrorMessageProps = {
isExternalMode?: boolean;
+ isInternalMode?: boolean;
};
const TopologyViewErrorMessage: React.FC = ({
isExternalMode,
+ isInternalMode,
}) => {
const { t } = useCustomTranslation();
@@ -608,20 +639,21 @@ const TopologyViewErrorMessage: React.FC = ({
const createLink = `/k8s/ns/${odfNamespace}/operators.coreos.com~v1alpha1~ClusterServiceVersion/${odfCsvName}/odf.openshift.io~v1alpha1~StorageSystem/~new`;
- const showCreateSSOption = !isExternalMode && isNsSafe;
+ // If external mode cluster exists, we do not allow internal mode cluster creation (in case of multiple StorageSystem support)
+ const hideCreateSSOption = (isExternalMode && !isInternalMode) || !isNsSafe;
return (
{isExternalMode
- ? t('Topology view is not supported for External Mode')
+ ? t('Topology view is not supported for External mode')
: t('No StorageCluster found')}
- {showCreateSSOption &&
+ {!hideCreateSSOption &&
t('Set up a storage cluster to view the topology')}
- {showCreateSSOption && (
+ {!hideCreateSSOption && (
{t('Create StorageSystem')}
)}
@@ -629,16 +661,22 @@ const TopologyViewErrorMessage: React.FC = ({
};
const TopologyWithErrorHandler: React.FC = () => {
- const isCephAvailable = useFlag(CEPH_FLAG);
- const isMCGAvailable = useFlag(MCG_STANDALONE);
- const isExternalMode = useFlag(OCS_INDEPENDENT_FLAG);
+ const { systemFlags } = useODFSystemFlagsSelector();
+
+ const isCephAvailable = hasAnyCeph(systemFlags);
+ const isMCGStandalone = hasAnyNoobaaStandalone(systemFlags);
+ const isExternalMode = hasAnyExternalOCS(systemFlags);
+ const isInternalMode = hasAnyInternalOCS(systemFlags);
- const showDashboard = (isCephAvailable || isMCGAvailable) && !isExternalMode;
+ const showDashboard = (isCephAvailable || isMCGStandalone) && isInternalMode;
return showDashboard ? (
) : (
-
+
);
};
diff --git a/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterDetails.tsx b/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterDetails.tsx
index 17b515138..d59130141 100644
--- a/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterDetails.tsx
+++ b/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterDetails.tsx
@@ -1,12 +1,9 @@
import * as React from 'react';
import { cephStorageLabel } from '@odf/core/constants';
-import { CEPH_FLAG, OCS_INDEPENDENT_FLAG } from '@odf/core/features';
-import { useSafeK8sList } from '@odf/core/hooks';
import { useODFNamespaceSelector } from '@odf/core/redux';
import { nodeResource } from '@odf/core/resources';
import { getDataResiliencyState } from '@odf/ocs/dashboards/persistent-internal/status-card/utils';
import { StorageEfficiencyContent } from '@odf/ocs/dashboards/persistent-internal/storage-efficiency-card/storage-efficiency-card';
-import { StorageClusterModel } from '@odf/ocs/models';
import { DATA_RESILIENCY_QUERY, StorageDashboardQuery } from '@odf/ocs/queries';
import { getCephNodes, getOperatorVersion } from '@odf/ocs/utils';
import { DASH, ODF_OPERATOR } from '@odf/shared/constants';
@@ -33,10 +30,7 @@ import {
resourcePathFromModel,
getInfrastructurePlatform,
} from '@odf/shared/utils';
-import {
- useFlag,
- useK8sWatchResource,
-} from '@openshift-console/dynamic-plugin-sdk';
+import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import {
HealthItem,
ResourceInventoryItem,
@@ -44,8 +38,10 @@ import {
import * as _ from 'lodash-es';
import { Link } from 'react-router-dom-v5-compat';
-const resiliencyProgressQuery =
- DATA_RESILIENCY_QUERY[StorageDashboardQuery.RESILIENCY_PROGRESS];
+const resiliencyProgressQuery = (managedByOCS: string) =>
+ DATA_RESILIENCY_QUERY(managedByOCS)[
+ StorageDashboardQuery.RESILIENCY_PROGRESS
+ ];
export type StorageClusterDetailsProps = {
resource: StorageClusterKind;
@@ -60,15 +56,8 @@ export const StorageClusterDetails: React.FC = ({
const [infrastructure, infrastructureLoaded, infrastructureError] =
useK8sGet(InfrastructureModel, 'cluster');
- const [ocsData, ocsLoaded, ocsError] = useSafeK8sList(
- StorageClusterModel,
- odfNamespace
- );
- const cluster = ocsData?.find(
- (item: StorageClusterKind) => item.status.phase !== 'Ignored'
- );
- const ocsName = ocsLoaded && _.isEmpty(ocsError) ? getName(cluster) : DASH;
+ const ocsName = getName(storageCluster);
const infrastructurePlatform =
infrastructureLoaded && _.isEmpty(infrastructureError)
? getInfrastructurePlatform(infrastructure)
@@ -76,7 +65,7 @@ export const StorageClusterDetails: React.FC = ({
const [resiliencyProgress, resiliencyProgressError] = useCustomPrometheusPoll(
{
- query: resiliencyProgressQuery,
+ query: resiliencyProgressQuery(ocsName),
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
}
@@ -86,10 +75,8 @@ export const StorageClusterDetails: React.FC = ({
t
);
- const isIndependent = useFlag(OCS_INDEPENDENT_FLAG);
- const isCephAvailable = useFlag(CEPH_FLAG);
- const mode =
- !isIndependent && isCephAvailable ? t('Internal') : t('External');
+ // Topology is only supported for Internal mode
+ const mode = t('Internal');
const [csv, csvLoaded, csvError] = useFetchCsv({
specName: ODF_OPERATOR,
diff --git a/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterObserve.tsx b/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterObserve.tsx
index a015b5474..96b224575 100644
--- a/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterObserve.tsx
+++ b/packages/odf/components/topology/sidebar/storage-cluster/StorageClusterObserve.tsx
@@ -1,23 +1,10 @@
import * as React from 'react';
-import { UtilizationContent as ExternalUtilizationContent } from '@odf/ocs/dashboards/persistent-external/utilization-card';
import { UtilizationContent as InternalUtilizationContent } from '@odf/ocs/dashboards/persistent-internal/utilization-card/utilization-card';
-import {
- K8sResourceCommon,
- useFlag,
-} from '@openshift-console/dynamic-plugin-sdk';
-import { CEPH_FLAG, OCS_INDEPENDENT_FLAG } from '../../../../features';
+import { K8sResourceCommon } from '@openshift-console/dynamic-plugin-sdk';
export const StorageClusterObserve: React.FC<{
resource?: K8sResourceCommon;
odfNamespace?: string;
}> = () => {
- const isIndependent = useFlag(OCS_INDEPENDENT_FLAG);
- const isCephAvailable = useFlag(CEPH_FLAG);
- const isInternal = !isIndependent && isCephAvailable;
-
- return isInternal ? (
-
- ) : (
-
- );
+ return ;
};
diff --git a/packages/odf/components/topology/topology.scss b/packages/odf/components/topology/topology.scss
index a6fd034c8..7ddea6740 100644
--- a/packages/odf/components/topology/topology.scss
+++ b/packages/odf/components/topology/topology.scss
@@ -27,6 +27,16 @@
z-index: 2;
}
+.odf-topology__message-button {
+ color: #393F44;
+ right: var(--pf-global--gutter);
+ position: absolute;
+ top: var(--pf-global--gutter);
+ font-size: var(--pf-global--FontSize--xs);
+ font-weight: var(--pf-global--FontWeight--normal);
+ z-index: 2;
+}
+
.odf-topology__back-button:hover {
box-shadow: 1px 1px 3px 1px #aaabac;
cursor: pointer;
diff --git a/packages/odf/components/utils/common.ts b/packages/odf/components/utils/common.ts
index fbe77ba05..f62e9dd13 100644
--- a/packages/odf/components/utils/common.ts
+++ b/packages/odf/components/utils/common.ts
@@ -46,7 +46,6 @@ import {
OCS_DEVICE_SET_FLEXIBLE_REPLICA,
OCS_DEVICE_SET_MINIMUM_REPLICAS,
ATTACHED_DEVICES_ANNOTATION,
- OCS_INTERNAL_CR_NAME,
DISASTER_RECOVERY_TARGET_ANNOTATION,
} from '../../constants';
import { WizardNodeState, WizardState } from '../create-storage-system/reducer';
@@ -85,9 +84,10 @@ export const getAllZone = (nodes: WizardNodeState[]): Set =>
export const getVendorDashboardLinkFromMetrics = (
systemKind: string,
systemName: string,
+ systemNamespace: string,
subComponent?: string
) =>
- `/odf/system/${systemKind}/${systemName}/overview${
+ `/odf/system/ns/${systemNamespace}/${systemKind}/${systemName}/overview${
subComponent ? '/' + subComponent : ''
}`;
@@ -383,10 +383,11 @@ type OCSRequestData = {
shouldSetCephRBDAsDefault?: boolean;
isSingleReplicaPoolEnabled?: boolean;
enableRDRPreparation?: boolean;
- odfNamespace: string;
+ storageClusterNamespace: string;
useExternalPostgres?: boolean;
allowNoobaaPostgresSelfSignedCerts?: boolean;
enableNoobaaClientSideCerts?: boolean;
+ storageClusterName: string;
};
export const getOCSRequestData = ({
@@ -407,10 +408,11 @@ export const getOCSRequestData = ({
shouldSetCephRBDAsDefault,
isSingleReplicaPoolEnabled,
enableRDRPreparation,
- odfNamespace,
+ storageClusterNamespace,
useExternalPostgres,
allowNoobaaPostgresSelfSignedCerts,
enableNoobaaClientSideCerts,
+ storageClusterName,
}: OCSRequestData): StorageClusterKind => {
const scName: string = storageClass.name;
const isNoProvisioner: boolean = storageClass?.provisioner === NO_PROVISIONER;
@@ -427,8 +429,8 @@ export const getOCSRequestData = ({
apiVersion: 'ocs.openshift.io/v1',
kind: 'StorageCluster',
metadata: {
- name: OCS_INTERNAL_CR_NAME,
- namespace: odfNamespace,
+ name: storageClusterName,
+ namespace: storageClusterNamespace,
},
spec: {},
};
diff --git a/packages/odf/constants/common.ts b/packages/odf/constants/common.ts
index 07389f2c6..db30f36d4 100644
--- a/packages/odf/constants/common.ts
+++ b/packages/odf/constants/common.ts
@@ -8,8 +8,6 @@ import { TFunction } from 'i18next';
export const CEPH_BRAND_NAME = 'Red Hat Ceph Storage';
export const NO_PROVISIONER = 'kubernetes.io/no-provisioner';
export const STORAGE_CLUSTER_SYSTEM_KIND = 'storagecluster.ocs.openshift.io/v1';
-export const OCS_EXTERNAL_CR_NAME = 'ocs-external-storagecluster';
-export const OCS_INTERNAL_CR_NAME = 'ocs-storagecluster';
export const HOSTNAME_LABEL_KEY = 'kubernetes.io/hostname';
export const LABEL_OPERATOR = 'In';
export const OCS_SUPPORT_ANNOTATION = 'features.ocs.openshift.io/enabled';
diff --git a/packages/odf/features.ts b/packages/odf/features.ts
index e543df469..37e288704 100644
--- a/packages/odf/features.ts
+++ b/packages/odf/features.ts
@@ -1,34 +1,28 @@
import {
ODFStorageSystem,
- OCSStorageClusterModel,
StorageClassModel,
- CephClusterModel,
+ OCSStorageClusterModel,
} from '@odf/shared/models';
import { SelfSubjectAccessReviewModel } from '@odf/shared/models';
import {
- StorageClusterKind,
StorageClassResourceKind,
+ StorageClusterKind,
} from '@odf/shared/types';
import {
SetFeatureFlag,
k8sList,
k8sCreate,
- K8sResourceCommon,
SelfSubjectAccessReviewKind,
+ K8sResourceCommon,
} from '@openshift-console/dynamic-plugin-sdk';
import * as _ from 'lodash-es';
import { SECOND, RGW_PROVISIONER, NOOBAA_PROVISIONER } from './constants';
+import { isExternalCluster, isClusterIgnored } from './utils';
export const ODF_MODEL_FLAG = 'ODF_MODEL'; // Based on the existence of StorageSystem CRD
-export const OCS_INDEPENDENT_FLAG = 'OCS_INDEPENDENT'; // Set to "true" if it is external mode StorageCluster
-export const OCS_CONVERGED_FLAG = 'OCS_CONVERGED'; // Set to "true" if it is internal mode StorageCluster
export const ROSA_FLAG = 'ROSA'; // Set to "true" if we are using ROSA
export const RGW_FLAG = 'RGW'; // Based on the existence of StorageClass with RGW provisioner ("openshift-storage.ceph.rook.io/bucket")
-export const MCG_STANDALONE = 'MCG_STANDALONE'; // Based on the existence of NooBaa only system (no Ceph)
export const MCG_FLAG = 'MCG'; // Based on the existence of NooBaa StorageClass (which only gets created if NooBaaSystem is present)
-export const CEPH_FLAG = 'CEPH'; // Based on the existence of CephCluster
-export const OCS_FLAG = 'OCS'; // Based on the existence of StorageCluster
-export const OCS_NFS_ENABLED = 'NFS'; // Based on the enablement of NFS from StorageCluster spec
export const ODF_ADMIN = 'ODF_ADMIN'; // Set to "true" if user is an "openshift-storage" admin (access to StorageSystems)
export const PROVIDER_MODE = 'PROVIDER_MODE'; // Set to "true" if user has deployed it in provider mode
@@ -47,17 +41,6 @@ const ssarChecks = [
const isProviderMode = (cluster: StorageClusterKind): boolean =>
!!cluster.spec.allowRemoteStorageConsumers;
-const setOCSFlagsFalse = (setFlag: SetFeatureFlag) => {
- setFlag(OCS_FLAG, false);
- setFlag(OCS_CONVERGED_FLAG, false);
- setFlag(OCS_INDEPENDENT_FLAG, false);
- setFlag(MCG_STANDALONE, false);
- setFlag(OCS_NFS_ENABLED, false);
-};
-
-export const setODFFlag = (setFlag: SetFeatureFlag) =>
- setFlag(ODF_MODEL_FLAG, true);
-
export const setOCSFlags = async (setFlag: SetFeatureFlag) => {
let ocsIntervalId = null;
// to prevent unnecessary re-render every 15 sec
@@ -72,27 +55,18 @@ export const setOCSFlags = async (setFlag: SetFeatureFlag) => {
requestInit: null,
})) as StorageClusterKind[];
if (storageClusters?.length > 0) {
- const storageCluster = storageClusters.find(
- (sc: StorageClusterKind) => sc.status.phase !== 'Ignored'
+ const internalStorageCluster = storageClusters.find(
+ (sc: StorageClusterKind) =>
+ !isClusterIgnored(sc) && !isExternalCluster(sc)
);
- const isInternal = _.isEmpty(storageCluster?.spec?.externalStorage);
- setFlag(OCS_CONVERGED_FLAG, isInternal);
- setFlag(OCS_INDEPENDENT_FLAG, !isInternal);
- setFlag(OCS_FLAG, true);
- setFlag(
- MCG_STANDALONE,
- storageCluster?.spec?.multiCloudGateway?.reconcileStrategy ===
- 'standalone'
- );
- setFlag(OCS_NFS_ENABLED, storageCluster?.spec?.nfs?.enable === true);
- setFlag(PROVIDER_MODE, isProviderMode(storageCluster));
+ setFlag(PROVIDER_MODE, isProviderMode(internalStorageCluster));
clearInterval(ocsIntervalId);
} else if (setFlagFalse) {
setFlagFalse = false;
- setOCSFlagsFalse(setFlag);
+ setFlag(PROVIDER_MODE, false);
}
} catch (error) {
- setOCSFlagsFalse(setFlag);
+ setFlag(PROVIDER_MODE, false);
}
};
@@ -102,6 +76,9 @@ export const setOCSFlags = async (setFlag: SetFeatureFlag) => {
ocsIntervalId = setInterval(ocsDetector, 15 * SECOND);
};
+export const setODFFlag = (setFlag: SetFeatureFlag) =>
+ setFlag(ODF_MODEL_FLAG, true);
+
const handleError = (
res: any,
flags: string[],
@@ -205,24 +182,8 @@ export const detectSSAR = (setFlag: SetFeatureFlag) => {
export const detectComponents: FeatureDetector = async (
setFlag: SetFeatureFlag
) => {
- let cephIntervalId = null;
let noobaaIntervalId = null;
- const cephDetector = async () => {
- try {
- const cephClusters = (await k8sList({
- model: CephClusterModel,
- queryParams: { ns: null },
- })) as K8sResourceCommon[];
- if (cephClusters?.length > 0) {
- setFlag(CEPH_FLAG, true);
- clearInterval(cephIntervalId);
- }
- } catch {
- setFlag(CEPH_FLAG, false);
- }
- };
-
// Setting flag based on presence of NooBaa StorageClass gets created only if NooBaa CR is present
const noobaaDetector = async () => {
try {
@@ -244,9 +205,7 @@ export const detectComponents: FeatureDetector = async (
// calling first time instantaneously
// else it will wait for 15s before start polling
- cephDetector();
noobaaDetector();
- cephIntervalId = setInterval(cephDetector, 15 * SECOND);
noobaaIntervalId = setInterval(noobaaDetector, 15 * SECOND);
};
diff --git a/packages/odf/modals/add-capacity/add-capacity-modal.tsx b/packages/odf/modals/add-capacity/add-capacity-modal.tsx
index c48c1c00d..2f34cb8e0 100644
--- a/packages/odf/modals/add-capacity/add-capacity-modal.tsx
+++ b/packages/odf/modals/add-capacity/add-capacity-modal.tsx
@@ -208,14 +208,21 @@ export const AddCapacityModal: React.FC = ({
const { odfNamespace } = useODFNamespaceSelector();
+ const ocsClusterName = getName(ocsConfig);
const [cephTotal, totalError, totalLoading] = useCustomPrometheusPoll({
endpoint: 'api/v1/query' as PrometheusEndpoint,
- query: CAPACITY_INFO_QUERIES[StorageDashboardQuery.RAW_CAPACITY_TOTAL],
+ query:
+ CAPACITY_INFO_QUERIES(ocsClusterName)[
+ StorageDashboardQuery.RAW_CAPACITY_TOTAL
+ ],
basePath: usePrometheusBasePath(),
});
const [cephUsed, usedError, usedLoading] = useCustomPrometheusPoll({
endpoint: 'api/v1/query' as PrometheusEndpoint,
- query: CAPACITY_INFO_QUERIES[StorageDashboardQuery.RAW_CAPACITY_USED],
+ query:
+ CAPACITY_INFO_QUERIES(ocsClusterName)[
+ StorageDashboardQuery.RAW_CAPACITY_USED
+ ],
basePath: usePrometheusBasePath(),
});
const [values, loading, loadError] = [
@@ -242,7 +249,7 @@ export const AddCapacityModal: React.FC = ({
const [osdSize, unit] = osdSizeWithUnit.split(/(\d+)/).filter(Boolean);
const osdSizeWithoutUnit: number = +osdSize / SIZE_IN_TB[unit];
const isNoProvionerSC: boolean = storageClass?.provisioner === NO_PROVISIONER;
- const selectedSCName: string = storageClass?.metadata?.name;
+ const selectedSCName: string = getName(storageClass);
const deviceSetIndex: number = getCurrentDeviceSetIndex(
deviceSets,
selectedSCName
@@ -254,7 +261,7 @@ export const AddCapacityModal: React.FC = ({
hasFlexibleScaling,
createWizardNodeState(getCephNodes(nodesData, odfNamespace))
);
- const name = ocsConfig?.metadata?.name;
+
const totalCapacityMetric = values?.[0];
const usedCapacityMetric = values?.[1];
const usedCapacity = humanizeBinaryBytes(usedCapacityMetric);
@@ -408,9 +415,13 @@ export const AddCapacityModal: React.FC = ({
>
-
- Adding capacity for {{ name }}, may increase your
- expenses.
+
+ Adding capacity for {{ ocsClusterName }}, may
+ increase your expenses.
= (props) => {
- const { closeModal, state, dispatch, isWizardFlow } = props;
+ const { closeModal, state, dispatch, isWizardFlow, systemNamespace } = props;
const kms = state.kms.providerState as VaultConfig;
const { t } = useCustomTranslation();
- const { odfNamespace } = useODFNamespaceSelector();
-
const [backendPath, setBackendPath] = React.useState(kms?.backend || '');
const [authPath, setAuthPath] = React.useState(kms?.providerAuthPath || '');
const [authNamespace, setAuthNamespace] = React.useState(
@@ -113,18 +110,18 @@ const AdvancedVaultModal: ModalComponent = (props) => {
};
caCertificate && caCertificate !== ''
- ? (kmsAdvanced.caCert = generateCASecret(caCertificate, odfNamespace))
+ ? (kmsAdvanced.caCert = generateCASecret(caCertificate, systemNamespace))
: (kmsAdvanced.caCert = null);
clientCertificate && clientCertificate !== ''
? (kmsAdvanced.clientCert = generateClientSecret(
clientCertificate,
- odfNamespace
+ systemNamespace
))
: (kmsAdvanced.clientCert = null);
clientKey && clientCertificate !== ''
? (kmsAdvanced.clientKey = generateClientKeySecret(
clientKey,
- odfNamespace
+ systemNamespace
))
: (kmsAdvanced.clientKey = null);
diff --git a/packages/odf/modals/configure-mons/configure-mons.tsx b/packages/odf/modals/configure-mons/configure-mons.tsx
index 8130be76b..22a0be3a7 100644
--- a/packages/odf/modals/configure-mons/configure-mons.tsx
+++ b/packages/odf/modals/configure-mons/configure-mons.tsx
@@ -41,7 +41,7 @@ const LowMonAlertModal: ModalComponent = ({ closeModal }) => {
const [errorMessage, setErrorMessage] = React.useState('');
const [inProgress, setProgress] = React.useState(false);
- //Todo(bipuladh): Update it to use information from the alert
+ // ToDo (epic 4422) (bipuladh): Update it to use information from the alert.
const [storageCluster, storageClusterLoaded, storageCluserLoadError] =
useSafeK8sGet(StorageClusterModel, null, odfNamespace);
const [nodes, nodesLoaded, nodesLoadError] = useK8sList(NodeModel);
diff --git a/packages/odf/modals/configure-performance/configure-performance-modal.tsx b/packages/odf/modals/configure-performance/configure-performance-modal.tsx
index c180fcc9a..17212a175 100644
--- a/packages/odf/modals/configure-performance/configure-performance-modal.tsx
+++ b/packages/odf/modals/configure-performance/configure-performance-modal.tsx
@@ -12,7 +12,6 @@ import {
RESOURCE_PROFILE_REQUIREMENTS_MAP,
resourceRequirementsTooltip,
} from '@odf/core/constants';
-import { useODFNamespaceSelector } from '@odf/core/redux';
import { ResourceProfile, ValidationType } from '@odf/core/types';
import { isResourceProfileAllowed } from '@odf/core/utils';
import { FieldLevelHelp } from '@odf/shared/generic';
@@ -21,6 +20,7 @@ import { useK8sGet } from '@odf/shared/hooks';
import { CommonModalProps } from '@odf/shared/modals/common';
import { ModalBody, ModalFooter, ModalHeader } from '@odf/shared/modals/Modal';
import { OCSStorageClusterModel } from '@odf/shared/models';
+import { getNamespace } from '@odf/shared/selectors';
import {
NodeKind,
StorageClusterKind,
@@ -98,7 +98,8 @@ const ConfigurePerformanceModal: React.FC = ({
isOpen,
}) => {
const { t } = useCustomTranslation();
- const { odfNamespace, isNsSafe } = useODFNamespaceSelector();
+ const systemNs = getNamespace(storageCluster);
+
const [inProgress, setProgress] = React.useState(false);
const [errorMessage, setError] = React.useState(null);
@@ -134,7 +135,7 @@ const ConfigurePerformanceModal: React.FC = ({
return;
}
try {
- await labelNodes(selectedNodes, odfNamespace);
+ await labelNodes(selectedNodes, systemNs);
const patch: Patch = {
op: 'replace',
@@ -177,6 +178,7 @@ const ConfigurePerformanceModal: React.FC = ({
nodes={selectedNodes}
onRowSelected={onRowSelected}
disableLabeledNodes={true}
+ systemNamespace={systemNs}
/>
{validation && (
= ({
data-test-id="confirm-action"
variant="primary"
onClick={submit}
- isDisabled={!resourceProfile || !isNsSafe || !!validation}
+ isDisabled={!resourceProfile || !!validation}
>
{t('Save changes')}
diff --git a/packages/odf/queries/common.ts b/packages/odf/queries/common.ts
index af44af361..80980bc4c 100644
--- a/packages/odf/queries/common.ts
+++ b/packages/odf/queries/common.ts
@@ -3,8 +3,9 @@ export enum StorageDashboardQuery {
RAW_CAPACITY_USED = 'RAW_CAPACITY_USED',
}
-export const CAPACITY_INFO_QUERIES = {
- [StorageDashboardQuery.RAW_CAPACITY_TOTAL]: 'ceph_cluster_total_bytes',
- [StorageDashboardQuery.RAW_CAPACITY_USED]:
- 'ceph_cluster_total_used_raw_bytes',
-};
+// ToDo (epic 4422): This should work (for now) as "managedBy" will be unique,
+// but moving forward add a label to metric for CephCluster namespace and use that instead (update query).
+export const CAPACITY_INFO_QUERIES = (managedByOCS: string) => ({
+ [StorageDashboardQuery.RAW_CAPACITY_TOTAL]: `ceph_cluster_total_bytes{managedBy="${managedByOCS}"}`,
+ [StorageDashboardQuery.RAW_CAPACITY_USED]: `ceph_cluster_total_used_raw_bytes{managedBy="${managedByOCS}"}`,
+});
diff --git a/packages/odf/redux/actions/index.ts b/packages/odf/redux/actions/index.ts
index 311c45d9b..45a58f0e1 100644
--- a/packages/odf/redux/actions/index.ts
+++ b/packages/odf/redux/actions/index.ts
@@ -1 +1,2 @@
export * from './odf-namespace';
+export * from './odf-system-flags';
diff --git a/packages/odf/redux/actions/odf-system-flags.ts b/packages/odf/redux/actions/odf-system-flags.ts
new file mode 100644
index 000000000..456c4b7cf
--- /dev/null
+++ b/packages/odf/redux/actions/odf-system-flags.ts
@@ -0,0 +1,33 @@
+import { action } from 'typesafe-actions';
+
+export type ODFSystemFlagsPayload = {
+ systemFlags: {
+ [namespace: string]: {
+ odfSystemName: string;
+ ocsClusterName: string;
+ isInternalMode: boolean;
+ isExternalMode: boolean;
+ isNoobaaStandalone: boolean;
+ isNoobaaAvailable: boolean;
+ isCephAvailable: boolean;
+ isRGWAvailable: boolean;
+ isNFSEnabled: boolean;
+ };
+ };
+ areFlagsLoaded: boolean;
+ flagsLoadError: unknown;
+};
+
+export enum ODFSystemFlagsActions {
+ SetODFSystemFlags = 'setODFSystemFlags',
+}
+
+export type odfSystemFlagsActionTypes = {
+ type: ODFSystemFlagsActions;
+ payload: ODFSystemFlagsPayload;
+};
+
+export const setODFSystemFlags = (
+ payload: ODFSystemFlagsPayload
+): odfSystemFlagsActionTypes =>
+ action(ODFSystemFlagsActions.SetODFSystemFlags, payload);
diff --git a/packages/odf/redux/combineReducers.ts b/packages/odf/redux/combineReducers.ts
index b38a95f9f..c5ab4e772 100644
--- a/packages/odf/redux/combineReducers.ts
+++ b/packages/odf/redux/combineReducers.ts
@@ -1,7 +1,11 @@
import { combineReducers } from 'redux';
-import { odfNamespaceReducer } from './reducers';
-import { odfNamespaceReducerName } from './selectors';
+import { odfNamespaceReducer, odfSystemFlagsReducer } from './reducers';
+import {
+ odfNamespaceReducerName,
+ odfSystemFlagsReducerName,
+} from './selectors';
export default combineReducers({
[odfNamespaceReducerName]: odfNamespaceReducer,
+ [odfSystemFlagsReducerName]: odfSystemFlagsReducer,
});
diff --git a/packages/odf/redux/constants.ts b/packages/odf/redux/constants.ts
new file mode 100644
index 000000000..bc2b9a2ab
--- /dev/null
+++ b/packages/odf/redux/constants.ts
@@ -0,0 +1 @@
+export const reduxReducerScope = 'odfConsoleRedux';
diff --git a/packages/odf/redux/dispatchers/index.ts b/packages/odf/redux/dispatchers/index.ts
index 311c45d9b..45a58f0e1 100644
--- a/packages/odf/redux/dispatchers/index.ts
+++ b/packages/odf/redux/dispatchers/index.ts
@@ -1 +1,2 @@
export * from './odf-namespace';
+export * from './odf-system-flags';
diff --git a/packages/odf/redux/dispatchers/odf-system-flags.ts b/packages/odf/redux/dispatchers/odf-system-flags.ts
new file mode 100644
index 000000000..d1089e6dd
--- /dev/null
+++ b/packages/odf/redux/dispatchers/odf-system-flags.ts
@@ -0,0 +1,16 @@
+import { useCallback } from 'react';
+import { useDispatch } from 'react-redux';
+import { ODFSystemFlagsPayload, setODFSystemFlags } from '../actions';
+
+type UseODFSystemFlagsDispatch = () => (payload: ODFSystemFlagsPayload) => void;
+
+export const useODFSystemFlagsDispatch: UseODFSystemFlagsDispatch = () => {
+ const dispatch = useDispatch();
+
+ return useCallback(
+ (payload: ODFSystemFlagsPayload) => {
+ dispatch(setODFSystemFlags(payload));
+ },
+ [dispatch]
+ );
+};
diff --git a/packages/odf/redux/provider-hooks/index.ts b/packages/odf/redux/provider-hooks/index.ts
index d7292f191..d89c89174 100644
--- a/packages/odf/redux/provider-hooks/index.ts
+++ b/packages/odf/redux/provider-hooks/index.ts
@@ -1 +1,2 @@
export * from './useODFNamespace';
+export * from './useODFSystemFlags';
diff --git a/packages/odf/redux/provider-hooks/useODFNamespace.ts b/packages/odf/redux/provider-hooks/useODFNamespace.ts
index 59c269de5..c2b4d07a3 100644
--- a/packages/odf/redux/provider-hooks/useODFNamespace.ts
+++ b/packages/odf/redux/provider-hooks/useODFNamespace.ts
@@ -2,14 +2,13 @@ import * as React from 'react';
import { SubscriptionModel } from '@odf/shared/models';
import { getNamespace } from '@odf/shared/selectors';
import { SubscriptionKind } from '@odf/shared/types';
+import { isAbortError } from '@odf/shared/utils';
import { k8sList } from '@openshift-console/dynamic-plugin-sdk';
import { useODFNamespaceDispatch } from '../dispatchers';
const FALLBACK_NAMESPACE = 'openshift-storage';
const SPEC_NAME = 'odf-operator';
-const isAbortError = (err): boolean => err?.name === 'AbortError';
-
const namespaceDetector = async (maxAttempt = 5): Promise => {
let attempt = 0;
let ns = null;
diff --git a/packages/odf/redux/provider-hooks/useODFSystemFlags.ts b/packages/odf/redux/provider-hooks/useODFSystemFlags.ts
new file mode 100644
index 000000000..aad16db8b
--- /dev/null
+++ b/packages/odf/redux/provider-hooks/useODFSystemFlags.ts
@@ -0,0 +1,166 @@
+import * as React from 'react';
+import {
+ isMCGStandaloneCluster,
+ isExternalCluster,
+ isClusterIgnored,
+ isNFSEnabled,
+} from '@odf/core/utils';
+import {
+ CephClusterModel,
+ CephObjectStoreModel,
+ NooBaaSystemModel,
+ StorageClusterModel,
+} from '@odf/ocs/models';
+import { useDeepCompareMemoize } from '@odf/shared/hooks/deep-compare-memoize';
+import { ODFStorageSystem } from '@odf/shared/models';
+import {
+ getName,
+ getNamespace,
+ getOwnerReferences,
+} from '@odf/shared/selectors';
+import {
+ StorageClusterKind,
+ CephClusterKind,
+ NoobaaSystemKind,
+ K8sResourceKind,
+} from '@odf/shared/types';
+import { referenceForModel } from '@odf/shared/utils';
+import { useK8sWatchResources } from '@openshift-console/dynamic-plugin-sdk';
+import { ODFSystemFlagsPayload } from '../actions';
+import { useODFSystemFlagsDispatch } from '../dispatchers';
+
+const watchResources = {
+ scs: {
+ kind: referenceForModel(StorageClusterModel),
+ isList: true,
+ },
+ ccs: {
+ kind: referenceForModel(CephClusterModel),
+ isList: true,
+ },
+ coss: {
+ kind: referenceForModel(CephObjectStoreModel),
+ isList: true,
+ },
+ nss: {
+ kind: referenceForModel(NooBaaSystemModel),
+ isList: true,
+ },
+};
+
+type UseODFSystemFlagsPayload = {
+ storageClusters: StorageClusterKind[];
+ cephClusters: CephClusterKind[];
+ objectStores: K8sResourceKind[];
+ noobaas: NoobaaSystemKind[];
+ allLoaded: boolean;
+ anyError: Error;
+};
+
+const useODFSystemFlagsPayload = ({
+ storageClusters,
+ cephClusters,
+ objectStores,
+ noobaas,
+ allLoaded,
+ anyError,
+}: UseODFSystemFlagsPayload): ODFSystemFlagsPayload => {
+ const payload: ODFSystemFlagsPayload = React.useMemo(() => {
+ if (allLoaded && !anyError) {
+ return storageClusters?.reduce(
+ (acc: ODFSystemFlagsPayload, sc) => {
+ if (!isClusterIgnored(sc)) {
+ const clusterNamespace = getNamespace(sc);
+ const ceph = cephClusters?.find(
+ (cc) => getNamespace(cc) === clusterNamespace
+ );
+ const cephObjStore = objectStores?.find(
+ (cos) => getNamespace(cos) === clusterNamespace
+ );
+ const noobaa = noobaas?.find(
+ (ns) => getNamespace(ns) === clusterNamespace
+ );
+
+ const odfSystemFlags = {
+ odfSystemName: getOwnerReferences(sc)?.find(
+ (o) => o.kind === ODFStorageSystem.kind
+ )?.name,
+ ocsClusterName: getName(sc),
+ // Set to "true" if it is internal mode StorageCluster
+ isInternalMode: !isExternalCluster(sc),
+ // Set to "true" if it is external mode StorageCluster
+ isExternalMode: isExternalCluster(sc),
+ // Based on the existence of NooBaa only system (no Ceph)
+ isNoobaaStandalone: isMCGStandaloneCluster(sc),
+ // Based on the existence of NooBaa CR
+ isNoobaaAvailable: !!noobaa,
+ // Based on the existence of Ceph CR
+ isCephAvailable: !!ceph,
+ // Based on the existence of CephObjectStore CR
+ isRGWAvailable: !!cephObjStore,
+ // Based on the enablement of NFS from StorageCluster spec
+ isNFSEnabled: isNFSEnabled(sc),
+ };
+ acc.systemFlags[clusterNamespace] = odfSystemFlags;
+ }
+ return acc;
+ },
+ {
+ systemFlags: {},
+ areFlagsLoaded: allLoaded,
+ flagsLoadError: anyError,
+ } as ODFSystemFlagsPayload
+ );
+ }
+ return {
+ systemFlags: {},
+ areFlagsLoaded: allLoaded,
+ flagsLoadError: anyError,
+ };
+ }, [
+ storageClusters,
+ cephClusters,
+ objectStores,
+ noobaas,
+ allLoaded,
+ anyError,
+ ]);
+
+ return useDeepCompareMemoize(payload);
+};
+
+export const useODFSystemFlags = (): void => {
+ const dispatch = useODFSystemFlagsDispatch();
+
+ const resources = useK8sWatchResources(watchResources);
+
+ const storageClusters = (resources?.scs?.data ?? []) as StorageClusterKind[];
+ const scLoaded = resources?.scs?.loaded;
+ const scError = resources?.scs?.loadError;
+
+ const cephClusters = (resources?.ccs?.data ?? []) as CephClusterKind[];
+ const ccLoaded = resources?.ccs?.loaded;
+ const ccError = resources?.ccs?.loadError;
+
+ const objectStores = (resources?.coss?.data ?? []) as K8sResourceKind[];
+ const cosLoaded = resources?.coss?.loaded;
+ const cosError = resources?.coss?.loadError;
+
+ const noobaas = (resources?.nss?.data ?? []) as NoobaaSystemKind[];
+ const nsLoaded = resources?.nss?.loaded;
+ const nsError = resources?.nss?.loadError;
+
+ const allLoaded = scLoaded && ccLoaded && cosLoaded && nsLoaded;
+ const anyError = scError || ccError || cosError || nsError;
+
+ const memoizedPayload = useODFSystemFlagsPayload({
+ storageClusters,
+ cephClusters,
+ objectStores,
+ noobaas,
+ allLoaded,
+ anyError,
+ });
+
+ React.useEffect(() => dispatch(memoizedPayload), [dispatch, memoizedPayload]);
+};
diff --git a/packages/odf/redux/reducers/index.ts b/packages/odf/redux/reducers/index.ts
index 311c45d9b..45a58f0e1 100644
--- a/packages/odf/redux/reducers/index.ts
+++ b/packages/odf/redux/reducers/index.ts
@@ -1 +1,2 @@
export * from './odf-namespace';
+export * from './odf-system-flags';
diff --git a/packages/odf/redux/reducers/odf-system-flags.ts b/packages/odf/redux/reducers/odf-system-flags.ts
new file mode 100644
index 000000000..4828323dd
--- /dev/null
+++ b/packages/odf/redux/reducers/odf-system-flags.ts
@@ -0,0 +1,29 @@
+import { produce } from 'immer';
+import {
+ odfSystemFlagsActionTypes,
+ ODFSystemFlagsPayload,
+ ODFSystemFlagsActions,
+} from '../actions';
+
+const initialState: ODFSystemFlagsPayload = {
+ systemFlags: {},
+ areFlagsLoaded: false,
+ flagsLoadError: undefined,
+};
+
+export const odfSystemFlagsReducer = (
+  odfSystemFlagsState = initialState,
+  action: odfSystemFlagsActionTypes
+): ODFSystemFlagsPayload => {
+  const payload = action.payload;
+  switch (action.type) {
+    case ODFSystemFlagsActions.SetODFSystemFlags:
+      return produce(odfSystemFlagsState, (draft) => {
+        draft.systemFlags = payload.systemFlags;
+        draft.areFlagsLoaded = payload.areFlagsLoaded;
+        draft.flagsLoadError = payload.flagsLoadError;
+      });
+    default:
+      return odfSystemFlagsState;
+  }
+};
diff --git a/packages/odf/redux/selectors/index.ts b/packages/odf/redux/selectors/index.ts
index 311c45d9b..45a58f0e1 100644
--- a/packages/odf/redux/selectors/index.ts
+++ b/packages/odf/redux/selectors/index.ts
@@ -1 +1,2 @@
export * from './odf-namespace';
+export * from './odf-system-flags';
diff --git a/packages/odf/redux/selectors/odf-namespace.ts b/packages/odf/redux/selectors/odf-namespace.ts
index eb0673214..2413a3820 100644
--- a/packages/odf/redux/selectors/odf-namespace.ts
+++ b/packages/odf/redux/selectors/odf-namespace.ts
@@ -1,8 +1,8 @@
+import { reduxReducerScope } from '@odf/core/redux/constants';
import { useSelector } from 'react-redux';
import { nsPayload } from '../actions';
export const odfNamespaceReducerName = 'odfInstallNs';
-export const reduxReducerScope = 'odfConsoleRedux';
const getODFNamespace = (state): nsPayload =>
state.plugins?.[reduxReducerScope]?.[odfNamespaceReducerName] || {};
diff --git a/packages/odf/redux/selectors/odf-system-flags.ts b/packages/odf/redux/selectors/odf-system-flags.ts
new file mode 100644
index 000000000..c1eaf0bec
--- /dev/null
+++ b/packages/odf/redux/selectors/odf-system-flags.ts
@@ -0,0 +1,26 @@
+import { reduxReducerScope } from '@odf/core/redux/constants';
+import { useSelector } from 'react-redux';
+import { ODFSystemFlagsPayload } from '../actions';
+
+export const odfSystemFlagsReducerName = 'odfSystemFlags';
+
+const getODFSystemFlags = (state): ODFSystemFlagsPayload =>
+ state.plugins?.[reduxReducerScope]?.[odfSystemFlagsReducerName] || {};
+
+export const useODFSystemFlagsSelector = (): ODFSystemFlagsPayload & {
+ areFlagsSafe: boolean;
+} => {
+ const { systemFlags, areFlagsLoaded, flagsLoadError } =
+ useSelector(getODFSystemFlags);
+
+ return {
+ // namespace-wise flags (as there can only be one cluster per namespace)
+ systemFlags,
+ // are all flags loaded and stored in redux
+ areFlagsLoaded,
+ // flags loading error object (if any)
+ flagsLoadError,
+ // is safe to use the flags
+ areFlagsSafe: areFlagsLoaded && !flagsLoadError,
+ };
+};
diff --git a/packages/odf/resources.ts b/packages/odf/resources.ts
index 6dcb55dcb..ba7b18e68 100644
--- a/packages/odf/resources.ts
+++ b/packages/odf/resources.ts
@@ -26,7 +26,6 @@ import { WatchK8sResource } from '@openshift-console/dynamic-plugin-sdk';
export const cephClusterResource: WatchK8sResource = {
kind: referenceForModel(CephClusterModel),
- namespaced: false,
isList: true,
};
@@ -81,7 +80,6 @@ export const nodesDiscoveriesResource: WatchK8sResource = {
export const storageClusterResource: WatchK8sResource = {
isList: true,
kind: referenceForModel(OCSStorageClusterModel),
- namespaced: false,
};
export const odfPodsResource: K8sResourceObj = (ns) => ({
diff --git a/packages/odf/utils/odf.ts b/packages/odf/utils/odf.ts
index 82a768e51..70025e427 100644
--- a/packages/odf/utils/odf.ts
+++ b/packages/odf/utils/odf.ts
@@ -1,9 +1,14 @@
+import { getNamespace } from '@odf/shared/selectors';
import { ClusterServiceVersionKind } from '@odf/shared/types';
import { K8sResourceKind } from '@odf/shared/types';
-import { StorageClassResourceKind } from '@odf/shared/types';
+import {
+ StorageClassResourceKind,
+ StorageClusterKind,
+} from '@odf/shared/types';
import { isDefaultClass } from '@odf/shared/utils';
import * as _ from 'lodash-es';
import { ODF_VENDOR_ANNOTATION } from '../constants';
+import { ODFSystemFlagsPayload } from '../redux/actions';
export const getSupportedVendors = (
csv: ClusterServiceVersionKind
@@ -37,3 +42,45 @@ export const getStorageClassDescription = (
export const getOperatorVersion = (operator: K8sResourceKind): string =>
operator?.spec?.version;
+
+export const isMCGStandaloneCluster = (storageCluster: StorageClusterKind) =>
+ storageCluster?.spec?.multiCloudGateway?.reconcileStrategy === 'standalone';
+
+export const isExternalCluster = (storageCluster: StorageClusterKind) =>
+ !_.isEmpty(storageCluster?.spec?.externalStorage);
+
+export const isClusterIgnored = (storageCluster: StorageClusterKind) =>
+ storageCluster?.status?.phase === 'Ignored';
+
+export const isNFSEnabled = (storageCluster: StorageClusterKind) =>
+ storageCluster?.spec?.nfs?.enable === true;
+
+export const getStorageClusterInNs = (
+ storageClusters: StorageClusterKind[],
+ namespace: string
+) =>
+ storageClusters?.find(
+ (sc: StorageClusterKind) =>
+ !isClusterIgnored(sc) && getNamespace(sc) === namespace
+ );
+
+export const getResourceInNs = (
+ resources: K8sResourceKind[],
+ namespace: string
+) => resources?.find((r: K8sResourceKind) => getNamespace(r) === namespace);
+
+export const hasAnyExternalOCS = (
+ systemFlags: ODFSystemFlagsPayload['systemFlags']
+): boolean => _.some(systemFlags, (flags) => !!flags.isExternalMode);
+
+export const hasAnyInternalOCS = (
+ systemFlags: ODFSystemFlagsPayload['systemFlags']
+): boolean => _.some(systemFlags, (flags) => !!flags.isInternalMode);
+
+export const hasAnyCeph = (
+ systemFlags: ODFSystemFlagsPayload['systemFlags']
+): boolean => _.some(systemFlags, (flags) => !!flags.isCephAvailable);
+
+export const hasAnyNoobaaStandalone = (
+ systemFlags: ODFSystemFlagsPayload['systemFlags']
+): boolean => _.some(systemFlags, (flags) => !!flags.isNoobaaStandalone);
diff --git a/packages/shared/src/dashboards/capacity-card/capacity-card.tsx b/packages/shared/src/dashboards/capacity-card/capacity-card.tsx
index cacd19ca9..e9bd05952 100644
--- a/packages/shared/src/dashboards/capacity-card/capacity-card.tsx
+++ b/packages/shared/src/dashboards/capacity-card/capacity-card.tsx
@@ -24,6 +24,7 @@ import './capacity-card.scss';
export type CapacityMetricDatum = {
name: string;
+ namespace?: string;
managedSystemName?: string;
managedSystemKind?: string;
totalValue?: HumanizeResult;
@@ -184,7 +185,11 @@ const CapacityCardRow: React.FC = ({
{data?.managedSystemKind ? (
= ({
const isPercentage = !!item?.totalValue;
return (
> = ({
}
k8sList({
model: ClusterServiceVersionModel,
- queryParams: { namespace },
+ queryParams: { ns: namespace },
requestInit: null,
})
.then((data) => {
diff --git a/packages/shared/src/selectors/k8s.ts b/packages/shared/src/selectors/k8s.ts
index 6d87f971b..82eda1657 100644
--- a/packages/shared/src/selectors/k8s.ts
+++ b/packages/shared/src/selectors/k8s.ts
@@ -41,3 +41,13 @@ export const getAnnotations = (
export const getAPIVersion = (
value: A
) => _.get(value, 'apiVersion') as K8sResourceCommon['apiVersion'];
+
+export const getOwnerReferences = <
+ A extends K8sResourceCommon = K8sResourceCommon
+>(
+ value: A
+) =>
+ _.get(
+ value,
+ 'metadata.ownerReferences'
+ ) as K8sResourceCommon['metadata']['ownerReferences'];
diff --git a/packages/shared/src/types/storage.ts b/packages/shared/src/types/storage.ts
index c7177e460..69f1d20c6 100644
--- a/packages/shared/src/types/storage.ts
+++ b/packages/shared/src/types/storage.ts
@@ -163,3 +163,5 @@ export type StorageConsumerKind = K8sResourceCommon & {
spec?: StorageConsumerSpec;
status: StorageConsumerStatus;
};
+
+export type NoobaaSystemKind = K8sResourceCommon;
diff --git a/packages/shared/src/utils/common.ts b/packages/shared/src/utils/common.ts
index da08e214f..2c6bb6f60 100644
--- a/packages/shared/src/utils/common.ts
+++ b/packages/shared/src/utils/common.ts
@@ -120,8 +120,10 @@ export const referenceForOwnerRef = (
groupVersionFor(ownerRef.apiVersion).version
)(ownerRef.kind);
-export const isFunctionThenApply = (fn: any) => (args: string) =>
- typeof fn === 'function' ? fn(args) : fn;
+export const isFunctionThenApply =
+ (fn: any) =>
+ (...args) =>
+ typeof fn === 'function' ? fn(...args) : fn;
export const getInfrastructurePlatform = (
infrastructure: K8sResourceKind
@@ -131,7 +133,7 @@ export const getInfrastructurePlatform = (
export const getGVKLabel = ({ kind, apiGroup, apiVersion }: Model) =>
`${kind.toLowerCase()}.${apiGroup}/${apiVersion}`;
-export const getRandomChars = () => Math.random().toString(36).substring(7);
+export const getRandomChars = () => Math.random().toString(36).substring(2, 10);
export const getErrorMessage = (error: Error) => error?.message;
@@ -142,3 +144,5 @@ export const isValidIP = (address) =>
export const getValidatedProp = (error: boolean) =>
error ? 'error' : 'default';
+
+export const isAbortError = (err): boolean => err?.name === 'AbortError';
diff --git a/packages/shared/src/utils/dashboard.ts b/packages/shared/src/utils/dashboard.ts
index a5540d882..ddba15c2a 100644
--- a/packages/shared/src/utils/dashboard.ts
+++ b/packages/shared/src/utils/dashboard.ts
@@ -8,8 +8,12 @@ import { TFunction } from 'i18next';
const DASH_PREFIX = '/odf/system';
-export const getDashboardLink = (systemKind: string, systemName: string) =>
- `${DASH_PREFIX}/${systemKind}/${systemName}/overview`;
+export const getDashboardLink = (
+ systemKind: string,
+ systemName: string,
+ systemNamespace: string
+) =>
+ `${DASH_PREFIX}/ns/${systemNamespace}/${systemKind}/${systemName}/overview`;
export const getWorstStatus = (
componentsHealth: SubsystemHealth[],
diff --git a/plugins/odf/console-extensions.json b/plugins/odf/console-extensions.json
index b5a1eb75c..bd109483e 100644
--- a/plugins/odf/console-extensions.json
+++ b/plugins/odf/console-extensions.json
@@ -62,6 +62,14 @@
}
}
},
+ {
+ "type": "console.flag/hookProvider",
+ "properties": {
+ "handler": {
+ "$codeRef": "odfReduxReducers.useODFSystemFlags"
+ }
+ }
+ },
{
"type": "console.navigation/href",
"properties": {
@@ -133,7 +141,7 @@
"type": "console.page/route",
"properties": {
"exact": true,
- "path": "/odf/system/ocs.openshift.io~v1~StorageCluster/:systemName/ceph.rook.io~v1~CephBlockPool/create/~new",
+ "path": "/odf/system/ns/:namespace/ocs.openshift.io~v1~StorageCluster/:systemName/ceph.rook.io~v1~CephBlockPool/create/~new",
"component": {
"$codeRef": "createBlockPools.default"
}
@@ -142,7 +150,7 @@
{
"type": "console.page/route",
"properties": {
- "path": "/odf/system/ocs.openshift.io~v1~StorageCluster/:systemName/ceph.rook.io~v1~CephBlockPool/:poolName",
+ "path": "/odf/system/ns/:namespace/ocs.openshift.io~v1~StorageCluster/:systemName/ceph.rook.io~v1~CephBlockPool/:poolName",
"exact": false,
"component": {
"$codeRef": "blockPoolDetailsPage.default"
@@ -212,7 +220,7 @@
{
"type": "console.page/route",
"properties": {
- "path": "/odf/system/ocs.openshift.io~v1~StorageCluster/:systemName",
+ "path": "/odf/system/ns/:namespace/ocs.openshift.io~v1~StorageCluster/:systemName",
"exact": false,
"component": {
"$codeRef": "ocs.default"