Skip to content

Commit

Permalink
refactoring (support multiple StorageSystems per ODF)
Browse files Browse the repository at this point in the history
Removed OCS related flags from "features.ts" file.
Added those flags as part of the redux store and created related utils.
Created Hook to read these flags.
Updated Wizard to support multiple Ceph based clusters (only 1 internal + 1 external needed for now).
Updated KMS resources creation utils (Secrets) so that CSI and OCS resources are decoupled.
Added a Namespace field to OCS wizard redux (used across entire wizard flow).
Updated dashboards so that each StorageCluster should only show related components, not any/all (NooBaa/RGW).
Updated StorageCluster dashboard Routes by adding Namespace as well.
Updated all queries using "odf_system_" and "ceph_" metrics, relying on "managedBy" label for now (Block/File/Object).
Updated StorageClass creation flow (ceph-rbd/fs) with a dropdown for corresponding StorageSystem selection.
Updated "HealthOverview" (injected to OCP dashboard's Status card currently) to incorporate status of all Ceph clusters.
  • Loading branch information
SanjalKatiyar committed Dec 12, 2023
1 parent e70f38c commit 5516bd9
Show file tree
Hide file tree
Showing 122 changed files with 2,369 additions and 1,253 deletions.
18 changes: 11 additions & 7 deletions locales/en/plugin__odf-console.json
Original file line number Diff line number Diff line change
Expand Up @@ -376,9 +376,9 @@
"Estimating {{formattedEta}} to completion": "Estimating {{formattedEta}} to completion",
"Object_one": "Object",
"Object_other": "Object",
"NooBaa Bucket": "NooBaa Bucket",
"Buckets": "Buckets",
"Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).": "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).",
"NooBaa Bucket": "NooBaa Bucket",
"Cluster-wide": "Cluster-wide",
"Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.": "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.",
"Break by": "Break by",
Expand Down Expand Up @@ -483,13 +483,15 @@
"Optimise cluster for Regional-DR?": "Optimise cluster for Regional-DR?",
"Configure the cluster for a Regional-DR setup by migrating OSDs. Migration may take some time depending on several factors. To learn more about OSDs migration best practices and its consequences refer to the documentation.": "Configure the cluster for a Regional-DR setup by migrating OSDs. Migration may take some time depending on several factors. To learn more about OSDs migration best practices and its consequences refer to the documentation.",
"Optimise": "Optimise",
"Filesystem name": "Filesystem name",
"Enter filesystem name": "Enter filesystem name",
"CephFS filesystem name into which the volume shall be created": "CephFS filesystem name into which the volume shall be created",
"no compression": "no compression",
"with compression": "with compression",
"Replica {{poolSize}} {{compressionText}}": "Replica {{poolSize}} {{compressionText}}",
"Create New Pool": "Create New Pool",
"Storage system": "Storage system",
"StorageSystem which will be used for storage needs": "StorageSystem which will be used for storage needs",
"Filesystem name": "Filesystem name",
"Enter filesystem name": "Enter filesystem name",
"CephFS filesystem name into which the volume shall be created": "CephFS filesystem name into which the volume shall be created",
"Storage Pool": "Storage Pool",
"Select a Pool": "Select a Pool",
"Storage pool into which volume data shall be stored": "Storage pool into which volume data shall be stored",
Expand Down Expand Up @@ -959,7 +961,6 @@
"Client onboarding token": "Client onboarding token",
"Raw Capacity": "Raw Capacity",
"Add Capacity": "Add Capacity",
"External": "External",
"Cluster details": "Cluster details",
"Data resiliency": "Data resiliency",
"Nodes": "Nodes",
Expand All @@ -969,8 +970,11 @@
"Search...": "Search...",
"Expand to fullscreen": "Expand to fullscreen",
"Exit fullscreen": "Exit fullscreen",
"This view is only supported for Internal mode cluster.": "This view is only supported for Internal mode cluster.",
"Show message": "Show message",
"Hide message": "Hide message",
"Back to main view": "Back to main view",
"Topology view is not supported for External Mode": "Topology view is not supported for External Mode",
"Topology view is not supported for External mode": "Topology view is not supported for External mode",
"No StorageCluster found": "No StorageCluster found",
"Set up a storage cluster to view the topology": "Set up a storage cluster to view the topology",
"A minimal cluster deployment will be performed.": "A minimal cluster deployment will be performed.",
Expand Down Expand Up @@ -1023,7 +1027,7 @@
"No StorageClass selected": "No StorageClass selected",
"The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.",
"The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.",
"Adding capacity for <1>{{name}}</1>, may increase your expenses.": "Adding capacity for <1>{{name}}</1>, may increase your expenses.",
"Adding capacity for <1>{{ocsClusterName}}</1>, may increase your expenses.": "Adding capacity for <1>{{ocsClusterName}}</1>, may increase your expenses.",
"Currently Used:": "Currently Used:",
"Add": "Add",
"{{availableCapacity}} / {{replica}} replicas": "{{availableCapacity}} / {{replica}} replicas",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ const watchResources = {
kind: StorageClassModel.kind,
},
};

export const InventoryCard: React.FC = () => {
const { t } = useCustomTranslation();

Expand Down
14 changes: 7 additions & 7 deletions packages/mco/components/mco-dashboard/queries.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,11 @@ export const LAST_SYNC_TIME_QUERY = 'ramen_sync_duration_seconds';
// Builds the label-filtered query string for the last-sync-time metric,
// scoped to DRPC objects reported via the Ramen hub operator metrics service.
export const getLastSyncPerClusterQuery = () => {
  const labelMatchers = [DRPC_OBJECT_TYPE, RAMEN_HUB_OPERATOR_METRICS_SERVICE].join(', ');
  return `${LAST_SYNC_TIME_QUERY}{${labelMatchers}}`;
};

// ToDo (epic 4422): Need to update as per updates in the metrics
export const CAPACITY_QUERIES = {
// ToDo (epic 4422): For 4.15, Assuming "managedBy" is unique for each StorageSystem. Need to add "target_namesapce" as an another key.
[StorageDashboard.TOTAL_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${TOTAL_CAPACITY_FILE_BLOCK_METRIC}`,
[StorageDashboard.USED_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${USED_CAPACITY_FILE_BLOCK_METRIC}`,
// ToDo (epic 4422): Need to update as per updates in the metrics (if needed/once confirmed).
// Assuming "namespace" in "odf_system.*"" metrics (except "odf_system_map" which is pushed by ODF opr and already has "target_namespace") is where system is deployed.
[StorageDashboard.TOTAL_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (target_namespace, managedBy, cluster) group_right(storage_system, target_kind) (label_replace(${TOTAL_CAPACITY_FILE_BLOCK_METRIC}, "target_namespace", "$1", "namespace", "(.*)"))`,
[StorageDashboard.USED_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (target_namespace, managedBy, cluster) group_right(storage_system, target_kind) (label_replace(${USED_CAPACITY_FILE_BLOCK_METRIC}, "target_namespace", "$1", "namespace", "(.*)"))`,
};

export const getRBDSnapshotUtilizationQuery = (
Expand All @@ -46,10 +46,10 @@ export const getRBDSnapshotUtilizationQuery = (
return queries[queryName];
};

// ToDo (epic 4422): Need to update as per updates in the metrics
export const STATUS_QUERIES = {
// ToDo (epic 4422): For 4.15, Assuming "managedBy" is unique for each StorageSystem. Need to add "target_namesapce" as an another key.
[StorageDashboard.SYSTEM_HEALTH]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${SYSTEM_HEALTH_METRIC}`,
// ToDo (epic 4422): Need to update as per updates in the metrics (if needed/once confirmed).
// Assuming "namespace" in "odf_system.*"" metrics (except "odf_system_map" which is pushed by ODF opr and already has "target_namespace") is where system is deployed.
[StorageDashboard.SYSTEM_HEALTH]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (target_namespace, managedBy, cluster) group_right(storage_system, target_kind) (label_replace(${SYSTEM_HEALTH_METRIC}, "target_namespace", "$1", "namespace", "(.*)"))`,
[StorageDashboard.HEALTH]: SYSTEM_HEALTH_METRIC,
[StorageDashboard.CSV_STATUS]: `csv_succeeded{name=~"${ODF_OPERATOR}.*"}`,
[StorageDashboard.CSV_STATUS_ALL_WHITELISTED]: 'csv_succeeded',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,8 @@ const setSubSystemMap = (
subSysHealthData: PrometheusResponse,
subSystemMap: SubSystemMap
) =>
// ToDo (epic 4422): Assuming "namespace" in "odf_system.*"" metrics (except "odf_system_map" which is pushed by ODF opr and already has "target_namespace")
// is where system is deployed (update query if needed).
subSysHealthData?.data?.result?.forEach(
(item: PrometheusResult) =>
!item?.metric.managedBy &&
Expand All @@ -70,6 +72,8 @@ const setHealthData = (
healthData: SystemHealthMap[],
subSystemMap: SubSystemMap
) =>
// ToDo (epic 4422): Assuming "namespace" in "odf_system.*"" metrics (except "odf_system_map" which is pushed by ODF opr and already has "target_namespace")
// is where system is deployed (update query if needed).
sysHealthData?.data?.result?.forEach((item: PrometheusResult) => {
const { apiGroup } = getGVK(item?.metric.target_kind);
const healthVal = item?.value[1];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,12 +151,20 @@ const headerColumns = (t: TFunction) => [
];

const getRow: GetRow = (
{ systemName, targetKind, clusterName, totalValue, usedValue, clusterURL },
{
systemName,
namespace: systemNamespace,
targetKind,
clusterName,
totalValue,
usedValue,
clusterURL,
},
index
) => {
const { apiGroup, apiVersion, kind } = getGVK(targetKind);
const systemKind = referenceForGroupVersionKind(apiGroup)(apiVersion)(kind);
const systemPath = getDashboardLink(systemKind, systemName);
const systemPath = getDashboardLink(systemKind, systemName, systemNamespace);
const isPercentage = !!totalValue;
const progress = isPercentage ? getPercentage(usedValue, totalValue) : 100;
const value = isPercentage
Expand Down Expand Up @@ -280,6 +288,8 @@ const SystemCapacityCard: React.FC = () => {
!loadingUsedCapacity && !errorUsedCapacity
? usedCapacity?.data?.result?.reduce(
(acc: CapacityMetricDatumMap, usedMetric: PrometheusResult) => {
// ToDo (epic 4422): Assuming "namespace" in "odf_system.*"" metrics (except "odf_system_map" which is pushed by ODF opr and already has "target_namespace")
// is where system is deployed (update query if needed).
const systemName = usedMetric?.metric?.storage_system;
const namespace = usedMetric?.metric?.target_namespace;
const targetKind = usedMetric?.metric?.target_kind;
Expand All @@ -305,6 +315,8 @@ const SystemCapacityCard: React.FC = () => {
!loadingTotalCapacity &&
!errorTotalCapacity &&
totalCapacity?.data?.result?.forEach((totalMetric: PrometheusResult) => {
// ToDo (epic 4422): Assuming "namespace" in "odf_system.*"" metrics (except "odf_system_map" which is pushed by ODF opr and already has "target_namespace")
// is where system is deployed (update query if needed).
const dataMapKey = getUniqueKey(
totalMetric?.metric?.storage_system,
totalMetric?.metric?.target_namespace,
Expand Down
29 changes: 12 additions & 17 deletions packages/ocs/block-pool/BlockPoolDetailsPage.tsx
Original file line number Diff line number Diff line change
@@ -1,12 +1,11 @@
import * as React from 'react';
import { useSafeK8sWatchResource } from '@odf/core/hooks';
import { useODFNamespaceSelector } from '@odf/core/redux';
import DetailsPage from '@odf/shared/details-page/DetailsPage';
import { Kebab } from '@odf/shared/kebab/kebab';
import { ModalKeys } from '@odf/shared/modals/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { referenceForModel } from '@odf/shared/utils';
import { EventStreamWrapped, YAMLEditorWrapped } from '@odf/shared/utils/Tabs';
import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import { useParams, useLocation } from 'react-router-dom-v5-compat';
import { BlockPoolDashboard } from '../dashboards/block-pool/block-pool-dashboard';
import { CephBlockPoolModel, CephClusterModel } from '../models';
Expand All @@ -21,20 +20,16 @@ export const cephClusterResource = {
export const BlockPoolDetailsPage: React.FC<{}> = () => {
const { t } = useCustomTranslation();

const { poolName } = useParams();
const { poolName, namespace: poolNs } = useParams();
const location = useLocation();
const kind = referenceForModel(CephBlockPoolModel);

const { odfNamespace, isODFNsLoaded, odfNsLoadError } =
useODFNamespaceSelector();

const [resource, loaded, loadError] =
useSafeK8sWatchResource<StoragePoolKind>((ns: string) => ({
kind,
name: poolName,
namespace: ns,
isList: false,
}));
const [resource, loaded, loadError] = useK8sWatchResource<StoragePoolKind>({
kind,
name: poolName,
namespace: poolNs,
isList: false,
});

const breadcrumbs = [
{
Expand All @@ -58,7 +53,7 @@ export const BlockPoolDetailsPage: React.FC<{}> = () => {
extraProps={{
resource,
resourceModel: CephBlockPoolModel,
namespace: odfNamespace,
namespace: poolNs,
}}
customKebabItems={[
{
Expand All @@ -78,12 +73,12 @@ export const BlockPoolDetailsPage: React.FC<{}> = () => {
]}
/>
);
}, [resource, odfNamespace, t]);
}, [resource, poolNs, t]);

return (
<DetailsPage
loaded={loaded && isODFNsLoaded}
loadError={loadError || odfNsLoadError}
loaded={loaded}
loadError={loadError}
breadcrumbs={breadcrumbs}
actions={actions}
resourceModel={CephBlockPoolModel}
Expand Down
Loading

0 comments on commit 5516bd9

Please sign in to comment.