refactoring (support multiple StorageSystems per ODF)
SanjalKatiyar committed Nov 30, 2023
1 parent 9b55fef commit c71a477
Showing 98 changed files with 1,649 additions and 858 deletions.
10 changes: 6 additions & 4 deletions locales/en/plugin__odf-console.json
@@ -376,9 +376,9 @@
"Estimating {{formattedEta}} to completion": "Estimating {{formattedEta}} to completion",
"Object_one": "Object",
"Object_other": "Object",
"NooBaa Bucket": "NooBaa Bucket",
"Buckets": "Buckets",
"Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).": "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).",
"NooBaa Bucket": "NooBaa Bucket",
"Cluster-wide": "Cluster-wide",
"Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.": "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.",
"Break by": "Break by",
@@ -919,7 +919,6 @@
"Num Volumes": "Num Volumes",
"Raw Capacity": "Raw Capacity",
"Add Capacity": "Add Capacity",
"External": "External",
"Cluster details": "Cluster details",
"Data resiliency": "Data resiliency",
"Nodes": "Nodes",
@@ -929,8 +928,11 @@
"Search...": "Search...",
"Expand to fullscreen": "Expand to fullscreen",
"Exit fullscreen": "Exit fullscreen",
"This view is only supported for Internal mode cluster.": "This view is only supported for Internal mode cluster.",
"Show message": "Show message",
"Hide message": "Hide message",
"Back to main view": "Back to main view",
"Topology view is not supported for External Mode": "Topology view is not supported for External Mode",
"Topology view is not supported for External mode": "Topology view is not supported for External mode",
"No StorageCluster found": "No StorageCluster found",
"Set up a storage cluster to view the topology": "Set up a storage cluster to view the topology",
"A minimal cluster deployment will be performed.": "A minimal cluster deployment will be performed.",
@@ -980,7 +982,7 @@
"No StorageClass selected": "No StorageClass selected",
"The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.",
"The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.",
"Adding capacity for <1>{{name}}</1>, may increase your expenses.": "Adding capacity for <1>{{name}}</1>, may increase your expenses.",
"Adding capacity for <1>{{ocsClusterName}}</1>, may increase your expenses.": "Adding capacity for <1>{{ocsClusterName}}</1>, may increase your expenses.",
"Currently Used:": "Currently Used:",
"Add": "Add",
"{{availableCapacity}} / {{replica}} replicas": "{{availableCapacity}} / {{replica}} replicas",
6 changes: 2 additions & 4 deletions packages/mco/components/mco-dashboard/queries.ts
@@ -27,9 +27,8 @@ export const LAST_SYNC_TIME_QUERY = 'ramen_sync_duration_seconds';
export const getLastSyncPerClusterQuery = () =>
`${LAST_SYNC_TIME_QUERY}{${DRPC_OBJECT_TYPE}, ${RAMEN_HUB_OPERATOR_METRICS_SERVICE}}`;

// ToDo (epic 4422): Need to update as per updates in the metrics
export const CAPACITY_QUERIES = {
// ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem. Need to add "target_namespace" as another key.
// ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem (works for now). Need to add "target_namespace" as another key to capacity metrics.
[StorageDashboard.TOTAL_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${TOTAL_CAPACITY_FILE_BLOCK_METRIC}`,
[StorageDashboard.USED_CAPACITY_FILE_BLOCK]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${USED_CAPACITY_FILE_BLOCK_METRIC}`,
};
@@ -46,9 +45,8 @@ export const getRBDSnapshotUtilizationQuery = (
return queries[queryName];
};

// ToDo (epic 4422): Need to update as per updates in the metrics
export const STATUS_QUERIES = {
// ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem. Need to add "target_namespace" as another key.
// ToDo (epic 4422): For 4.15, assuming "managedBy" is unique for each StorageSystem (works for now). Need to add "target_namespace" as another key to health metrics.
[StorageDashboard.SYSTEM_HEALTH]: `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) * on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${SYSTEM_HEALTH_METRIC}`,
[StorageDashboard.HEALTH]: SYSTEM_HEALTH_METRIC,
[StorageDashboard.CSV_STATUS]: `csv_succeeded{name=~"${ODF_OPERATOR}.*"}`,
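
A hedged sketch for readers of the two query maps above: both rely on the same PromQL join, in which label_replace copies target_name into managedBy so odf_system_map can be vector-matched against a raw capacity or health metric, and group_right carries the StorageSystem identity labels onto every resulting sample. The helper below is not part of this commit; its metric argument stands for the raw metric names behind constants such as TOTAL_CAPACITY_FILE_BLOCK_METRIC or SYSTEM_HEALTH_METRIC.

// Sketch only (not in this commit): the join pattern shared by CAPACITY_QUERIES and STATUS_QUERIES.
const joinWithSystemMap = (metric: string): string =>
  // Copy "target_name" into "managedBy" so odf_system_map lines up with the metric's labels,
  // then keep the metric as the "many" side and attach the StorageSystem identity labels to it.
  `(label_replace(odf_system_map, "managedBy", "$1", "target_name", "(.*)")) ` +
  `* on (namespace, managedBy, cluster) group_right(storage_system, target_kind, target_namespace) ${metric}`;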
@@ -56,6 +56,8 @@ const setSubSystemMap = (
subSysHealthData: PrometheusResponse,
subSystemMap: SubSystemMap
) =>
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
subSysHealthData?.data?.result?.forEach(
(item: PrometheusResult) =>
!item?.metric.managedBy &&
@@ -70,6 +72,8 @@ const setHealthData = (
healthData: SystemHealthMap[],
subSystemMap: SubSystemMap
) =>
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
sysHealthData?.data?.result?.forEach((item: PrometheusResult) => {
const { apiGroup } = getGVK(item?.metric.target_kind);
const healthVal = item?.value[1];
@@ -151,12 +151,20 @@ const headerColumns = (t: TFunction) => [
];

const getRow: GetRow = (
{ systemName, targetKind, clusterName, totalValue, usedValue, clusterURL },
{
systemName,
namespace: systemNamespace,
targetKind,
clusterName,
totalValue,
usedValue,
clusterURL,
},
index
) => {
const { apiGroup, apiVersion, kind } = getGVK(targetKind);
const systemKind = referenceForGroupVersionKind(apiGroup)(apiVersion)(kind);
const systemPath = getDashboardLink(systemKind, systemName);
const systemPath = getDashboardLink(systemKind, systemName, systemNamespace);
const isPercentage = !!totalValue;
const progress = isPercentage ? getPercentage(usedValue, totalValue) : 100;
const value = isPercentage
@@ -280,6 +288,8 @@ const SystemCapacityCard: React.FC = () => {
!loadingUsedCapacity && !errorUsedCapacity
? usedCapacity?.data?.result?.reduce(
(acc: CapacityMetricDatumMap, usedMetric: PrometheusResult) => {
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
const systemName = usedMetric?.metric?.storage_system;
const namespace = usedMetric?.metric?.target_namespace;
const targetKind = usedMetric?.metric?.target_kind;
@@ -305,6 +315,8 @@ const SystemCapacityCard: React.FC = () => {
!loadingTotalCapacity &&
!errorTotalCapacity &&
totalCapacity?.data?.result?.forEach((totalMetric: PrometheusResult) => {
// ToDo (epic 4422): "namespace" in "odf_system_*" metrics signifies odf-operator pod's namespace.
// Add a label to metric for StorageSystem namespace as well and use that instead (update query).
const dataMapKey = getUniqueKey(
totalMetric?.metric?.storage_system,
totalMetric?.metric?.target_namespace,
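
The accumulator logic above keys the capacity map by system name and target namespace (and, judging by the reduce earlier in this file, target kind), which is what keeps two StorageSystems with the same name in different namespaces distinct. The helper below is only a sketch of that idea; the real getUniqueKey is defined elsewhere in the package and the delimiter here is an assumption.

// Sketch only — illustrates why the namespace must be part of the key once an ODF
// installation can own multiple StorageSystems; not the actual getUniqueKey implementation.
const getUniqueKeySketch = (
  systemName: string,
  namespace: string,
  targetKind: string
): string => `${systemName}-${namespace}-${targetKind}`;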
32 changes: 14 additions & 18 deletions packages/ocs/block-pool/BlockPoolDetailsPage.tsx
@@ -1,16 +1,15 @@
import * as React from 'react';
import { useSafeK8sWatchResource } from '@odf/core/hooks';
import { useODFNamespaceSelector } from '@odf/core/redux';
import DetailsPage from '@odf/shared/details-page/DetailsPage';
import { Kebab } from '@odf/shared/kebab/kebab';
import { ModalKeys } from '@odf/shared/modals/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { referenceForModel } from '@odf/shared/utils';
import { EventStreamWrapped, YAMLEditorWrapped } from '@odf/shared/utils/Tabs';
import { RouteComponentProps, useLocation } from 'react-router-dom';
import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import { RouteComponentProps, useLocation, useParams } from 'react-router-dom';
import { BlockPoolDashboard } from '../dashboards/block-pool/block-pool-dashboard';
import { CephBlockPoolModel, CephClusterModel } from '../models';
import { StoragePoolKind } from '../types';
import { StoragePoolKind, ODFSystemParams } from '../types';

type BlockPoolDetailsPageProps = {
match: RouteComponentProps<{ poolName: string }>['match'];
@@ -30,18 +29,15 @@ export const BlockPoolDetailsPage: React.FC<BlockPoolDetailsPageProps> = ({

const { poolName } = match.params;
const location = useLocation();
const { namespace: poolNs } = useParams<ODFSystemParams>();
const kind = referenceForModel(CephBlockPoolModel);

const { odfNamespace, isODFNsLoaded, odfNsLoadError } =
useODFNamespaceSelector();

const [resource, loaded, loadError] =
useSafeK8sWatchResource<StoragePoolKind>((ns: string) => ({
kind,
name: poolName,
namespace: ns,
isList: false,
}));
const [resource, loaded, loadError] = useK8sWatchResource<StoragePoolKind>({
kind,
name: poolName,
namespace: poolNs,
isList: false,
});

const breadcrumbs = [
{
@@ -65,7 +61,7 @@ export const BlockPoolDetailsPage: React.FC<BlockPoolDetailsPageProps> = ({
extraProps={{
resource,
resourceModel: CephBlockPoolModel,
namespace: odfNamespace,
namespace: poolNs,
}}
customKebabItems={[
{
@@ -85,12 +81,12 @@ export const BlockPoolDetailsPage: React.FC<BlockPoolDetailsPageProps> = ({
]}
/>
);
}, [resource, odfNamespace, t]);
}, [resource, poolNs, t]);

return (
<DetailsPage
loaded={loaded && isODFNsLoaded}
loadError={loadError || odfNsLoadError}
loaded={loaded}
loadError={loadError}
breadcrumbs={breadcrumbs}
actions={actions}
resourceModel={CephBlockPoolModel}
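
Closing sketch for the BlockPoolDetailsPage change above: the page now reads the StorageSystem namespace from the route (useParams<ODFSystemParams>) instead of the single ODF install namespace, so one details page can serve pools owned by any StorageSystem. The hook below only illustrates that pattern and is not part of the commit; ODFSystemParams lives in the package's types, and anything beyond its namespace field is an assumption here.

// Sketch only — not part of this commit. Shows the route-scoped watch pattern the page now follows.
import { useParams } from 'react-router-dom';
import {
  useK8sWatchResource,
  K8sResourceCommon,
} from '@openshift-console/dynamic-plugin-sdk';

type ParamsSketch = { namespace: string };

const useNamespacedResource = (kind: string, name: string) => {
  // The namespace comes from the URL rather than from the ODF install namespace,
  // so the same page works for resources of any StorageSystem.
  const { namespace } = useParams<ParamsSketch>();
  return useK8sWatchResource<K8sResourceCommon>({
    kind,
    name,
    namespace,
    isList: false,
  });
};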