From c3cca9377e12221a5b2644ee1da0d9ab138c9f00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=9C=E9=A3=8E?= <1335799468@qq.com> Date: Sat, 23 Dec 2023 18:50:39 +0800 Subject: [PATCH] support monitoring NebulaGraph metrics and add help doc --- .../nebulagraph/NebulaGraphCollectImpl.java | 196 +++++ .../collector/dispatch/DispatchConstants.java | 4 + ...ertzbeat.collector.collect.AbstractCollect | 1 + .../hertzbeat/common/entity/job/Metrics.java | 5 + .../job/protocol/NebulaGraphProtocol.java | 43 + home/docs/help/nebulagraph.md | 74 ++ .../current/help/nebulagraph.md | 123 +++ .../main/resources/define/app-nebulaGraph.yml | 768 ++++++++++++++++++ 8 files changed, 1214 insertions(+) create mode 100644 collector/src/main/java/org/dromara/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java create mode 100644 common/src/main/java/org/dromara/hertzbeat/common/entity/job/protocol/NebulaGraphProtocol.java create mode 100644 home/docs/help/nebulagraph.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md create mode 100644 manager/src/main/resources/define/app-nebulaGraph.yml diff --git a/collector/src/main/java/org/dromara/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java b/collector/src/main/java/org/dromara/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java new file mode 100644 index 00000000000..0ce9f0312f0 --- /dev/null +++ b/collector/src/main/java/org/dromara/hertzbeat/collector/collect/nebulagraph/NebulaGraphCollectImpl.java @@ -0,0 +1,196 @@ +package org.dromara.hertzbeat.collector.collect.nebulagraph; + +import lombok.extern.slf4j.Slf4j; +import org.apache.http.HttpHeaders; +import org.apache.http.HttpHost; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.methods.RequestBuilder; +import 
org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.protocol.HttpContext; +import org.apache.http.util.EntityUtils; +import org.dromara.hertzbeat.collector.collect.AbstractCollect; +import org.dromara.hertzbeat.collector.collect.common.http.CommonHttpClient; +import org.dromara.hertzbeat.collector.dispatch.DispatchConstants; +import org.dromara.hertzbeat.collector.util.CollectUtil; +import org.dromara.hertzbeat.common.constants.CollectorConstants; +import org.dromara.hertzbeat.common.constants.CommonConstants; +import org.dromara.hertzbeat.common.entity.job.Metrics; +import org.dromara.hertzbeat.common.entity.job.protocol.NebulaGraphProtocol; +import org.dromara.hertzbeat.common.entity.message.CollectRep; +import org.dromara.hertzbeat.common.util.CommonUtil; +import org.dromara.hertzbeat.common.util.IpDomainUtil; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +/** + * @author dongfeng + */ +@Slf4j +public class NebulaGraphCollectImpl extends AbstractCollect { + private final static int SUCCESS_CODE = 200; + + private final static String[] TIME_RANGE = new String[]{"5", "60", "600", "3600"}; + + private final static String REGEX = "\\.%s\\="; + + private final static String STR_SPLIT = "\n"; + + private final static String STORAGE_SPLIT_KEY_VALUE = "="; + + private final static String GRAPH_API = "/stats"; + + private final static String STORAGE_API = "/rocksdb_stats"; + + + @Override + public void collect(CollectRep.MetricsData.Builder builder, long monitorId, String app, Metrics metrics) { + long startTime = System.currentTimeMillis(); + if (metrics == null || metrics.getNebulaGraph() == null) { + builder.setCode(CollectRep.Code.FAIL); + builder.setMsg("NebulaGraph collect must has NebulaGraph params"); + return; + } + NebulaGraphProtocol 
nebulaGraph = metrics.getNebulaGraph(); + String timePeriod = nebulaGraph.getTimePeriod(); + + if (!Objects.isNull(nebulaGraph.getTimePeriod())&&!Arrays.asList(TIME_RANGE).contains(timePeriod)) { + builder.setCode(CollectRep.Code.FAIL); + builder.setMsg("The time range for metric statistics, currently supporting 5 seconds, 60 seconds, 600 seconds, and 3600 seconds."); + return; + } + + if (nebulaGraph.getHost() == null || nebulaGraph.getHost().isEmpty()) { + builder.setCode(CollectRep.Code.FAIL); + builder.setMsg("The host of NebulaGraph must be set"); + return; + } + + String resp; + long responseTime; + HashMap resultMap = new HashMap<>(64); + CloseableHttpResponse response; + HttpContext httpContext = createHttpContext(nebulaGraph.getHost(), nebulaGraph.getPort()); + HttpUriRequest request = createHttpRequest(nebulaGraph.getHost(), nebulaGraph.getPort(), + nebulaGraph.getUrl(), nebulaGraph.getTimeout()); + try { + // 发起http请求,获取响应数据 + response = CommonHttpClient.getHttpClient().execute(request, httpContext); + int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode != SUCCESS_CODE) { + builder.setCode(CollectRep.Code.FAIL); + builder.setMsg("StatusCode " + statusCode); + return; + } + resp = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); + responseTime = System.currentTimeMillis() - startTime; + resultMap.put(CollectorConstants.RESPONSE_TIME, Long.toString(responseTime)); + // 根据API进行不同解析 + if (GRAPH_API.equals(nebulaGraph.getUrl())) { + parseStatsResponse(resp, nebulaGraph.getTimePeriod(), resultMap); + } else if (STORAGE_API.equals(nebulaGraph.getUrl())) { + parseStorageResponse(resp, resultMap); + } + List aliasFields = metrics.getAliasFields(); + CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); + for (String field : aliasFields) { + String fieldValue = resultMap.get(field); + valueRowBuilder.addColumns(Objects.requireNonNullElse(fieldValue, CommonConstants.NULL_VALUE)); + } + 
builder.addValues(valueRowBuilder.build()); + } catch (IOException e) { + String errorMsg = CommonUtil.getMessageFromThrowable(e); + log.info(errorMsg); + builder.setCode(CollectRep.Code.FAIL); + builder.setMsg(errorMsg); + } + } + + + @Override + public String supportProtocol() { + return DispatchConstants.PROTOCOL_NEBULAGRAPH; + } + + private HttpContext createHttpContext(String host, String port) { + HttpHost httpHost = new HttpHost(host, Integer.parseInt(port)); + HttpClientContext httpClientContext = new HttpClientContext(); + httpClientContext.setTargetHost(httpHost); + return httpClientContext; + } + + private HttpUriRequest createHttpRequest(String host, String port, String url, String timeoutStr) { + RequestBuilder requestBuilder = RequestBuilder.get(); + // uri + String uri = CollectUtil.replaceUriSpecialChar(url); + if (IpDomainUtil.isHasSchema(host)) { + requestBuilder.setUri(host + ":" + port + uri); + } else { + String ipAddressType = IpDomainUtil.checkIpAddressType(host); + String baseUri = CollectorConstants.IPV6.equals(ipAddressType) + ? 
String.format("[%s]:%s", host, port + uri) + : String.format("%s:%s", host, port + uri); + + requestBuilder.setUri(CollectorConstants.HTTP_HEADER + baseUri); + } + + requestBuilder.addHeader(HttpHeaders.CONNECTION, "keep-alive"); + requestBuilder.addHeader(HttpHeaders.USER_AGENT, "Mozilla/5.0 (Windows NT 6.1; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36"); + + requestBuilder.addHeader(HttpHeaders.ACCEPT, "text/plain"); + + int timeout = Integer.parseInt(timeoutStr); + if (timeout > 0) { + RequestConfig requestConfig = RequestConfig.custom() + .setConnectTimeout(timeout) + .setSocketTimeout(timeout) + .setRedirectsEnabled(true) + .build(); + requestBuilder.setConfig(requestConfig); + } + return requestBuilder.build(); + } + + /** + * 解析Stats响应通过时间间隔进行筛选 + * + * @param responseBody 响应体 + * @param timePeriod 时间间隔 + */ + private void parseStatsResponse(String responseBody, String timePeriod, HashMap resultMap) { + // 设置正则匹配 + String timeRegex = String.format(REGEX, timePeriod); + Pattern pattern = Pattern.compile(timeRegex); + String[] strArray = responseBody.split(STR_SPLIT); + for (String str : strArray) { + Matcher matcher = pattern.matcher(str); + if (matcher.find()) { + String[] split = str.split(timeRegex); + resultMap.put(split[0], split[1]); + } + } + } + + + /** + * 解析Storage响应通过时间间隔进行筛选 + * + * @param responseBody 响应体 + */ + private void parseStorageResponse(String responseBody, HashMap resultMap) { + String[] strArray = responseBody.split(STR_SPLIT); + for (String str : strArray) { + String[] split = str.split(STORAGE_SPLIT_KEY_VALUE); + resultMap.put(split[0], split[1]); + } + } +} diff --git a/collector/src/main/java/org/dromara/hertzbeat/collector/dispatch/DispatchConstants.java b/collector/src/main/java/org/dromara/hertzbeat/collector/dispatch/DispatchConstants.java index 2bbb8918b09..b41294cf512 100644 --- a/collector/src/main/java/org/dromara/hertzbeat/collector/dispatch/DispatchConstants.java +++ 
b/collector/src/main/java/org/dromara/hertzbeat/collector/dispatch/DispatchConstants.java @@ -55,6 +55,10 @@ public interface DispatchConstants { * protocol memcached */ String PROTOCOL_MEMCACHED = "memcached"; + /** + * protocol nebulagraph + */ + String PROTOCOL_NEBULAGRAPH = "nebulaGraph"; /** * protocol udp */ diff --git a/collector/src/main/resources/META-INF/services/org.dromara.hertzbeat.collector.collect.AbstractCollect b/collector/src/main/resources/META-INF/services/org.dromara.hertzbeat.collector.collect.AbstractCollect index 07a2fc05c6f..e026bc70e68 100644 --- a/collector/src/main/resources/META-INF/services/org.dromara.hertzbeat.collector.collect.AbstractCollect +++ b/collector/src/main/resources/META-INF/services/org.dromara.hertzbeat.collector.collect.AbstractCollect @@ -18,4 +18,5 @@ org.dromara.hertzbeat.collector.collect.push.PushCollectImpl org.dromara.hertzbeat.collector.collect.dns.DnsCollectImpl org.dromara.hertzbeat.collector.collect.nginx.NginxCollectImpl org.dromara.hertzbeat.collector.collect.memcached.MemcachedCollectImpl +org.dromara.hertzbeat.collector.collect.nebulagraph.NebulaGraphCollectImpl org.dromara.hertzbeat.collector.collect.pop3.Pop3CollectImpl diff --git a/common/src/main/java/org/dromara/hertzbeat/common/entity/job/Metrics.java b/common/src/main/java/org/dromara/hertzbeat/common/entity/job/Metrics.java index 171c66cb0be..9d771d2b824 100644 --- a/common/src/main/java/org/dromara/hertzbeat/common/entity/job/Metrics.java +++ b/common/src/main/java/org/dromara/hertzbeat/common/entity/job/Metrics.java @@ -135,6 +135,11 @@ public class Metrics { * 使用memcached的监控配置信息 */ private MemcachedProtocol memcached; + /** + * Monitoring configuration information using the nebulaGraph protocol + * 使用nebulaGraph的监控配置信息 + */ + private NebulaGraphProtocol nebulaGraph; /** * Use udp implemented by socket for service port detection configuration information * 使用socket实现的udp进行服务端口探测配置信息 diff --git 
a/common/src/main/java/org/dromara/hertzbeat/common/entity/job/protocol/NebulaGraphProtocol.java b/common/src/main/java/org/dromara/hertzbeat/common/entity/job/protocol/NebulaGraphProtocol.java new file mode 100644 index 00000000000..2927642b1bb --- /dev/null +++ b/common/src/main/java/org/dromara/hertzbeat/common/entity/job/protocol/NebulaGraphProtocol.java @@ -0,0 +1,43 @@ +package org.dromara.hertzbeat.common.entity.job.protocol; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * @author dongfeng + */ +@Data +@Builder +@AllArgsConstructor +@NoArgsConstructor +public class NebulaGraphProtocol { + /** + * NebulaGraph 主机ip或域名 + */ + private String host; + + /** + * NebulaGraph Graph 服务端口默认为 19669 + * NebulaGraph Storage 服务端口默认为 19779 + */ + private String port; + + /** + * NebulaGraph Graph 服务监控API为/stats + * NebulaGraph Storage 服务监控API为/rocksdb_stats + */ + private String url; + + /** + * NebulaGraph 监控时间间隔 + */ + private String timePeriod; + + /** + * 超时时间 + */ + private String timeout; + +} diff --git a/home/docs/help/nebulagraph.md b/home/docs/help/nebulagraph.md new file mode 100644 index 00000000000..3e1aed85cbe --- /dev/null +++ b/home/docs/help/nebulagraph.md @@ -0,0 +1,74 @@ +--- +id: nebulaGraph +title: Monitoring NebulaGraph +sidebar_label: NebulaGraph Monitor +keywords: [ open source monitoring tool, open source NebulaGraph monitoring tool, monitoring NebulaGraph metrics ] +--- + +> Collect and monitor the general performance Metrics of nebulaGraph. + +**Protocol Use:nebulaGraph** + +```text +The monitoring has two parts,nebulaGraph_stats and rocksdb_stats. +nebulaGraph_stats is nebulaGraph's statistics, and rocksdb_stats is rocksdb's statistics. +``` + +### + +**1、Obtain available parameters through the stats and rocksdb stats interfaces.** + +1.1、 If you only need to get nebulaGraph_stats, you need to ensure that you have access to stats, or you'll get errors. 
+ +The default port is 19669 and the access address is http://ip:19669/stats + +1.2、If you need to obtain additional parameters for rocksdb stats, you need to ensure that you have access to rocksdb +stats, otherwise an error will be reported. + +Once you connect to NebulaGraph for the first time, you must first register your Storage service in order to properly +query your data. + +**There is help_doc: https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** + +**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** + +The default port is 19779 and the access address is:http://ip:19779/rocksdb_stats + +### Configuration parameter + +| Parameter name | Parameter help description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Monitoring Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://) | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique | +| graphPort | Port of the Graph service provided by Nebula Graph | +| timePeriod | The value can be 5 seconds, 60 seconds, 600 seconds, or 3600 seconds, indicating the last 5 seconds, last 1 minute, last 10 minutes, and last 1 hour, respectively. | +| storagePort | Port of the storage service provided by Nebula Graph | +| Timeout | Allow collection response time | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Whether to detect | Whether to detect and check the availability of monitoring before adding monitoring. 
Adding and modifying operations will continue only after the detection is successful | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here | + +### Collection Metrics + +#### Metrics Set:nebulaGraph_stats + +Too many indicators, related links are as follows +**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** + +| Metric name | Metric unit | Metric help description | +|---------------------------------------|-------------|--------------------------------------------------------------| +| num_queries_hit_memory_watermark_rate | | The rate of statements that reached the memory watermark. | +| num_queries_hit_memory_watermark_sum | | The sum of statements that reached the memory watermark. | +| num_reclaimed_expired_sessions_sum | | Number of expired sessions actively reclaimed by the server. | +| ... | | ... | + +#### Metrics Set:rocksdb_stats + +Too many indicators, related links are as follows +**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** + +| Metric name | Metric unit | Metric help description | +|----------------------------|-------------|-------------------------------------------------------------| +| rocksdb.backup.read.bytes | | Number of bytes read during the RocksDB database backup. | +| rocksdb.backup.write.bytes | | Number of bytes written during the RocksDB database backup. | +| ... | | ... 
| \ No newline at end of file diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md new file mode 100644 index 00000000000..1f98a4747ae --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/nebulagraph.md @@ -0,0 +1,123 @@ +--- +id: nebulaGraph +title: 监控 NebulaGraph +sidebar_label: NebulaGraph 监控 +keywords: [ 开源监控工具, 开源 NebulaGraph 监控工具, 监控 NebulaGraph 指标 ] +--- + +> 收集和监控 NebulaGraph 的常规性能指标。 + +**使用协议:nebulaGraph** + +```text +监控分为两个部分,nebulaGraph_stats 和 rocksdb_stats。 +nebulaGraph_stats 是 NebulaGraph 的统计信息,rocksdb_stats 是 RocksDB 的统计信息。 +``` + +### + +**1、通过 stats 和 rocksdb stats 接口获取可用参数。** + +1.1、如果只需要获取 nebulaGraph_stats,需要确保可以访问 stats,否则会出现错误。 + +默认端口是 19669,访问地址为 http://ip:19669/stats + +1.2、如果需要获取 rocksdb stats 的附加参数,需要确保可以访问 rocksdb stats,否则会报错。 + +首次连接 NebulaGraph 时,必须先注册 Storage 服务,以便正确查询数据。 + +**有帮助文档:https://docs.nebula-graph.com.cn/3.4.3/4.deployment-and-installation/connect-to-nebula-graph/** + +**https://docs.nebula-graph.com.cn/3.4.3/2.quick-start/3.quick-start-on-premise/3.1add-storage-hosts/** + +默认端口是 19779,访问地址为:http://ip:19779/rocksdb_stats + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|-------------|--------------------------------------------------------------------| +| 监控主机 | 被监控的 IPV4、IPV6 或域名。注意⚠️没有协议头(例如:https://、http://) | +| 监控名称 | 识别此监控的名称。名称需要唯一 | +| graphPort | Nebula Graph 提供的 Graph 服务的端口 | +| timePeriod | 可以是 5 秒、60 秒、600 秒或 3600 秒,分别表示最近 5 秒、最近 1 分钟、最近 10 分钟和最近 1 小时的时间段 | +| storagePort | Nebula Graph 提供的 Storage 服务的端口 | +| 超时 | 允许收集响应时间 | +| 收集间隔 | 监控周期性数据收集的间隔时间,单位:秒,最小可设置的间隔为 30 秒 | +| 是否检测 | 是否检测和验证添加监控之前的可用性。只有检测成功后,添加和修改操作才会继续进行 | +| 描述备注 | 用于识别和描述此监控的更多信息,用户可以在此处记录信息 | + +### 收集指标 + +#### 指标集:nebulaGraph_stats + +指标太多,相关链接如下 +**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/1.query-performance-metrics/** + +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|----------------------------------------------------------------|------|--------| +| 达到内存水位线的语句的数量(rate) | | | +| 达到内存水位线的语句的数量(sum) | | | +| 服务端主动回收的过期的会话数量(rate) | | | +| 服务端主动回收的过期的会话数量(sum) | | | +| 慢查询延迟时间(avg) | | | +| 慢查询延迟时间(p75) | | | +| 慢查询延迟时间(p95) | | | +| 慢查询延迟时间(p99) | | | +| 慢查询延迟时间(p999) | | | +| 查询延迟时间(avg) | | | +| 查询延迟时间(p75) | | | +| 查询延迟时间(p95) | | | +| 查询延迟时间(p99) | | | +| 查询延迟时间(p999) | | | +| 因用户名密码错误导验证失败的会话数量(rate) | | | +| 因用户名密码错误导验证失败的会话数量(sum) | | | +| 查询次数(rate) | | | +| 查询次数(sum) | | | +| 排序(Sort)算子执行时间(rate) | | | +| 排序(Sort)算子执行时间(sum) | | | +| Graphd 服务发给 Storaged 服务的 RPC 请求失败的数量(rate) | | | +| Graphd 服务发给 Storaged 服务的 RPC 请求失败的数量(sum) | | | +| 登录验证失败的会话数量(rate) | | | +| 登录验证失败的会话数量(sum) | | | +| 查询报错语句数量(rate) | | | +| 查询报错语句数量(sum) | | | +| 被终止的查询数量(rate) | | | +| 被终止的查询数量(sum) | | | +| 因查询错误而导致的 Leader 变更的次数(rate) | | | +| 因查询错误而导致的 Leader 变更的次数(sum) | | | +| Graphd 服务发给 Metad 服务的 RPC 请求数量(rate) | | | +| Graphd 服务发给 Metad 服务的 RPC 请求数量(sum) | | | +| 慢查询次数(rate) | | | +| 慢查询次数(sum) | | | +| 活跃的会话数的变化数(sum) | | | +| 活跃的查询语句数的变化数(sum) | | | +| Graphd 服务接收的语句数(rate) | | | +| Graphd 服务接收的语句数(sum) | | | +| 聚合(Aggregate)算子执行时间(rate) | | | +| 聚合(Aggregate)算子执行时间(sum) | | | +| 优化器阶段延迟时间(avg) | | | +| 优化器阶段延迟时间(p75) | | | +| 优化器阶段延迟时间(p95) | | | +| 优化器阶段延迟时间(p99) | | | +| 优化器阶段延迟时间(p999) | | | +| Graphd 服务发给 Metad 的 RPC 请求失败的数量(rate) | | | +| Graphd 服务发给 Metad 的 RPC 请求失败的数量(sum) | | | +| 索引扫描(IndexScan)算子执行时间(rate) | | | +| 索引扫描(IndexScan)算子执行时间(sum) | | | +| 服务端建立过的会话数量(rate) | | | +| 服务端建立过的会话数量(sum) | | | +| 因为超过FLAG_OUT_OF_MAX_ALLOWED_CONNECTIONS参数导致的验证登录的失败的会话数量(rate) | | | +| 因为超过FLAG_OUT_OF_MAX_ALLOWED_CONNECTIONS参数导致的验证登录的失败的会话数量(sum) | | | +| Graphd 服务发给 Storaged 服务的 RPC 请求数量(rate) | | | +| Graphd 服务发给 Storaged 服务的 RPC 请求数量(sum) | | | + +#### 指标集:rocksdb_stats + +指标太多,相关链接如下 +**https://docs.nebula-graph.com.cn/3.4.3/6.monitor-and-metrics/2.rocksdb-statistics/** + +| 指标名称 | 指标单位 | 指标帮助描述 | 
+|----------------------------|------|------------------------| +| rocksdb.backup.read.bytes | | 备份 RocksDB 数据库期间读取的字节数 | +| rocksdb.backup.write.bytes | | 备份 RocksDB 数据库期间写入的字节数 | +| ... | | ... | \ No newline at end of file diff --git a/manager/src/main/resources/define/app-nebulaGraph.yml b/manager/src/main/resources/define/app-nebulaGraph.yml new file mode 100644 index 00000000000..4c417ab3585 --- /dev/null +++ b/manager/src/main/resources/define/app-nebulaGraph.yml @@ -0,0 +1,768 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# 监控类型所属类别:service-应用服务 program-应用程序 db-数据库 custom-自定义 os-操作系统 bigdata-大数据 mid-中间件 webserver-web服务器 cache-缓存 cn-云原生 network-网络监控等等 +category: db +# 监控应用类型名称(与文件名保持一致) eg: linux windows tomcat mysql aws... 
+app: nebulaGraph +# The app api i18n name +# app api国际化名称 +name: + zh-CN: NebulaGraph + en-US: NebulaGraph +# 监控类型的帮助描述信息 +helpLink: +# app api所需输入参数定义(根据定义渲染页面UI) +# Input params define for app api(render web ui by the definition) +params: + # field-param field key + # field-字段名称标识符 + - field: host + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: host + # required-true or false + # 是否是必输项 true-必填 false-可选 + required: true + # field-param field key + # field-变量字段标识符 + - field: graphPort + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: graph端口 + en-US: graphPort + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: number + # when type is number, range is required + # 当type为number时,用range表示范围 + range: '[0,65535]' + # required-true or false + # required-是否是必输项 true-必填 false-可选 + required: true + # default value + # 默认值 + defaultValue: 19669 + - field: timePeriod + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: graph指标统计的时间范围(s) + en-US: graph indicator statistics time range (s) + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: number + # when type is number, range is required + # 当type为number时,用range表示范围 + range: '[0,3600]' + # required-true or false + # 是否是必输项 true-必填 false-可选 + required: true + # default value 6000 + # 默认值 6000 + defaultValue: 60 + - field: storagePort + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: storage端口 + en-US: storagePort + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: number + # when type is number, range is required + # 当type为number时,用range表示范围 + range: '[0,65535]' + # required-true or false + # required-是否是必输项 true-必填 false-可选 + required: true + # 
default value + # 默认值 + defaultValue: 19779 + # field-param field key + # field-变量字段标识符 + - field: timeout + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: 连接超时时间(ms) + en-US: Connect Timeout(ms) + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: number + # when type is number, range is required + # 当type为number时,用range表示范围 + range: '[0,100000]' + # required-true or false + # 是否是必输项 true-必填 false-可选 + required: true + # default value 6000 + # 默认值 6000 + defaultValue: 6000 + + +# collect metrics config list +# 采集指标配置列表 +metrics: + # metrics - available + # 监控指标 - available + - name: nebulaGraph_stats + i18n: + zh-CN: nebulaGraph 统计信息 + en-US: nebulaGraph stats + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + # 指标采集调度优先级(0->127)->(优先级高->低) 优先级低的指标会等优先级高的指标采集完成后才会被调度, 相同优先级的指标会并行调度采集 + # 优先级为0的指标为可用性指标,即它会被首先调度,采集成功才会继续调度其它指标,采集失败则中断调度 + priority: 0 + fields: + - field: responseTime + type: 0 + unit: ms + i18n: + zh-CN: 响应时间 + en-US: Response Time + - field: num_queries_hit_memory_watermark_rate + type: 0 + i18n: + zh-CN: 达到内存水位线的语句的数量(rate) + en-US: num_queries_hit_memory_watermark.rate + - field: num_queries_hit_memory_watermark_sum + type: 0 + i18n: + zh-CN: 达到内存水位线的语句的数量(sum) + en-US: num_queries_hit_memory_watermark.sum + - field: num_reclaimed_expired_sessions_rate + type: 0 + i18n: + zh-CN: 服务端主动回收的过期的会话数量(rate) + en-US: num_reclaimed_expired_sessions.rate + - field: num_reclaimed_expired_sessions_sum + type: 0 + i18n: + zh-CN: 服务端主动回收的过期的会话数量(sum) + en-US: num_reclaimed_expired_sessions.sum + - field: slow_query_latency_us_avg + type: 0 + i18n: + zh-CN: 慢查询延迟时间(avg) + en-US: slow_query_latency_us.avg + - field: slow_query_latency_us_p75 + type: 0 + i18n: + zh-CN: 
慢查询延迟时间(p75) + en-US: slow_query_latency_us.p75 + - field: slow_query_latency_us_p95 + type: 0 + i18n: + zh-CN: 慢查询延迟时间(p95) + en-US: slow_query_latency_us.p95 + - field: slow_query_latency_us_p99 + type: 0 + i18n: + zh-CN: 慢查询延迟时间(p99) + en-US: slow_query_latency_us.p99 + - field: slow_query_latency_us_p999 + type: 0 + i18n: + zh-CN: 慢查询延迟时间(p999) + en-US: slow_query_latency_us.p999 + - field: query_latency_us_avg + type: 0 + i18n: + zh-CN: 查询延迟时间(avg) + en-US: query_latency_us.avg + - field: query_latency_us_p75 + type: 0 + i18n: + zh-CN: 查询延迟时间(p75) + en-US: query_latency_us.p75 + - field: query_latency_us_p95 + type: 0 + i18n: + zh-CN: 查询延迟时间(p95) + en-US: query_latency_us.p95 + - field: query_latency_us_p99 + type: 0 + i18n: + zh-CN: 查询延迟时间(p99) + en-US: query_latency_us.p99 + - field: query_latency_us_p999 + type: 0 + i18n: + zh-CN: 查询延迟时间(p999) + en-US: query_latency_us_p999 + - field: num_auth_failed_sessions_bad_username_password_rate + type: 0 + i18n: + zh-CN: 因用户名密码错误导验证失败的会话数量(rate) + en-US: num_auth_failed_sessions_bad_username_password.rate + - field: num_auth_failed_sessions_bad_username_password_sum + type: 0 + i18n: + zh-CN: 因用户名密码错误导验证失败的会话数量(sum) + en-US: num_auth_failed_sessions_bad_username_password.sum + - field: num_queries_rate + type: 0 + i18n: + zh-CN: 查询次数(rate) + en-US: num_queries.rate + - field: num_queries_sum + type: 0 + i18n: + zh-CN: 查询次数(sum) + en-US: num_queries.sum + - field: num_sort_executors_rate + type: 0 + i18n: + zh-CN: 排序(Sort)算子执行时间(rate) + en-US: num_sort_executors.rate + - field: num_sort_executors_sum + type: 0 + i18n: + zh-CN: 排序(Sort)算子执行时间(sum) + en-US: num_sort_executors.sum + - field: num_rpc_sent_to_storaged_failed_rate + type: 0 + i18n: + zh-CN: Graphd 服务发给 Storaged 服务的 RPC 请求失败的数量(rate) + en-US: num_rpc_sent_to_storaged_failed.rate + - field: num_rpc_sent_to_storaged_failed_sum + type: 0 + i18n: + zh-CN: Graphd 服务发给 Storaged 服务的 RPC 请求失败的数量(sum) + en-US: num_rpc_sent_to_storaged_failed.sum + - field: 
num_auth_failed_sessions_rate + type: 0 + i18n: + zh-CN: 登录验证失败的会话数量(rate) + en-US: num_auth_failed_sessions.rate + - field: num_auth_failed_sessions_sum + type: 0 + i18n: + zh-CN: 登录验证失败的会话数量(sum) + en-US: num_auth_failed_sessions.sum + - field: num_query_errors_rate + type: 0 + i18n: + zh-CN: 查询报错语句数量(rate) + en-US: num_query_errors_rate + - field: num_query_errors_sum + type: 0 + i18n: + zh-CN: 查询报错语句数量(sum) + en-US: num_query_errors.sum + - field: num_killed_queries_rate + type: 0 + i18n: + zh-CN: 被终止的查询数量(rate) + en-US: num_killed_queries.rate + - field: num_killed_queries_sum + type: 0 + i18n: + zh-CN: 被终止的查询数量(sum) + en-US: num_killed_queries.sum + - field: num_query_errors_leader_changes_rate + type: 0 + i18n: + zh-CN: 因查询错误而导致的 Leader 变更的次数(rate) + en-US: num_query_errors_leader_changes.rate + - field: num_query_errors_leader_changes_sum + type: 0 + i18n: + zh-CN: 因查询错误而导致的 Leader 变更的次数(sum) + en-US: num_query_errors_leader_changes.sum + - field: num_rpc_sent_to_metad_rate + type: 0 + i18n: + zh-CN: Graphd 服务发给 Metad 服务的 RPC 请求数量(rate) + en-US: num_rpc_sent_to_metad.rate + - field: num_rpc_sent_to_metad_sum + type: 0 + i18n: + zh-CN: Graphd 服务发给 Metad 服务的 RPC 请求数量(sum) + en-US: num_rpc_sent_to_metad.sum + - field: num_slow_queries_rate + type: 0 + i18n: + zh-CN: 慢查询次数(rate) + en-US: num_slow_queries.rate + - field: num_slow_queries_sum + type: 0 + i18n: + zh-CN: 慢查询次数(sum) + en-US: num_slow_queries.sum + - field: num_active_sessions_sum + type: 0 + i18n: + zh-CN: 活跃的会话数的变化数(sum) + en-US: num_active_sessions.sum + - field: num_active_queries_sum + type: 0 + i18n: + zh-CN: 活跃的查询语句数的变化数(sum) + en-US: num_active_queries.sum + - field: num_sentences_rate + type: 0 + i18n: + zh-CN: Graphd 服务接收的语句数(rate) + en-US: num_sentences.rate + - field: num_sentences_sum + type: 0 + i18n: + zh-CN: Graphd 服务接收的语句数(sum) + en-US: num_sentences.sum + - field: num_aggregate_executors_rate + type: 0 + i18n: + zh-CN: 聚合(Aggregate)算子执行时间(rate) + en-US: num_aggregate_executors.rate 
+ - field: num_aggregate_executors_sum + type: 0 + i18n: + zh-CN: 聚合(Aggregate)算子执行时间(sum) + en-US: num_aggregate_executors.sum + - field: optimizer_latency_us_avg + type: 0 + i18n: + zh-CN: 优化器阶段延迟时间(avg) + en-US: optimizer_latency_us.avg + - field: optimizer_latency_us_p75 + type: 0 + i18n: + zh-CN: 优化器阶段延迟时间(p75) + en-US: optimizer_latency_us.p75 + - field: optimizer_latency_us_p95 + type: 0 + i18n: + zh-CN: 优化器阶段延迟时间(p95) + en-US: optimizer_latency_us.p95 + - field: optimizer_latency_us_p99 + type: 0 + i18n: + zh-CN: 优化器阶段延迟时间(p99) + en-US: optimizer_latency_us.p99 + - field: optimizer_latency_us_p999 + type: 0 + i18n: + zh-CN: 优化器阶段延迟时间(p999) + en-US: optimizer_latency_us.p999 + - field: num_rpc_sent_to_metad_failed_rate + type: 0 + i18n: + zh-CN: Graphd 服务发给 Metad 的 RPC 请求失败的数量(rate) + en-US: num_rpc_sent_to_metad_failed.rate + - field: num_rpc_sent_to_metad_failed_sum + type: 0 + i18n: + zh-CN: Graphd 服务发给 Metad 的 RPC 请求失败的数量(sum) + en-US: num_rpc_sent_to_metad_failed.sum + - field: num_indexscan_executors_rate + type: 0 + i18n: + zh-CN: 索引扫描(IndexScan)算子执行时间(rate) + en-US: num_indexscan_executors.rate + - field: num_indexscan_executors_sum + type: 0 + i18n: + zh-CN: 索引扫描(IndexScan)算子执行时间(sum) + en-US: num_indexscan_executors.sum + - field: num_opened_sessions_rate + type: 0 + i18n: + zh-CN: 服务端建立过的会话数量(rate) + en-US: num_opened_sessions.rate + - field: num_opened_sessions_sum + type: 0 + i18n: + zh-CN: 服务端建立过的会话数量(sum) + en-US: num_opened_sessions.sum + - field: num_auth_failed_sessions_out_of_max_allowed_rate + type: 0 + i18n: + zh-CN: 因为超过FLAG_OUT_OF_MAX_ALLOWED_CONNECTIONS参数导致的验证登录的失败的会话数量(rate) + en-US: num_auth_failed_sessions_out_of_max_allowed.rate + - field: num_auth_failed_sessions_out_of_max_allowed_sum + type: 0 + i18n: + zh-CN: 因为超过FLAG_OUT_OF_MAX_ALLOWED_CONNECTIONS参数导致的验证登录的失败的会话数量(sum) + en-US: num_auth_failed_sessions_out_of_max_allowed.sum + - field: num_rpc_sent_to_storaged_rate + type: 0 + i18n: + zh-CN: Graphd 服务发给 Storaged 服务的 RPC 
请求数量(rate) + en-US: num_rpc_sent_to_storaged.rate + - field: num_rpc_sent_to_storaged_sum + type: 0 + i18n: + zh-CN: Graphd 服务发给 Storaged 服务的 RPC 请求数量(sum) + en-US: num_rpc_sent_to_storaged.sum + # 指标别名列表,用于在查询结果中识别指标 + aliasFields: + - responseTime + - num_queries_hit_memory_watermark.rate + - num_queries_hit_memory_watermark.sum + - num_reclaimed_expired_sessions.rate + - num_reclaimed_expired_sessions.sum + - slow_query_latency_us.avg + - slow_query_latency_us.p75 + - slow_query_latency_us.p95 + - slow_query_latency_us.p99 + - slow_query_latency_us.p999 + - query_latency_us.avg + - query_latency_us.p75 + - query_latency_us.p95 + - query_latency_us.p99 + - query_latency_us.p999 + - num_auth_failed_sessions_bad_username_password.rate + - num_auth_failed_sessions_bad_username_password.sum + - num_queries.rate + - num_queries.sum + - num_sort_executors.rate + - num_sort_executors.sum + - num_rpc_sent_to_storaged_failed.rate + - num_rpc_sent_to_storaged_failed.sum + - num_auth_failed_sessions.rate + - num_auth_failed_sessions.sum + - num_query_errors.rate + - num_query_errors.sum + - num_killed_queries.rate + - num_killed_queries.sum + - num_query_errors_leader_changes.rate + - num_query_errors_leader_changes.sum + - num_rpc_sent_to_metad.rate + - num_rpc_sent_to_metad.sum + - num_slow_queries.rate + - num_slow_queries.sum + - num_active_sessions.sum + - num_active_queries.sum + - num_sentences.rate + - num_sentences.sum + - num_aggregate_executors.rate + - num_aggregate_executors.sum + - optimizer_latency_us.avg + - optimizer_latency_us.p75 + - optimizer_latency_us.p95 + - optimizer_latency_us.p99 + - optimizer_latency_us.p999 + - num_rpc_sent_to_metad_failed.rate + - num_rpc_sent_to_metad_failed.sum + - num_indexscan_executors.rate + - num_indexscan_executors.sum + - num_opened_sessions.rate + - num_opened_sessions.sum + - num_auth_failed_sessions_out_of_max_allowed.rate + - num_auth_failed_sessions_out_of_max_allowed.sum + - num_rpc_sent_to_storaged.rate + - 
num_rpc_sent_to_storaged.sum
+    # mapping and conversion expressions, use these and the aliasFields above to calculate metrics value
+    # (可选)指标映射转换计算表达式,与上面的别名一起作用,计算出最终需要的指标值
+    # eg: cores=core1+core2, usage=usage, waitTime=allTime-runningTime
+    calculates:
+      - responseTime=responseTime
+      - num_queries_hit_memory_watermark_rate=num_queries_hit_memory_watermark.rate
+      - num_queries_hit_memory_watermark_sum=num_queries_hit_memory_watermark.sum
+      - num_reclaimed_expired_sessions_rate=num_reclaimed_expired_sessions.rate
+      - num_reclaimed_expired_sessions_sum=num_reclaimed_expired_sessions.sum
+      - slow_query_latency_us_avg=slow_query_latency_us.avg
+      - slow_query_latency_us_p75=slow_query_latency_us.p75
+      - slow_query_latency_us_p95=slow_query_latency_us.p95
+      - slow_query_latency_us_p99=slow_query_latency_us.p99
+      - slow_query_latency_us_p999=slow_query_latency_us.p999
+      - query_latency_us_avg=query_latency_us.avg
+      - query_latency_us_p75=query_latency_us.p75
+      - query_latency_us_p95=query_latency_us.p95
+      - query_latency_us_p99=query_latency_us.p99
+      - query_latency_us_p999=query_latency_us.p999
+      - num_auth_failed_sessions_bad_username_password_rate=num_auth_failed_sessions_bad_username_password.rate
+      - num_auth_failed_sessions_bad_username_password_sum=num_auth_failed_sessions_bad_username_password.sum
+      - num_queries_rate=num_queries.rate
+      - num_queries_sum=num_queries.sum
+      - num_sort_executors_rate=num_sort_executors.rate
+      - num_sort_executors_sum=num_sort_executors.sum
+      - num_rpc_sent_to_storaged_failed_rate=num_rpc_sent_to_storaged_failed.rate
+      - num_rpc_sent_to_storaged_failed_sum=num_rpc_sent_to_storaged_failed.sum
+      - num_auth_failed_sessions_rate=num_auth_failed_sessions.rate
+      - num_auth_failed_sessions_sum=num_auth_failed_sessions.sum
+      - num_query_errors_rate=num_query_errors.rate
+      - num_query_errors_sum=num_query_errors.sum
+      - num_killed_queries_rate=num_killed_queries.rate
+      - num_killed_queries_sum=num_killed_queries.sum
+      - 
num_query_errors_leader_changes_rate=num_query_errors_leader_changes.rate + - num_query_errors_leader_changes_sum=num_query_errors_leader_changes.sum + - num_rpc_sent_to_metad_rate=num_rpc_sent_to_metad.rate + - num_rpc_sent_to_metad_sum=num_rpc_sent_to_metad.sum + - num_slow_queries_rate=num_slow_queries.rate + - num_slow_queries_sum=num_slow_queries.sum + - num_active_sessions_sum=num_active_sessions.sum + - num_active_queries_sum=num_active_queries.sum + - num_sentences_rate=num_sentences.rate + - num_sentences_sum=num_sentences.sum + - num_aggregate_executors_rate=num_aggregate_executors.rate + - num_aggregate_executors_sum=num_aggregate_executors.sum + - optimizer_latency_us_avg=optimizer_latency_us.avg + - optimizer_latency_us_p75=optimizer_latency_us.p75 + - optimizer_latency_us_p95=optimizer_latency_us.p95 + - optimizer_latency_us_p99=optimizer_latency_us.p99 + - optimizer_latency_us_p999=optimizer_latency_us.p999 + - num_rpc_sent_to_metad_failed_rate=num_rpc_sent_to_metad_failed.rate + - num_rpc_sent_to_metad_failed_sum=num_rpc_sent_to_metad_failed.sum + - num_indexscan_executors_rate=num_indexscan_executors.rate + - num_indexscan_executors_sum=num_indexscan_executors.sum + - num_opened_sessions_rate=num_opened_sessions.rate + - num_opened_sessions_sum=num_opened_sessions.sum + - num_auth_failed_sessions_out_of_max_allowed_rate=num_auth_failed_sessions_out_of_max_allowed.rate + - num_auth_failed_sessions_out_of_max_allowed_sum=num_auth_failed_sessions_out_of_max_allowed.sum + - num_rpc_sent_to_storaged_rate=num_rpc_sent_to_storaged.rate + - num_rpc_sent_to_storaged_sum=num_rpc_sent_to_storaged.sum + # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk, nginx + protocol: nebulaGraph + nebulaGraph: + # http host: ipv4 ipv6 domain + host: ^_^host^_^ + # http port + port: ^_^graphPort^_^ + # 时间间隔 + timePeriod: ^_^timePeriod^_^ + # http url + # url请求接口路径 + url: /stats + # timeout + # 超时时间 + timeout: ^_^timeout^_^ + - name: 
rocksdb_stats
+    i18n:
+      zh-CN: rocksdb 统计数据
+      en-US: rocksdb stats
+    # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel
+    # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue
+    priority: 1
+    fields:
+      # availability field: time taken to fetch this metric group from the storaged HTTP endpoint
+      - field: responseTime
+        type: 0
+        unit: ms
+        i18n:
+          zh-CN: 响应时间
+          en-US: Response Time
+      # The fields below are RocksDB statistics ticker names as emitted by the storaged
+      # /rocksdb_stats endpoint. They must match the endpoint output byte-for-byte —
+      # NOTE(review): the "errro" spellings under rocksdb.error.handler.* come from
+      # upstream RocksDB itself; do NOT "fix" them here or collection will miss them.
+      - field: rocksdb.backup.read.bytes
+      - field: rocksdb.backup.write.bytes
+      - field: rocksdb.blobdb.blob.file.bytes.read
+      - field: rocksdb.blobdb.blob.file.bytes.written
+      - field: rocksdb.blobdb.blob.file.synced
+      - field: rocksdb.blobdb.blob.index.evicted.count
+      - field: rocksdb.blobdb.blob.index.evicted.size
+      - field: rocksdb.blobdb.blob.index.expired.count
+      - field: rocksdb.blobdb.blob.index.expired.size
+      - field: rocksdb.blobdb.bytes.read
+      - field: rocksdb.blobdb.bytes.written
+      - field: rocksdb.blobdb.cache.add
+      - field: rocksdb.blobdb.cache.add.failures
+      - field: rocksdb.blobdb.cache.bytes.read
+      - field: rocksdb.blobdb.cache.bytes.write
+      - field: rocksdb.blobdb.cache.hit
+      - field: rocksdb.blobdb.cache.miss
+      - field: rocksdb.blobdb.fifo.bytes.evicted
+      - field: rocksdb.blobdb.fifo.num.files.evicted
+      - field: rocksdb.blobdb.fifo.num.keys.evicted
+      - field: rocksdb.blobdb.gc.bytes.expired
+      - field: rocksdb.blobdb.gc.bytes.overwritten
+      - field: rocksdb.blobdb.gc.bytes.relocated
+      - field: rocksdb.blobdb.gc.failures
+      - field: rocksdb.blobdb.gc.num.files
+      - field: rocksdb.blobdb.gc.num.keys.expired
+      - field: rocksdb.blobdb.gc.num.keys.overwritten
+      - field: rocksdb.blobdb.gc.num.keys.relocated
+      - field: rocksdb.blobdb.gc.num.new.files
+      - field: rocksdb.blobdb.num.get
+      - field: rocksdb.blobdb.num.keys.read
+      - field: rocksdb.blobdb.num.keys.written
+      - field: rocksdb.blobdb.num.multiget
+      - field: rocksdb.blobdb.num.next
+      - field: rocksdb.blobdb.num.prev
+      - field: rocksdb.blobdb.num.put
+      - field: rocksdb.blobdb.num.seek
+      - field: rocksdb.blobdb.num.write
+      - field: rocksdb.blobdb.write.blob
+      - field: rocksdb.blobdb.write.blob.ttl
+      - field: rocksdb.blobdb.write.inlined
+      - field: rocksdb.blobdb.write.inlined.ttl
+      - field: rocksdb.block.cache.add
+      - field: rocksdb.block.cache.add.failures
+      - field: rocksdb.block.cache.add.redundant
+      - field: rocksdb.block.cache.bytes.read
+      - field: rocksdb.block.cache.bytes.write
+      - field: rocksdb.block.cache.compression.dict.add
+      - field: rocksdb.block.cache.compression.dict.add.redundant
+      - field: rocksdb.block.cache.compression.dict.bytes.evict
+      - field: rocksdb.block.cache.compression.dict.bytes.insert
+      - field: rocksdb.block.cache.compression.dict.hit
+      - field: rocksdb.block.cache.compression.dict.miss
+      - field: rocksdb.block.cache.data.add
+      - field: rocksdb.block.cache.data.add.redundant
+      - field: rocksdb.block.cache.data.bytes.insert
+      - field: rocksdb.block.cache.data.hit
+      - field: rocksdb.block.cache.data.miss
+      - field: rocksdb.block.cache.filter.add
+      - field: rocksdb.block.cache.filter.add.redundant
+      - field: rocksdb.block.cache.filter.bytes.evict
+      - field: rocksdb.block.cache.filter.bytes.insert
+      - field: rocksdb.block.cache.filter.hit
+      - field: rocksdb.block.cache.filter.miss
+      - field: rocksdb.block.cache.hit
+      - field: rocksdb.block.cache.index.add
+      - field: rocksdb.block.cache.index.add.redundant
+      - field: rocksdb.block.cache.index.bytes.evict
+      - field: rocksdb.block.cache.index.bytes.insert
+      - field: rocksdb.block.cache.index.hit
+      - field: rocksdb.block.cache.index.miss
+      - field: rocksdb.block.cache.miss
+      - field: rocksdb.block.cachecompressed.add
+      - field: rocksdb.block.cachecompressed.add.failures
+      - field: rocksdb.block.cachecompressed.hit
+      - field: rocksdb.block.cachecompressed.miss
+      - field: rocksdb.block.checksum.compute.count
+      - field: rocksdb.bloom.filter.full.positive
+      - field: rocksdb.bloom.filter.full.true.positive
+      - field: rocksdb.bloom.filter.micros
+      - field: rocksdb.bloom.filter.prefix.checked
+      - field: rocksdb.bloom.filter.prefix.useful
+      - field: rocksdb.bloom.filter.useful
+      - field: rocksdb.bytes.read
+      - field: rocksdb.bytes.written
+      - field: rocksdb.cold.file.read.bytes
+      - field: rocksdb.cold.file.read.count
+      - field: rocksdb.compact.read.bytes
+      - field: rocksdb.compact.read.marked.bytes
+      - field: rocksdb.compact.read.periodic.bytes
+      - field: rocksdb.compact.read.ttl.bytes
+      - field: rocksdb.compact.write.bytes
+      - field: rocksdb.compact.write.marked.bytes
+      - field: rocksdb.compact.write.periodic.bytes
+      - field: rocksdb.compact.write.ttl.bytes
+      - field: rocksdb.compaction.cancelled
+      - field: rocksdb.compaction.key.drop.new
+      - field: rocksdb.compaction.key.drop.obsolete
+      - field: rocksdb.compaction.key.drop.range_del
+      - field: rocksdb.compaction.key.drop.user
+      - field: rocksdb.compaction.optimized.del.drop.obsolete
+      - field: rocksdb.compaction.range_del.drop.obsolete
+      - field: rocksdb.db.iter.bytes.read
+      - field: rocksdb.db.mutex.wait.micros
+      - field: rocksdb.error.handler.autoresume.count
+      - field: rocksdb.error.handler.autoresume.retry.total.count
+      - field: rocksdb.error.handler.autoresume.success.count
+      - field: rocksdb.error.handler.bg.errro.count
+      - field: rocksdb.error.handler.bg.io.errro.count
+      - field: rocksdb.error.handler.bg.retryable.io.errro.count
+      - field: rocksdb.files.deleted.immediately
+      - field: rocksdb.files.marked.trash
+      - field: rocksdb.filter.operation.time.nanos
+      - field: rocksdb.flush.write.bytes
+      - field: rocksdb.getupdatessince.calls
+      - field: rocksdb.hot.file.read.bytes
+      - field: rocksdb.hot.file.read.count
+      - field: rocksdb.l0.hit
+      - field: rocksdb.l0.num.files.stall.micros
+      - field: rocksdb.l0.slowdown.micros
+      - field: rocksdb.l1.hit
+      - field: rocksdb.l2andup.hit
+      - field: rocksdb.last.level.read.bytes
+      - field: rocksdb.last.level.read.count
+      - field: rocksdb.memtable.compaction.micros
+      - field: rocksdb.memtable.garbage.bytes.at.flush
+      - field: rocksdb.memtable.hit
+      - field: rocksdb.memtable.miss
+      - field: rocksdb.memtable.payload.bytes.at.flush
+      - field: rocksdb.merge.operation.time.nanos
+      - field: rocksdb.multiget.coroutine.count
+      - field: rocksdb.no.file.closes
+      - field: rocksdb.no.file.errors
+      - field: rocksdb.no.file.opens
+      - field: rocksdb.non.last.level.read.bytes
+      - field: rocksdb.non.last.level.read.count
+      - field: rocksdb.num.iterator.created
+      - field: rocksdb.num.iterator.deleted
+      - field: rocksdb.num.iterators
+      - field: rocksdb.number.block.compressed
+      - field: rocksdb.number.block.decompressed
+      - field: rocksdb.number.block.not_compressed
+      - field: rocksdb.number.db.next
+      - field: rocksdb.number.db.next.found
+      - field: rocksdb.number.db.prev
+      - field: rocksdb.number.db.prev.found
+      - field: rocksdb.number.db.seek
+      - field: rocksdb.number.db.seek.found
+      - field: rocksdb.number.deletes.filtered
+      - field: rocksdb.number.direct.load.table.properties
+      - field: rocksdb.number.iter.skip
+      - field: rocksdb.number.keys.read
+      - field: rocksdb.number.keys.updated
+      - field: rocksdb.number.keys.written
+      - field: rocksdb.number.merge.failures
+      - field: rocksdb.number.multiget.bytes.read
+      - field: rocksdb.number.multiget.get
+      - field: rocksdb.number.multiget.keys.found
+      - field: rocksdb.number.multiget.keys.read
+      - field: rocksdb.number.rate_limiter.drains
+      - field: rocksdb.number.reseeks.iteration
+      - field: rocksdb.number.superversion_acquires
+      - field: rocksdb.number.superversion_cleanups
+      - field: rocksdb.number.superversion_releases
+      - field: rocksdb.persistent.cache.hit
+      - field: rocksdb.persistent.cache.miss
+      - field: rocksdb.rate.limit.delay.millis
+      - field: rocksdb.read.amp.estimate.useful.bytes
+      - field: rocksdb.read.amp.total.read.bytes
+      - field: rocksdb.remote.compact.read.bytes
+      - field: rocksdb.remote.compact.write.bytes
+      - field: rocksdb.row.cache.hit
+      - field: rocksdb.row.cache.miss
+      - field: rocksdb.secondary.cache.hits
+      - field: rocksdb.sim.block.cache.hit
+      - field: rocksdb.sim.block.cache.miss
+      - field: rocksdb.stall.micros
+      - field: rocksdb.txn.get.tryagain
+      - field: rocksdb.txn.overhead.duplicate.key
+      - field: rocksdb.txn.overhead.mutex.old.commit.map
+      - field: rocksdb.txn.overhead.mutex.prepare
+      - field: rocksdb.txn.overhead.mutex.snapshot
+      - field: rocksdb.verify_checksum.read.bytes
+      - field: rocksdb.wal.bytes
+      - field: rocksdb.wal.synced
+      - field: rocksdb.warm.file.read.bytes
+      - field: rocksdb.warm.file.read.count
+      - field: rocksdb.write.other
+      - field: rocksdb.write.self
+      - field: rocksdb.write.timeout
+      - field: rocksdb.write.wal
+    # the protocol used for monitoring, eg: sql, ssh, http, telnet, wmi, snmp, sdk, nginx
+    protocol: nebulaGraph
+    nebulaGraph:
+      # http host: ipv4 ipv6 domain
+      host: ^_^host^_^
+      # http port (the storaged HTTP port, distinct from the graphd port used by the stats metric)
+      port: ^_^storagePort^_^
+      # http url — request path on the storaged HTTP server
+      url: /rocksdb_stats
+      # timeout for the HTTP request
+      timeout: ^_^timeout^_^
\ No newline at end of file