Merge pull request #69 from promhippie/prepare-release
Prepare release 1.2.0
tboerger authored May 11, 2022
2 parents 9c7b1c5 + 550dff9 commit 502d4e3
Showing 4 changed files with 58 additions and 49 deletions.
13 changes: 11 additions & 2 deletions CHANGELOG.md
@@ -1,13 +1,22 @@
# Changelog for unreleased
# Changelog for 1.2.0

The following sections list the changes for unreleased.
The following sections list the changes for 1.2.0.

## Summary

* Chg #67: Add collector for server metrics
* Chg #53: Integrate standard web config

## Details

* Change #67: Add collector for server metrics

Hetzner Cloud collects basic metrics at the hypervisor level for each server. We have added a
new collector which scrapes the latest available metric point for each running server. It is
enabled by default.

https://github.com/promhippie/hcloud_exporter/pull/67
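
  As a minimal, standalone sketch of the hcloud-go calls this collector builds on (listing running servers and fetching the latest metric sample): the standalone program, the HCLOUD_TOKEN environment variable, and the CPU-only query are assumptions for illustration, not part of the exporter.

  ```go
  package main

  import (
  	"context"
  	"fmt"
  	"os"
  	"strconv"
  	"time"

  	"github.com/hetznercloud/hcloud-go/hcloud"
  )

  func main() {
  	// Client setup; reading the token from HCLOUD_TOKEN is only an assumption for this sketch.
  	client := hcloud.NewClient(hcloud.WithToken(os.Getenv("HCLOUD_TOKEN")))
  	ctx := context.Background()

  	// List only running servers, mirroring the collector's ServerListOpts filter.
  	servers, _, err := client.Server.List(ctx, hcloud.ServerListOpts{
  		Status: []hcloud.ServerStatus{hcloud.ServerStatusRunning},
  	})
  	if err != nil {
  		fmt.Fprintln(os.Stderr, err)
  		os.Exit(1)
  	}

  	for _, server := range servers {
  		// Request the latest CPU sample; the collector additionally asks for disk and network metrics.
  		metrics, _, err := client.Server.GetMetrics(ctx, server, hcloud.ServerGetMetricsOpts{
  			Types: []hcloud.ServerMetricType{hcloud.ServerMetricCPU},
  			Start: time.Now(),
  			End:   time.Now(),
  		})
  		if err != nil || len(metrics.TimeSeries["cpu"]) == 0 {
  			continue
  		}

  		// Values are returned as strings and parsed to float64, just like in the collector.
  		cpu, _ := strconv.ParseFloat(metrics.TimeSeries["cpu"][0].Value, 64)
  		fmt.Printf("%s (%d, %s): cpu=%.2f\n", server.Name, server.ID, server.Datacenter.Name, cpu)
  	}
  }
  ```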

* Change #53: Integrate standard web config

We integrated the new web config from the Prometheus toolkit, which provides a configuration
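
  As a rough sketch, assuming this refers to the standard Prometheus exporter-toolkit web configuration format, such a configuration file could look like the following; the paths, user name, bcrypt hash, and the flag used to pass the file to the exporter are placeholders, not confirmed by this release.

  ```yaml
  # Sketch of an exporter-toolkit style web config; all values are placeholders.
  tls_server_config:
    cert_file: /etc/hcloud_exporter/tls.crt
    key_file: /etc/hcloud_exporter/tls.key
  basic_auth_users:
    # bcrypt-hashed password for HTTP basic auth.
    metrics: $2y$10$replace-with-a-real-bcrypt-hash
  ```
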
7 changes: 7 additions & 0 deletions changelog/1.2.0_2022-05-11/servermetrics-collector.md
@@ -0,0 +1,7 @@
Change: Add collector for server metrics

Hetzner Cloud collects basic metrics at the hypervisor level for each server. We
have added a new collector which scrapes the latest available metric point for
each running server. It is enabled by default.

https://github.com/promhippie/hcloud_exporter/pull/67
File renamed without changes.
87 changes: 40 additions & 47 deletions pkg/exporter/server-metrics.go → pkg/exporter/server_metrics.go
@@ -38,8 +38,8 @@ func NewServerMetricsCollector(logger log.Logger, client *hcloud.Client, failure
}

labels := []string{"id", "name", "datacenter"}
diskLabels := []string{"id", "name", "datacenter", "disk"}
networkLabels := []string{"id", "name", "datacenter", "interface"}
diskLabels := append(labels, "disk")
networkLabels := append(labels, "interface")
return &ServerMetricsCollector{
client: client,
logger: log.With(logger, "collector", "server-metrics"),
@@ -115,7 +115,6 @@ func NewServerMetricsCollector(logger log.Logger, client *hcloud.Client, failure
// Metrics simply returns the list of metric descriptors for generating documentation.
func (c *ServerMetricsCollector) Metrics() []*prometheus.Desc {
return []*prometheus.Desc{

c.CPU,
c.DiskReadIops,
c.DiskWriteIops,
@@ -130,7 +129,6 @@ func (c *ServerMetricsCollector) Metrics() []*prometheus.Desc {

// Describe sends the super-set of all possible descriptors of metrics collected by this Collector.
func (c *ServerMetricsCollector) Describe(ch chan<- *prometheus.Desc) {

ch <- c.CPU
ch <- c.DiskReadIops
ch <- c.DiskWriteIops
@@ -148,17 +146,11 @@ func (c *ServerMetricsCollector) Collect(ch chan<- prometheus.Metric) {
defer cancel()

now := time.Now()
opts := hcloud.ServerListOpts{
servers, _, err := c.client.Server.List(ctx, hcloud.ServerListOpts{
Status: []hcloud.ServerStatus{
hcloud.ServerStatusRunning,
},
}
servers, _, err := c.client.Server.List(ctx, opts)

level.Debug(c.logger).Log(
"msg", "Fetched online servers",
"count", len(servers),
)
})

if err != nil {
level.Error(c.logger).Log(
@@ -170,30 +162,35 @@ func (c *ServerMetricsCollector) Collect(ch chan<- prometheus.Metric) {
return
}

level.Debug(c.logger).Log(
"msg", "Fetched online servers",
"count", len(servers),
)

type empty struct{}
sem := make(chan empty, len(servers))

for _, server := range servers {

labels := []string{
strconv.Itoa(server.ID),
server.Name,
server.Datacenter.Name,
}

go func(c *ServerMetricsCollector, ctx context.Context, server *hcloud.Server) {

metricsOpts := hcloud.ServerGetMetricsOpts{
Types: []hcloud.ServerMetricType{
hcloud.ServerMetricCPU,
hcloud.ServerMetricDisk,
hcloud.ServerMetricNetwork,
metrics, _, err := c.client.Server.GetMetrics(
ctx,
server,
hcloud.ServerGetMetricsOpts{
Types: []hcloud.ServerMetricType{
hcloud.ServerMetricCPU,
hcloud.ServerMetricDisk,
hcloud.ServerMetricNetwork,
},
Start: time.Now(),
End: time.Now(),
},
Start: time.Now(),
End: time.Now(),
}

metrics, _, err := c.client.Server.GetMetrics(ctx, server, metricsOpts)
)

sem <- empty{}

@@ -207,96 +204,92 @@ func (c *ServerMetricsCollector) Collect(ch chan<- prometheus.Metric) {
return
}

// Hetzner currently only provides a single 0-indexed timeseries for each metric, so it's simply hardcoded.
// If Hetzner ever extends this, determining the number of returned timeseries would be better.
diskLabels := append(labels, "0")
networkLabels := append(labels, "0")

CPU, _ := strconv.ParseFloat(metrics.TimeSeries["cpu"][0].Value, 64)
DiskReadIops, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.iops.read"][0].Value, 64)
DiskWriteIops, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.iops.write"][0].Value, 64)
DiskReadBps, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.bandwidth.read"][0].Value, 64)
DiskWriteBps, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.bandwidth.write"][0].Value, 64)
NetworkInPps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.pps.in"][0].Value, 64)
NetworkOutPps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.pps.out"][0].Value, 64)
NetworkInBps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.bandwidth.in"][0].Value, 64)
NetworkOutBps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.bandwidth.out"][0].Value, 64)
cpuUsage, _ := strconv.ParseFloat(metrics.TimeSeries["cpu"][0].Value, 64)
diskReadIops, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.iops.read"][0].Value, 64)
diskWriteIops, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.iops.write"][0].Value, 64)
diskReadBps, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.bandwidth.read"][0].Value, 64)
diskWriteBps, _ := strconv.ParseFloat(metrics.TimeSeries["disk.0.bandwidth.write"][0].Value, 64)
networkInPps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.pps.in"][0].Value, 64)
networkOutPps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.pps.out"][0].Value, 64)
networkInBps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.bandwidth.in"][0].Value, 64)
networkOutBps, _ := strconv.ParseFloat(metrics.TimeSeries["network.0.bandwidth.out"][0].Value, 64)

ch <- prometheus.MustNewConstMetric(
c.CPU,
prometheus.GaugeValue,
CPU,
cpuUsage,
labels...,
)

ch <- prometheus.MustNewConstMetric(
c.DiskReadIops,
prometheus.GaugeValue,
DiskReadIops,
diskReadIops,
diskLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.DiskWriteIops,
prometheus.GaugeValue,
DiskWriteIops,
diskWriteIops,
diskLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.DiskReadBps,
prometheus.GaugeValue,
DiskReadBps,
diskReadBps,
diskLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.DiskWriteBps,
prometheus.GaugeValue,
DiskWriteBps,
diskWriteBps,
diskLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.NetworkInPps,
prometheus.GaugeValue,
NetworkInPps,
networkInPps,
networkLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.NetworkOutPps,
prometheus.GaugeValue,
NetworkOutPps,
networkOutPps,
networkLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.NetworkInBps,
prometheus.GaugeValue,
NetworkInBps,
networkInBps,
networkLabels...,
)

ch <- prometheus.MustNewConstMetric(
c.NetworkOutBps,
prometheus.GaugeValue,
NetworkOutBps,
networkOutBps,
networkLabels...,
)
}(c, ctx, server)

}

// Wait for all go-routines to signal finished metrics fetch
for i := 0; i < len(servers); i++ {
<-sem
}

c.duration.WithLabelValues("server-metrics").Observe(time.Since(now).Seconds())
level.Debug(c.logger).Log(
"msg", "Fetched server metrics",
"count", len(servers),
)

c.duration.WithLabelValues("server-metrics").Observe(time.Since(now).Seconds())
}
