Skip to content

Commit

Permalink
refactor(zfs): Allow testing collector output
Browse files Browse the repository at this point in the history
TODO: Test caching
  • Loading branch information
pdf committed Sep 4, 2021
1 parent 1ca8717 commit b84092b
Show file tree
Hide file tree
Showing 16 changed files with 1,102 additions and 67 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ jobs:

# Steps represent a sequence of tasks that will be executed as part of the job
steps:
- name: Go Report Card
uses: creekorful/[email protected]

# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- name: Checkout
id: checkout
Expand Down
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
# ZFS Exporter

[![Test](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml/badge.svg)](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml)
[![Release](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml/badge.svg)](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml)
[![Go Report Card](https://goreportcard.com/badge/github.com/pdf/zfs_exporter)](https://goreportcard.com/report/github.com/pdf/zfs_exporter)
[![License](https://img.shields.io/badge/License-MIT-%23a31f34)](https://github.com/pdf/zfs_exporter/blob/master/LICENSE)

Prometheus exporter for ZFS (pools, filesystems, snapshots and volumes). Other implementations exist, however performance can be quite variable, producing occasional timeouts (and associated alerts). This exporter was built with a few features aimed at allowing users to avoid collecting more than they need to, and to ensure timeouts cannot occur, but that we eventually return useful data:
Expand Down
4 changes: 3 additions & 1 deletion collector/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"strings"

"github.com/go-kit/log"
"github.com/pdf/zfs_exporter/zfs"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
Expand Down Expand Up @@ -45,7 +46,7 @@ var (
errUnsupportedProperty = errors.New(`unsupported property`)
)

type factoryFunc func(l log.Logger, properties []string) (Collector, error)
type factoryFunc func(l log.Logger, c zfs.Client, properties []string) (Collector, error)

type transformFunc func(string) (float64, error)

Expand All @@ -60,6 +61,7 @@ type State struct {
// Collector defines the minimum functionality for registering a collector
type Collector interface {
	// update gathers metrics for the named pools and sends them on ch,
	// skipping anything matched by excludes.
	update(ch chan<- metric, pools []string, excludes regexpCollection) error
	// describe sends the prometheus.Desc for every metric this collector
	// may produce.
	describe(ch chan<- *prometheus.Desc)
}

type metric struct {
Expand Down
52 changes: 52 additions & 0 deletions collector/collector_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
package collector

import (
"bytes"
"context"
"time"

"github.com/go-kit/kit/log"
"github.com/pdf/zfs_exporter/zfs"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
)

var (
	// logger is the shared no-op logger used by the collector tests; swap
	// in the commented LogfmtLogger line below to see log output locally.
	// NOTE(review): this test file imports github.com/go-kit/kit/log while
	// collector.go imports github.com/go-kit/log — confirm the Logger type
	// here is compatible with ZFSConfig.Logger.
	logger = log.NewNopLogger()
	//logger = log.NewLogfmtLogger(os.Stderr)
)

// callCollector runs testutil.CollectAndCompare for the given collector in
// a separate goroutine, comparing its output against the expected metric
// exposition in metricResults (restricted to metricNames). It returns the
// comparison error, or the context's error if ctx is done first.
func callCollector(ctx context.Context, collector prometheus.Collector, metricResults []byte, metricNames []string) error {
	done := make(chan error)
	go func() {
		expected := bytes.NewReader(metricResults)
		done <- testutil.CollectAndCompare(collector, expected, metricNames...)
	}()

	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-done:
		return err
	}
}

// defaultConfig returns the baseline ZFSConfig used by collector tests:
// metrics disabled, a generous 5-minute deadline, the package test logger,
// and the supplied ZFS client.
//
// The error return is kept for call-site compatibility; it is always nil
// now that the deadline is a constant instead of a runtime-parsed string
// (parsing the literal "5m" could never fail, so the error path was dead).
func defaultConfig(z zfs.Client) (ZFSConfig, error) {
	return ZFSConfig{
		DisableMetrics: true,
		Deadline:       5 * time.Minute,
		Logger:         logger,
		ZFSClient:      z,
	}, nil
}

// stringPointer returns the address of a copy of s, for populating
// optional *string fields in test fixtures.
func stringPointer(s string) *string {
	v := s
	return &v
}

// boolPointer returns the address of a copy of b, for populating
// optional *bool fields in test fixtures.
func boolPointer(b bool) *bool {
	v := b
	return &v
}
48 changes: 31 additions & 17 deletions collector/dataset.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pdf/zfs_exporter/zfs"
"github.com/prometheus/client_golang/prometheus"
)

const (
Expand Down Expand Up @@ -165,9 +166,21 @@ func init() {
}

// datasetCollector exports metrics for one kind of ZFS dataset
// (filesystem, snapshot or volume), reading properties via client.
// (The scrape fused the pre- and post-commit field lists; only the
// post-commit fields, including client, are kept.)
type datasetCollector struct {
	kind   zfs.DatasetKind // dataset kind this collector handles
	log    log.Logger      // collector-scoped logger
	client zfs.Client      // ZFS client used to read dataset properties
	props  []string        // property names to collect
}

// describe sends the prometheus.Desc for each configured property on ch.
// Properties that datasetProperties does not know about are logged at warn
// level and skipped.
func (c *datasetCollector) describe(ch chan<- *prometheus.Desc) {
	for _, name := range c.props {
		prop, err := datasetProperties.find(name)
		if err != nil {
			_ = level.Warn(c.log).Log(`msg`, propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, name, `err`, err)
			continue
		}
		ch <- prop.desc
	}
}

func (c *datasetCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error {
Expand All @@ -193,13 +206,14 @@ func (c *datasetCollector) update(ch chan<- metric, pools []string, excludes reg
}

func (c *datasetCollector) updatePoolMetrics(ch chan<- metric, pool string, excludes regexpCollection) error {
datasets, err := zfs.DatasetProperties(pool, c.kind, c.props...)
datasets := c.client.Datasets(pool, c.kind)
props, err := datasets.Properties(c.props...)
if err != nil {
return err
}

for _, dataset := range datasets {
if excludes.MatchString(dataset.Name) {
for _, dataset := range props {
if excludes.MatchString(dataset.DatasetName()) {
continue
}
if err = c.updateDatasetMetrics(ch, pool, dataset); err != nil {
Expand All @@ -210,10 +224,10 @@ func (c *datasetCollector) updatePoolMetrics(ch chan<- metric, pool string, excl
return nil
}

func (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool string, dataset zfs.Dataset) error {
labelValues := []string{dataset.Name, pool, string(c.kind)}
func (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool string, dataset zfs.DatasetProperties) error {
labelValues := []string{dataset.DatasetName(), pool, string(c.kind)}

for k, v := range dataset.Properties {
for k, v := range dataset.Properties() {
prop, err := datasetProperties.find(k)
if err != nil {
_ = level.Warn(c.log).Log(`msg`, propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, k, `err`, err)
Expand All @@ -226,24 +240,24 @@ func (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool string, d
return nil
}

func newDatasetCollector(kind zfs.DatasetKind, l log.Logger, props []string) (Collector, error) {
func newDatasetCollector(kind zfs.DatasetKind, l log.Logger, c zfs.Client, props []string) (Collector, error) {
switch kind {
case zfs.DatasetFilesystem, zfs.DatasetSnapshot, zfs.DatasetVolume:
default:
return nil, fmt.Errorf("unknown dataset type: %s", kind)
}

return &datasetCollector{kind: kind, log: l, props: props}, nil
return &datasetCollector{kind: kind, log: l, client: c, props: props}, nil
}

func newFilesystemCollector(l log.Logger, props []string) (Collector, error) {
return newDatasetCollector(zfs.DatasetFilesystem, l, props)
func newFilesystemCollector(l log.Logger, c zfs.Client, props []string) (Collector, error) {
return newDatasetCollector(zfs.DatasetFilesystem, l, c, props)
}

func newSnapshotCollector(l log.Logger, props []string) (Collector, error) {
return newDatasetCollector(zfs.DatasetSnapshot, l, props)
func newSnapshotCollector(l log.Logger, c zfs.Client, props []string) (Collector, error) {
return newDatasetCollector(zfs.DatasetSnapshot, l, c, props)
}

func newVolumeCollector(l log.Logger, props []string) (Collector, error) {
return newDatasetCollector(zfs.DatasetVolume, l, props)
func newVolumeCollector(l log.Logger, c zfs.Client, props []string) (Collector, error) {
return newDatasetCollector(zfs.DatasetVolume, l, c, props)
}
Loading

0 comments on commit b84092b

Please sign in to comment.