Scrape at an interval
gesellix committed Oct 21, 2023
1 parent d4c761e commit 0dc5f46
Showing 4 changed files with 62 additions and 22 deletions.
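
The change makes the scrape cadence configurable: at the default of '0s' the exporter keeps its old behaviour and queries CouchDB only when Prometheus scrapes it, while any positive duration moves those queries into a background loop. Intervals use Go's standard duration syntax; a small standalone sketch (illustrative only, not part of the commit) of the accepted values:

package main

import (
    "fmt"
    "time"
)

func main() {
    // '0s' keeps collect-on-scrape; any positive duration enables async scraping.
    for _, s := range []string{"0s", "30s", "5m", "1h30m"} {
        d, err := time.ParseDuration(s)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%-6s -> %v (async scraping: %t)\n", s, d, d > 0)
    }
}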
12 changes: 10 additions & 2 deletions couchdb-exporter.go
@@ -38,6 +38,7 @@ type exporterConfigType struct {
     couchdbUsername            string
     couchdbPassword            string
     couchdbInsecure            bool
+    scrapeInterval             time.Duration
     databases                  string
     databaseViews              bool
     databaseConcurrentRequests uint
@@ -131,6 +132,14 @@ func init() {
         Value:       true,
         Destination: &exporterConfig.couchdbInsecure,
     }),
+    altsrc.NewDurationFlag(&cli.DurationFlag{
+        Name:        "scrape.interval",
+        Usage:       "Duration between metrics collection from the CouchDB cluster. '0s' collects only on Prometheus scrapes",
+        EnvVars:     []string{"SCRAPE_INTERVAL"},
+        Hidden:      false,
+        Value:       0 * time.Second,
+        Destination: &exporterConfig.scrapeInterval,
+    }),
     // TODO use cli.StringSliceFlag?
     altsrc.NewStringFlag(&cli.StringFlag{
         Name: "databases",
@@ -214,14 +223,13 @@ func main() {
         databases = strings.Split(exporterConfig.databases, ",")
     }
 
-    scrapeInterval, _ := time.ParseDuration("0s")
     exporter := lib.NewExporter(
         exporterConfig.couchdbURI,
         lib.BasicAuth{
             Username: exporterConfig.couchdbUsername,
             Password: exporterConfig.couchdbPassword},
         lib.CollectorConfig{
-            ScrapeInterval:       scrapeInterval,
+            ScrapeInterval:       exporterConfig.scrapeInterval,
             Databases:            databases,
             CollectViews:         exporterConfig.databaseViews,
             CollectSchedulerJobs: exporterConfig.schedulerJobs,
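
The new flag can be set on the command line or via the SCRAPE_INTERVAL environment variable. A minimal, self-contained sketch of how such a DurationFlag behaves with urfave/cli/v2, the flag library used above (the demo app itself is hypothetical):

package main

import (
    "fmt"
    "os"
    "time"

    "github.com/urfave/cli/v2"
)

func main() {
    var scrapeInterval time.Duration
    app := &cli.App{
        Flags: []cli.Flag{
            &cli.DurationFlag{
                Name:        "scrape.interval",
                EnvVars:     []string{"SCRAPE_INTERVAL"},
                Value:       0, // zero keeps the old collect-on-scrape behaviour
                Destination: &scrapeInterval,
            },
        },
        Action: func(c *cli.Context) error {
            fmt.Printf("scrape interval: %v\n", scrapeInterval)
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}

Running it with --scrape.interval=30s, or with SCRAPE_INTERVAL=30s in the environment, fills the destination variable; with neither, the zero default applies.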
19 changes: 14 additions & 5 deletions couchdb-exporter_test.go
@@ -121,11 +121,10 @@ func couchdbResponse(t *testing.T, versionSuffix string) Handler {
     }
 }
 
-func performCouchdbStatsTest(t *testing.T, couchdbVersion string, expectedMetricsCount int, expectedGetRequestCount float64, expectedDiskSize float64, expectedRequestCount float64) {
+func performCouchdbStatsTest(t *testing.T, scrapeInterval time.Duration, couchdbVersion string, expectedMetricsCount int, expectedGetRequestCount float64, expectedDiskSize float64, expectedRequestCount float64) {
     basicAuth := lib.BasicAuth{Username: "username", Password: "password"}
     handler := http.HandlerFunc(BasicAuthHandler(basicAuth, couchdbResponse(t, couchdbVersion)))
     server := httptest.NewServer(handler)
-    scrapeInterval, _ := time.ParseDuration("0s")
 
     e := lib.NewExporter(server.URL, basicAuth, lib.CollectorConfig{
         ScrapeInterval: scrapeInterval,
@@ -134,6 +133,11 @@ func performCouchdbStatsTest(t *testing.T, couchdbVersion string, expectedMetricsCount int, expectedGetRequestCount float64, expectedDiskSize float64, expectedRequestCount float64) {
         CollectSchedulerJobs: true,
     }, true)
 
+    // scrapes might run asynchronously (scrapeInterval > 0), so let's wait at least one iteration
+    if scrapeInterval > 0 {
+        time.Sleep(scrapeInterval + time.Second*2)
+    }
+
     ch := make(chan prometheus.Metric)
     go func() {
         defer close(ch)
@@ -176,15 +180,20 @@ func performCouchdbStatsTest(t *testing.T, couchdbVersion string, expectedMetricsCount int, expectedGetRequestCount float64, expectedDiskSize float64, expectedRequestCount float64) {
 }
 
 func TestCouchdbStatsV1(t *testing.T) {
-    performCouchdbStatsTest(t, "v1", 59, 4711, 12396, 11)
+    performCouchdbStatsTest(t, 0, "v1", 59, 4711, 12396, 11)
 }
 
 func TestCouchdbStatsV2(t *testing.T) {
-    performCouchdbStatsTest(t, "v2", 307, 4712, 58570, 17)
+    performCouchdbStatsTest(t, 0, "v2", 307, 4712, 58570, 17)
 }
 
+func TestCouchdbStatsV2Async(t *testing.T) {
+    scrapeInterval, _ := time.ParseDuration("1s")
+    performCouchdbStatsTest(t, scrapeInterval, "v2", 307, 4712, 58570, 17)
+}
+
 func TestCouchdbStatsV2Prerelease(t *testing.T) {
-    performCouchdbStatsTest(t, "v2-pre", 295, 4712, 58570, 17)
+    performCouchdbStatsTest(t, 0, "v2-pre", 295, 4712, 58570, 17)
 }
 
 func TestCouchdbStatsV1Integration(t *testing.T) {
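
The fixed two-second cushion above is the simplest way to let at least one asynchronous scrape finish before asserting. Where that proves flaky, polling with a deadline is a common alternative; a sketch of such a helper (waitFor and its cond predicate are hypothetical, not part of this change):

package main

import "time"

// waitFor polls cond every 50ms until it returns true or the deadline
// expires, avoiding a fixed sleep that may be too short on slow machines.
func waitFor(deadline time.Duration, cond func() bool) bool {
    timeout := time.After(deadline)
    tick := time.NewTicker(50 * time.Millisecond)
    defer tick.Stop()
    for {
        select {
        case <-timeout:
            return false
        case <-tick.C:
            if cond() {
                return true
            }
        }
    }
}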
25 changes: 11 additions & 14 deletions lib/collector.go
@@ -294,6 +294,13 @@ func (e *Exporter) getObservedDatabaseNames(candidates []string) ([]string, error) {
 }
 
 func (e *Exporter) scrape() error {
+    if e.collectorConfig.ScrapeInterval != 0 {
+        // we have to protect collects during scrapes when scraping asynchronously,
+        // otherwise Collect() might get only partial stats
+        e.mutex.Lock()
+        defer e.mutex.Unlock()
+    }
+
     e.resetAllMetrics()
 
     e.up.Set(0)
@@ -334,16 +341,12 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error {
     }
     defer sendStatus()
 
-    if e.collectorConfig.ScrapeInterval.Seconds() == 0 {
-        // sync
-        // old behaviour: scrape the CouchDB when the exporter is being scraped by Prometheus
+    if e.collectorConfig.ScrapeInterval == 0 {
+        // scrape now, before collecting stats into metrics
         err := e.scrape()
         if err != nil {
             return err
         }
-    } else {
-        // async, continuously
-        // new behaviour: scrape the CouchDB at an interval, deliver most recent metrics when the exporter is being scraped by Prometheus
     }
 
     e.databasesTotal.Collect(ch)
@@ -437,14 +440,8 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) error {
 // Collect fetches the stats from configured couchdb location and delivers them
 // as Prometheus metrics. It implements prometheus.Collector.
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
-    e.mutex.Lock() // To protect metrics from concurrent collects.
-    defer e.mutex.Unlock()
-    //registry := prometheus.NewRegistry()
-    //registry.Register(e)
-    //gather, err := registry.Gather()
-    //for _, m := range gather{
-    //    registry.
-    //}.
+    e.mutex.RLock() // To protect metrics from concurrent collects.
+    defer e.mutex.RUnlock()
     if err := e.collect(ch); err != nil {
         klog.Error(fmt.Sprintf("Error collecting stats: %s", err))
     }
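
The switch from Lock to RLock pairs with the write lock now taken inside scrape(): the interval scraper is the single writer, Prometheus collects are readers, so concurrent collects no longer serialize against each other yet never observe a half-finished scrape. A minimal sketch of that pattern with illustrative types (not the exporter's own):

package main

import "sync"

// statsCache mimics the exporter's locking scheme: scrape is the only
// writer and takes the write lock; collect readers share the read lock.
type statsCache struct {
    mu    sync.RWMutex
    stats map[string]float64
}

func (c *statsCache) scrape(fresh map[string]float64) {
    c.mu.Lock() // exclusive: readers wait until the full snapshot is in place
    defer c.mu.Unlock()
    c.stats = fresh
}

func (c *statsCache) collect(visit func(name string, value float64)) {
    c.mu.RLock() // shared: many collects may read concurrently
    defer c.mu.RUnlock()
    for name, value := range c.stats {
        visit(name, value)
    }
}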
28 changes: 27 additions & 1 deletion lib/exporter.go
@@ -1,7 +1,9 @@
 package lib
 
 import (
+    "k8s.io/klog/v2"
     "sync"
+    "time"
 
     "github.com/prometheus/client_golang/prometheus"
 )
@@ -102,9 +104,31 @@ type Exporter struct {
     schedulerJobs *prometheus.GaugeVec
 }
 
+func (e *Exporter) maybeStartScraping() {
+    if e.collectorConfig.ScrapeInterval > 0 {
+        klog.Infof("Asynchronously scraping the CouchDB stats at an interval of %v", e.collectorConfig.ScrapeInterval)
+        ticker := time.NewTicker(e.collectorConfig.ScrapeInterval)
+        quit := make(chan struct{})
+        go func() {
+            for {
+                select {
+                case <-ticker.C:
+                    err := e.scrape()
+                    if err != nil {
+                        klog.Error(err)
+                    }
+                case <-quit:
+                    ticker.Stop()
+                    return
+                }
+            }
+        }()
+    }
+}
+
 func NewExporter(uri string, basicAuth BasicAuth, collectorConfig CollectorConfig, insecure bool) *Exporter {
 
-    return &Exporter{
+    e := &Exporter{
         client:          NewCouchdbClient(uri, basicAuth, insecure),
         collectorConfig: collectorConfig,
 
@@ -731,4 +755,6 @@ func NewExporter(uri string, basicAuth BasicAuth, collectorConfig CollectorConfig, insecure bool) *Exporter {
             },
             []string{"node_name"}),
     }
+    e.maybeStartScraping()
+    return e
 }
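
maybeStartScraping creates a quit channel that nothing ever closes, so the ticker goroutine lives for the lifetime of the process, which is reasonable for a long-running daemon. If the loop ever had to be stopped, for instance in tests, returning a stop function is one conventional shape; a sketch under that assumption (startScraping is hypothetical, not part of this commit):

package main

import (
    "log"
    "time"
)

// startScraping runs scrape on every tick and returns a stop function
// that ends the goroutine and releases the ticker.
func startScraping(interval time.Duration, scrape func() error) (stop func()) {
    ticker := time.NewTicker(interval)
    quit := make(chan struct{})
    go func() {
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                if err := scrape(); err != nil {
                    log.Println(err)
                }
            case <-quit:
                return
            }
        }
    }()
    return func() { close(quit) }
}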
