From 5c1fd689e864aadc15f2a2c18856d559a738254d Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 14 Dec 2016 15:59:16 +0100
Subject: [PATCH 1/9] changed version

---
 VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/VERSION b/VERSION
index 28af839..a53741c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.2.5
\ No newline at end of file
+0.2.6
\ No newline at end of file

From c5b0afc25397e42ce5de62970ea2f4c75c0b02be Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 15 Feb 2017 16:59:33 +0100
Subject: [PATCH 2/9] adds heal info to gluster exporter

metric heal_info_files_count exposed with label for volume

---
 gluster_client.go                           | 56 ++++++++++++++++---
 main.go                                     | 48 +++++++++++++----
 structs/xmlStructs.go                       | 60 ++++++++++++++++++---
 structs/xmlStructs_test.go                  | 49 ++++++++++++++++-
 test/gluster_volume_heal_info.xml           | 25 +++++++++
 test/gluster_volume_heal_info_err_node1.xml | 35 ++++++++++++
 test/gluster_volume_heal_info_err_node2.xml | 33 ++++++++++++
 7 files changed, 279 insertions(+), 27 deletions(-)
 create mode 100644 test/gluster_volume_heal_info.xml
 create mode 100644 test/gluster_volume_heal_info_err_node1.xml
 create mode 100644 test/gluster_volume_heal_info_err_node2.xml

diff --git a/gluster_client.go b/gluster_client.go
index 98fa68e..b08c513 100644
--- a/gluster_client.go
+++ b/gluster_client.go
@@ -3,12 +3,13 @@ package main
 import (
     "bytes"
     "os/exec"
+    "strconv"
 
     "github.com/ofesseler/gluster_exporter/structs"
     "github.com/prometheus/common/log"
 )
 
-func execGlusterCommand(arg ...string) *bytes.Buffer {
+func execGlusterCommand(arg ...string) (*bytes.Buffer, error) {
     stdoutBuffer := &bytes.Buffer{}
     argXML := append(arg, "--xml")
     glusterExec := exec.Command(GlusterCmd, argXML...)
@@ -16,16 +17,20 @@
 
     err := glusterExec.Run()
     if err != nil {
-        log.Fatal(err)
+        log.Errorf("tried to execute %v and got error: %v", arg, err)
+        return stdoutBuffer, err
     }
-    return stdoutBuffer
+    return stdoutBuffer, nil
 }
 
 // ExecVolumeInfo executes "gluster volume info" at the local machine and
 // returns VolumeInfoXML struct and error
 func ExecVolumeInfo() (structs.VolumeInfoXML, error) {
     args := []string{"volume", "info"}
-    bytesBuffer := execGlusterCommand(args...)
+    bytesBuffer, cmdErr := execGlusterCommand(args...)
+    if cmdErr != nil {
+        return structs.VolumeInfoXML{}, cmdErr
+    }
     volumeInfo, err := structs.VolumeInfoXMLUnmarshall(bytesBuffer)
     if err != nil {
         log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -39,7 +44,10 @@
 // returns VolumeList struct and error
 func ExecVolumeList() (structs.VolList, error) {
     args := []string{"volume", "list"}
-    bytesBuffer := execGlusterCommand(args...)
+    bytesBuffer, cmdErr := execGlusterCommand(args...)
+    if cmdErr != nil {
+        return structs.VolList{}, cmdErr
+    }
     volumeList, err := structs.VolumeListXMLUnmarshall(bytesBuffer)
     if err != nil {
         log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -53,7 +61,10 @@
 // returns PeerStatus struct and error
 func ExecPeerStatus() (structs.PeerStatus, error) {
     args := []string{"peer", "status"}
-    bytesBuffer := execGlusterCommand(args...)
+    bytesBuffer, cmdErr := execGlusterCommand(args...)
+    if cmdErr != nil {
+        return structs.PeerStatus{}, cmdErr
+    }
     peerStatus, err := structs.PeerStatusXMLUnmarshall(bytesBuffer)
     if err != nil {
         log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -69,7 +80,10 @@ func ExecVolumeProfileGvInfoCumulative(volumeName string) (structs.VolProfile, e
     args := []string{"volume", "profile"}
     args = append(args, volumeName)
     args = append(args, "info", "cumulative")
-    bytesBuffer := execGlusterCommand(args...)
+    bytesBuffer, cmdErr := execGlusterCommand(args...)
+    if cmdErr != nil {
+        return structs.VolProfile{}, cmdErr
+    }
     volumeProfile, err := structs.VolumeProfileGvInfoCumulativeXMLUnmarshall(bytesBuffer)
     if err != nil {
         log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -82,7 +96,10 @@
 // returns VolumeStatusXML struct and error
 func ExecVolumeStatusAllDetail() (structs.VolumeStatusXML, error) {
     args := []string{"volume", "status", "all", "detail"}
-    bytesBuffer := execGlusterCommand(args...)
+    bytesBuffer, cmdErr := execGlusterCommand(args...)
+    if cmdErr != nil {
+        return structs.VolumeStatusXML{}, cmdErr
+    }
     volumeStatus, err := structs.VolumeStatusAllDetailXMLUnmarshall(bytesBuffer)
     if err != nil {
         log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -90,3 +107,26 @@
     }
     return volumeStatus, nil
 }
+
+// ExecVolumeHealInfo executes volume heal info on host system and processes input
+// returns (int) number of unsynced files
+func ExecVolumeHealInfo(volumeName string) (int, error) {
+    args := []string{"volume", "heal", volumeName, "info"}
+    entriesOutOfSync := 0
+    bytesBuffer, cmdErr := execGlusterCommand(args...)
+    if cmdErr != nil {
+        return -1, cmdErr
+    }
+    healInfo, err := structs.VolumeHealInfoXMLUnmarshall(bytesBuffer)
+    if err != nil {
+        log.Error(err)
+        return -1, err
+    }
+
+    for _, brick := range healInfo.HealInfo.Bricks.Brick {
+        var count int
+        count, _ = strconv.Atoi(brick.NumberOfEntries)
+        entriesOutOfSync += count
+    }
+    return entriesOutOfSync, nil
+}
diff --git a/main.go b/main.go
index 9f2641a..526ec37 100644
--- a/main.go
+++ b/main.go
@@ -32,6 +32,7 @@ const (
     // GlusterCmd is the default path to gluster binary
     GlusterCmd = "/usr/sbin/gluster"
     namespace  = "gluster"
+    allVolumes = "_all"
 )
 
 var (
@@ -118,6 +119,11 @@ var (
         "Is peer connected to gluster cluster.",
         nil, nil,
     )
+
+    healInfoFilesCount = prometheus.NewDesc(
+        prometheus.BuildFQName(namespace, "", "heal_info_files_count"),
+        "File count of files out of sync, when calling 'gluster v heal VOLNAME info'",
+        []string{"volume"}, nil)
 )
 
 // Exporter holds name, path and volumes to be monitored
@@ -144,6 +150,7 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
     ch <- brickFopLatencyAvg
     ch <- brickFopLatencyMin
     ch <- brickFopLatencyMax
+    ch <- healInfoFilesCount
 }
 
 // Collect collects all the metrics
@@ -174,7 +181,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
     )
 
     for _, volume := range volumeInfo.VolInfo.Volumes.Volume {
-        if e.volumes[0] == "_all" || ContainsVolume(e.volumes, volume.Name) {
+        if e.volumes[0] == allVolumes || ContainsVolume(e.volumes, volume.Name) {
             ch <- prometheus.MustNewConstMetric(
                 brickCount, prometheus.GaugeValue, float64(volume.BrickCount), volume.Name,
@@ -187,9 +194,9 @@
     }
 
     // reads gluster peer status
-    peerStatus, err := ExecPeerStatus()
-    if err != nil {
-        log.Errorf("couldn't parse xml of peer status: %v", err)
+    peerStatus, peerStatusErr := ExecPeerStatus()
+    if peerStatusErr != nil {
+        log.Errorf("couldn't parse xml of peer status: %v", peerStatusErr)
     }
     count := 0
     for range peerStatus.Peer {
@@ -202,10 +209,10 @@
     // reads profile info
     if e.profile {
         for _, volume := range volumeInfo.VolInfo.Volumes.Volume {
-            if e.volumes[0] == "_all" || ContainsVolume(e.volumes, volume.Name) {
-                volumeProfile, err := ExecVolumeProfileGvInfoCumulative(volume.Name)
-                if err != nil {
-                    log.Errorf("Error while executing or marshalling gluster profile output: %v", err)
+            if e.volumes[0] == allVolumes || ContainsVolume(e.volumes, volume.Name) {
+                volumeProfile, execVolProfileErr := ExecVolumeProfileGvInfoCumulative(volume.Name)
+                if execVolProfileErr != nil {
+                    log.Errorf("Error while executing or marshalling gluster profile output: %v", execVolProfileErr)
                 }
                 for _, brick := range volumeProfile.Brick {
                     if strings.HasPrefix(brick.BrickName, e.hostname) {
@@ -262,6 +269,27 @@
             )
         }
     }
+    vols := e.volumes
+    if vols[0] == allVolumes {
+        log.Warn("No volumes were given.")
+        volumeList, volumeListErr := ExecVolumeList()
+        if volumeListErr != nil {
+            log.Error(volumeListErr)
+        }
+        vols = volumeList.Volume
+    }
+
+    for _, vol := range vols {
+        log.Infof("Fetching heal info from volume %v", vol)
+        filesCount, volumeHealErr := ExecVolumeHealInfo(vol)
+        if volumeHealErr == nil {
+            log.Infof("got info: %v", filesCount)
+            ch <- prometheus.MustNewConstMetric(
+                healInfoFilesCount, prometheus.CounterValue, float64(filesCount), vol,
+            )
+            log.Infof("healInfoFilesCount is %v for volume %v", filesCount, vol)
+        }
+    }
 }
 
 // ContainsVolume checks a slice if it contains an element
@@ -309,7 +337,7 @@ func main() {
         metricPath     = flag.String("metrics-path", "/metrics", "URL Endpoint for metrics")
         listenAddress  = flag.String("listen-address", ":9189", "The address to listen on for HTTP requests.")
         showVersion    = flag.Bool("version", false, "Prints version information")
-        glusterVolumes = flag.String("volumes", "_all", "Comma separated volume names: vol1,vol2,vol3. Default is '_all' to scrape all metrics")
+        glusterVolumes = flag.String("volumes", allVolumes, fmt.Sprintf("Comma separated volume names: vol1,vol2,vol3. Default is '%v' to scrape all metrics", allVolumes))
         profile        = flag.Bool("profile", false, "When profiling reports in gluster are enabled, set '-profile true' to get more metrics")
     )
     flag.Parse()
@@ -320,7 +348,7 @@
 
     hostname, err := os.Hostname()
     if err != nil {
-        log.Fatal(err)
+        log.Fatalf("Error while getting hostname: %v", err)
     }
     exporter, err := NewExporter(hostname, *glusterPath, *glusterVolumes, *profile)
     if err != nil {
diff --git a/structs/xmlStructs.go b/structs/xmlStructs.go
index 1c6ef4b..87f7867 100644
--- a/structs/xmlStructs.go
+++ b/structs/xmlStructs.go
@@ -3,8 +3,9 @@ package structs
 import (
     "bytes"
     "encoding/xml"
-    "github.com/prometheus/common/log"
     "io/ioutil"
+
+    "github.com/prometheus/common/log"
 )
 
 // VolumeInfoXML struct represents cliOutput element of "gluster volume info" command
@@ -120,10 +121,10 @@ type BrickProfile struct {
 
 // CumulativeStats element of "gluster volume {volume} profile" command
 type CumulativeStats struct {
-    FopStats FopStats `xml:"fopStats"`
-    Duration int `xml:"duration"`
-    TotalRead int `xml:"totalRead"`
-    TotalWrite int `xml:"totalWrite"`
+    FopStats   FopStats `xml:"fopStats"`
+    Duration   int      `xml:"duration"`
+    TotalRead  int      `xml:"totalRead"`
+    TotalWrite int      `xml:"totalWrite"`
 }
 
 // FopStats element of "gluster volume {volume} profile" command
@@ -131,15 +132,56 @@ type FopStats struct {
     Fop []Fop `xml:"fop"`
 }
 
-
+// Fop is struct for FopStats
 type Fop struct {
-    Name string `xml:"name"`
-    Hits int `xml:"hits"`
+    Name       string  `xml:"name"`
+    Hits       int     `xml:"hits"`
     AvgLatency float64 `xml:"avgLatency"`
     MinLatency float64 `xml:"minLatency"`
     MaxLatency float64 `xml:"maxLatency"`
 }
+
+type HealInfoBrick struct {
+    XMLName         xml.Name `xml:"brick"`
+    Name            string   `xml:"name"`
+    Status          string   `xml:"status"`
+    NumberOfEntries string   `xml:"numberOfEntries"`
+}
+
+type HealInfoBricks struct {
+    XMLName xml.Name        `xml:"bricks"`
+    Brick   []HealInfoBrick `xml:"brick"`
+}
+
+type HealInfo struct {
+    XMLName xml.Name       `xml:"healInfo"`
+    Bricks  HealInfoBricks `xml:"bricks"`
+}
+
+// VolumeHealInfoXML struct represents cliOutput element of "gluster volume {volume} heal info" command
+type VolumeHealInfoXML struct {
+    XMLName  xml.Name `xml:"cliOutput"`
+    OpRet    int      `xml:"opRet"`
+    OpErrno  int      `xml:"opErrno"`
+    OpErrstr string   `xml:"opErrstr"`
+    HealInfo HealInfo `xml:"healInfo"`
+}
+
+// VolumeHealInfoXMLUnmarshall unmarshalls heal info of gluster cluster
+func VolumeHealInfoXMLUnmarshall(cmdOutBuff *bytes.Buffer) (VolumeHealInfoXML, error) {
+    var vol VolumeHealInfoXML
+    b, err := ioutil.ReadAll(cmdOutBuff)
+    if err != nil {
+        log.Error(err)
+        return vol, err
+    }
+    err = xml.Unmarshal(b, &vol)
+    if err != nil {
+        log.Error(err)
+    }
+    return vol, err
+}
+
 // VolumeListXMLUnmarshall unmarshalls bytes to VolumeListXML struct
 func VolumeListXMLUnmarshall(cmdOutBuff *bytes.Buffer) (VolumeListXML, error) {
     var vol VolumeListXML
@@ -188,6 +230,7 @@ func VolumeProfileGvInfoCumulativeXMLUnmarshall(cmdOutBuff *bytes.Buffer) (Volum
     return vol, nil
 }
 
+// VolumeStatusXML XML type of "gluster volume status"
 type VolumeStatusXML struct {
     XMLName  xml.Name `xml:"cliOutput"`
     OpRet    int      `xml:"opRet"`
     OpErrno  int      `xml:"opErrno"`
     OpErrstr string   `xml:"opErrstr"`
@@ -221,6 +264,7 @@
     } `xml:"volStatus"`
 }
 
+// VolumeStatusAllDetailXMLUnmarshall reads bytes.buffer and returns unmarshalled xml
 func VolumeStatusAllDetailXMLUnmarshall(cmdOutBuff *bytes.Buffer) (VolumeStatusXML, error) {
     var vol VolumeStatusXML
     b, err := ioutil.ReadAll(cmdOutBuff)
diff --git a/structs/xmlStructs_test.go b/structs/xmlStructs_test.go
index fad6f2a..2bd01cf 100644
--- a/structs/xmlStructs_test.go
+++ b/structs/xmlStructs_test.go
@@ -3,6 +3,8 @@ package structs
 import (
     "bytes"
     "io/ioutil"
+    "log"
+    "strconv"
     "testing"
 )
@@ -167,7 +169,7 @@ func TestVolumeProfileGvInfoCumulativeXMLUnmarshall(t *testing.T) {
         t.Errorf("expected %v as name and got %v", expFopHits, fops[0].Hits)
     }
 
-    if fops[0].AvgLatency!= expAvgLatency {
+    if fops[0].AvgLatency != expAvgLatency {
         t.Errorf("expected %v as name and got %v", expAvgLatency, fops[0].AvgLatency)
     }
@@ -179,3 +181,48 @@
     if fops[0].MaxLatency != expMaxLatency {
         t.Errorf("expected %v as name and got %v", expMaxLatency, fops[0].MaxLatency)
     }
 }
+
+func getCliBufferHelper(filename string) *bytes.Buffer {
+    dat, err := ioutil.ReadFile(filename)
+    if err != nil {
+        log.Fatal("Could not read test data from xml.", err)
+    }
+    return bytes.NewBuffer(dat)
+}
+
+type testPair struct {
+    path      string
+    expected  int
+    nodeCount int
+}
+
+func TestVolumeHealInfoXMLUnmarshall(t *testing.T) {
+    var test = []testPair{
+        {path: "../test/gluster_volume_heal_info_err_node2.xml", expected: 3, nodeCount: 4},
+    }
+
+    for _, c := range test {
+        cmdOutBuffer := getCliBufferHelper(c.path)
+        healInfo, err := VolumeHealInfoXMLUnmarshall(cmdOutBuffer)
+        if err != nil {
+            t.Error(err)
+        }
+        if healInfo.OpErrno != 0 {
+            t.Error(healInfo.OpErrstr)
+        }
+        entriesOutOfSync := 0
+        if len(healInfo.HealInfo.Bricks.Brick) != c.nodeCount {
+            t.Error(healInfo.HealInfo.Bricks)
+            t.Errorf("Expected %v bricks and len is %v", c.nodeCount, len(healInfo.HealInfo.Bricks.Brick))
+        }
+        for _, brick := range healInfo.HealInfo.Bricks.Brick {
+            var count int
+            count, _ = strconv.Atoi(brick.NumberOfEntries)
+            entriesOutOfSync += count
+        }
+        if entriesOutOfSync != c.expected {
+            t.Errorf("Expected %v out of sync entries and got %v", c.expected, entriesOutOfSync)
+        }
+    }
+
+}
diff --git a/test/gluster_volume_heal_info.xml b/test/gluster_volume_heal_info.xml
new file mode 100644
index 0000000..bf3e909
--- /dev/null
+++ b/test/gluster_volume_heal_info.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <healInfo>
+    <bricks>
+      <brick>
+        <name>node1.example.com:/mnt/gluster/gv_test</name>
+        <status>Connected</status>
+        <numberOfEntries>0</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node2.example.com:/mnt/gluster/gv_test</name>
+        <status>Connected</status>
+        <numberOfEntries>0</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node3.example.com:/mnt/gluster/gv_test</name>
+        <status>Connected</status>
+        <numberOfEntries>0</numberOfEntries>
+      </brick>
+    </bricks>
+  </healInfo>
+  <opRet>0</opRet>
+  <opErrno>0</opErrno>
+  <opErrstr/>
+</cliOutput>
diff --git a/test/gluster_volume_heal_info_err_node1.xml b/test/gluster_volume_heal_info_err_node1.xml
new file mode 100644
index 0000000..e20b0ee
--- /dev/null
+++ b/test/gluster_volume_heal_info_err_node1.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <healInfo>
+    <bricks>
+      <brick>
+        <name>node1.example.com:/mnt/gluster/gv_test</name>
+        &lt;gfid:5ff0a22c-f8f1-4dc1-86a2-7db417fc75da&gt;
+        &lt;gfid:e5c164c8-a121-4286-b339-879fb743e105&gt;
+        /
+        &lt;gfid:bc3071f1-807a-49bf-90eb-997e6bd558fe&gt;
+        &lt;gfid:2d5db4f0-3ec1-43d9-8a7d-98da2eecac02&gt;
+        <status>Connected</status>
+        <numberOfEntries>5</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node2.example.com:/mnt/gluster/gv_test</name>
+        <status>Transport endpoint is not connected</status>
+        <numberOfEntries>-</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node3.example.com:/mnt/gluster/gv_test</name>
+        <status>Transport endpoint is not connected</status>
+        <numberOfEntries>-</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node4.example.com:/mnt/gluster/gv_test</name>
+        <status>Transport endpoint is not connected</status>
+        <numberOfEntries>-</numberOfEntries>
+      </brick>
+    </bricks>
+  </healInfo>
+  <opRet>0</opRet>
+  <opErrno>0</opErrno>
+  <opErrstr/>
+</cliOutput>
diff --git a/test/gluster_volume_heal_info_err_node2.xml b/test/gluster_volume_heal_info_err_node2.xml
new file mode 100644
index 0000000..60114c6
--- /dev/null
+++ b/test/gluster_volume_heal_info_err_node2.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <healInfo>
+    <bricks>
+      <brick>
+        <name>node1.example.com:/mnt/gluster/gv_test</name>
+        <status>Transport endpoint is not connected</status>
+        <numberOfEntries>-</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node2.example.com:/mnt/gluster/gv_test</name>
+        &lt;gfid:2e4ef09b-bc83-48f9-8d0e-109d5033dd66&gt;
+        &lt;gfid:a7d55aba-7c7a-4fb9-9a72-ac14587ced82&gt;
+        /
+        <status>Connected</status>
+        <numberOfEntries>3</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node3.example.com:/mnt/gluster/gv_test</name>
+        <status>Transport endpoint is not connected</status>
+        <numberOfEntries>-</numberOfEntries>
+      </brick>
+      <brick>
+        <name>node4.example.com:/mnt/gluster/gv_test</name>
+        <status>Transport endpoint is not connected</status>
+        <numberOfEntries>-</numberOfEntries>
+      </brick>
+    </bricks>
+  </healInfo>
+  <opRet>0</opRet>
+  <opErrno>0</opErrno>
+  <opErrstr/>
+</cliOutput>

From 854c16d8cecdd7845424d1279ee2ceea2359a1f2 Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 15 Feb 2017 20:51:43 +0100
Subject: [PATCH 3/9] adds mount metrics

volume_writeable{volume=<>, mountpoint=<>}
mount_successful{volume=<>, mountpoint=<>}

to check if gluster volumes are successfully mounted and writeable.

---
 gluster_client.go | 29 +++++++++++++++++++++
 main.go           | 66 +++++++++++++++++++++++++++++++++++++++++++++++
 main_test.go      | 14 ++++++++++
 3 files changed, 109 insertions(+)

diff --git a/gluster_client.go b/gluster_client.go
index b08c513..6441807 100644
--- a/gluster_client.go
+++ b/gluster_client.go
@@ -2,8 +2,11 @@ package main
 import (
     "bytes"
+    "fmt"
+    "os"
     "os/exec"
     "strconv"
+    "time"
 
     "github.com/ofesseler/gluster_exporter/structs"
     "github.com/prometheus/common/log"
 )
@@ -23,6 +26,32 @@ func execGlusterCommand(arg ...string) (*bytes.Buffer, error) {
     return stdoutBuffer, nil
 }
 
+func execMountCheck() (*bytes.Buffer, error) {
+    stdoutBuffer := &bytes.Buffer{}
+    mountCmd := exec.Command("mount", "-t", "fuse.glusterfs")
+
+    mountCmd.Stdout = stdoutBuffer
+    err := mountCmd.Run()
+
+    if err != nil {
+        return stdoutBuffer, err
+    }
+    return stdoutBuffer, nil
+}
+
+func execTouchOnVolumes(mountpoint string) (bool, error) {
+    testFileName := "gluster_mount.test"
+    _, createErr := os.Create(fmt.Sprintf("%v/%v_%v", mountpoint, testFileName, time.Now()))
+    if createErr != nil {
+        return false, createErr
+    }
+    removeErr := os.Remove(testFileName)
+    if removeErr != nil {
+        return false, removeErr
+    }
+    return true, nil
+}
+
 // ExecVolumeInfo executes "gluster volume info" at the local machine and
 // returns VolumeInfoXML struct and error
 func ExecVolumeInfo() (structs.VolumeInfoXML, error) {
diff --git a/main.go b/main.go
index 526ec37..cf4dbe9 100644
--- a/main.go
+++ b/main.go
@@ -124,6 +124,16 @@ var (
         prometheus.BuildFQName(namespace, "", "heal_info_files_count"),
         "File count of files out of sync, when calling 'gluster v heal VOLNAME info'",
         []string{"volume"}, nil)
+
+    volumeWriteable = prometheus.NewDesc(
+        prometheus.BuildFQName(namespace, "", "volume_writeable"),
+        "Writes and deletes file in Volume and checks if it si writeable",
+        []string{"volume", "mountpoint"}, nil)
+
+    mountSuccessful = prometheus.NewDesc(
+        prometheus.BuildFQName(namespace, "", "mount_successful"),
+        "Checks if mountpoint exists, returns a bool value 0 or 1",
+        []string{"volume", "mountpoint"}, nil)
 )
 
 // Exporter holds name, path and volumes to be monitored
@@ -151,6 +161,8 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
     ch <- brickFopLatencyMin
     ch <- brickFopLatencyMax
     ch <- healInfoFilesCount
+    ch <- volumeWriteable
+    ch <- mountSuccessful
 }
 
 // Collect collects all the metrics
@@ -290,6 +302,60 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
             log.Infof("healInfoFilesCount is %v for volume %v", filesCount, vol)
         }
     }
+
+    for _, vol := range vols {
+        mountBuffer, execMountCheckErr := execMountCheck()
+        if execMountCheckErr != nil {
+            log.Error(execMountCheckErr)
+        }
+        mounts, err := parseMountOutput(vol, mountBuffer.String())
+        if err != nil {
+            log.Error(err)
+            if mounts != nil && len(mounts) > 0 {
+                for _, mount := range mounts {
+                    ch <- prometheus.MustNewConstMetric(
+                        mountSuccessful, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+                    )
+                }
+            }
+        }
+        for _, mount := range mounts {
+            ch <- prometheus.MustNewConstMetric(
+                mountSuccessful, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
+            )
+
+            isWriteable, err := execTouchOnVolumes(mount.mountPoint)
+            if err != nil {
+                log.Error(err)
+            }
+            if isWriteable {
+                ch <- prometheus.MustNewConstMetric(
+                    volumeWriteable, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
+                )
+            } else {
+                ch <- prometheus.MustNewConstMetric(
+                    volumeWriteable, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+                )
+            }
+        }
+    }
+}
+
+type mount struct {
+    mountPoint string
+    volume     string
+}
+
+// ParseMountOutput parses output of system execution 'mount'
+func parseMountOutput(vol string, mountBuffer string) ([]mount, error) {
+    var mounts []mount
+    mountRows := strings.Split(mountBuffer, "\n")
+    for _, row := range mountRows {
+        trimmedRow := strings.TrimSpace(row)
+        mountColumns := strings.Split(trimmedRow, " ")
+        mounts = append(mounts, mount{mountPoint: mountColumns[2], volume: mountColumns[0]})
+    }
+    return mounts, nil
 }
 
 // ContainsVolume checks a slice if it contains an element
diff --git a/main_test.go b/main_test.go
index 035ca95..b25beeb 100644
--- a/main_test.go
+++ b/main_test.go
@@ -9,3 +9,17 @@ func TestContainsVolume(t *testing.T) {
         t.Fatalf("Hasn't found %v in slice %v", expamle, testSlice)
     }
 }
+
+func TestParseMountOutput(t *testing.T) {
+    mountOutput := "/dev/mapper/cryptroot on / type ext4 (rw,relatime,data=ordered) \n /dev/mapper/cryptroot on /var/lib/docker/devicemapper type ext4 (rw,relatime,data=ordered)"
+    mounts, err := parseMountOutput("asd", mountOutput)
+    if err != nil {
+        t.Error(err)
+    }
+    expected := []string{"/", "/var/lib/docker/devicemapper"}
+    for i, mount := range mounts {
+        if mount.mountPoint != expected[i] {
+            t.Errorf("mountpoint is %v and %v was expected", mount.mountPoint, expected[i])
+        }
+    }
+}

From f79de6bb94c2e4cd176cd19cb849d4c79302d2f7 Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 15 Feb 2017 20:52:32 +0100
Subject: [PATCH 4/9] changes Gluster version from 3.8.5 to 3.8.LATEST

---
 Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 2740849..64bf8d2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,11 +6,11 @@ EXPOSE 24007
 EXPOSE 24008
 
 # Gluster debian Repo
-ADD http://download.gluster.org/pub/gluster/glusterfs/3.8/3.8.5/rsa.pub /tmp
+ADD http://download.gluster.org/pub/gluster/glusterfs/3.8/LATEST/rsa.pub /tmp
 RUN apt-key add /tmp/rsa.pub && rm -f /tmp/rsa.pub
 
 # Add gluster debian repo and update apt
-RUN echo deb http://download.gluster.org/pub/gluster/glusterfs/3.8/3.8.5/Debian/jessie/apt jessie main > /etc/apt/sources.list.d/gluster.list
+RUN echo deb http://download.gluster.org/pub/gluster/glusterfs/3.8/LATEST/Debian/jessie/apt jessie main > /etc/apt/sources.list.d/gluster.list
 RUN apt-get update
 
 # Install Gluster server

From 0f47b374bd760131131ddbf9f131483fd95ef527 Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 15 Feb 2017 22:53:35 +0100
Subject: [PATCH 5/9] fixes docker build error

---
 Dockerfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Dockerfile b/Dockerfile
index 64bf8d2..576a98e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,6 +5,7 @@ EXPOSE 9189
 EXPOSE 24007
 EXPOSE 24008
 
+RUN apt-get update && apt-get install -y apt-transport-https ca-certificates
 # Gluster debian Repo
 ADD http://download.gluster.org/pub/gluster/glusterfs/3.8/LATEST/rsa.pub /tmp
 RUN apt-key add /tmp/rsa.pub && rm -f /tmp/rsa.pub

From d31a14b65ba2574530bf696dbcbe6cd16157963d Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 15 Feb 2017 22:53:48 +0100
Subject: [PATCH 6/9] typo

---
 main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.go b/main.go
index cf4dbe9..633c814 100644
--- a/main.go
+++ b/main.go
@@ -127,7 +127,7 @@ var (
 
     volumeWriteable = prometheus.NewDesc(
         prometheus.BuildFQName(namespace, "", "volume_writeable"),
-        "Writes and deletes file in Volume and checks if it si writeable",
+        "Writes and deletes file in Volume and checks if it is writeable",
         []string{"volume", "mountpoint"}, nil)
 
     mountSuccessful = prometheus.NewDesc(

From 231cbb253b0416c40ef8fa40d5f3d800e87d8112 Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Wed, 15 Feb 2017 22:54:01 +0100
Subject: [PATCH 7/9] fixes out of range error after querying mount and adds
 testcase

---
 main.go      | 60 ++++++++++++++++++++++++++--------------------------
 main_test.go | 35 +++++++++++++++++++++++-------
 2 files changed, 57 insertions(+), 38 deletions(-)

diff --git a/main.go b/main.go
index 633c814..b6dfd6c 100644
--- a/main.go
+++ b/main.go
@@ -292,14 +292,11 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
     }
 
     for _, vol := range vols {
-        log.Infof("Fetching heal info from volume %v", vol)
         filesCount, volumeHealErr := ExecVolumeHealInfo(vol)
         if volumeHealErr == nil {
-            log.Infof("got info: %v", filesCount)
             ch <- prometheus.MustNewConstMetric(
                 healInfoFilesCount, prometheus.CounterValue, float64(filesCount), vol,
             )
-            log.Infof("healInfoFilesCount is %v for volume %v", filesCount, vol)
         }
     }
 
@@ -307,35 +304,36 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
         mountBuffer, execMountCheckErr := execMountCheck()
         if execMountCheckErr != nil {
             log.Error(execMountCheckErr)
-        }
-        mounts, err := parseMountOutput(vol, mountBuffer.String())
-        if err != nil {
-            log.Error(err)
-            if mounts != nil && len(mounts) > 0 {
-                for _, mount := range mounts {
-                    ch <- prometheus.MustNewConstMetric(
-                        mountSuccessful, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
-                    )
-                }
-            }
-        }
-        for _, mount := range mounts {
-            ch <- prometheus.MustNewConstMetric(
-                mountSuccessful, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
-            )
-
-            isWriteable, err := execTouchOnVolumes(mount.mountPoint)
+        } else {
+            mounts, err := parseMountOutput(vol, mountBuffer.String())
             if err != nil {
                 log.Error(err)
+                if mounts != nil && len(mounts) > 0 {
+                    for _, mount := range mounts {
+                        ch <- prometheus.MustNewConstMetric(
+                            mountSuccessful, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+                        )
+                    }
+                }
             }
-            if isWriteable {
-                ch <- prometheus.MustNewConstMetric(
-                    volumeWriteable, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
-                )
-            } else {
+            for _, mount := range mounts {
                 ch <- prometheus.MustNewConstMetric(
-                    volumeWriteable, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+                    mountSuccessful, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
                 )
+
+                isWriteable, err := execTouchOnVolumes(mount.mountPoint)
+                if err != nil {
+                    log.Error(err)
+                }
+                if isWriteable {
+                    ch <- prometheus.MustNewConstMetric(
+                        volumeWriteable, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
+                    )
+                } else {
+                    ch <- prometheus.MustNewConstMetric(
+                        volumeWriteable, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+                    )
+                }
             }
         }
     }
@@ -348,12 +346,14 @@ type mount struct {
 }
 
 // ParseMountOutput parses output of system execution 'mount'
 func parseMountOutput(vol string, mountBuffer string) ([]mount, error) {
-    var mounts []mount
+    mounts := make([]mount, 0, 2)
     mountRows := strings.Split(mountBuffer, "\n")
     for _, row := range mountRows {
         trimmedRow := strings.TrimSpace(row)
-        mountColumns := strings.Split(trimmedRow, " ")
-        mounts = append(mounts, mount{mountPoint: mountColumns[2], volume: mountColumns[0]})
+        if len(row) > 3 {
+            mountColumns := strings.Split(trimmedRow, " ")
+            mounts = append(mounts, mount{mountPoint: mountColumns[2], volume: mountColumns[0]})
+        }
     }
     return mounts, nil
 }
diff --git a/main_test.go b/main_test.go
index b25beeb..005f915 100644
--- a/main_test.go
+++ b/main_test.go
@@ -10,16 +10,35 @@ func TestContainsVolume(t *testing.T) {
     }
 }
 
+type testCases struct {
+    mountOutput string
+    expected    []string
+}
+
 func TestParseMountOutput(t *testing.T) {
-    mountOutput := "/dev/mapper/cryptroot on / type ext4 (rw,relatime,data=ordered) \n /dev/mapper/cryptroot on /var/lib/docker/devicemapper type ext4 (rw,relatime,data=ordered)"
-    mounts, err := parseMountOutput("asd", mountOutput)
-    if err != nil {
-        t.Error(err)
+    var tests = []testCases{
+        {
+            mountOutput: "/dev/mapper/cryptroot on / type ext4 (rw,relatime,data=ordered) \n" +
+                "/dev/mapper/cryptroot on /var/lib/docker/devicemapper type ext4 (rw,relatime,data=ordered)",
+            expected: []string{"/", "/var/lib/docker/devicemapper"},
+        },
+        {
+            mountOutput: "/dev/mapper/cryptroot on / type ext4 (rw,relatime,data=ordered) \n" +
+                "",
+            expected: []string{"/"},
+        },
     }
-    expected := []string{"/", "/var/lib/docker/devicemapper"}
-    for i, mount := range mounts {
-        if mount.mountPoint != expected[i] {
-            t.Errorf("mountpoint is %v and %v was expected", mount.mountPoint, expected[i])
+    for _, c := range tests {
+        mounts, err := parseMountOutput("asd", c.mountOutput)
+        if err != nil {
+            t.Error(err)
+        }
+
+        for i, mount := range mounts {
+            if mount.mountPoint != c.expected[i] {
+                t.Errorf("mountpoint is %v and %v was expected", mount.mountPoint, c.expected[i])
+            }
         }
     }
+
 }

From 600452a43ff68cc889a0d94e1b353f122a112173 Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Thu, 16 Feb 2017 10:33:04 +0100
Subject: [PATCH 8/9] fixes file not found while deleting test file

The delete method had the wrong name.
This is fixed now, so that every file that is created and deleted has the
same name.

---
 gluster_client.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gluster_client.go b/gluster_client.go
index 6441807..94879a6 100644
--- a/gluster_client.go
+++ b/gluster_client.go
@@ -40,8 +40,8 @@ func execMountCheck() (*bytes.Buffer, error) {
 }
 
 func execTouchOnVolumes(mountpoint string) (bool, error) {
-    testFileName := "gluster_mount.test"
-    _, createErr := os.Create(fmt.Sprintf("%v/%v_%v", mountpoint, testFileName, time.Now()))
+    testFileName := fmt.Sprintf("%v/%v_%v", mountpoint, "gluster_mount.test", time.Now())
+    _, createErr := os.Create(testFileName)
     if createErr != nil {
         return false, createErr
     }

From 3a1f091fa8f6d0d3a935b3c950041755e3aad0cc Mon Sep 17 00:00:00 2001
From: Oliver Fesseler
Date: Thu, 16 Feb 2017 10:35:14 +0100
Subject: [PATCH 9/9] fixes multiple usage of same metric

- removes vol parameter from parseMountOutput
- removes for loop around mountpoint check which caused the error.

---
 main.go      | 29 ++++++++++++++---------------
 main_test.go |  2 +-
 2 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/main.go b/main.go
index b6dfd6c..aa87394 100644
--- a/main.go
+++ b/main.go
@@ -300,22 +300,21 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
         }
     }
 
-    for _, vol := range vols {
-        mountBuffer, execMountCheckErr := execMountCheck()
-        if execMountCheckErr != nil {
-            log.Error(execMountCheckErr)
-        } else {
-            mounts, err := parseMountOutput(vol, mountBuffer.String())
-            if err != nil {
-                log.Error(err)
-                if mounts != nil && len(mounts) > 0 {
-                    for _, mount := range mounts {
-                        ch <- prometheus.MustNewConstMetric(
-                            mountSuccessful, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
-                        )
-                    }
+    mountBuffer, execMountCheckErr := execMountCheck()
+    if execMountCheckErr != nil {
+        log.Error(execMountCheckErr)
+    } else {
+        mounts, err := parseMountOutput(mountBuffer.String())
+        if err != nil {
+            log.Error(err)
+            if mounts != nil && len(mounts) > 0 {
+                for _, mount := range mounts {
+                    ch <- prometheus.MustNewConstMetric(
+                        mountSuccessful, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+                    )
                 }
             }
+        } else {
             for _, mount := range mounts {
                 ch <- prometheus.MustNewConstMetric(
                     mountSuccessful, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
@@ -345,7 +344,7 @@ type mount struct {
 }
 
 // ParseMountOutput parses output of system execution 'mount'
-func parseMountOutput(vol string, mountBuffer string) ([]mount, error) {
+func parseMountOutput(mountBuffer string) ([]mount, error) {
     mounts := make([]mount, 0, 2)
     mountRows := strings.Split(mountBuffer, "\n")
     for _, row := range mountRows {
diff --git a/main_test.go b/main_test.go
index 005f915..8eaffd3 100644
--- a/main_test.go
+++ b/main_test.go
@@ -29,7 +29,7 @@ func TestParseMountOutput(t *testing.T) {
         },
     }
     for _, c := range tests {
-        mounts, err := parseMountOutput("asd", c.mountOutput)
+        mounts, err := parseMountOutput(c.mountOutput)
         if err != nil {
             t.Error(err)
         }