This repository has been archived by the owner on Jul 9, 2024. It is now read-only.

Commit

Merge pull request #8 from ofesseler/dev
0.2.6
ofesseler authored Feb 16, 2017
2 parents 0e16052 + 3a1f091 commit a11d61e
Showing 10 changed files with 410 additions and 30 deletions.
5 changes: 3 additions & 2 deletions Dockerfile
@@ -5,12 +5,13 @@ EXPOSE 9189
EXPOSE 24007
EXPOSE 24008

+RUN apt-get update && apt-get install -y apt-transport-https ca-certificates
# Gluster debian Repo
-ADD http://download.gluster.org/pub/gluster/glusterfs/3.8/3.8.5/rsa.pub /tmp
+ADD http://download.gluster.org/pub/gluster/glusterfs/3.8/LATEST/rsa.pub /tmp
RUN apt-key add /tmp/rsa.pub && rm -f /tmp/rsa.pub

# Add gluster debian repo and update apt
-RUN echo deb http://download.gluster.org/pub/gluster/glusterfs/3.8/3.8.5/Debian/jessie/apt jessie main > /etc/apt/sources.list.d/gluster.list
+RUN echo deb http://download.gluster.org/pub/gluster/glusterfs/3.8/LATEST/Debian/jessie/apt jessie main > /etc/apt/sources.list.d/gluster.list
RUN apt-get update

# Install Gluster server
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-0.2.5
+0.2.6
85 changes: 77 additions & 8 deletions gluster_client.go
@@ -2,30 +2,64 @@ package main

import (
"bytes"
+"fmt"
+"os"
"os/exec"
+"strconv"
+"time"

"github.com/ofesseler/gluster_exporter/structs"
"github.com/prometheus/common/log"
)

-func execGlusterCommand(arg ...string) *bytes.Buffer {
+func execGlusterCommand(arg ...string) (*bytes.Buffer, error) {
stdoutBuffer := &bytes.Buffer{}
argXML := append(arg, "--xml")
glusterExec := exec.Command(GlusterCmd, argXML...)
glusterExec.Stdout = stdoutBuffer
err := glusterExec.Run()

if err != nil {
-log.Fatal(err)
+log.Errorf("tried to execute %v and got error: %v", arg, err)
+return stdoutBuffer, err
}
-return stdoutBuffer
+return stdoutBuffer, nil
}

+func execMountCheck() (*bytes.Buffer, error) {
+stdoutBuffer := &bytes.Buffer{}
+mountCmd := exec.Command("mount", "-t", "fuse.glusterfs")
+
+mountCmd.Stdout = stdoutBuffer
+err := mountCmd.Run()
+
+if err != nil {
+return stdoutBuffer, err
+}
+return stdoutBuffer, nil
+}
+
+func execTouchOnVolumes(mountpoint string) (bool, error) {
+testFileName := fmt.Sprintf("%v/%v_%v", mountpoint, "gluster_mount.test", time.Now())
+_, createErr := os.Create(testFileName)
+if createErr != nil {
+return false, createErr
+}
+removeErr := os.Remove(testFileName)
+if removeErr != nil {
+return false, removeErr
+}
+return true, nil
+}

// ExecVolumeInfo executes "gluster volume info" at the local machine and
// returns VolumeInfoXML struct and error
func ExecVolumeInfo() (structs.VolumeInfoXML, error) {
args := []string{"volume", "info"}
-bytesBuffer := execGlusterCommand(args...)
+bytesBuffer, cmdErr := execGlusterCommand(args...)
+if cmdErr != nil {
+return structs.VolumeInfoXML{}, cmdErr
+}
volumeInfo, err := structs.VolumeInfoXMLUnmarshall(bytesBuffer)
if err != nil {
log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -39,7 +73,10 @@ func ExecVolumeInfo() (structs.VolumeInfoXML, error) {
// returns VolumeList struct and error
func ExecVolumeList() (structs.VolList, error) {
args := []string{"volume", "list"}
-bytesBuffer := execGlusterCommand(args...)
+bytesBuffer, cmdErr := execGlusterCommand(args...)
+if cmdErr != nil {
+return structs.VolList{}, cmdErr
+}
volumeList, err := structs.VolumeListXMLUnmarshall(bytesBuffer)
if err != nil {
log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -53,7 +90,10 @@ func ExecVolumeList() (structs.VolList, error) {
// returns PeerStatus struct and error
func ExecPeerStatus() (structs.PeerStatus, error) {
args := []string{"peer", "status"}
-bytesBuffer := execGlusterCommand(args...)
+bytesBuffer, cmdErr := execGlusterCommand(args...)
+if cmdErr != nil {
+return structs.PeerStatus{}, cmdErr
+}
peerStatus, err := structs.PeerStatusXMLUnmarshall(bytesBuffer)
if err != nil {
log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -69,7 +109,10 @@ func ExecVolumeProfileGvInfoCumulative(volumeName string) (structs.VolProfile, e
args := []string{"volume", "profile"}
args = append(args, volumeName)
args = append(args, "info", "cumulative")
-bytesBuffer := execGlusterCommand(args...)
+bytesBuffer, cmdErr := execGlusterCommand(args...)
+if cmdErr != nil {
+return structs.VolProfile{}, cmdErr
+}
volumeProfile, err := structs.VolumeProfileGvInfoCumulativeXMLUnmarshall(bytesBuffer)
if err != nil {
log.Errorf("Something went wrong while unmarshalling xml: %v", err)
@@ -82,11 +125,37 @@ func ExecVolumeProfileGvInfoCumulative(volumeName string) (structs.VolProfile, e
// returns VolumeStatusXML struct and error
func ExecVolumeStatusAllDetail() (structs.VolumeStatusXML, error) {
args := []string{"volume", "status", "all", "detail"}
-bytesBuffer := execGlusterCommand(args...)
+bytesBuffer, cmdErr := execGlusterCommand(args...)
+if cmdErr != nil {
+return structs.VolumeStatusXML{}, cmdErr
+}
volumeStatus, err := structs.VolumeStatusAllDetailXMLUnmarshall(bytesBuffer)
if err != nil {
log.Errorf("Something went wrong while unmarshalling xml: %v", err)
return volumeStatus, err
}
return volumeStatus, nil
}

+// ExecVolumeHealInfo executes volume heal info on host system and processes input
+// returns (int) number of unsynced files
+func ExecVolumeHealInfo(volumeName string) (int, error) {
+args := []string{"volume", "heal", volumeName, "info"}
+entriesOutOfSync := 0
+bytesBuffer, cmdErr := execGlusterCommand(args...)
+if cmdErr != nil {
+return -1, cmdErr
+}
+healInfo, err := structs.VolumeHealInfoXMLUnmarshall(bytesBuffer)
+if err != nil {
+log.Error(err)
+return -1, err
+}
+
+for _, brick := range healInfo.HealInfo.Bricks.Brick {
+var count int
+count, _ = strconv.Atoi(brick.NumberOfEntries)
+entriesOutOfSync += count
+}
+return entriesOutOfSync, nil
+}
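
Not part of this commit, purely illustrative: a minimal sketch of how the new (value, error) returns of the helpers in this file might be consumed by a caller. ExecVolumeList, ExecVolumeHealInfo and their return shapes come from the diff above; the countUnsyncedFiles wrapper itself is hypothetical and assumes it lives in the same package.

// countUnsyncedFiles is a hypothetical helper that sums out-of-sync entries
// across all volumes, propagating errors instead of the old log.Fatal behaviour.
func countUnsyncedFiles() (int, error) {
	volumeList, err := ExecVolumeList()
	if err != nil {
		return 0, err
	}
	total := 0
	for _, vol := range volumeList.Volume {
		count, healErr := ExecVolumeHealInfo(vol)
		if healErr != nil {
			return 0, healErr
		}
		total += count
	}
	return total, nil
}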
113 changes: 103 additions & 10 deletions main.go
@@ -32,6 +32,7 @@ const (
// GlusterCmd is the default path to gluster binary
GlusterCmd = "/usr/sbin/gluster"
namespace = "gluster"
+allVolumes = "_all"
)

var (
@@ -118,6 +119,21 @@ var (
"Is peer connected to gluster cluster.",
nil, nil,
)

+healInfoFilesCount = prometheus.NewDesc(
+prometheus.BuildFQName(namespace, "", "heal_info_files_count"),
+"File count of files out of sync, when calling 'gluster v heal VOLNAME info'",
+[]string{"volume"}, nil)
+
+volumeWriteable = prometheus.NewDesc(
+prometheus.BuildFQName(namespace, "", "volume_writeable"),
+"Writes and deletes a file in the volume and checks if it is writeable",
+[]string{"volume", "mountpoint"}, nil)
+
+mountSuccessful = prometheus.NewDesc(
+prometheus.BuildFQName(namespace, "", "mount_successful"),
+"Checks if mountpoint exists, returns a bool value 0 or 1",
+[]string{"volume", "mountpoint"}, nil)
)

// Exporter holds name, path and volumes to be monitored
@@ -144,6 +160,9 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
ch <- brickFopLatencyAvg
ch <- brickFopLatencyMin
ch <- brickFopLatencyMax
+ch <- healInfoFilesCount
+ch <- volumeWriteable
+ch <- mountSuccessful
}

// Collect collects all the metrics
@@ -174,7 +193,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
)

for _, volume := range volumeInfo.VolInfo.Volumes.Volume {
-if e.volumes[0] == "_all" || ContainsVolume(e.volumes, volume.Name) {
+if e.volumes[0] == allVolumes || ContainsVolume(e.volumes, volume.Name) {

ch <- prometheus.MustNewConstMetric(
brickCount, prometheus.GaugeValue, float64(volume.BrickCount), volume.Name,
@@ -187,9 +206,9 @@ }
}

// reads gluster peer status
-peerStatus, err := ExecPeerStatus()
-if err != nil {
-log.Errorf("couldn't parse xml of peer status: %v", err)
+peerStatus, peerStatusErr := ExecPeerStatus()
+if peerStatusErr != nil {
+log.Errorf("couldn't parse xml of peer status: %v", peerStatusErr)
}
count := 0
for range peerStatus.Peer {
@@ -202,10 +221,10 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
// reads profile info
if e.profile {
for _, volume := range volumeInfo.VolInfo.Volumes.Volume {
-if e.volumes[0] == "_all" || ContainsVolume(e.volumes, volume.Name) {
-volumeProfile, err := ExecVolumeProfileGvInfoCumulative(volume.Name)
-if err != nil {
-log.Errorf("Error while executing or marshalling gluster profile output: %v", err)
+if e.volumes[0] == allVolumes || ContainsVolume(e.volumes, volume.Name) {
+volumeProfile, execVolProfileErr := ExecVolumeProfileGvInfoCumulative(volume.Name)
+if execVolProfileErr != nil {
+log.Errorf("Error while executing or marshalling gluster profile output: %v", execVolProfileErr)
}
for _, brick := range volumeProfile.Brick {
if strings.HasPrefix(brick.BrickName, e.hostname) {
@@ -262,6 +281,80 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
)
}
}
+vols := e.volumes
+if vols[0] == allVolumes {
+log.Warn("no Volumes were given.")
+volumeList, volumeListErr := ExecVolumeList()
+if volumeListErr != nil {
+log.Error(volumeListErr)
+}
+vols = volumeList.Volume
+}
+
+for _, vol := range vols {
+filesCount, volumeHealErr := ExecVolumeHealInfo(vol)
+if volumeHealErr == nil {
+ch <- prometheus.MustNewConstMetric(
+healInfoFilesCount, prometheus.CounterValue, float64(filesCount), vol,
+)
+}
+}
+
+mountBuffer, execMountCheckErr := execMountCheck()
+if execMountCheckErr != nil {
+log.Error(execMountCheckErr)
+} else {
+mounts, err := parseMountOutput(mountBuffer.String())
+if err != nil {
+log.Error(err)
+if mounts != nil && len(mounts) > 0 {
+for _, mount := range mounts {
+ch <- prometheus.MustNewConstMetric(
+mountSuccessful, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+)
+}
+}
+} else {
+for _, mount := range mounts {
+ch <- prometheus.MustNewConstMetric(
+mountSuccessful, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
+)
+
+isWriteable, err := execTouchOnVolumes(mount.mountPoint)
+if err != nil {
+log.Error(err)
+}
+if isWriteable {
+ch <- prometheus.MustNewConstMetric(
+volumeWriteable, prometheus.GaugeValue, float64(1), mount.volume, mount.mountPoint,
+)
+} else {
+ch <- prometheus.MustNewConstMetric(
+volumeWriteable, prometheus.GaugeValue, float64(0), mount.volume, mount.mountPoint,
+)
+}
+}
+}
+}
}

+type mount struct {
+mountPoint string
+volume string
+}
+
+// parseMountOutput parses the output of the 'mount' system command
+func parseMountOutput(mountBuffer string) ([]mount, error) {
+mounts := make([]mount, 0, 2)
+mountRows := strings.Split(mountBuffer, "\n")
+for _, row := range mountRows {
+trimmedRow := strings.TrimSpace(row)
+if len(row) > 3 {
+mountColumns := strings.Split(trimmedRow, " ")
+mounts = append(mounts, mount{mountPoint: mountColumns[2], volume: mountColumns[0]})
+}
+}
+return mounts, nil
+}

// ContainsVolume checks if a slice contains an element
@@ -309,7 +402,7 @@ func main() {
metricPath = flag.String("metrics-path", "/metrics", "URL Endpoint for metrics")
listenAddress = flag.String("listen-address", ":9189", "The address to listen on for HTTP requests.")
showVersion = flag.Bool("version", false, "Prints version information")
-glusterVolumes = flag.String("volumes", "_all", "Comma separated volume names: vol1,vol2,vol3. Default is '_all' to scrape all metrics")
+glusterVolumes = flag.String("volumes", allVolumes, fmt.Sprintf("Comma separated volume names: vol1,vol2,vol3. Default is '%v' to scrape all metrics", allVolumes))
profile = flag.Bool("profile", false, "When profiling reports in gluster are enabled, set ' -profile true' to get more metrics")
)
flag.Parse()
@@ -320,7 +413,7 @@

hostname, err := os.Hostname()
if err != nil {
-log.Fatal(err)
+log.Fatalf("While trying to get Hostname error happened: %v", err)
}
exporter, err := NewExporter(hostname, *glusterPath, *glusterVolumes, *profile)
if err != nil {
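
Purely illustrative, not part of the commit: given the descriptor names built above under the "gluster" namespace, a scrape of the exporter's metrics endpoint would be expected to contain lines roughly like the following; the volume name, mountpoint and sample values are hypothetical.

gluster_heal_info_files_count{volume="gv0"} 0
gluster_mount_successful{mountpoint="/mnt/gv0",volume="gv0"} 1
gluster_volume_writeable{mountpoint="/mnt/gv0",volume="gv0"} 1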
33 changes: 33 additions & 0 deletions main_test.go
@@ -9,3 +9,36 @@ func TestContainsVolume(t *testing.T) {
t.Fatalf("Hasn't found %v in slice %v", expamle, testSlice)
}
}

+type testCases struct {
+mountOutput string
+expected []string
+}
+
+func TestParseMountOutput(t *testing.T) {
+var tests = []testCases{
+{
+mountOutput: "/dev/mapper/cryptroot on / type ext4 (rw,relatime,data=ordered) \n" +
+"/dev/mapper/cryptroot on /var/lib/docker/devicemapper type ext4 (rw,relatime,data=ordered)",
+expected: []string{"/", "/var/lib/docker/devicemapper"},
+},
+{
+mountOutput: "/dev/mapper/cryptroot on / type ext4 (rw,relatime,data=ordered) \n" +
+"",
+expected: []string{"/"},
+},
+}
+for _, c := range tests {
+mounts, err := parseMountOutput(c.mountOutput)
+if err != nil {
+t.Error(err)
+}
+
+for i, mount := range mounts {
+if mount.mountPoint != c.expected[i] {
+t.Errorf("mountpoint is %v and %v was expected", mount.mountPoint, c.expected[i])
+}
+}
+}
+
+}