Skip to content

Commit

Permalink
Add namedprocess_scrape_errors and namedprocess_scrape_permission_err…
Browse files Browse the repository at this point in the history
…ors counters:

The former is incremented when we have a general /proc reading error; the latter
is incremented when a /proc file isn't readable due to permissions.

Add Makefile and .promu.yml: now using promu as build tool.

Add Dockerfile: now make docker can be used to build an image.
Add -procfs argument to specify a different path to read from other than /proc.
  • Loading branch information
ncabatoff committed Oct 23, 2016
1 parent 23bdc82 commit 07423cc
Show file tree
Hide file tree
Showing 13 changed files with 280 additions and 68 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
.*.sw?
process-exporter
.tarballs
process-exporter-*.tar.gz
37 changes: 37 additions & 0 deletions .promu.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# promu build configuration (https://github.com/prometheus/promu).
# NOTE(review): the scraped copy had lost all YAML indentation; restored to
# the conventional promu.yml nesting — confirm against the original file.
repository:
    path: github.com/ncabatoff/process-exporter
build:
    binaries:
        - name: process-exporter
          path: ./cmd/process-exporter
    # Static build: disable cgo-free net resolution via the netgo tag.
    flags: -a -tags netgo
tarball:
    files:
        - LICENSE
crossbuild:
    platforms:
        - linux/amd64
        - linux/386
    # - darwin/amd64
    # - darwin/386
    # - windows/amd64
    # - windows/386
    # - freebsd/amd64
    # - freebsd/386
    # - openbsd/amd64
    # - openbsd/386
    # - netbsd/amd64
    # - netbsd/386
    # - dragonfly/amd64
    # - linux/arm
    # - linux/arm64
    # - freebsd/arm
    # # Temporarily deactivated as golang.org/x/sys does not have syscalls
    # # implemented for that os/platform combination.
    # #- openbsd/arm
    # #- linux/mips64
    # #- linux/mips64le
    # - netbsd/arm
    # - linux/ppc64
    # - linux/ppc64le

17 changes: 17 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Start from a Debian image with the latest version of Go installed
# and a workspace (GOPATH) configured at /go.
FROM golang

# Copy the local package files to the container's workspace.
ADD . /go/src/github.com/ncabatoff/process-exporter

# Build the process-exporter command inside the container.
RUN make -C /go/src/github.com/ncabatoff/process-exporter

# NOTE(review): presumably the exporter runs as root so it can read other
# processes' /proc entries (the README suggests --privileged for full
# monitoring) — confirm root is actually required here.
USER root

# Run the process-exporter command by default when the container starts.
ENTRYPOINT ["/go/src/github.com/ncabatoff/process-exporter/process-exporter"]

# Document that the service listens on port 9256.
EXPOSE 9256
71 changes: 71 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
# Copyright 2015 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): the scraped copy had lost the tab indentation that make
# requires on recipe lines; restored below — confirm against the original.

# Use the Go 1.5 vendoring experiment so packages under vendor/ are used.
GO := GO15VENDOREXPERIMENT=1 go
FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
# All packages in this repository, excluding vendored code.
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)

PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_NAME ?= process-exporter
# Image tag from the current branch name, with / replaced so it is a valid tag.
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))

ifdef DEBUG
bindata_flags = -debug
endif


all: format build test

# Fail if gofmt would change anything outside vendor/.
style:
	@echo ">> checking code style"
	@! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'

test:
	@echo ">> running short tests"
	@$(GO) test -short $(pkgs)

format:
	@echo ">> formatting code"
	@$(GO) fmt $(pkgs)

vet:
	@echo ">> vetting code"
	@$(GO) vet $(pkgs)

build: promu
	@echo ">> building binaries"
	@$(PROMU) build --prefix $(PREFIX)

tarball: promu
	@echo ">> building release tarball"
	@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

crossbuild: promu
	@echo ">> cross-building"
	@$(PROMU) crossbuild
	@$(PROMU) crossbuild tarballs

docker:
	@echo ">> building docker image"
	@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .

# Install promu into the first GOPATH entry for use by the build targets above.
promu:
	@echo ">> fetching promu"
	@GOOS=$(shell uname -s | tr A-Z a-z) \
	GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
	$(GO) get -u github.com/prometheus/promu


.PHONY: all style format build test vet tarball crossbuild docker promu
21 changes: 20 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,29 @@ time.) It has been translated into fractional seconds of CPU consumed during
the polling interval.

Bytes read and written come from /proc/[pid]/io in recent enough kernels.
These correspond to the fields read_bytes and write_bytes respectively.
These correspond to the fields `read_bytes` and `write_bytes` respectively.

An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249

## Docker

A docker image can be created with

```
make docker
```

Then run the Docker image, e.g.

```
docker run --privileged --name pexporter -d -v /proc:/host/proc -p 127.0.0.1:9256:9256 process-exporter:master -procfs /host/proc -procnames chromium-browse,bash,prometheus,prombench,gvim,upstart:-user -namemapping "upstart,(-user)"
```

This will expose metrics on http://localhost:9256/metrics. Leave off the
`127.0.0.1:` to publish on all interfaces. Leave off the `--privileged` and
add the `--user` docker run argument if you only need to monitor processes
belonging to a single user.

## History

An earlier version of this exporter had options to enable auto-discovery of
Expand Down
1 change: 1 addition & 0 deletions VERSION
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
0.0.1
97 changes: 67 additions & 30 deletions cmd/process-exporter/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,18 @@ var (
"number of bytes of memory in use",
[]string{"groupname", "memtype"},
nil)

scrapeErrorsDesc = prometheus.NewDesc(
"namedprocess_scrape_errors",
"non-permission scrape errors",
nil,
nil)

scrapePermissionErrorsDesc = prometheus.NewDesc(
"namedprocess_scrape_permission_errors",
"permission scrape errors (unreadable files under /proc)",
nil,
nil)
)

type (
Expand Down Expand Up @@ -124,6 +136,8 @@ func main() {
"Don't bind, instead just print the metrics once to stdout and exit")
procNames = flag.String("procnames", "",
"comma-seperated list of process names to monitor")
procfsPath = flag.String("procfs", "/proc",
"path to read proc data from")
nameMapping = flag.String("namemapping", "",
"comma-seperated list, alternating process name and capturing regex to apply to cmdline")
children = flag.Bool("children", true,
Expand Down Expand Up @@ -154,15 +168,14 @@ func main() {
for name := range wantNames {
names = append(names, name)
}
log.Println(names)
log.Printf("Reading metrics from %s for procnames: %v", *procfsPath, names)

if err != nil {
log.Fatalf("Error parsing -namemapping argument '%s': %v", *nameMapping, err)
}

pc := NewProcessCollector(names, *children, namemapper)

if err := pc.Init(); err != nil {
pc, err := NewProcessCollector(*procfsPath, names, *children, namemapper)
if err != nil {
log.Fatalf("Error initializing: %v", err)
}

Expand All @@ -172,9 +185,9 @@ func main() {
// We throw away the first result because that first collection primes the pump, and
// otherwise we won't see our counter metrics. This is specific to the implementation
// of NamedProcessCollector.Collect().
fs := fakescraper.NewFakeScraper()
fs.Scrape()
fmt.Print(fs.Scrape())
fscraper := fakescraper.NewFakeScraper()
fscraper.Scrape()
fmt.Print(fscraper.Scrape())
return
}

Expand All @@ -197,6 +210,9 @@ func main() {
type (
NamedProcessCollector struct {
*proc.Grouper
fs *proc.FS
scrapeErrors int
scrapePermissionErrors int
}
)

Expand All @@ -214,12 +230,27 @@ func (nm nameMapperRegex) Name(nacl proc.NameAndCmdline) string {
return nacl.Name
}

func NewProcessCollector(procnames []string, children bool, n proc.Namer) *NamedProcessCollector {
return &NamedProcessCollector{proc.NewGrouper(procnames, children, n)}
}
func NewProcessCollector(
procfsPath string,
procnames []string,
children bool,
n proc.Namer,
) (*NamedProcessCollector, error) {
fs, err := proc.NewFS(procfsPath)
if err != nil {
return nil, err
}
p := &NamedProcessCollector{
Grouper: proc.NewGrouper(procnames, children, n),
fs: fs,
}

_, err = p.Update(p.fs.AllProcs())
if err != nil {
return nil, err
}

func (p *NamedProcessCollector) Init() error {
return p.Update(proc.AllProcs())
return p, nil
}

// Describe implements prometheus.Collector.
Expand All @@ -229,29 +260,35 @@ func (p *NamedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- readBytesDesc
ch <- writeBytesDesc
ch <- membytesDesc
ch <- scrapeErrorsDesc
ch <- scrapePermissionErrorsDesc
}

// Collect implements prometheus.Collector.
func (p *NamedProcessCollector) Collect(ch chan<- prometheus.Metric) {
err := p.Update(proc.AllProcs())
permErrs, err := p.Update(p.fs.AllProcs())
p.scrapePermissionErrors += permErrs
if err != nil {
// TODO inc scrape failure
p.scrapeErrors++
log.Printf("error reading procs: %v", err)
return
}

for gname, gcounts := range p.Groups() {
ch <- prometheus.MustNewConstMetric(numprocsDesc,
prometheus.GaugeValue, float64(gcounts.Procs), gname)
ch <- prometheus.MustNewConstMetric(membytesDesc,
prometheus.GaugeValue, float64(gcounts.Memresident), gname, "resident")
ch <- prometheus.MustNewConstMetric(membytesDesc,
prometheus.GaugeValue, float64(gcounts.Memvirtual), gname, "virtual")
ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
prometheus.CounterValue, gcounts.Cpu, gname)
ch <- prometheus.MustNewConstMetric(readBytesDesc,
prometheus.CounterValue, float64(gcounts.ReadBytes), gname)
ch <- prometheus.MustNewConstMetric(writeBytesDesc,
prometheus.CounterValue, float64(gcounts.WriteBytes), gname)
} else {
for gname, gcounts := range p.Groups() {
ch <- prometheus.MustNewConstMetric(numprocsDesc,
prometheus.GaugeValue, float64(gcounts.Procs), gname)
ch <- prometheus.MustNewConstMetric(membytesDesc,
prometheus.GaugeValue, float64(gcounts.Memresident), gname, "resident")
ch <- prometheus.MustNewConstMetric(membytesDesc,
prometheus.GaugeValue, float64(gcounts.Memvirtual), gname, "virtual")
ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
prometheus.CounterValue, gcounts.Cpu, gname)
ch <- prometheus.MustNewConstMetric(readBytesDesc,
prometheus.CounterValue, float64(gcounts.ReadBytes), gname)
ch <- prometheus.MustNewConstMetric(writeBytesDesc,
prometheus.CounterValue, float64(gcounts.WriteBytes), gname)
}
}
ch <- prometheus.MustNewConstMetric(scrapeErrorsDesc,
prometheus.CounterValue, float64(p.scrapeErrors))
ch <- prometheus.MustNewConstMetric(scrapePermissionErrorsDesc,
prometheus.CounterValue, float64(p.scrapePermissionErrors))
}
13 changes: 7 additions & 6 deletions proc/grouper.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,11 +84,12 @@ func (g *Grouper) checkAncestry(idinfo ProcIdInfo, newprocs map[ProcId]ProcIdInf
}

// Update tracks any new procs that should be according to policy, and updates
// the metrics for already tracked procs.
func (g *Grouper) Update(iter ProcIter) error {
newProcs, err := g.tracker.Update(iter)
// the metrics for already tracked procs. Permission errors are returned as a
// count, and will not affect the error return value.
func (g *Grouper) Update(iter ProcIter) (int, error) {
newProcs, permErrs, err := g.tracker.Update(iter)
if err != nil {
return err
return permErrs, err
}

// Step 1: track any new proc that should be tracked based on its name and cmdline.
Expand All @@ -105,7 +106,7 @@ func (g *Grouper) Update(iter ProcIter) error {

// Step 2: track any untracked new proc that should be tracked because its parent is tracked.
if !g.trackChildren {
return nil
return permErrs, nil
}

for _, idinfo := range untracked {
Expand All @@ -116,7 +117,7 @@ func (g *Grouper) Update(iter ProcIter) error {

g.checkAncestry(idinfo, untracked)
}
return nil
return permErrs, nil
}

// groups returns the aggregate metrics for all groups tracked. This reflects
Expand Down
Loading

0 comments on commit 07423cc

Please sign in to comment.