Mirror of https://github.com/prometheus/statsd_exporter.git (synced 2024-11-22 23:41:00 +00:00)

commit a856251d79 (parent b3bf4d1f8b)

    Bump prometheus/client_golang to v0.9.2

    Signed-off-by: Simon Pasquier <spasquie@redhat.com>

64 changed files with 5300 additions and 1559 deletions
go.mod (9 changed lines)

@@ -6,24 +6,19 @@ require (
 	github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 // indirect
 	github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect
 	github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect
-	github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607
 	github.com/kr/pretty v0.1.0 // indirect
 	github.com/mattn/go-isatty v0.0.4 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.0 // indirect
 	github.com/onsi/ginkgo v1.7.0 // indirect
 	github.com/onsi/gomega v1.4.3 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v0.8.0
-	github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect
-	github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082
-	github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 // indirect
+	github.com/prometheus/client_golang v0.9.2
+	github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/sergi/go-diff v1.0.0 // indirect
 	github.com/sirupsen/logrus v1.0.3 // indirect
 	github.com/stretchr/testify v1.2.2 // indirect
 	golang.org/x/crypto v0.0.0-20170825220121-81e90905daef // indirect
-	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect
 	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
 	gopkg.in/alecthomas/kingpin.v2 v2.2.5
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
go.sum (26 changed lines)

@@ -8,8 +8,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5Vpd
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@@ -27,8 +27,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLzM9Y858MNGCOACTvCW9TSAc=
-github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -36,14 +36,14 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
-github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082 h1:M/45ksQhBkhxI65UXRNvyuF6sV7A08GMYk39aGZQlJQ=
-github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 h1:uZfczEBIA1FZfOQo4/JWgGnMNd/4HVsM9A+B30wtlkA=
-github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sirupsen/logrus v1.0.3 h1:B5C/igNWoiULof20pKfY4VntcIPqKuwEmoLZrabbUrc=
@@ -54,6 +54,8 @@ golang.org/x/crypto v0.0.0-20170825220121-81e90905daef h1:R8ubLIilYRXIXpgjOg2l/E
 golang.org/x/crypto v0.0.0-20170825220121-81e90905daef/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
vendor/github.com/beorn7/perks/quantile/stream.go (34 changed lines, generated, vendored)

@@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
 // is guaranteed to be within (Quantile±Epsilon).
 //
 // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
 	ƒ := func(s *stream, r float64) float64 {
 		var m = math.MaxFloat64
 		var f float64
-		for quantile, epsilon := range targets {
-			if quantile*s.n <= r {
-				f = (2 * epsilon * r) / quantile
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
 			} else {
-				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
 			}
 			if f < m {
 				m = f
@@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
 	return newStream(ƒ)
 }

+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
 // Stream computes quantiles for a stream of float64s. It is not thread-safe by
 // design. Take care when using across multiple goroutines.
 type Stream struct {
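For orientation, the hunk above changes how NewTargeted consumes its quantile/epsilon map on the invariant hot path. Below is a small usage sketch of the perks quantile API that this vendored change optimizes; the surrounding main function and the concrete values are illustrative, not taken from this repository:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Target quantiles and their permissible errors: the same map shape
	// that NewTargeted receives (and now converts to a slice internally).
	targets := map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	s := quantile.NewTargeted(targets)

	// Insert a stream of observations; the invariant function ƒ derived
	// from the targets is evaluated on this hot path.
	for i := 1; i <= 10000; i++ {
		s.Insert(float64(i))
	}

	fmt.Println("p50:", s.Query(0.50))
	fmt.Println("p99:", s.Query(0.99))
}
```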
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore (new file, 1 line, generated, vendored)

@@ -0,0 +1 @@
+cover.dat
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile (new file, 7 lines, generated, vendored)

@@ -0,0 +1,7 @@
+all:
+
+cover:
+	go test -cover -v -coverprofile=cover.dat ./...
+	go tool cover -func cover.dat
+
+.PHONY: cover
vendor/github.com/prometheus/client_golang/AUTHORS.md (deleted, 18 lines, generated, vendored)

@@ -1,18 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Björn Rabenstein <beorn@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Bernerd Schaefer <bj.schaefer@gmail.com>
-* Björn Rabenstein <beorn@soundcloud.com>
-* Daniel Bornkessel <daniel@soundcloud.com>
-* Jeff Younker <jeff@drinktomi.com>
-* Julius Volz <julius.volz@gmail.com>
-* Matt T. Proud <matt.proud@gmail.com>
-* Tobias Schmidt <ts@soundcloud.com>
-
vendor/github.com/prometheus/client_golang/prometheus/collector.go (73 changed lines, generated, vendored)

@@ -29,27 +29,72 @@ type Collector interface {
 	// collected by this Collector to the provided channel and returns once
 	// the last descriptor has been sent. The sent descriptors fulfill the
 	// consistency and uniqueness requirements described in the Desc
-	// documentation. (It is valid if one and the same Collector sends
-	// duplicate descriptors. Those duplicates are simply ignored. However,
-	// two different Collectors must not send duplicate descriptors.) This
-	// method idempotently sends the same descriptors throughout the
-	// lifetime of the Collector. If a Collector encounters an error while
-	// executing this method, it must send an invalid descriptor (created
-	// with NewInvalidDesc) to signal the error to the registry.
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. It may be called concurrently and
+	// therefore must be implemented in a concurrency safe way.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
 	Describe(chan<- *Desc)
 	// Collect is called by the Prometheus registry when collecting
 	// metrics. The implementation sends each collected metric via the
 	// provided channel and returns once the last metric has been sent. The
-	// descriptor of each sent metric is one of those returned by
-	// Describe. Returned metrics that share the same descriptor must differ
-	// in their variable label values. This method may be called
-	// concurrently and must therefore be implemented in a concurrency safe
-	// way. Blocking occurs at the expense of total performance of rendering
-	// all registered metrics. Ideally, Collector implementations support
-	// concurrent readers.
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
 	Collect(chan<- Metric)
 }

+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+//   func (c customCollector) Describe(ch chan<- *Desc) {
+//   	DescribeByCollect(c, ch)
+//   }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collecter (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+	metrics := make(chan Metric)
+	go func() {
+		c.Collect(metrics)
+		close(metrics)
+	}()
+	for m := range metrics {
+		descs <- m.Desc()
+	}
+}
+
 // selfCollector implements Collector for a single Metric so that the Metric
 // collects itself. Add it as an anonymous field to a struct that implements
 // Metric, and call init with the Metric itself as an argument.
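The DescribeByCollect helper added above simplifies custom collectors whose metric set is stable over their lifetime. A hedged sketch of how such a collector could look follows; the queueCollector type, its metric name, and the value source are hypothetical and not part of this diff:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector is a hypothetical custom Collector that reports one gauge.
type queueCollector struct {
	depthDesc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		depthDesc: prometheus.NewDesc(
			"example_queue_depth", // hypothetical metric name
			"Current depth of the example work queue.",
			nil, nil,
		),
	}
}

// Describe delegates to DescribeByCollect, which is valid here because the
// collector always emits the same metric set.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect emits a throw-away const metric on every scrape.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	depth := 42.0 // stand-in for a real queue-depth lookup
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, depth)
}

func main() {
	// Register with the default registry; scraping would then expose it.
	prometheus.MustRegister(newQueueCollector())
}
```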
vendor/github.com/prometheus/client_golang/prometheus/counter.go (191 changed lines, generated, vendored)

@@ -15,6 +15,10 @@ package prometheus

 import (
 	"errors"
+	"math"
+	"sync/atomic"
+
+	dto "github.com/prometheus/client_model/go"
 )

 // Counter is a Metric that represents a single numerical value that only ever
@@ -30,16 +34,8 @@ type Counter interface {
 	Metric
 	Collector

-	// Set is used to set the Counter to an arbitrary value. It is only used
-	// if you have to transfer a value from an external counter into this
-	// Prometheus metric. Do not use it for regular handling of a
-	// Prometheus counter (as it can be used to break the contract of
-	// monotonically increasing values).
-	//
-	// Deprecated: Use NewConstMetric to create a counter for an external
-	// value. A Counter should never be set.
-	Set(float64)
-	// Inc increments the counter by 1.
+	// Inc increments the counter by 1. Use Add to increment it by arbitrary
+	// non-negative values.
 	Inc()
 	// Add adds the given value to the counter. It panics if the value is <
 	// 0.
@@ -50,6 +46,14 @@ type Counter interface {
 type CounterOpts Opts

 // NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
 func NewCounter(opts CounterOpts) Counter {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -57,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
 		nil,
 		opts.ConstLabels,
 	)
-	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
 	result.init(result) // Init self-collection.
 	return result
 }

 type counter struct {
-	value
+	// valBits contains the bits of the represented float64 value, while
+	// valInt stores values that are exact integers. Both have to go first
+	// in the struct to guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+	valInt  uint64
+
+	selfCollector
+	desc *Desc
+
+	labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+	return c.desc
 }

 func (c *counter) Add(v float64) {
 	if v < 0 {
 		panic(errors.New("counter cannot decrease in value"))
 	}
-	c.value.Add(v)
+	ival := uint64(v)
+	if float64(ival) == v {
+		atomic.AddUint64(&c.valInt, ival)
+		return
+	}
+
+	for {
+		oldBits := atomic.LoadUint64(&c.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (c *counter) Inc() {
+	atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+	ival := atomic.LoadUint64(&c.valInt)
+	val := fval + float64(ival)
+
+	return populateMetric(CounterValue, val, c.labelPairs, out)
 }

 // CounterVec is a Collector that bundles a set of Counters that all share the
@@ -78,16 +120,12 @@ func (c *counter) Add(v float64) {
 // if you want to count the same thing partitioned by various dimensions
 // (e.g. number of HTTP requests, partitioned by response code and
 // method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
 type CounterVec struct {
-	*MetricVec
+	*metricVec
 }

 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -96,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 		opts.ConstLabels,
 	)
 	return &CounterVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			result := &counter{value: value{
-				desc:       desc,
-				valType:    CounterValue,
-				labelPairs: makeLabelPairs(desc, lvs),
-			}}
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			}
+			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
 			result.init(result) // Init self-collection.
 			return result
 		}),
 	}
 }

-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
 		return metric.(Counter), err
 	}
 	return nil, err
 }

-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
 		return metric.(Counter), err
 	}
@@ -131,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
 }

 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
-	return m.MetricVec.WithLabelValues(lvs...).(Counter)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
 }

 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
-	return m.MetricVec.With(labels).(Counter)
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }

 // CounterFunc is a Counter whose value is determined at collect time by calling a
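For reference, this is the hunk where CounterVec gains the v0.9-style currying API and the panic-on-error With shortcuts. A brief usage sketch follows; the metric name and label values are made up for the example and do not come from the exporter:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A counter partitioned by response code and method (hypothetical metric).
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "Requests processed, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(requests)

	// The shortcut methods panic on label-cardinality mistakes.
	requests.WithLabelValues("404", "GET").Inc()
	requests.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(42)

	// MustCurryWith pre-sets the "method" label; the curried vector then
	// only needs the remaining "code" label.
	getRequests := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
	getRequests.WithLabelValues("200").Inc()
}
```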
vendor/github.com/prometheus/client_golang/prometheus/desc.go (51 changed lines, generated, vendored)

@@ -16,33 +16,15 @@ package prometheus
 import (
 	"errors"
 	"fmt"
-	"regexp"
 	"sort"
 	"strings"

 	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"

 	dto "github.com/prometheus/client_model/go"
 )

-var (
-	metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
-	labelNameRE  = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-)
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
 // Desc is the descriptor used by every Prometheus Metric. It is essentially
 // the immutable meta-data of a Metric. The normal Metric implementations
 // included in this package manage their Desc under the hood. Users only have to
@@ -78,32 +60,27 @@ type Desc struct {
 	// Help string. Each Desc with the same fqName must have the same
 	// dimHash.
 	dimHash uint64
-	// err is an error that occured during construction. It is reported on
+	// err is an error that occurred during construction. It is reported on
 	// registration time.
 	err error
 }

 // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
 // and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
+// be nil if no such labels should be set. fqName must not be empty.
 //
 // variableLabels only contain the label names. Their label values are variable
 // and therefore not part of the Desc. (They are managed within the Metric.)
 //
 // For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
+// specified in the Desc. See the Collector example for a usage pattern.
 func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
 	d := &Desc{
 		fqName:         fqName,
 		help:           help,
 		variableLabels: variableLabels,
 	}
-	if help == "" {
-		d.err = errors.New("empty help string")
-		return d
-	}
-	if !metricNameRE.MatchString(fqName) {
+	if !model.IsValidMetricName(model.LabelValue(fqName)) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
 		return d
 	}
@@ -116,7 +93,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	// First add only the const label names and sort them...
 	for labelName := range constLabels {
 		if !checkLabelName(labelName) {
-			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
 			return d
 		}
 		labelNames = append(labelNames, labelName)
@@ -127,12 +104,18 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	for _, labelName := range labelNames {
 		labelValues = append(labelValues, constLabels[labelName])
 	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use in len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
 	// Now add the variable label names, but prefix them with something that
 	// cannot be in a regular label name. That prevents matching the label
 	// dimension with a different mix between preset and variable labels.
 	for _, labelName := range variableLabels {
 		if !checkLabelName(labelName) {
-			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
 			return d
 		}
 		labelNames = append(labelNames, "$"+labelName)
@@ -142,6 +125,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 		d.err = errors.New("duplicate label names")
 		return d
 	}
+
 	vh := hashNew()
 	for _, val := range labelValues {
 		vh = hashAdd(vh, val)
@@ -168,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 			Value: proto.String(v),
 		})
 	}
-	sort.Sort(LabelPairSorter(d.constLabelPairs))
+	sort.Sort(labelPairSorter(d.constLabelPairs))
 	return d
 }

@@ -198,8 +182,3 @@ func (d *Desc) String() string {
 		d.variableLabels,
 	)
 }
-
-func checkLabelName(l string) bool {
-	return labelNameRE.MatchString(l) &&
-		!strings.HasPrefix(l, reservedLabelPrefix)
-}
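The desc.go changes above swap the local name regexps for model.IsValidMetricName and add per-metric error messages plus const-label-value validation. A hedged sketch of the NewDesc/const-metric pattern that this validation guards, with illustrative names only:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// NewDesc validates the fully-qualified name and every label name; any
	// problem is recorded in the Desc and reported at registration time.
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("example", "subsystem", "widgets_total"), // hypothetical name
		"Number of widgets processed.",
		[]string{"kind"},                // variable label
		prometheus.Labels{"shard": "0"}, // constant label
	)

	// A const metric built from the Desc; label values follow the
	// variable-label order given above.
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 7, "blue")
	_ = m
}
```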
vendor/github.com/prometheus/client_golang/prometheus/doc.go (92 changed lines, generated, vendored)

@@ -11,13 +11,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// Package prometheus provides metrics primitives to instrument code for
-// monitoring. It also offers a registry for metrics. Sub-packages allow to
-// expose the registered metrics via HTTP (package promhttp) or push them to a
-// Pushgateway (package push).
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
 //
 // All exported functions and methods are safe to be used concurrently unless
-//specified otherwise.
+// specified otherwise.
 //
 // A Basic Example
 //
@@ -26,6 +28,7 @@
 // 	package main
 //
 // 	import (
+// 		"log"
// 		"net/http"
 //
 // 		"github.com/prometheus/client_golang/prometheus"
@@ -59,7 +62,7 @@
 // 		// The Handler function provides a default handler to expose metrics
 // 		// via an HTTP server. "/metrics" is the usual endpoint for that.
 // 		http.Handle("/metrics", promhttp.Handler())
-// 		http.ListenAndServe(":8080", nil)
+// 		log.Fatal(http.ListenAndServe(":8080", nil))
 // 	}
 //
 //
@@ -69,9 +72,12 @@
 // Metrics
 //
 // The number of exported identifiers in this package might appear a bit
-// overwhelming. Hovever, in addition to the basic plumbing shown in the example
+// overwhelming. However, in addition to the basic plumbing shown in the example
 // above, you only need to understand the different metric types and their
-// vector versions for basic usage.
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
 //
 // Above, you have already touched the Counter and the Gauge. There are two more
 // advanced metric types: the Summary and Histogram. A more thorough description
@@ -95,8 +101,8 @@
 // SummaryVec, HistogramVec, and UntypedVec are not.
 //
 // To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
-// HistogramOpts, or UntypedOpts.
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
 //
 // Custom Collectors and constant Metrics
 //
@@ -114,8 +120,18 @@
 // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
 // NewConstSummary (and their respective Must… versions). That will happen in
 // the Collect method. The Describe method has to return separate Desc
-// instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances.
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will marke the Collector “unchecked”. No
+// checks are porformed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situatios where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
 //
 // The Collector example illustrates the use case. You can also look at the
 // source code of the processCollector (mirroring process metrics), the
@@ -129,34 +145,34 @@
 // Advanced Uses of the Registry
 //
 // While MustRegister is the by far most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might
-// cause. As suggested by the name, MustRegister panics if an error occurs. With
-// the Register function, the error is returned and can be handled.
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
 //
 // An error is returned if the registered Collector is incompatible or
 // inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data
-// model. Inconsistencies are ideally detected at registration time, not at
-// collect time. The former will usually be detected at start-up time of a
-// program, while the latter will only happen at scrape time, possibly not even
-// on the first scrape if the inconsistency only becomes relevant later. That is
-// the main reason why a Collector and a Metric have to describe themselves to
-// the registry.
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
 //
 // So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
 // can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in
-// the same way on a custom registry as the global functions Register and
-// Unregister on the default registry.
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
 //
-// There are a number of uses for custom registries: You can use registries
-// with special properties, see NewPedanticRegistry. You can avoid global state,
-// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
 // the same time to expose different metrics in different ways. You can use
 // separate registries for testing purposes.
 //
-// Also note that the DefaultRegistry comes registered with a Collector for Go
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
 // runtime metrics (via NewGoCollector) and a Collector for process metrics (via
 // NewProcessCollector). With a custom registry, you are in control and decide
 // yourself about the Collectors to register.
@@ -166,16 +182,20 @@
 // The Registry implements the Gatherer interface. The caller of the Gather
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp
-// sub-package. (The top-level functions in the prometheus package are
-// deprecated.)
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
 //
 // Pushing to the Pushgateway
 //
 // Function for pushing to the Pushgateway can be found in the push sub-package.
 //
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
 // Other Means of Exposition
 //
-// More ways of exposing metrics can easily be added. Sending metrics to
-// Graphite would be an example that will soon be implemented.
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
 package prometheus
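The rewritten package docs above revolve around the Registerer/Gatherer split (DefaultRegisterer, NewRegistry, promhttp). A minimal sketch of the custom-registry pattern they describe, assuming a hypothetical metric name and listen address:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry instead of the global DefaultRegisterer.
	reg := prometheus.NewRegistry()

	opsProcessed := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_ops_processed_total", // hypothetical metric
		Help: "Operations processed by this example.",
	})
	reg.MustRegister(opsProcessed)
	opsProcessed.Inc()

	// Expose only what this registry gathers.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```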
13  vendor/github.com/prometheus/client_golang/prometheus/fnv.go  (generated, vendored)

@@ -1,3 +1,16 @@
The standard Apache License, Version 2.0 header ("Copyright 2018 The Prometheus
Authors ... limitations under the License.") is added above the otherwise
unchanged code:

package prometheus

// Inline and byte-free variant of hash/fnv's fnv64a.
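As a side note on the fnv64a comment above, here is a small sketch (an
assumption about what such helpers look like, not the vendored code itself) of
the inline, allocation-free FNV-1a pattern it refers to: the hash state is
threaded through plain function calls instead of allocating a hash.Hash64.

package main

import "fmt"

const (
	offset64 = 14695981039346656037 // FNV-1a 64-bit offset basis
	prime64  = 1099511628211        // FNV-1a 64-bit prime
)

// hashNew returns the FNV-1a 64-bit offset basis.
func hashNew() uint64 { return offset64 }

// hashAdd folds one string into the running hash, byte by byte.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	h := hashNew()
	h = hashAdd(h, "http_requests_total")
	fmt.Printf("%x\n", h)
}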
204  vendor/github.com/prometheus/client_golang/prometheus/gauge.go  (generated, vendored)

The Gauge gets its own atomic implementation instead of the generic value type,
SetToCurrentTime joins the interface, GaugeVec switches to the unexported
metricVec, and CurryWith/MustCurryWith are added. The imports now include
"math", "sync/atomic", "time" and dto "github.com/prometheus/client_model/go".
The Inc and Dec doc comments point to Add and Sub for arbitrary values, and the
interface grows:

	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
	SetToCurrentTime()

// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
// the former. For example, the Inc method of the returned Gauge is slower than
// the Inc method of a Counter returned by NewCounter. This matches the typical
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	)
	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
	result.init(result) // Init self-collection.
	return result
}

type gauge struct {
	// valBits contains the bits of the represented float64 value. It has
	// to go first in the struct to guarantee alignment for atomic
	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	valBits uint64

	selfCollector

	desc       *Desc
	labelPairs []*dto.LabelPair
}

func (g *gauge) Desc() *Desc { return g.desc }

func (g *gauge) Set(val float64) {
	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
}

func (g *gauge) SetToCurrentTime() {
	g.Set(float64(time.Now().UnixNano()) / 1e9)
}

func (g *gauge) Inc() { g.Add(1) }

func (g *gauge) Dec() { g.Add(-1) }

func (g *gauge) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&g.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
			return
		}
	}
}

func (g *gauge) Sub(val float64) { g.Add(val * -1) }

func (g *gauge) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
	return populateMetric(GaugeValue, val, g.labelPairs, out)
}

GaugeVec now embeds *metricVec, and NewGaugeVec no longer requires at least one
label name; its factory validates label cardinality and builds a *gauge per
child:

	return &GaugeVec{
		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
			if len(lvs) != len(desc.variableLabels) {
				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
			}
			result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
			result.init(result) // Init self-collection.
			return result
		}),
	}

GetMetricWithLabelValues and GetMetricWith keep their signatures but gain the
full doc comments used by the other vector types (creation on first access,
deletion via Reset/Delete/DeleteLabelValues, error on label-cardinality
mismatch) and call the unexported metricVec methods. WithLabelValues and With
now panic on error instead of delegating to MetricVec:

func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
	g, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return g
}

func (v *GaugeVec) With(labels Labels) Gauge {
	g, err := v.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return g
}

// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. It is
// possible to curry a curried vector, but only with labels not yet used for
// currying before. Metrics are shared between curried and uncurried vectors;
// only one of them must be registered with a given registry (usually the
// uncurried version).
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
	vec, err := v.curryWith(labels)
	if vec != nil {
		return &GaugeVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}

// GaugeFunc is a Gauge whose value is determined at collect time by calling a
// ... (unchanged from here)
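A brief usage sketch (not part of the diff; metric and label names are
illustrative) exercising the new SetToCurrentTime and CurryWith behaviour shown
in the gauge.go changes above.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	lastSeen := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "worker_last_seen_timestamp_seconds",
		Help: "Unix time the worker last reported in.",
	})
	lastSeen.SetToCurrentTime()

	queued := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "ops_queued",
		Help: "Number of queued operations, partitioned by user and type.",
	}, []string{"user", "type"})

	// Currying pre-sets the "user" label; the curried vector only needs "type".
	alice := queued.MustCurryWith(prometheus.Labels{"user": "alice"})
	alice.WithLabelValues("delete").Add(3)
	alice.WithLabelValues("create").Inc()

	fmt.Println("gauges created and updated")
}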
74  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go  (generated, vendored)

The Apache license header is added, and the collector is converted from a
mutable Gauge for goroutines to const metrics built from Descs, with new
descriptors for OS threads and Go version info:

type goCollector struct {
	goroutinesDesc *Desc
	threadsDesc    *Desc
	gcDesc         *Desc
	goInfoDesc     *Desc

	// metrics to describe and collect
	metrics memStatsMetrics
}

// NewGoCollector returns a collector which exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This causes a stop-the-world, which is very short with Go1.9+
// (~25µs). However, with older Go versions, the stop-the-world duration depends
// on the heap size and can be quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
func NewGoCollector() Collector {
	return &goCollector{
		goroutinesDesc: NewDesc(
			"go_goroutines",
			"Number of goroutines that currently exist.",
			nil, nil),
		threadsDesc: NewDesc(
			"go_threads",
			"Number of OS threads created.",
			nil, nil),
		gcDesc: NewDesc(
			"go_gc_duration_seconds",
			"A summary of the GC invocation durations.",
			nil, nil),
		goInfoDesc: NewDesc(
			"go_info",
			"Information about the Go environment.",
			nil, Labels{"version": runtime.Version()}),
		metrics: memStatsMetrics{
			// ... mostly unchanged; notable edits:
			// - sys_bytes help becomes "Number of bytes obtained from system."
			// - heap_released_bytes_total is renamed to heap_released_bytes,
			//   its help drops "Total", and its valType changes from
			//   CounterValue to GaugeValue.
			// - a new gc_cpu_fraction gauge is appended:
			{
				desc: NewDesc(
					memstatNamespace("gc_cpu_fraction"),
					"The fraction of this program's available CPU time used by the GC since the program started.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
				valType: GaugeValue,
			},
		},
	}
}

// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
	ch <- c.goroutinesDesc
	ch <- c.threadsDesc
	ch <- c.gcDesc
	ch <- c.goInfoDesc
	for _, i := range c.metrics {
		ch <- i.desc
	}
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
	n, _ := runtime.ThreadCreateProfile(nil)
	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))

	var stats debug.GCStats
	stats.PauseQuantiles = make([]time.Duration, 5)
	// ... quantile calculation unchanged ...
	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)

	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)

	ms := &runtime.MemStats{}
	runtime.ReadMemStats(ms)
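The same Desc + MustNewConstMetric pattern the goCollector now uses can be
applied to any custom collector. A minimal sketch (hypothetical collector and
metric name, not part of the diff): values are computed at scrape time instead
of being stored in mutable Gauges.

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

type cgoCallsCollector struct {
	desc *prometheus.Desc
}

func newCgoCallsCollector() *cgoCallsCollector {
	return &cgoCallsCollector{
		desc: prometheus.NewDesc(
			"go_cgo_calls_total",
			"Number of cgo calls made by the current process.",
			nil, nil),
	}
}

func (c *cgoCallsCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *cgoCallsCollector) Collect(ch chan<- prometheus.Metric) {
	// Read the value on every scrape; nothing is cached in the collector.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.CounterValue, float64(runtime.NumCgoCall()))
}

func main() {
	prometheus.MustRegister(newCgoCallsCollector())
}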
304  vendor/github.com/prometheus/client_golang/prometheus/histogram.go  (generated, vendored)

The imports gain "runtime" and "sync". The HistogramOpts doc now only requires
Name ("It is mandatory to set Name to a non-empty string. All other fields are
optional and can safely be left at their zero value, although it is strongly
encouraged to set a Help string."), and the long ConstLabels comment is
replaced by shorter guidance: ConstLabels are only used rarely; attaching the
same labels to all metrics is better covered by target labels set by the
scraping Prometheus server or by one specific metric such as build_info, see
https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels.

newHistogram now panics with makeInconsistentCardinalityError(desc.fqName,
desc.variableLabels, labelValues) on a label mismatch and allocates two
bucket-count states:

	h := &histogram{
		desc:        desc,
		upperBounds: opts.Buckets,
		labelPairs:  makeLabelPairs(desc, labelValues),
		counts:      [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
	}
	...
	// Finally we know the final length of h.upperBounds and can make counts
	// for both states:
	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
	h.counts[1].buckets = make([]uint64, len(h.upperBounds))

	h.init(h) // Init self-collection.
	return h

type histogramCounts struct {
	// sumBits contains the bits of the float64 representing the sum of all
	// observations. sumBits and count have to go first in the struct to
	// guarantee alignment for atomic operations.
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	sumBits uint64
	count   uint64
	buckets []uint64
}

type histogram struct {
	// countAndHotIdx is a complicated one. For lock-free yet atomic
	// observations, we need to save the total count of observations again,
	// combined with the index of the currently-hot counts struct, so that
	// we can perform the operation on both values atomically. The least
	// significant bit defines the hot counts struct. The remaining 63 bits
	// represent the total count of observations. This happens under the
	// assumption that the 63bit count will never overflow. Rationale: An
	// observation takes about 30ns. Let's assume it could happen in
	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
	// which is about 3000 years.
	//
	// This has to be first in the struct for 64bit alignment. See
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64

	selfCollector
	desc     *Desc
	writeMtx sync.Mutex // Only used in the Write method.

	upperBounds []float64

	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*histogramCounts
	hotIdx int // Index of currently-hot counts. Only used within Write.

	labelPairs []*dto.LabelPair
}

Observe becomes lock-free with respect to Write:

	i := sort.SearchFloat64s(h.upperBounds, v)

	// We increment h.countAndHotIdx by 2 so that the counter in the upper
	// 63 bits gets incremented by 1. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 2)
	hotCounts := h.counts[n%2]

	if i < len(h.upperBounds) {
		atomic.AddUint64(&hotCounts.buckets[i], 1)
	}
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			break
		}
	}
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hotCounts.count, 1)

Write takes h.writeMtx (it is not on the hot path, so the complication of
making it lock-free isn't worth it), flips the least significant bit of
countAndHotIdx so that new observations land in the other counts struct, and
reads the total observation count from the upper 63 bits. It then spins with
runtime.Gosched() until the now-cold counts struct has caught up with that
count, i.e. until all in-flight observations have finished, emits SampleCount,
SampleSum and the cumulative buckets from the cold struct, and finally folds
the cold counts, sum and buckets back into the hot struct and resets the cold
one to zero.

HistogramVec embeds *metricVec, NewHistogramVec drops the "at least one label
name" requirement, GetMetricWithLabelValues/GetMetricWith return Observer
instead of Histogram (with the full doc comments of the other vector types),
and WithLabelValues/With panic on error:

func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
	h, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return h
}

CurryWith and MustCurryWith are added, returning ObserverVec:

func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
	vec, err := v.curryWith(labels)
	if vec != nil {
		return &HistogramVec{vec}, err
	}
	return nil, err
}

NewConstHistogram now returns an error for an invalid Desc and validates the
label values instead of only checking their number:

	if desc.err != nil {
		return nil, desc.err
	}
	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
		return nil, err
	}
	return &constHistogram{
		desc: desc,
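To make the hot/cold mechanism above concrete, here is a heavily simplified
sketch (illustrative only, not the vendored histogram): the low bit of a single
atomic word selects the "hot" buffer, the remaining bits count started
observations, and the reader swaps buffers and waits for stragglers to finish.

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

type hotCold struct {
	countAndHotIdx uint64    // bit 0: hot index; bits 1..63: observations started
	counts         [2]uint64 // per-buffer completed-observation counters
	writeMtx       sync.Mutex
	hotIdx         int
}

func (h *hotCold) Observe() {
	n := atomic.AddUint64(&h.countAndHotIdx, 2) // count one started observation
	hot := &h.counts[n%2]                       // low bit picks the hot buffer
	atomic.AddUint64(hot, 1)                    // mark the observation complete
}

func (h *hotCold) Snapshot() uint64 {
	h.writeMtx.Lock()
	defer h.writeMtx.Unlock()

	var started uint64
	if h.hotIdx == 0 {
		started = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 // flip hot to #1
		h.hotIdx = 1
	} else {
		started = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // flip back to #0
		h.hotIdx = 0
	}
	cold := &h.counts[1-h.hotIdx]
	// Wait until every started observation has landed in the now-cold buffer.
	for atomic.LoadUint64(cold) != started {
		runtime.Gosched()
	}
	// Fold the cold buffer into the new hot one and reset it, as Write does.
	atomic.AddUint64(&h.counts[h.hotIdx], started)
	atomic.StoreUint64(cold, 0)
	return started
}

func main() {
	var h hotCold
	for i := 0; i < 1000; i++ {
		h.Observe()
	}
	fmt.Println(h.Snapshot()) // 1000
}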
252  vendor/github.com/prometheus/client_golang/prometheus/http.go  (generated, vendored)

The "bytes" and "fmt" imports are dropped. The buffer pool (bufPool, getBuf,
giveBuf) is replaced by a pool of gzip writers:

var gzipPool = sync.Pool{
	New: func() interface{} {
		return gzip.NewWriter(nil)
	},
}

The Handler deprecation note now simply points to promhttp.Handler, and
UninstrumentedHandler is deprecated in favour of
promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}). Its body streams
the encoded metrics directly to the response instead of buffering them,
compressing with a pooled gzip.Writer when the client accepts it:

func UninstrumentedHandler() http.Handler {
	return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
		mfs, err := DefaultGatherer.Gather()
		if err != nil {
			httpError(rsp, err)
			return
		}

		contentType := expfmt.Negotiate(req.Header)
		header := rsp.Header()
		header.Set(contentTypeHeader, string(contentType))

		w := io.Writer(rsp)
		if gzipAccepted(req.Header) {
			header.Set(contentEncodingHeader, "gzip")
			gz := gzipPool.Get().(*gzip.Writer)
			defer gzipPool.Put(gz)

			gz.Reset(w)
			defer gz.Close()

			w = gz
		}

		enc := expfmt.NewEncoder(w, contentType)

		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				httpError(rsp, err)
				return
			}
		}
	})
}

The decorateWriter and nowSeries helpers are removed. The InstrumentHandler
deprecation note is consolidated into one paragraph:

// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following: (1) It uses Summaries
// rather than Histograms. Summaries are not useful if aggregation across
// multiple instances is required. (2) It uses microseconds as unit, which is
// deprecated and should be replaced by seconds. (3) The size of the request is
// calculated in a separate goroutine. Since this calculator requires access to
// the request header, it creates a race with any writes to the header performed
// during request handling. httputil.ReverseProxy is a prominent example for a
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.

The deprecation notes of InstrumentHandlerFunc, InstrumentHandlerWithOpts and
InstrumentHandlerFuncWithOpts likewise point to package promhttp.
InstrumentHandlerFunc now passes explicit summary Objectives
(map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}), and
InstrumentHandlerFuncWithOpts registers each collector via Register, reusing an
existing collector on AlreadyRegisteredError, e.g. for the request counter:

	if err := Register(reqCnt); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqCnt = are.ExistingCollector.(*CounterVec)
		} else {
			panic(err)
		}
	}

The same pattern is applied to reqDur, reqSz and resSz, replacing the previous
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) and
regReqDur := MustRegisterOrGet(reqDur).(Summary)
|
resSz = are.ExistingCollector.(Summary)
|
||||||
regReqSz := MustRegisterOrGet(reqSz).(Summary)
|
} else {
|
||||||
regResSz := MustRegisterOrGet(resSz).(Summary)
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
delegate := &responseWriterDelegator{ResponseWriter: w}
|
||||||
out := make(chan int)
|
out := computeApproximateRequestSize(r)
|
||||||
urlLen := 0
|
|
||||||
if r.URL != nil {
|
|
||||||
urlLen = len(r.URL.String())
|
|
||||||
}
|
|
||||||
go computeApproximateRequestSize(r, out, urlLen)
|
|
||||||
|
|
||||||
_, cn := w.(http.CloseNotifier)
|
_, cn := w.(http.CloseNotifier)
|
||||||
_, fl := w.(http.Flusher)
|
_, fl := w.(http.Flusher)
|
||||||
|
@ -290,39 +263,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
|
||||||
|
|
||||||
method := sanitizeMethod(r.Method)
|
method := sanitizeMethod(r.Method)
|
||||||
code := sanitizeCode(delegate.status)
|
code := sanitizeCode(delegate.status)
|
||||||
regReqCnt.WithLabelValues(method, code).Inc()
|
reqCnt.WithLabelValues(method, code).Inc()
|
||||||
regReqDur.Observe(elapsed)
|
reqDur.Observe(elapsed)
|
||||||
regResSz.Observe(float64(delegate.written))
|
resSz.Observe(float64(delegate.written))
|
||||||
regReqSz.Observe(float64(<-out))
|
reqSz.Observe(float64(<-out))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
|
func computeApproximateRequestSize(r *http.Request) <-chan int {
|
||||||
s += len(r.Method)
|
// Get URL length in the current goroutine to avoid a race condition:
|
||||||
s += len(r.Proto)
|
// a HandlerFunc running in parallel may modify the URL.
|
||||||
for name, values := range r.Header {
|
s := 0
|
||||||
s += len(name)
|
if r.URL != nil {
|
||||||
for _, value := range values {
|
s += len(r.URL.String())
|
||||||
s += len(value)
|
}
|
||||||
|
|
||||||
|
out := make(chan int, 1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
s += len(r.Method)
|
||||||
|
s += len(r.Proto)
|
||||||
|
for name, values := range r.Header {
|
||||||
|
s += len(name)
|
||||||
|
for _, value := range values {
|
||||||
|
s += len(value)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
s += len(r.Host)
|
||||||
s += len(r.Host)
|
|
||||||
|
|
||||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||||
|
|
||||||
if r.ContentLength != -1 {
|
if r.ContentLength != -1 {
|
||||||
s += int(r.ContentLength)
|
s += int(r.ContentLength)
|
||||||
}
|
}
|
||||||
out <- s
|
out <- s
|
||||||
|
close(out)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
type responseWriterDelegator struct {
|
type responseWriterDelegator struct {
|
||||||
http.ResponseWriter
|
http.ResponseWriter
|
||||||
|
|
||||||
handler, method string
|
status int
|
||||||
status int
|
written int64
|
||||||
written int64
|
wroteHeader bool
|
||||||
wroteHeader bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||||
|
@ -488,3 +474,31 @@ func sanitizeCode(s int) string {
|
||||||
return strconv.Itoa(s)
|
return strconv.Itoa(s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
||||||
|
func gzipAccepted(header http.Header) bool {
|
||||||
|
a := header.Get(acceptEncodingHeader)
|
||||||
|
parts := strings.Split(a, ",")
|
||||||
|
for _, part := range parts {
|
||||||
|
part = strings.TrimSpace(part)
|
||||||
|
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpError removes any content-encoding header and then calls http.Error with
|
||||||
|
// the provided error and http.StatusInternalServerError. Error contents are
|
||||||
|
// supposed to be uncompressed plain text. However, same as with a plain
|
||||||
|
// http.Error, any header settings will be void if the header has already been
|
||||||
|
// sent. The error message will still be written to the writer, but it will
|
||||||
|
// probably be of limited use.
|
||||||
|
func httpError(rsp http.ResponseWriter, err error) {
|
||||||
|
rsp.Header().Del(contentEncodingHeader)
|
||||||
|
http.Error(
|
||||||
|
rsp,
|
||||||
|
"An error has occurred while serving metrics:\n\n"+err.Error(),
|
||||||
|
http.StatusInternalServerError,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
85
vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// metricSorter is a sortable slice of *dto.Metric.
|
||||||
|
type metricSorter []*dto.Metric
|
||||||
|
|
||||||
|
func (s metricSorter) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s metricSorter) Swap(i, j int) {
|
||||||
|
s[i], s[j] = s[j], s[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s metricSorter) Less(i, j int) bool {
|
||||||
|
if len(s[i].Label) != len(s[j].Label) {
|
||||||
|
// This should not happen. The metrics are
|
||||||
|
// inconsistent. However, we have to deal with the fact, as
|
||||||
|
// people might use custom collectors or metric family injection
|
||||||
|
// to create inconsistent metrics. So let's simply compare the
|
||||||
|
// number of labels in this case. That will still yield
|
||||||
|
// reproducible sorting.
|
||||||
|
return len(s[i].Label) < len(s[j].Label)
|
||||||
|
}
|
||||||
|
for n, lp := range s[i].Label {
|
||||||
|
vi := lp.GetValue()
|
||||||
|
vj := s[j].Label[n].GetValue()
|
||||||
|
if vi != vj {
|
||||||
|
return vi < vj
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We should never arrive here. Multiple metrics with the same
|
||||||
|
// label set in the same scrape will lead to undefined ingestion
|
||||||
|
// behavior. However, as above, we have to provide stable sorting
|
||||||
|
// here, even for inconsistent metrics. So sort equal metrics
|
||||||
|
// by their timestamp, with missing timestamps (implying "now")
|
||||||
|
// coming last.
|
||||||
|
if s[i].TimestampMs == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if s[j].TimestampMs == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NormalizeMetricFamilies returns a MetricFamily slice with empty
|
||||||
|
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
||||||
|
// the slice, with the contained Metrics sorted within each MetricFamily.
|
||||||
|
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
||||||
|
for _, mf := range metricFamiliesByName {
|
||||||
|
sort.Sort(metricSorter(mf.Metric))
|
||||||
|
}
|
||||||
|
names := make([]string, 0, len(metricFamiliesByName))
|
||||||
|
for name, mf := range metricFamiliesByName {
|
||||||
|
if len(mf.Metric) > 0 {
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
result := make([]*dto.MetricFamily, 0, len(names))
|
||||||
|
for _, name := range names {
|
||||||
|
result = append(result, metricFamiliesByName[name])
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
87
vendor/github.com/prometheus/client_golang/prometheus/labels.go
generated
vendored
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Labels represents a collection of label name -> value mappings. This type is
|
||||||
|
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
||||||
|
// metric vector Collectors, e.g.:
|
||||||
|
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||||
|
//
|
||||||
|
// The other use-case is the specification of constant label pairs in Opts or to
|
||||||
|
// create a Desc.
|
||||||
|
type Labels map[string]string
|
||||||
|
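A small usage sketch for the With(Labels) form mentioned above, as seen from outside the package inside application setup code (the metric name and values are illustrative):

requests := prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "http_requests_total",
	Help: "HTTP requests partitioned by status code and method.",
}, []string{"code", "method"})
prometheus.MustRegister(requests)

// Equivalent to requests.WithLabelValues("404", "GET").Add(42).
requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)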
|
||||||
|
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
||||||
|
// label names.
|
||||||
|
const reservedLabelPrefix = "__"
|
||||||
|
|
||||||
|
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
||||||
|
|
||||||
|
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s: %q has %d variable labels named %q but %d values %q were provided",
|
||||||
|
errInconsistentCardinality, fqName,
|
||||||
|
len(labels), labels,
|
||||||
|
len(labelValues), labelValues,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
|
||||||
|
if len(labels) != expectedNumberOfValues {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s: expected %d label values but got %d in %#v",
|
||||||
|
errInconsistentCardinality, expectedNumberOfValues,
|
||||||
|
len(labels), labels,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, val := range labels {
|
||||||
|
if !utf8.ValidString(val) {
|
||||||
|
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
|
||||||
|
if len(vals) != expectedNumberOfValues {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s: expected %d label values but got %d in %#v",
|
||||||
|
errInconsistentCardinality, expectedNumberOfValues,
|
||||||
|
len(vals), vals,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, val := range vals {
|
||||||
|
if !utf8.ValidString(val) {
|
||||||
|
return fmt.Errorf("label value %q is not valid UTF-8", val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkLabelName(l string) bool {
|
||||||
|
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
|
||||||
|
}
|
90
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
|
@ -15,6 +15,9 @@ package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
@ -43,9 +46,8 @@ type Metric interface {
|
||||||
// While populating dto.Metric, it is the responsibility of the
|
// While populating dto.Metric, it is the responsibility of the
|
||||||
// implementation to ensure validity of the Metric protobuf (like valid
|
// implementation to ensure validity of the Metric protobuf (like valid
|
||||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||||
// recommended to sort labels lexicographically. (Implementers may find
|
// recommended to sort labels lexicographically. Callers of Write should
|
||||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
// still make sure of sorting if they depend on it.
|
||||||
// sure of sorting if they depend on it.
|
|
||||||
Write(*dto.Metric) error
|
Write(*dto.Metric) error
|
||||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||||
// dto.Metric protobuf to save allocations has disappeared. The
|
// dto.Metric protobuf to save allocations has disappeared. The
|
||||||
|
@ -57,8 +59,9 @@ type Metric interface {
|
||||||
// implementation XXX has its own XXXOpts type, but in most cases, it is just
|
// implementation XXX has its own XXXOpts type, but in most cases, it is just
|
||||||
// an alias of this type (which might change when the requirement arises.)
|
// an alias of this type (which might change when the requirement arises.)
|
||||||
//
|
//
|
||||||
// It is mandatory to set Name and Help to a non-empty string. All other fields
|
// It is mandatory to set Name to a non-empty string. All other fields are
|
||||||
// are optional and can safely be left at their zero value.
|
// optional and can safely be left at their zero value, although it is strongly
|
||||||
|
// encouraged to set a Help string.
|
||||||
type Opts struct {
|
type Opts struct {
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||||
// name of the Metric (created by joining these components with
|
// name of the Metric (created by joining these components with
|
||||||
|
@ -69,7 +72,7 @@ type Opts struct {
|
||||||
Subsystem string
|
Subsystem string
|
||||||
Name string
|
Name string
|
||||||
|
|
||||||
// Help provides information about this metric. Mandatory!
|
// Help provides information about this metric.
|
||||||
//
|
//
|
||||||
// Metrics with the same fully-qualified name must have the same Help
|
// Metrics with the same fully-qualified name must have the same Help
|
||||||
// string.
|
// string.
|
||||||
|
@ -79,20 +82,12 @@ type Opts struct {
|
||||||
// with the same fully-qualified name must have the same label names in
|
// with the same fully-qualified name must have the same label names in
|
||||||
// their ConstLabels.
|
// their ConstLabels.
|
||||||
//
|
//
|
||||||
// Note that in most cases, labels have a value that varies during the
|
// ConstLabels are only used rarely. In particular, do not use them to
|
||||||
// lifetime of a process. Those labels are usually managed with a metric
|
// attach the same labels to all your metrics. Those use cases are
|
||||||
// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
|
// better covered by target labels set by the scraping Prometheus
|
||||||
// serve only special purposes. One is for the special case where the
|
// server, or by one specific metric (e.g. a build_info or a
|
||||||
// value of a label does not change during the lifetime of a process,
|
// machine_role metric). See also
|
||||||
// e.g. if the revision of the running binary is put into a
|
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
|
||||||
// label. Another, more advanced purpose is if more than one Collector
|
|
||||||
// needs to collect Metrics with the same fully-qualified name. In that
|
|
||||||
// case, those Metrics must differ in the values of their
|
|
||||||
// ConstLabels. See the Collector examples.
|
|
||||||
//
|
|
||||||
// If the value of a label never changes (not even between binaries),
|
|
||||||
// that label most likely should not be a label at all (but part of the
|
|
||||||
// metric name).
|
|
||||||
ConstLabels Labels
|
ConstLabels Labels
|
||||||
}
|
}
|
||||||
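A sketch of the build_info pattern the ConstLabels comment above points to; the metric name, version and revision values are placeholders:

buildInfo := prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "myapp_build_info", // assumed name
	Help: "Build information, exposed as constant labels with a fixed value of 1.",
	ConstLabels: prometheus.Labels{
		"version":  "1.2.3",    // placeholder
		"revision": "deadbeef", // placeholder
	},
})
buildInfo.Set(1)
prometheus.MustRegister(buildInfo)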
|
|
||||||
|
@ -118,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
|
||||||
return name
|
return name
|
||||||
}
|
}
|
||||||
|
|
||||||
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
|
// labelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||||
// dto.LabelPair pointers. This is useful for implementing the Write method of
|
// dto.LabelPair pointers.
|
||||||
// custom metrics.
|
type labelPairSorter []*dto.LabelPair
|
||||||
type LabelPairSorter []*dto.LabelPair
|
|
||||||
|
|
||||||
func (s LabelPairSorter) Len() int {
|
func (s labelPairSorter) Len() int {
|
||||||
return len(s)
|
return len(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s LabelPairSorter) Swap(i, j int) {
|
func (s labelPairSorter) Swap(i, j int) {
|
||||||
s[i], s[j] = s[j], s[i]
|
s[i], s[j] = s[j], s[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s LabelPairSorter) Less(i, j int) bool {
|
func (s labelPairSorter) Less(i, j int) bool {
|
||||||
return s[i].GetName() < s[j].GetName()
|
return s[i].GetName() < s[j].GetName()
|
||||||
}
|
}
|
||||||
|
|
||||||
type hashSorter []uint64
|
|
||||||
|
|
||||||
func (s hashSorter) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s hashSorter) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s hashSorter) Less(i, j int) bool {
|
|
||||||
return s[i] < s[j]
|
|
||||||
}
|
|
||||||
|
|
||||||
type invalidMetric struct {
|
type invalidMetric struct {
|
||||||
desc *Desc
|
desc *Desc
|
||||||
err error
|
err error
|
||||||
|
@ -164,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
|
||||||
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
||||||
|
|
||||||
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
||||||
|
|
||||||
|
type timestampedMetric struct {
|
||||||
|
Metric
|
||||||
|
t time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m timestampedMetric) Write(pb *dto.Metric) error {
|
||||||
|
e := m.Metric.Write(pb)
|
||||||
|
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
|
||||||
|
// way that it has an explicit timestamp set to the provided Time. This is only
|
||||||
|
// useful in rare cases as the timestamp of a Prometheus metric should usually
|
||||||
|
// be set by the Prometheus server during scraping. Exceptions include mirroring
|
||||||
|
// metrics with given timestamps from other metric
|
||||||
|
// sources.
|
||||||
|
//
|
||||||
|
// NewMetricWithTimestamp works best with MustNewConstMetric,
|
||||||
|
// MustNewConstHistogram, and MustNewConstSummary, see example.
|
||||||
|
//
|
||||||
|
// Currently, the exposition formats used by Prometheus are limited to
|
||||||
|
// millisecond resolution. Thus, the provided time will be rounded down to the
|
||||||
|
// next full millisecond value.
|
||||||
|
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
|
||||||
|
return timestampedMetric{Metric: m, t: t}
|
||||||
|
}
|
||||||
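A sketch of how NewMetricWithTimestamp is typically combined with MustNewConstMetric inside a custom Collector's Collect method; the descriptor name and the mirrored value are assumptions:

var mirroredDesc = prometheus.NewDesc(
	"mirrored_temperature_celsius", // hypothetical name
	"Temperature mirrored from an external source, keeping its original timestamp.",
	nil, nil,
)

// Called from a custom Collector's Collect method.
func emitMirrored(ch chan<- prometheus.Metric, value float64, ts time.Time) {
	ch <- prometheus.NewMetricWithTimestamp(
		ts, prometheus.MustNewConstMetric(mirroredDesc, prometheus.GaugeValue, value),
	)
}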
|
|
52
vendor/github.com/prometheus/client_golang/prometheus/observer.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
// Observer is the interface that wraps the Observe method, which is used by
|
||||||
|
// Histogram and Summary to add observations.
|
||||||
|
type Observer interface {
|
||||||
|
Observe(float64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The ObserverFunc type is an adapter to allow the use of ordinary
|
||||||
|
// functions as Observers. If f is a function with the appropriate
|
||||||
|
// signature, ObserverFunc(f) is an Observer that calls f.
|
||||||
|
//
|
||||||
|
// This adapter is usually used in connection with the Timer type, and there are
|
||||||
|
// two general use cases:
|
||||||
|
//
|
||||||
|
// The most common one is to use a Gauge as the Observer for a Timer.
|
||||||
|
// See the "Gauge" Timer example.
|
||||||
|
//
|
||||||
|
// The more advanced use case is to create a function that dynamically decides
|
||||||
|
// which Observer to use for observing the duration. See the "Complex" Timer
|
||||||
|
// example.
|
||||||
|
type ObserverFunc func(float64)
|
||||||
|
|
||||||
|
// Observe calls f(value). It implements Observer.
|
||||||
|
func (f ObserverFunc) Observe(value float64) {
|
||||||
|
f(value)
|
||||||
|
}
|
||||||
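The "Gauge" Timer case mentioned above looks roughly like this; the gauge name and the batch function are assumptions:

var lastBatchDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "last_batch_duration_seconds", // assumed name
	Help: "Duration of the most recent batch run in seconds.",
})

func runBatch() {
	// Gauge.Set matches ObserverFunc's signature, so the timer writes the
	// elapsed seconds into the gauge when the function returns.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastBatchDuration.Set))
	defer timer.ObserveDuration()
	// ... actual batch work ...
}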
|
|
||||||
|
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
|
||||||
|
type ObserverVec interface {
|
||||||
|
GetMetricWith(Labels) (Observer, error)
|
||||||
|
GetMetricWithLabelValues(lvs ...string) (Observer, error)
|
||||||
|
With(Labels) Observer
|
||||||
|
WithLabelValues(...string) Observer
|
||||||
|
CurryWith(Labels) (ObserverVec, error)
|
||||||
|
MustCurryWith(Labels) ObserverVec
|
||||||
|
|
||||||
|
Collector
|
||||||
|
}
|
218
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
|
@ -13,89 +13,139 @@
|
||||||
|
|
||||||
package prometheus
|
package prometheus
|
||||||
|
|
||||||
import "github.com/prometheus/procfs"
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs"
|
||||||
|
)
|
||||||
|
|
||||||
type processCollector struct {
|
type processCollector struct {
|
||||||
pid int
|
|
||||||
collectFn func(chan<- Metric)
|
collectFn func(chan<- Metric)
|
||||||
pidFn func() (int, error)
|
pidFn func() (int, error)
|
||||||
cpuTotal Counter
|
reportErrors bool
|
||||||
openFDs, maxFDs Gauge
|
cpuTotal *Desc
|
||||||
vsize, rss Gauge
|
openFDs, maxFDs *Desc
|
||||||
startTime Gauge
|
vsize, maxVsize *Desc
|
||||||
|
rss *Desc
|
||||||
|
startTime *Desc
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessCollectorOpts defines the behavior of a process metrics collector
|
||||||
|
// created with NewProcessCollector.
|
||||||
|
type ProcessCollectorOpts struct {
|
||||||
|
// PidFn returns the PID of the process the collector collects metrics
|
||||||
|
// for. It is called upon each collection. By default, the PID of the
|
||||||
|
// current process is used, as determined on construction time by
|
||||||
|
// calling os.Getpid().
|
||||||
|
PidFn func() (int, error)
|
||||||
|
// If non-empty, each of the collected metrics is prefixed by the
|
||||||
|
// provided string and an underscore ("_").
|
||||||
|
Namespace string
|
||||||
|
// If true, any error encountered during collection is reported as an
|
||||||
|
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
|
||||||
|
// and the collected metrics will be incomplete. (Possibly, no metrics
|
||||||
|
// will be collected at all.) While that's usually not desired, it is
|
||||||
|
// appropriate for the common "mix-in" of process metrics, where process
|
||||||
|
// metrics are nice to have, but failing to collect them should not
|
||||||
|
// disrupt the collection of the remaining metrics.
|
||||||
|
ReportErrors bool
|
||||||
}
|
}
|
||||||
|
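A sketch of registering the new-style process collector on a custom registry; the namespace string is an assumption:

reg := prometheus.NewRegistry()
reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
	Namespace:    "myapp", // metrics become myapp_process_...
	ReportErrors: true,    // surface collection failures as invalid metrics
}))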
|
||||||
// NewProcessCollector returns a collector which exports the current state of
|
// NewProcessCollector returns a collector which exports the current state of
|
||||||
// process metrics including cpu, memory and file descriptor usage as well as
|
// process metrics including CPU, memory and file descriptor usage as well as
|
||||||
// the process start time for the given process id under the given namespace.
|
// the process start time. The detailed behavior is defined by the provided
|
||||||
func NewProcessCollector(pid int, namespace string) Collector {
|
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
|
||||||
return NewProcessCollectorPIDFn(
|
// collector for the current process with an empty namespace string and no error
|
||||||
func() (int, error) { return pid, nil },
|
// reporting.
|
||||||
namespace,
|
//
|
||||||
)
|
// Currently, the collector depends on a Linux-style proc filesystem and
|
||||||
}
|
// therefore only exports metrics for Linux.
|
||||||
|
//
|
||||||
|
// Note: An older version of this function had the following signature:
|
||||||
|
//
|
||||||
|
// NewProcessCollector(pid int, namespace string) Collector
|
||||||
|
//
|
||||||
|
// Most commonly, it was called as
|
||||||
|
//
|
||||||
|
// NewProcessCollector(os.Getpid(), "")
|
||||||
|
//
|
||||||
|
// The following call of the current version is equivalent to the above:
|
||||||
|
//
|
||||||
|
// NewProcessCollector(ProcessCollectorOpts{})
|
||||||
|
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||||
|
ns := ""
|
||||||
|
if len(opts.Namespace) > 0 {
|
||||||
|
ns = opts.Namespace + "_"
|
||||||
|
}
|
||||||
|
|
||||||
// NewProcessCollectorPIDFn returns a collector which exports the current state
|
c := &processCollector{
|
||||||
// of process metrics including cpu, memory and file descriptor usage as well
|
reportErrors: opts.ReportErrors,
|
||||||
// as the process start time under the given namespace. The given pidFn is
|
cpuTotal: NewDesc(
|
||||||
// called on each collect and is used to determine the process to export
|
ns+"process_cpu_seconds_total",
|
||||||
// metrics for.
|
"Total user and system CPU time spent in seconds.",
|
||||||
func NewProcessCollectorPIDFn(
|
nil, nil,
|
||||||
pidFn func() (int, error),
|
),
|
||||||
namespace string,
|
openFDs: NewDesc(
|
||||||
) Collector {
|
ns+"process_open_fds",
|
||||||
c := processCollector{
|
"Number of open file descriptors.",
|
||||||
pidFn: pidFn,
|
nil, nil,
|
||||||
collectFn: func(chan<- Metric) {},
|
),
|
||||||
|
maxFDs: NewDesc(
|
||||||
|
ns+"process_max_fds",
|
||||||
|
"Maximum number of open file descriptors.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
vsize: NewDesc(
|
||||||
|
ns+"process_virtual_memory_bytes",
|
||||||
|
"Virtual memory size in bytes.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
maxVsize: NewDesc(
|
||||||
|
ns+"process_virtual_memory_max_bytes",
|
||||||
|
"Maximum amount of virtual memory available in bytes.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
rss: NewDesc(
|
||||||
|
ns+"process_resident_memory_bytes",
|
||||||
|
"Resident memory size in bytes.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
startTime: NewDesc(
|
||||||
|
ns+"process_start_time_seconds",
|
||||||
|
"Start time of the process since unix epoch in seconds.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
cpuTotal: NewCounter(CounterOpts{
|
if opts.PidFn == nil {
|
||||||
Namespace: namespace,
|
pid := os.Getpid()
|
||||||
Name: "process_cpu_seconds_total",
|
c.pidFn = func() (int, error) { return pid, nil }
|
||||||
Help: "Total user and system CPU time spent in seconds.",
|
} else {
|
||||||
}),
|
c.pidFn = opts.PidFn
|
||||||
openFDs: NewGauge(GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Name: "process_open_fds",
|
|
||||||
Help: "Number of open file descriptors.",
|
|
||||||
}),
|
|
||||||
maxFDs: NewGauge(GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Name: "process_max_fds",
|
|
||||||
Help: "Maximum number of open file descriptors.",
|
|
||||||
}),
|
|
||||||
vsize: NewGauge(GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Name: "process_virtual_memory_bytes",
|
|
||||||
Help: "Virtual memory size in bytes.",
|
|
||||||
}),
|
|
||||||
rss: NewGauge(GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Name: "process_resident_memory_bytes",
|
|
||||||
Help: "Resident memory size in bytes.",
|
|
||||||
}),
|
|
||||||
startTime: NewGauge(GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Name: "process_start_time_seconds",
|
|
||||||
Help: "Start time of the process since unix epoch in seconds.",
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set up process metric collection if supported by the runtime.
|
// Set up process metric collection if supported by the runtime.
|
||||||
if _, err := procfs.NewStat(); err == nil {
|
if _, err := procfs.NewStat(); err == nil {
|
||||||
c.collectFn = c.processCollect
|
c.collectFn = c.processCollect
|
||||||
|
} else {
|
||||||
|
c.collectFn = func(ch chan<- Metric) {
|
||||||
|
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
// Describe returns all descriptions of the collector.
|
// Describe returns all descriptions of the collector.
|
||||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||||
ch <- c.cpuTotal.Desc()
|
ch <- c.cpuTotal
|
||||||
ch <- c.openFDs.Desc()
|
ch <- c.openFDs
|
||||||
ch <- c.maxFDs.Desc()
|
ch <- c.maxFDs
|
||||||
ch <- c.vsize.Desc()
|
ch <- c.vsize
|
||||||
ch <- c.rss.Desc()
|
ch <- c.maxVsize
|
||||||
ch <- c.startTime.Desc()
|
ch <- c.rss
|
||||||
|
ch <- c.startTime
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect returns the current state of all metrics of the collector.
|
// Collect returns the current state of all metrics of the collector.
|
||||||
|
@ -103,40 +153,52 @@ func (c *processCollector) Collect(ch chan<- Metric) {
|
||||||
c.collectFn(ch)
|
c.collectFn(ch)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
|
|
||||||
// client allows users to configure the error behavior.
|
|
||||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||||
pid, err := c.pidFn()
|
pid, err := c.pidFn()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p, err := procfs.NewProc(pid)
|
p, err := procfs.NewProc(pid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if stat, err := p.NewStat(); err == nil {
|
if stat, err := p.NewStat(); err == nil {
|
||||||
c.cpuTotal.Set(stat.CPUTime())
|
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
|
||||||
ch <- c.cpuTotal
|
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
|
||||||
c.vsize.Set(float64(stat.VirtualMemory()))
|
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
||||||
ch <- c.vsize
|
|
||||||
c.rss.Set(float64(stat.ResidentMemory()))
|
|
||||||
ch <- c.rss
|
|
||||||
|
|
||||||
if startTime, err := stat.StartTime(); err == nil {
|
if startTime, err := stat.StartTime(); err == nil {
|
||||||
c.startTime.Set(startTime)
|
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
||||||
ch <- c.startTime
|
} else {
|
||||||
|
c.reportError(ch, c.startTime, err)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
if fds, err := p.FileDescriptorsLen(); err == nil {
|
||||||
c.openFDs.Set(float64(fds))
|
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
||||||
ch <- c.openFDs
|
} else {
|
||||||
|
c.reportError(ch, c.openFDs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if limits, err := p.NewLimits(); err == nil {
|
if limits, err := p.NewLimits(); err == nil {
|
||||||
c.maxFDs.Set(float64(limits.OpenFiles))
|
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
||||||
ch <- c.maxFDs
|
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
|
||||||
|
} else {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||||
|
if !c.reportErrors {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
desc = NewInvalidDesc(err)
|
||||||
|
}
|
||||||
|
ch <- NewInvalidMetric(desc, err)
|
||||||
|
}
|
||||||
|
|
729
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
|
@ -15,15 +15,22 @@ package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/prometheus/common/expfmt"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -35,13 +42,14 @@ const (
|
||||||
// DefaultRegisterer and DefaultGatherer are the implementations of the
|
// DefaultRegisterer and DefaultGatherer are the implementations of the
|
||||||
// Registerer and Gatherer interface a number of convenience functions in this
|
// Registerer and Gatherer interface a number of convenience functions in this
|
||||||
// package act on. Initially, both variables point to the same Registry, which
|
// package act on. Initially, both variables point to the same Registry, which
|
||||||
// has a process collector (see NewProcessCollector) and a Go collector (see
|
// has a process collector (currently on Linux only, see NewProcessCollector)
|
||||||
// NewGoCollector) already registered. This approach to keep default instances
|
// and a Go collector (see NewGoCollector, in particular the note about
|
||||||
// as global state mirrors the approach of other packages in the Go standard
|
// stop-the-world implication with Go versions older than 1.9) already
|
||||||
// library. Note that there are caveats. Change the variables with caution and
|
// registered. This approach to keep default instances as global state mirrors
|
||||||
// only if you understand the consequences. Users who want to avoid global state
|
// the approach of other packages in the Go standard library. Note that there
|
||||||
// altogether should not use the convenience function and act on custom
|
// are caveats. Change the variables with caution and only if you understand the
|
||||||
// instances instead.
|
// consequences. Users who want to avoid global state altogether should not use
|
||||||
|
// the convenience functions and act on custom instances instead.
|
||||||
var (
|
var (
|
||||||
defaultRegistry = NewRegistry()
|
defaultRegistry = NewRegistry()
|
||||||
DefaultRegisterer Registerer = defaultRegistry
|
DefaultRegisterer Registerer = defaultRegistry
|
||||||
|
@ -49,7 +57,7 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
MustRegister(NewProcessCollector(os.Getpid(), ""))
|
MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
|
||||||
MustRegister(NewGoCollector())
|
MustRegister(NewGoCollector())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,7 +73,8 @@ func NewRegistry() *Registry {
|
||||||
|
|
||||||
// NewPedanticRegistry returns a registry that checks during collection if each
|
// NewPedanticRegistry returns a registry that checks during collection if each
|
||||||
// collected Metric is consistent with its reported Desc, and if the Desc has
|
// collected Metric is consistent with its reported Desc, and if the Desc has
|
||||||
// actually been registered with the registry.
|
// actually been registered with the registry. Unchecked Collectors (those whose
|
||||||
|
// Describe method does not yield any descriptors) are excluded from the check.
|
||||||
//
|
//
|
||||||
// Usually, a Registry will be happy as long as the union of all collected
|
// Usually, a Registry will be happy as long as the union of all collected
|
||||||
// Metrics is consistent and valid even if some metrics are not consistent with
|
// Metrics is consistent and valid even if some metrics are not consistent with
|
||||||
|
@ -80,7 +89,7 @@ func NewPedanticRegistry() *Registry {
|
||||||
|
|
||||||
// Registerer is the interface for the part of a registry in charge of
|
// Registerer is the interface for the part of a registry in charge of
|
||||||
// registering and unregistering. Users of custom registries should use
|
// registering and unregistering. Users of custom registries should use
|
||||||
// Registerer as type for registration purposes (rather then the Registry type
|
// Registerer as type for registration purposes (rather than the Registry type
|
||||||
// directly). In that way, they are free to use custom Registerer implementation
|
// directly). In that way, they are free to use custom Registerer implementation
|
||||||
// (e.g. for testing purposes).
|
// (e.g. for testing purposes).
|
||||||
type Registerer interface {
|
type Registerer interface {
|
||||||
|
@ -95,8 +104,13 @@ type Registerer interface {
|
||||||
// returned error is an instance of AlreadyRegisteredError, which
|
// returned error is an instance of AlreadyRegisteredError, which
|
||||||
// contains the previously registered Collector.
|
// contains the previously registered Collector.
|
||||||
//
|
//
|
||||||
// It is in general not safe to register the same Collector multiple
|
// A Collector whose Describe method does not yield any Desc is treated
|
||||||
// times concurrently.
|
// as unchecked. Registration will always succeed. No check for
|
||||||
|
// re-registering (see previous paragraph) is performed. Thus, the
|
||||||
|
// caller is responsible for not double-registering the same unchecked
|
||||||
|
// Collector, and for providing a Collector that will not cause
|
||||||
|
// inconsistent metrics on collection. (This would lead to scrape
|
||||||
|
// errors.)
|
||||||
Register(Collector) error
|
Register(Collector) error
|
||||||
// MustRegister works like Register but registers any number of
|
// MustRegister works like Register but registers any number of
|
||||||
// Collectors and panics upon the first registration that causes an
|
// Collectors and panics upon the first registration that causes an
|
||||||
|
@ -105,7 +119,9 @@ type Registerer interface {
|
||||||
// Unregister unregisters the Collector that equals the Collector passed
|
// Unregister unregisters the Collector that equals the Collector passed
|
||||||
// in as an argument. (Two Collectors are considered equal if their
|
// in as an argument. (Two Collectors are considered equal if their
|
||||||
// Describe method yields the same set of descriptors.) The function
|
// Describe method yields the same set of descriptors.) The function
|
||||||
// returns whether a Collector was unregistered.
|
// returns whether a Collector was unregistered. Note that an unchecked
|
||||||
|
// Collector cannot be unregistered (as its Describe method does not
|
||||||
|
// yield any descriptor).
|
||||||
//
|
//
|
||||||
// Note that even after unregistering, it will not be possible to
|
// Note that even after unregistering, it will not be possible to
|
||||||
// register a new Collector that is inconsistent with the unregistered
|
// register a new Collector that is inconsistent with the unregistered
|
||||||
|
@ -123,15 +139,23 @@ type Registerer interface {
|
||||||
type Gatherer interface {
|
type Gatherer interface {
|
||||||
// Gather calls the Collect method of the registered Collectors and then
|
// Gather calls the Collect method of the registered Collectors and then
|
||||||
// gathers the collected metrics into a lexicographically sorted slice
|
// gathers the collected metrics into a lexicographically sorted slice
|
||||||
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
|
// of uniquely named MetricFamily protobufs. Gather ensures that the
|
||||||
// to gather as many metrics as possible. Hence, if a non-nil error is
|
// returned slice is valid and self-consistent so that it can be used
|
||||||
// returned, the returned MetricFamily slice could be nil (in case of a
|
// for valid exposition. As an exception to the strict consistency
|
||||||
// fatal error that prevented any meaningful metric collection) or
|
// requirements described for metric.Desc, Gather will tolerate
|
||||||
// contain a number of MetricFamily protobufs, some of which might be
|
// different sets of label names for metrics of the same metric family.
|
||||||
// incomplete, and some might be missing altogether. The returned error
|
//
|
||||||
// (which might be a MultiError) explains the details. In scenarios
|
// Even if an error occurs, Gather attempts to gather as many metrics as
|
||||||
// where complete collection is critical, the returned MetricFamily
|
// possible. Hence, if a non-nil error is returned, the returned
|
||||||
// protobufs should be disregarded if the returned error is non-nil.
|
// MetricFamily slice could be nil (in case of a fatal error that
|
||||||
|
// prevented any meaningful metric collection) or contain a number of
|
||||||
|
// MetricFamily protobufs, some of which might be incomplete, and some
|
||||||
|
// might be missing altogether. The returned error (which might be a
|
||||||
|
// MultiError) explains the details. Note that this is mostly useful for
|
||||||
|
// debugging purposes. If the gathered protobufs are to be used for
|
||||||
|
// exposition in actual monitoring, it is almost always better to not
|
||||||
|
// expose an incomplete result and instead disregard the returned
|
||||||
|
// MetricFamily protobufs in case the returned error is non-nil.
|
||||||
Gather() ([]*dto.MetricFamily, error)
|
Gather() ([]*dto.MetricFamily, error)
|
||||||
}
|
}
|
||||||
|
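To illustrate the Gatherer contract described above, a rough sketch of gathering from a custom registry and encoding the result with expfmt (error handling deliberately terse):

reg := prometheus.NewRegistry()
// ... register Collectors with reg ...

mfs, err := reg.Gather()
if err != nil {
	// Possibly a MultiError; the families gathered so far may be incomplete.
	log.Println("gather:", err)
}
enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
for _, mf := range mfs {
	if err := enc.Encode(mf); err != nil {
		log.Println("encode:", err)
	}
}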
|
||||||
|
@ -152,38 +176,6 @@ func MustRegister(cs ...Collector) {
|
||||||
DefaultRegisterer.MustRegister(cs...)
|
DefaultRegisterer.MustRegister(cs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
|
|
||||||
// returns the Collector, unless an equal Collector was registered before, in
|
|
||||||
// which case that Collector is returned.
|
|
||||||
//
|
|
||||||
// Deprecated: RegisterOrGet is merely a convenience function for the
|
|
||||||
// implementation as described in the documentation for
|
|
||||||
// AlreadyRegisteredError. As the use case is relatively rare, this function
|
|
||||||
// will be removed in a future version of this package to clean up the
|
|
||||||
// namespace.
|
|
||||||
func RegisterOrGet(c Collector) (Collector, error) {
|
|
||||||
if err := Register(c); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
return are.ExistingCollector, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
|
|
||||||
// an error.
|
|
||||||
//
|
|
||||||
// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
|
|
||||||
// there for details.
|
|
||||||
func MustRegisterOrGet(c Collector) Collector {
|
|
||||||
c, err := RegisterOrGet(c)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
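With RegisterOrGet and MustRegisterOrGet removed, the pattern documented for AlreadyRegisteredError (and used by InstrumentHandlerFuncWithOpts above) takes their place; roughly:

reqCnt := prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "http_requests_total",
	Help: "Total HTTP requests.",
}, []string{"code", "method"})

if err := prometheus.Register(reqCnt); err != nil {
	if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
		// Reuse the Collector that was registered first.
		reqCnt = are.ExistingCollector.(*prometheus.CounterVec)
	} else {
		panic(err)
	}
}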
|
||||||
// Unregister removes the registration of the provided Collector from the
|
// Unregister removes the registration of the provided Collector from the
|
||||||
// DefaultRegisterer.
|
// DefaultRegisterer.
|
||||||
//
|
//
|
||||||
|
@ -201,25 +193,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
|
||||||
return gf()
|
return gf()
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
|
|
||||||
// gathers from the previous DefaultGatherers but then merges the MetricFamily
|
|
||||||
// protobufs returned from the provided hook function with the MetricFamily
|
|
||||||
// protobufs returned from the original DefaultGatherer.
|
|
||||||
//
|
|
||||||
// Deprecated: This function manipulates the DefaultGatherer variable. Consider
|
|
||||||
// the implications, i.e. don't do this concurrently with any uses of the
|
|
||||||
// DefaultGatherer. In the rare cases where you need to inject MetricFamily
|
|
||||||
// protobufs directly, it is recommended to use a custom Registry and combine it
|
|
||||||
// with a custom Gatherer using the Gatherers type (see
|
|
||||||
// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
|
|
||||||
// with previous versions of this package.
|
|
||||||
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
|
|
||||||
DefaultGatherer = Gatherers{
|
|
||||||
DefaultGatherer,
|
|
||||||
GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
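The removed deprecation note recommended a custom Gatherer combined via the Gatherers type instead of SetMetricFamilyInjectionHook; a rough sketch of that replacement:

hook := func() ([]*dto.MetricFamily, error) {
	// Return externally produced MetricFamily protobufs here.
	return nil, nil
}

gatherer := prometheus.Gatherers{
	prometheus.DefaultGatherer,
	prometheus.GathererFunc(hook),
}
http.Handle("/metrics", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))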
|
||||||
// AlreadyRegisteredError is returned by the Register method if the Collector to
|
// AlreadyRegisteredError is returned by the Register method if the Collector to
|
||||||
// be registered has already been registered before, or a different Collector
|
// be registered has already been registered before, or a different Collector
|
||||||
// that collects the same metrics has been registered before. Registration fails
|
// that collects the same metrics has been registered before. Registration fails
|
||||||
|
@ -252,6 +225,13 @@ func (errs MultiError) Error() string {
|
||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Append appends the provided error if it is not nil.
|
||||||
|
func (errs *MultiError) Append(err error) {
|
||||||
|
if err != nil {
|
||||||
|
*errs = append(*errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
||||||
// contained error as error if len(errs) is 1. In all other cases, it returns
|
// contained error as error if len(errs) is 1. In all other cases, it returns
|
||||||
// the MultiError directly. This is helpful for returning a MultiError in a way
|
// the MultiError directly. This is helpful for returning a MultiError in a way
|
||||||
|
@ -276,6 +256,7 @@ type Registry struct {
|
||||||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
||||||
descIDs map[uint64]struct{}
|
descIDs map[uint64]struct{}
|
||||||
dimHashesByName map[string]uint64
|
dimHashesByName map[string]uint64
|
||||||
|
uncheckedCollectors []Collector
|
||||||
pedanticChecksEnabled bool
|
pedanticChecksEnabled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -293,8 +274,13 @@ func (r *Registry) Register(c Collector) error {
|
||||||
close(descChan)
|
close(descChan)
|
||||||
}()
|
}()
|
||||||
r.mtx.Lock()
|
r.mtx.Lock()
|
||||||
defer r.mtx.Unlock()
|
defer func() {
|
||||||
// Coduct various tests...
|
// Drain channel in case of premature return to not leak a goroutine.
|
||||||
|
for range descChan {
|
||||||
|
}
|
||||||
|
r.mtx.Unlock()
|
||||||
|
}()
|
||||||
|
// Conduct various tests...
|
||||||
for desc := range descChan {
|
for desc := range descChan {
|
||||||
|
|
||||||
// Is the descriptor valid at all?
|
// Is the descriptor valid at all?
|
||||||
|
@ -333,9 +319,10 @@ func (r *Registry) Register(c Collector) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Did anything happen at all?
|
// A Collector yielding no Desc at all is considered unchecked.
|
||||||
if len(newDescIDs) == 0 {
|
if len(newDescIDs) == 0 {
|
||||||
return errors.New("collector has no descriptors")
|
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
if existing, exists := r.collectorsByID[collectorID]; exists {
|
if existing, exists := r.collectorsByID[collectorID]; exists {
|
||||||
return AlreadyRegisteredError{
|
return AlreadyRegisteredError{
|
||||||
|
@ -409,31 +396,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
|
||||||
// Gather implements Gatherer.
|
// Gather implements Gatherer.
|
||||||
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
||||||
var (
|
var (
|
||||||
metricChan = make(chan Metric, capMetricChan)
|
checkedMetricChan = make(chan Metric, capMetricChan)
|
||||||
metricHashes = map[uint64]struct{}{}
|
uncheckedMetricChan = make(chan Metric, capMetricChan)
|
||||||
dimHashes = map[string]uint64{}
|
metricHashes = map[uint64]struct{}{}
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
errs MultiError // The collected errors to return in the end.
|
errs MultiError // The collected errors to return in the end.
|
||||||
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
||||||
)
|
)
|
||||||
|
|
||||||
r.mtx.RLock()
|
r.mtx.RLock()
|
||||||
|
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
|
||||||
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
||||||
|
checkedCollectors := make(chan Collector, len(r.collectorsByID))
|
||||||
// Scatter.
|
uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
|
||||||
// (Collectors could be complex and slow, so we call them all at once.)
|
|
||||||
wg.Add(len(r.collectorsByID))
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(metricChan)
|
|
||||||
}()
|
|
||||||
for _, collector := range r.collectorsByID {
|
for _, collector := range r.collectorsByID {
|
||||||
go func(collector Collector) {
|
checkedCollectors <- collector
|
||||||
defer wg.Done()
|
}
|
||||||
collector.Collect(metricChan)
|
for _, collector := range r.uncheckedCollectors {
|
||||||
}(collector)
|
uncheckedCollectors <- collector
|
||||||
}
|
}
|
||||||
|
|
||||||
// In case pedantic checks are enabled, we have to copy the map before
|
// In case pedantic checks are enabled, we have to copy the map before
|
||||||
// giving up the RLock.
|
// giving up the RLock.
|
||||||
if r.pedanticChecksEnabled {
|
if r.pedanticChecksEnabled {
|
@@ -442,127 +423,258 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 			registeredDescIDs[id] = struct{}{}
 		}
 	}

 	r.mtx.RUnlock()

-	// Drain metricChan in case of premature return.
+	wg.Add(goroutineBudget)

+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
 	defer func() {
-		for _ = range metricChan {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
 		}
 	}()

-	// Gather.
+	// Copy the channel references so we can nil them out later to remove
-	for metric := range metricChan {
+	// them from the select statements below.
-		// This could be done concurrently, too, but it required locking
+	cmc := checkedMetricChan
-		// of metricFamiliesByName (and of metricHashes if checks are
+	umc := uncheckedMetricChan
-		// enabled). Most likely not worth it.
-		desc := metric.Desc()
+	for {
-		dtoMetric := &dto.Metric{}
+		select {
-		if err := metric.Write(dtoMetric); err != nil {
+		case metric, ok := <-cmc:
-			errs = append(errs, fmt.Errorf(
+			if !ok {
-				"error collecting metric %v: %s", desc, err,
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
 			))
-			continue
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already as many goroutines started as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
 		}
-		metricFamily, ok := metricFamiliesByName[desc.fqName]
+		// Once both checkedMetricChan and uncheckdMetricChan are closed
-		if ok {
+		// and drained, the contraption above will nil out cmc and umc,
-			if metricFamily.GetHelp() != desc.help {
+		// and then we can leave the collect loop here.
-				errs = append(errs, fmt.Errorf(
+		if cmc == nil && umc == nil {
-					"collected metric %s %s has help %q but should have %q",
+			break
-					desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
-				))
-				continue
-			}
-			// TODO(beorn7): Simplify switch once Desc has type.
-			switch metricFamily.GetType() {
-			case dto.MetricType_COUNTER:
-				if dtoMetric.Counter == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Counter",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			case dto.MetricType_GAUGE:
-				if dtoMetric.Gauge == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Gauge",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			case dto.MetricType_SUMMARY:
-				if dtoMetric.Summary == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Summary",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			case dto.MetricType_UNTYPED:
-				if dtoMetric.Untyped == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be Untyped",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			case dto.MetricType_HISTOGRAM:
-				if dtoMetric.Histogram == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Histogram",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			default:
-				panic("encountered MetricFamily with invalid type")
-			}
-		} else {
-			metricFamily = &dto.MetricFamily{}
-			metricFamily.Name = proto.String(desc.fqName)
-			metricFamily.Help = proto.String(desc.help)
-			// TODO(beorn7): Simplify switch once Desc has type.
-			switch {
-			case dtoMetric.Gauge != nil:
-				metricFamily.Type = dto.MetricType_GAUGE.Enum()
-			case dtoMetric.Counter != nil:
-				metricFamily.Type = dto.MetricType_COUNTER.Enum()
-			case dtoMetric.Summary != nil:
-				metricFamily.Type = dto.MetricType_SUMMARY.Enum()
-			case dtoMetric.Untyped != nil:
-				metricFamily.Type = dto.MetricType_UNTYPED.Enum()
-			case dtoMetric.Histogram != nil:
-				metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
-			default:
-				errs = append(errs, fmt.Errorf(
-					"empty metric collected: %s", dtoMetric,
-				))
-				continue
-			}
-			metricFamiliesByName[desc.fqName] = metricFamily
 		}
-		if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
-			errs = append(errs, err)
-			continue
-		}
-		if r.pedanticChecksEnabled {
-			// Is the desc registered at all?
-			if _, exist := registeredDescIDs[desc.id]; !exist {
-				errs = append(errs, fmt.Errorf(
-					"collected metric %s %s with unregistered descriptor %s",
-					metricFamily.GetName(), dtoMetric, desc,
-				))
-				continue
-			}
-			if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
-				errs = append(errs, err)
-				continue
-			}
-		}
-		metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
 	}
-	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
+func WriteToTextfile(filename string, g Gatherer) error {
+	tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+	if err != nil {
+		return err
+	}
+	defer os.Remove(tmp.Name())
+
+	mfs, err := g.Gather()
+	if err != nil {
+		return err
+	}
+	for _, mf := range mfs {
+		if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+			return err
+		}
+	}
+	if err := tmp.Close(); err != nil {
+		return err
+	}
+
+	if err := os.Chmod(tmp.Name(), 0644); err != nil {
+		return err
+	}
+	return os.Rename(tmp.Name(), filename)
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	// Wrapped metrics collected by an unchecked Collector can have an
+	// invalid Desc.
+	if desc.err != nil {
+		return desc.err
+	}
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %s", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok { // Existing name.
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
+		}
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else { // New name.
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+			return err
+		}
+		metricFamiliesByName[desc.fqName] = metricFamily
+	}
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
 }

 // Gatherers is a slice of Gatherer instances that implements the Gatherer
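A minimal sketch of the WriteToTextfile helper added in this hunk, assuming a registry populated with the Go runtime collector; the output path is an example and only the ".prom" suffix matters to the node exporter's textfile collector:

    package main

    import (
        "log"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewGoCollector())

        // Gathers from reg, writes the text exposition atomically via a temp file.
        if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/example.prom", reg); err != nil {
            log.Fatal(err)
        }
    }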
@@ -588,7 +700,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
 	var (
 		metricFamiliesByName = map[string]*dto.MetricFamily{}
 		metricHashes         = map[uint64]struct{}{}
-		dimHashes            = map[string]uint64{}
 		errs MultiError // The collected errors to return in the end.
 	)

@@ -625,10 +736,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
 				existingMF.Name = mf.Name
 				existingMF.Help = mf.Help
 				existingMF.Type = mf.Type
+				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+					errs = append(errs, err)
+					continue
+				}
 				metricFamiliesByName[mf.GetName()] = existingMF
 			}
 			for _, m := range mf.Metric {
-				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
 					errs = append(errs, err)
 					continue
 				}

@@ -636,88 +751,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
 			}
 		}
 	}
-	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }

-// metricSorter is a sortable slice of *dto.Metric.
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
-type metricSorter []*dto.Metric
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
-func (s metricSorter) Len() int {
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
-	return len(s)
+	var (
-}
+		newName              = mf.GetName()
+		newType              = mf.GetType()
-func (s metricSorter) Swap(i, j int) {
+		newNameWithoutSuffix = ""
-	s[i], s[j] = s[j], s[i]
+	)
-}
+	switch {
+	case strings.HasSuffix(newName, "_count"):
-func (s metricSorter) Less(i, j int) bool {
+		newNameWithoutSuffix = newName[:len(newName)-6]
-	if len(s[i].Label) != len(s[j].Label) {
+	case strings.HasSuffix(newName, "_sum"):
-		// This should not happen. The metrics are
+		newNameWithoutSuffix = newName[:len(newName)-4]
-		// inconsistent. However, we have to deal with the fact, as
+	case strings.HasSuffix(newName, "_bucket"):
-		// people might use custom collectors or metric family injection
+		newNameWithoutSuffix = newName[:len(newName)-7]
-		// to create inconsistent metrics. So let's simply compare the
-		// number of labels in this case. That will still yield
-		// reproducible sorting.
-		return len(s[i].Label) < len(s[j].Label)
 	}
-	for n, lp := range s[i].Label {
+	if newNameWithoutSuffix != "" {
-		vi := lp.GetValue()
+		if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
-		vj := s[j].Label[n].GetValue()
+			switch existingMF.GetType() {
-		if vi != vj {
+			case dto.MetricType_SUMMARY:
-			return vi < vj
+				if !strings.HasSuffix(newName, "_bucket") {
+					return fmt.Errorf(
+						"collected metric named %q collides with previously collected summary named %q",
+						newName, newNameWithoutSuffix,
+					)
+				}
+			case dto.MetricType_HISTOGRAM:
+				return fmt.Errorf(
+					"collected metric named %q collides with previously collected histogram named %q",
+					newName, newNameWithoutSuffix,
+				)
+			}
 		}
 	}
+	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
-	// We should never arrive here. Multiple metrics with the same
+		if _, ok := mfs[newName+"_count"]; ok {
-	// label set in the same scrape will lead to undefined ingestion
+			return fmt.Errorf(
-	// behavior. However, as above, we have to provide stable sorting
+				"collected histogram or summary named %q collides with previously collected metric named %q",
-	// here, even for inconsistent metrics. So sort equal metrics
+				newName, newName+"_count",
-	// by their timestamp, with missing timestamps (implying "now")
+			)
-	// coming last.
+		}
-	if s[i].TimestampMs == nil {
+		if _, ok := mfs[newName+"_sum"]; ok {
-		return false
+			return fmt.Errorf(
-	}
+				"collected histogram or summary named %q collides with previously collected metric named %q",
-	if s[j].TimestampMs == nil {
+				newName, newName+"_sum",
-		return true
+			)
-	}
-	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// normalizeMetricFamilies returns a MetricFamily slice whith empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
-	for _, mf := range metricFamiliesByName {
-		sort.Sort(metricSorter(mf.Metric))
-	}
-	names := make([]string, 0, len(metricFamiliesByName))
-	for name, mf := range metricFamiliesByName {
-		if len(mf.Metric) > 0 {
-			names = append(names, name)
 		}
 	}
-	sort.Strings(names)
+	if newType == dto.MetricType_HISTOGRAM {
-	result := make([]*dto.MetricFamily, 0, len(names))
+		if _, ok := mfs[newName+"_bucket"]; ok {
-	for _, name := range names {
+			return fmt.Errorf(
-		result = append(result, metricFamiliesByName[name])
+				"collected histogram named %q collides with previously collected metric named %q",
+				newName, newName+"_bucket",
+			)
+		}
 	}
-	return result
+	return nil
 }

 // checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
-// name. If the resulting hash is alread in the provided metricHashes, an error
+// name. If the resulting hash is already in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// is returned. If not, it is added to metricHashes.
-// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
-// doesn't yet contain a hash for the provided MetricFamily, it is
-// added. Otherwise, an error is returned if the existing dimHashes in not equal
-// the calculated dimHash.
 func checkMetricConsistency(
 	metricFamily *dto.MetricFamily,
 	dtoMetric *dto.Metric,
 	metricHashes map[uint64]struct{},
-	dimHashes map[string]uint64,
 ) error {
+	name := metricFamily.GetName()
+
 	// Type consistency with metric family.
 	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
 		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
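A sketch of what the new checkSuffixCollisions guard catches, under the assumption that both metrics are registered with the same registry; the metric names are made up for the example:

    hist := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name: "job_duration_seconds",
        Help: "Duration of jobs.",
    })
    collider := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "job_duration_seconds_count", // clashes with the histogram's implicit _count series
        Help: "Collides with the histogram above.",
    })

    reg := prometheus.NewRegistry()
    reg.MustRegister(hist, collider) // registration itself still succeeds

    if _, err := reg.Gather(); err != nil {
        // err now reports the suffix collision instead of producing an
        // exposition the Prometheus server cannot ingest cleanly.
        fmt.Println(err)
    }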
@@ -725,41 +832,65 @@ func checkMetricConsistency(
 		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
 		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
 		return fmt.Errorf(
-			"collected metric %s %s is not a %s",
+			"collected metric %q { %s} is not a %s",
-			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+			name, dtoMetric, metricFamily.GetType(),
 		)
 	}

-	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	previousLabelName := ""
+	for _, labelPair := range dtoMetric.GetLabel() {
+		labelName := labelPair.GetName()
+		if labelName == previousLabelName {
+			return fmt.Errorf(
+				"collected metric %q { %s} has two or more labels with the same name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if !checkLabelName(labelName) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label with an invalid name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if dtoMetric.Summary != nil && labelName == quantileLabel {
+			return fmt.Errorf(
+				"collected metric %q { %s} must not have an explicit %q label",
+				name, dtoMetric, quantileLabel,
+			)
+		}
+		if !utf8.ValidString(labelPair.GetValue()) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+				name, dtoMetric, labelName, labelPair.GetValue())
+		}
+		previousLabelName = labelName
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same labels)?
 	h := hashNew()
-	h = hashAdd(h, metricFamily.GetName())
+	h = hashAdd(h, name)
 	h = hashAddByte(h, separatorByte)
-	dh := hashNew()
 	// Make sure label pairs are sorted. We depend on it for the consistency
 	// check.
-	sort.Sort(LabelPairSorter(dtoMetric.Label))
+	if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
+		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+		copy(copiedLabels, dtoMetric.Label)
+		sort.Sort(labelPairSorter(copiedLabels))
+		dtoMetric.Label = copiedLabels
+	}
 	for _, lp := range dtoMetric.Label {
+		h = hashAdd(h, lp.GetName())
+		h = hashAddByte(h, separatorByte)
 		h = hashAdd(h, lp.GetValue())
 		h = hashAddByte(h, separatorByte)
-		dh = hashAdd(dh, lp.GetName())
-		dh = hashAddByte(dh, separatorByte)
 	}
 	if _, exists := metricHashes[h]; exists {
 		return fmt.Errorf(
-			"collected metric %s %s was collected before with the same name and label values",
+			"collected metric %q { %s} was collected before with the same name and label values",
-			metricFamily.GetName(), dtoMetric,
+			name, dtoMetric,
 		)
 	}
-	if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
-		if dimHash != dh {
-			return fmt.Errorf(
-				"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
-				metricFamily.GetName(), dtoMetric,
-			)
-		}
-	} else {
-		dimHashes[metricFamily.GetName()] = dh
-	}
 	metricHashes[h] = struct{}{}
 	return nil
 }

@@ -778,8 +909,8 @@ func checkDescConsistency(
 	}

 	// Is the desc consistent with the content of the metric?
-	lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
-	lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+	copy(lpsFromDesc, desc.constLabelPairs)
 	for _, l := range desc.variableLabels {
 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
 			Name: proto.String(l),

@@ -791,7 +922,7 @@ func checkDescConsistency(
 			metricFamily.GetName(), dtoMetric, desc,
 		)
 	}
-	sort.Sort(LabelPairSorter(lpsFromDesc))
+	sort.Sort(labelPairSorter(lpsFromDesc))
 	for i, lpFromDesc := range lpsFromDesc {
 		lpFromMetric := dtoMetric.Label[i]
 		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
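A short sketch of how Gatherers (whose Gather method is touched above) can merge the default registry with a pedantic one; the gauge is a placeholder and the listen address is arbitrary:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewPedanticRegistry()
        reg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "example_build_info",
            Help: "Hypothetical build info metric.",
        }))

        // Inconsistent or duplicate families across the gatherers are reported
        // by Gather as a MultiError.
        g := prometheus.Gatherers{prometheus.DefaultGatherer, reg}
        http.Handle("/metrics", promhttp.HandlerFor(g, promhttp.HandlerOpts{}))
        http.ListenAndServe(":9099", nil)
    }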
vendor/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored; 194 lines changed)

@@ -36,7 +36,10 @@ const quantileLabel = "quantile"
 //
 // A typical use-case is the observation of request latencies. By default, a
 // Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations.
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
 //
 // Note that the rank estimations cannot be aggregated in a meaningful way with
 // the Prometheus query language (i.e. you cannot average or add them). If you

@@ -54,6 +57,9 @@ type Summary interface {
 }

 // DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
 var (
 	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

@@ -75,8 +81,10 @@ const (
 )

 // SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
+// mandatory to set Name to a non-empty string. While all other fields are
-// optional and can safely be left at their zero value.
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v0.10 of the library.
 type SummaryOpts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Summary (created by joining these components with

@@ -87,35 +95,39 @@ type SummaryOpts struct {
 	Subsystem string
 	Name      string

-	// Help provides information about this Summary. Mandatory!
+	// Help provides information about this Summary.
 	//
 	// Metrics with the same fully-qualified name must have the same Help
 	// string.
 	Help string

-	// ConstLabels are used to attach fixed labels to this
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
-	// Summary. Summaries with the same fully-qualified name must have the
+	// with the same fully-qualified name must have the same label names in
-	// same label names in their ConstLabels.
+	// their ConstLabels.
 	//
-	// Note that in most cases, labels have a value that varies during the
+	// Due to the way a Summary is represented in the Prometheus text format
-	// lifetime of a process. Those labels are usually managed with a
+	// and how it is handled by the Prometheus server internally, “quantile”
-	// SummaryVec. ConstLabels serve only special purposes. One is for the
+	// is an illegal label name. Construction of a Summary or SummaryVec
-	// special case where the value of a label does not change during the
+	// will panic if this label name is used in ConstLabels.
-	// lifetime of a process, e.g. if the revision of the running binary is
-	// put into a label. Another, more advanced purpose is if more than one
-	// Collector needs to collect Summaries with the same fully-qualified
-	// name. In that case, those Summaries must differ in the values of
-	// their ConstLabels. See the Collector examples.
 	//
-	// If the value of a label never changes (not even between binaries),
+	// ConstLabels are only used rarely. In particular, do not use them to
-	// that label most likely should not be a label at all (but part of the
+	// attach the same labels to all your metrics. Those use cases are
-	// metric name).
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
 	ConstLabels Labels

 	// Objectives defines the quantile rank estimates with their respective
-	// absolute error. If Objectives[q] = e, then the value reported
+	// absolute error. If Objectives[q] = e, then the value reported for q
-	// for q will be the φ-quantile value for some φ between q-e and q+e.
+	// will be the φ-quantile value for some φ between q-e and q+e. The
-	// The default value is DefObjectives.
+	// default value is DefObjectives. It is used if Objectives is left at
+	// its zero value (i.e. nil). To create a Summary without Objectives,
+	// set it to an empty map (i.e. map[float64]float64{}).
+	//
+	// Deprecated: Note that the current value of DefObjectives is
+	// deprecated. It will be replaced by an empty map in v0.10 of the
+	// library. Please explicitly set Objectives to the desired value.
 	Objectives map[float64]float64

 	// MaxAge defines the duration for which an observation stays relevant

@@ -169,7 +181,7 @@ func NewSummary(opts SummaryOpts) Summary {

 func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 	if len(desc.variableLabels) != len(labelValues) {
-		panic(errInconsistentCardinality)
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
 	}

 	for _, n := range desc.variableLabels {

@@ -183,7 +195,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 		}
 	}

-	if len(opts.Objectives) == 0 {
+	if opts.Objectives == nil {
 		opts.Objectives = DefObjectives
 	}

@@ -390,13 +402,21 @@ func (s quantSort) Less(i, j int) bool {
 // (e.g. HTTP request latencies, partitioned by status code and method). Create
 // instances with NewSummaryVec.
 type SummaryVec struct {
-	*MetricVec
+	*metricVec
 }

 // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names. At least one label name must be
+// partitioned by the given label names.
-// provided.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
 func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	for _, ln := range labelNames {
+		if ln == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,

@@ -404,47 +424,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
 		opts.ConstLabels,
 	)
 	return &SummaryVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
 			return newSummary(desc, opts, lvs...)
 		}),
 	}
 }

-// GetMetricWithLabelValues replaces the method of the same name in
+// GetMetricWithLabelValues returns the Summary for the given slice of label
-// MetricVec. The difference is that this method returns a Summary and not a
+// values (same order as the VariableLabels in Desc). If that combination of
-// Metric so that no type conversion is required.
+// label values is accessed for the first time, a new Summary is created.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+//
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
-		return metric.(Summary), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }

-// GetMetricWith replaces the method of the same name in MetricVec. The
+// GetMetricWith returns the Summary for the given Labels map (the label names
-// difference is that this method returns a Summary and not a Metric so that no
+// must match those of the VariableLabels in Desc). If that label map is
-// type conversion is required.
+// accessed for the first time, a new Summary is created. Implications of
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+// creating a Summary without using it and keeping the Summary for later use are
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
-		return metric.(Summary), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }

 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
+// GetMetricWithLabelValues would have returned an error. Not returning an
-// error, WithLabelValues allows shortcuts like
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
-	return m.MetricVec.WithLabelValues(lvs...).(Summary)
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
 }

 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
+// returned an error. Not returning an error allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
+func (v *SummaryVec) With(labels Labels) Observer {
-	return m.MetricVec.With(labels).(Summary)
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }

 type constSummary struct {

@@ -497,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
 //     map[float64]float64{0.5: 0.23, 0.99: 0.56}
 //
 // NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
 func NewConstSummary(
 	desc *Desc,
 	count uint64,

@@ -505,8 +594,11 @@ func NewConstSummary(
 	quantiles map[float64]float64,
 	labelValues ...string,
 ) (Metric, error) {
-	if len(desc.variableLabels) != len(labelValues) {
+	if desc.err != nil {
-		return nil, errInconsistentCardinality
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
 	}
 	return &constSummary{
 		desc: desc,
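A minimal sketch of using the reworked SummaryVec API, with Objectives set explicitly (recommended above because the default changes in v0.10) and the new currying methods; the metric and label names are illustrative:

    reqDur := prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "http_request_duration_seconds",
            Help:       "Request latencies, partitioned by handler and method.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        },
        []string{"handler", "method"},
    )
    prometheus.MustRegister(reqDur)

    // WithLabelValues now returns an Observer rather than a Summary.
    reqDur.WithLabelValues("/api", "GET").Observe(0.042)

    // MustCurryWith pre-sets one label; the curried vector shares its metrics
    // with the uncurried one.
    getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
    getOnly.WithLabelValues("/api").Observe(0.021)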
vendor/github.com/prometheus/client_golang/prometheus/timer.go (generated, vendored, new file; 54 lines added)

@@ -0,0 +1,54 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+//    func TimeMe() {
+//        timer := NewTimer(myHistogram)
+//        defer timer.ObserveDuration()
+//        // Do actual work.
+//    }
+func NewTimer(o Observer) *Timer {
+	return &Timer{
+		begin:    time.Now(),
+		observer: o,
+	}
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func (t *Timer) ObserveDuration() time.Duration {
+	d := time.Since(t.begin)
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
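A small usage sketch for the Timer type added above, assuming a Summary registered elsewhere in the program; the metric name is a placeholder:

    var requestDuration = prometheus.NewSummary(prometheus.SummaryOpts{
        Name:       "example_request_duration_seconds",
        Help:       "Time spent handling a request.",
        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
    })

    func init() {
        prometheus.MustRegister(requestDuration)
    }

    func handle() {
        // A Summary satisfies the Observer interface expected by NewTimer.
        timer := prometheus.NewTimer(requestDuration)
        defer timer.ObserveDuration()
        // ... actual work ...
    }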
vendor/github.com/prometheus/client_golang/prometheus/untyped.go (generated, vendored; 102 lines changed)

@@ -13,108 +13,12 @@

 package prometheus

-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that to
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-type Untyped interface {
-	Metric
-	Collector
-
-	// Set sets the Untyped metric to an arbitrary value.
-	Set(float64)
-	// Inc increments the Untyped metric by 1.
-	Inc()
-	// Dec decrements the Untyped metric by 1.
-	Dec()
-	// Add adds the given value to the Untyped metric. (The value can be
-	// negative, resulting in a decrease.)
-	Add(float64)
-	// Sub subtracts the given value from the Untyped metric. (The value can
-	// be negative, resulting in an increase.)
-	Sub(float64)
-}
-
 // UntypedOpts is an alias for Opts. See there for doc comments.
 type UntypedOpts Opts

-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+// UntypedFunc works like GaugeFunc but the collected metric is of type
-func NewUntyped(opts UntypedOpts) Untyped {
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
-	return newValue(NewDesc(
+// type.
-		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
-		opts.Help,
-		nil,
-		opts.ConstLabels,
-	), UntypedValue, 0)
-}
-
-// UntypedVec is a Collector that bundles a set of Untyped metrics that all
-// share the same Desc, but have different values for their variable
-// labels. This is used if you want to count the same thing partitioned by
-// various dimensions. Create instances with NewUntypedVec.
-type UntypedVec struct {
-	*MetricVec
-}
-
-// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
-	desc := NewDesc(
-		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
-		opts.Help,
-		labelNames,
-		opts.ConstLabels,
-	)
-	return &UntypedVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			return newValue(desc, UntypedValue, 0, lvs...)
-		}),
-	}
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns an Untyped and not a
-// Metric so that no type conversion is required.
-func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
-	if metric != nil {
-		return metric.(Untyped), err
-	}
-	return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns an Untyped and not a Metric so that no
-// type conversion is required.
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
-	if metric != nil {
-		return metric.(Untyped), err
-	}
-	return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
-	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *UntypedVec) With(labels Labels) Untyped {
-	return m.MetricVec.With(labels).(Untyped)
-}
-
-// UntypedFunc is an Untyped whose value is determined at collect time by
-// calling a provided function.
 //
 // To create UntypedFunc instances, use NewUntypedFunc.
 type UntypedFunc interface {
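With the writable Untyped type and UntypedVec removed, the remaining way to expose an untyped value in client_golang v0.9.x is NewUntypedFunc. A sketch, assuming the real value would come from some external system (the constant and metric name here are placeholders):

    external := prometheus.NewUntypedFunc(
        prometheus.UntypedOpts{
            Name: "external_queue_length",
            Help: "Queue length mirrored from an external system of unknown metric type.",
        },
        func() float64 {
            // In a real exporter this would read the value from the external system.
            return 42
        },
    )
    prometheus.MustRegister(external)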
94
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
94
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
|
@ -14,15 +14,12 @@
|
||||||
package prometheus
|
package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
|
||||||
"sort"
|
"sort"
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ValueType is an enumeration of metric types that represent a simple value.
|
// ValueType is an enumeration of metric types that represent a simple value.
|
||||||
|
@ -36,77 +33,6 @@ const (
|
||||||
UntypedValue
|
UntypedValue
|
||||||
)
|
)
|
||||||
|
|
||||||
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
|
||||||
|
|
||||||
// value is a generic metric for simple values. It implements Metric, Collector,
|
|
||||||
// Counter, Gauge, and Untyped. Its effective type is determined by
|
|
||||||
// ValueType. This is a low-level building block used by the library to back the
|
|
||||||
// implementations of Counter, Gauge, and Untyped.
|
|
||||||
type value struct {
|
|
||||||
// valBits containst the bits of the represented float64 value. It has
|
|
||||||
// to go first in the struct to guarantee alignment for atomic
|
|
||||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
|
||||||
valBits uint64
|
|
||||||
|
|
||||||
selfCollector
|
|
||||||
|
|
||||||
desc *Desc
|
|
||||||
valType ValueType
|
|
||||||
labelPairs []*dto.LabelPair
|
|
||||||
}
|
|
||||||
|
|
||||||
// newValue returns a newly allocated value with the given Desc, ValueType,
|
|
||||||
// sample value and label values. It panics if the number of label
|
|
||||||
// values is different from the number of variable labels in Desc.
|
|
||||||
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
|
|
||||||
if len(labelValues) != len(desc.variableLabels) {
|
|
||||||
panic(errInconsistentCardinality)
|
|
||||||
}
|
|
||||||
result := &value{
|
|
||||||
desc: desc,
|
|
||||||
valType: valueType,
|
|
||||||
valBits: math.Float64bits(val),
|
|
||||||
labelPairs: makeLabelPairs(desc, labelValues),
|
|
||||||
}
|
|
||||||
result.init(result)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Desc() *Desc {
|
|
||||||
return v.desc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Set(val float64) {
|
|
||||||
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Inc() {
|
|
||||||
v.Add(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Dec() {
|
|
||||||
v.Add(-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Add(val float64) {
|
|
||||||
for {
|
|
||||||
oldBits := atomic.LoadUint64(&v.valBits)
|
|
||||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
|
|
||||||
if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Sub(val float64) {
|
|
||||||
v.Add(val * -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *value) Write(out *dto.Metric) error {
|
|
||||||
val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
|
|
||||||
return populateMetric(v.valType, val, v.labelPairs, out)
|
|
||||||
}
|
|
||||||
|
|
||||||
 // valueFunc is a generic metric for simple values retrieved on collect time
 // from a function. It implements Metric and Collector. Its effective type is
 // determined by ValueType. This is a low-level building block used by the
@@ -151,10 +77,14 @@ func (v *valueFunc) Write(out *dto.Metric) error {
 // operations. However, when implementing custom Collectors, it is useful as a
 // throw-away metric that is generated on the fly to send it to Prometheus in
 // the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc.
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
 func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
-	if len(desc.variableLabels) != len(labelValues) {
-		return nil, errInconsistentCardinality
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
 	}
 	return &constMetric{
 		desc:       desc,
@@ -226,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
 			Value: proto.String(labelValues[i]),
 		})
 	}
-	for _, lp := range desc.constLabelPairs {
-		labelPairs = append(labelPairs, lp)
-	}
-	sort.Sort(LabelPairSorter(labelPairs))
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(labelPairSorter(labelPairs))
 	return labelPairs
 }
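NewConstMetric is typically called from a custom Collector's Collect method, as the comment above describes. A hedged usage sketch (the collector, metric name, and queueDepth helper are illustrative, not part of this commit):

package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	depth *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		depth: prometheus.NewDesc("example_queue_depth", "Current queue depth.", []string{"queue"}, nil),
	}
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.depth }

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// With this change, an invalid Desc is also reported via the returned error.
	m, err := prometheus.NewConstMetric(c.depth, prometheus.GaugeValue, queueDepth(), "ingest")
	if err != nil {
		return // or count/log the error
	}
	ch <- m
}

func queueDepth() float64 { return 42 } // stand-in for a real measurement

func main() { prometheus.MustRegister(newQueueCollector()) }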
516	vendor/github.com/prometheus/client_golang/prometheus/vec.go	generated	vendored
@@ -20,33 +20,180 @@ import (
 	"github.com/prometheus/common/model"
 )
 
-// MetricVec is a Collector to bundle metrics of the same name that
-// differ in their label values. MetricVec is usually not used directly but as a
-// building block for implementations of vectors of a given metric
-// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
-// provided in this package.
-type MetricVec struct {
-	mtx      sync.RWMutex // Protects the children.
-	children map[uint64][]metricWithLabelValues
-	desc     *Desc
-
-	newMetric   func(labelValues ...string) Metric
-	hashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses basicMetricVec internally.
+type metricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
 	hashAddByte func(h uint64, b byte) uint64
 }
 
-// newMetricVec returns an initialized MetricVec. The concrete value is
-// returned for embedding into another struct.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
-	return &MetricVec{
-		children:    map[uint64][]metricWithLabelValues{},
-		desc:        desc,
-		newMetric:   newMetric,
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+	return &metricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
 		hashAdd:     hashAdd,
 		hashAddByte: hashAddByte,
 	}
 }
 
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+	var (
+		newCurry []curriedLabelValue
+		oldCurry = m.curry
+		iCurry   int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+			if ok {
+				return nil, fmt.Errorf("label name %q is already curried", label)
+			}
+			newCurry = append(newCurry, oldCurry[iCurry])
+			iCurry++
+		} else {
+			if !ok {
+				continue // Label stays uncurried.
+			}
+			newCurry = append(newCurry, curriedLabelValue{i, val})
+		}
+	}
+	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+	}
+
+	return &metricVec{
+		metricMap:   m.metricMap,
+		curry:       newCurry,
+		hashAdd:     m.hashAdd,
+		hashAddByte: m.hashAddByte,
+	}, nil
+}
+
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h             = hashNew()
+		curry         = m.curry
+		iVals, iCurry int
+	)
+	for i := 0; i < len(m.desc.variableLabels); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			h = m.hashAdd(h, vals[iVals])
+			iVals++
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h      = hashNew()
+		curry  = m.curry
+		iCurry int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if ok {
+				return 0, fmt.Errorf("label name %q is already curried", label)
+			}
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			if !ok {
+				return 0, fmt.Errorf("label name %q missing in label map", label)
+			}
+			h = m.hashAdd(h, val)
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
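The unexported currying machinery added above is reached through the exported vector types. A hedged usage sketch via CounterVec (metric and label names are examples, not from this commit):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "example_requests_total", Help: "Requests by handler and method."},
		[]string{"handler", "method"},
	)
	prometheus.MustRegister(reqs)

	// Fix the "handler" label once; the curried vector only needs "method".
	loginReqs := reqs.MustCurryWith(prometheus.Labels{"handler": "/login"})
	loginReqs.WithLabelValues("POST").Inc()
	loginReqs.With(prometheus.Labels{"method": "GET"}).Inc()
}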
 // metricWithLabelValues provides the metric and its label values for
 // disambiguation on hash collision.
 type metricWithLabelValues struct {
@@ -54,166 +201,72 @@ type metricWithLabelValues struct {
 	metric Metric
 }
 
-// Describe implements Collector. The length of the returned slice
-// is always one.
-func (m *MetricVec) Describe(ch chan<- *Desc) {
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+	index int
+	value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+	mtx       sync.RWMutex // Protects metrics.
+	metrics   map[uint64][]metricWithLabelValues
+	desc      *Desc
+	newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
 	ch <- m.desc
 }
 
 // Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) {
+func (m *metricMap) Collect(ch chan<- Metric) {
 	m.mtx.RLock()
 	defer m.mtx.RUnlock()
-	for _, metrics := range m.children {
+	for _, metrics := range m.metrics {
 		for _, metric := range metrics {
 			ch <- metric.metric
 		}
 	}
 }
 
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
-	h, err := m.hashLabelValues(lvs)
-	if err != nil {
-		return nil, err
-	}
-	return m.getOrCreateMetricWithLabelValues(h, lvs), nil
-}
-
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Metric is created.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
-	h, err := m.hashLabels(labels)
-	if err != nil {
-		return nil, err
-	}
-	return m.getOrCreateMetricWithLabels(h, labels), nil
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
-// occurs. The method allows neat syntax like:
-//     httpReqs.WithLabelValues("404", "POST").Inc()
-func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
-	metric, err := m.GetMetricWithLabelValues(lvs...)
-	if err != nil {
-		panic(err)
-	}
-	return metric
-}
-
-// With works as GetMetricWith, but panics if an error occurs. The method allows
-// neat syntax like:
-//     httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
-func (m *MetricVec) With(labels Labels) Metric {
-	metric, err := m.GetMetricWith(labels)
-	if err != nil {
-		panic(err)
-	}
-	return metric
-}
-
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
-	h, err := m.hashLabelValues(lvs)
-	if err != nil {
-		return false
-	}
-	return m.deleteByHashWithLabelValues(h, lvs)
-}
-
-func (m *MetricVec) Delete(labels Labels) bool {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	h, err := m.hashLabels(labels)
-	if err != nil {
-		return false
-	}
-	return m.deleteByHashWithLabels(h, labels)
+	for h := range m.metrics {
+		delete(m.metrics, h)
+	}
 }
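Reset remains available through the concrete vector types. A short hedged sketch (names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	inFlight := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_in_flight_ops", Help: "In-flight operations."},
		[]string{"op"},
	)
	inFlight.WithLabelValues("scrape").Inc()
	inFlight.Reset() // drops every child; the series disappear from exposition
}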
 // deleteByHashWithLabelValues removes the metric from the hash bucket h. If
 // there are multiple matches in the bucket, use lvs to select a metric and
 // remove only that metric.
-func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
-	metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
 	if !ok {
 		return false
 	}
 
-	i := m.findMetricWithLabelValues(metrics, lvs)
+	i := findMetricWithLabelValues(metrics, lvs, curry)
 	if i >= len(metrics) {
 		return false
 	}
 
 	if len(metrics) > 1 {
-		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
 	} else {
-		delete(m.children, h)
+		delete(m.metrics, h)
 	}
 	return true
 }
@@ -221,69 +274,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
 // deleteByHashWithLabels removes the metric from the hash bucket h. If there
 // are multiple matches in the bucket, use lvs to select a metric and remove
 // only that metric.
-func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
-	metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
 	if !ok {
 		return false
 	}
-	i := m.findMetricWithLabels(metrics, labels)
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
 	if i >= len(metrics) {
 		return false
 	}
 
 	if len(metrics) > 1 {
-		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
 	} else {
-		delete(m.children, h)
+		delete(m.metrics, h)
 	}
 	return true
 }
 
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	for h := range m.children {
-		delete(m.children, h)
-	}
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
-	if len(vals) != len(m.desc.variableLabels) {
-		return 0, errInconsistentCardinality
-	}
-	h := hashNew()
-	for _, val := range vals {
-		h = m.hashAdd(h, val)
-		h = m.hashAddByte(h, model.SeparatorByte)
-	}
-	return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
-	if len(labels) != len(m.desc.variableLabels) {
-		return 0, errInconsistentCardinality
-	}
-	h := hashNew()
-	for _, label := range m.desc.variableLabels {
-		val, ok := labels[label]
-		if !ok {
-			return 0, fmt.Errorf("label name %q missing in label map", label)
-		}
-		h = m.hashAdd(h, val)
-		h = m.hashAddByte(h, model.SeparatorByte)
-	}
-	return h, nil
-}
 
 // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
 // or creates it and returns the new one.
 //
 // This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
 	m.mtx.RLock()
-	metric, ok := m.getMetricWithLabelValues(hash, lvs)
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
 	m.mtx.RUnlock()
 	if ok {
 		return metric
@@ -291,13 +313,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
-	metric, ok = m.getMetricWithLabelValues(hash, lvs)
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
 	if !ok {
-		// Copy to avoid allocation in case wo don't go down this code path.
-		copiedLVs := make([]string, len(lvs))
-		copy(copiedLVs, lvs)
-		metric = m.newMetric(copiedLVs...)
-		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
 	}
 	return metric
 }
@@ -306,9 +326,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
 // or creates it and returns the new one.
 //
 // This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
 	m.mtx.RLock()
-	metric, ok := m.getMetricWithLabels(hash, labels)
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
 	m.mtx.RUnlock()
 	if ok {
 		return metric
@@ -316,33 +338,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
-	metric, ok = m.getMetricWithLabels(hash, labels)
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
 	if !ok {
-		lvs := m.extractLabelValues(labels)
+		lvs := extractLabelValues(m.desc, labels, curry)
 		metric = m.newMetric(lvs...)
-		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
 	}
 	return metric
 }
 
-// getMetricWithLabelValues gets a metric while handling possible collisions in
-// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
-	metrics, ok := m.children[h]
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
 	if ok {
-		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
 			return metrics[i].metric, true
 		}
 	}
 	return nil, false
 }
 
-// getMetricWithLabels gets a metric while handling possible collisions in
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
 // the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
-	metrics, ok := m.children[h]
+func (m *metricMap) getMetricWithHashAndLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
 	if ok {
-		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
 			return metrics[i].metric, true
 		}
 	}
@@ -351,9 +377,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)
 // findMetricWithLabelValues returns the index of the matching metric or
 // len(metrics) if not found.
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+func findMetricWithLabelValues(
+	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
 	for i, metric := range metrics {
-		if m.matchLabelValues(metric.values, lvs) {
+		if matchLabelValues(metric.values, lvs, curry) {
 			return i
 		}
 	}
@@ -362,32 +390,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l
 // findMetricWithLabels returns the index of the matching metric or len(metrics)
 // if not found.
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+func findMetricWithLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
 	for i, metric := range metrics {
-		if m.matchLabels(metric.values, labels) {
+		if matchLabels(desc, metric.values, labels, curry) {
 			return i
 		}
 	}
 	return len(metrics)
 }
 
-func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
-	if len(values) != len(lvs) {
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+	if len(values) != len(lvs)+len(curry) {
 		return false
 	}
+	var iLVs, iCurry int
 	for i, v := range values {
-		if v != lvs[i] {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if v != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if v != lvs[iLVs] {
 			return false
 		}
+		iLVs++
 	}
 	return true
 }
 
-func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
-	if len(labels) != len(values) {
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	if len(values) != len(labels)+len(curry) {
 		return false
 	}
-	for i, k := range m.desc.variableLabels {
+	iCurry := 0
+	for i, k := range desc.variableLabels {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if values[i] != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
 		if values[i] != labels[k] {
 			return false
 		}
@@ -395,10 +442,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
 	return true
 }
 
-func (m *MetricVec) extractLabelValues(labels Labels) []string {
-	labelValues := make([]string, len(labels))
-	for i, k := range m.desc.variableLabels {
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(labels)+len(curry))
+	iCurry := 0
+	for i, k := range desc.variableLabels {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
 		labelValues[i] = labels[k]
 	}
 	return labelValues
 }
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(lvs)+len(curry))
+	var iCurry, iLVs int
+	for i := range labelValues {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = lvs[iLVs]
+		iLVs++
+	}
+	return labelValues
+}
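The delete semantics documented in this file are visible through the exported vector types. A hedged sketch (metric and label names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	temps := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_temperature_celsius", Help: "Temperature by sensor."},
		[]string{"sensor"},
	)
	temps.WithLabelValues("cellar").Set(12.3)

	_ = temps.DeleteLabelValues("cellar")                  // true: the child existed
	_ = temps.Delete(prometheus.Labels{"sensor": "attic"}) // false: never created
}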
179	vendor/github.com/prometheus/client_golang/prometheus/wrap.go	generated	vendored	Normal file
@@ -0,0 +1,179 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		labels:            labels,
+	}
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		prefix:            prefix,
+	}
+}
+
+type wrappingRegisterer struct {
+	wrappedRegisterer Registerer
+	prefix            string
+	labels            Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+	return r.wrappedRegisterer.Register(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+type wrappingCollector struct {
+	wrappedCollector Collector
+	prefix           string
+	labels           Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+	wrappedCh := make(chan Metric)
+	go func() {
+		c.wrappedCollector.Collect(wrappedCh)
+		close(wrappedCh)
+	}()
+	for m := range wrappedCh {
+		ch <- &wrappingMetric{
+			wrappedMetric: m,
+			prefix:        c.prefix,
+			labels:        c.labels,
+		}
+	}
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+	wrappedCh := make(chan *Desc)
+	go func() {
+		c.wrappedCollector.Describe(wrappedCh)
+		close(wrappedCh)
+	}()
+	for desc := range wrappedCh {
+		ch <- wrapDesc(desc, c.prefix, c.labels)
+	}
+}
+
+type wrappingMetric struct {
+	wrappedMetric Metric
+	prefix        string
+	labels        Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+	if err := m.wrappedMetric.Write(out); err != nil {
+		return err
+	}
+	if len(m.labels) == 0 {
+		// No wrapping labels.
+		return nil
+	}
+	for ln, lv := range m.labels {
+		out.Label = append(out.Label, &dto.LabelPair{
+			Name:  proto.String(ln),
+			Value: proto.String(lv),
+		})
+	}
+	sort.Sort(labelPairSorter(out.Label))
+	return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+	constLabels := Labels{}
+	for _, lp := range desc.constLabelPairs {
+		constLabels[*lp.Name] = *lp.Value
+	}
+	for ln, lv := range labels {
+		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+			return &Desc{
+				fqName:          desc.fqName,
+				help:            desc.help,
+				variableLabels:  desc.variableLabels,
+				constLabelPairs: desc.constLabelPairs,
+				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+			}
+		}
+		constLabels[ln] = lv
+	}
+	// NewDesc will do remaining validations.
+	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there was any. This will override any errer
+	// created by NewDesc above, i.e. earlier errors get precedence.
+	if desc.err != nil {
+		newDesc.err = desc.err
+	}
+	return newDesc
+}
379	vendor/github.com/prometheus/client_model/go/metrics.pb.go	generated	vendored
@@ -1,34 +1,23 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
 // source: metrics.proto
-// DO NOT EDIT!
 
-/*
-Package io_prometheus_client is a generated protocol buffer package.
-
-It is generated from these files:
-	metrics.proto
-
-It has these top-level messages:
-	LabelPair
-	Gauge
-	Counter
-	Quantile
-	Summary
-	Untyped
-	Histogram
-	Bucket
-	Metric
-	MetricFamily
-*/
-package io_prometheus_client
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
 
 import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
 import math "math"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
+var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
 type MetricType int32
 
 const (
@@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
 	*x = MetricType(value)
 	return nil
 }
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
 
 type LabelPair struct {
 	Name  *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *LabelPair) Reset()         { *m = LabelPair{} }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (*LabelPair) ProtoMessage()    {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+	return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
 
 func (m *LabelPair) GetName() string {
 	if m != nil && m.Name != nil {
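The generated getters in this file are what metric consumers use after calling Write on a collected metric. A hedged sketch (the counter name is illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "example_total", Help: "Example."})
	c.Add(3)

	var m dto.Metric
	if err := c.Write(&m); err != nil {
		panic(err)
	}
	fmt.Println(m.GetCounter().GetValue()) // 3
}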
@@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string {
 type Gauge struct {
 	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Gauge) Reset()         { *m = Gauge{} }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (*Gauge) ProtoMessage()    {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+	return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+	xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
 
 func (m *Gauge) GetValue() float64 {
 	if m != nil && m.Value != nil {
@@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 {
@@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 {
@@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 {
@@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile {
@@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 {
@@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket {
@@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 {
 type Metric struct {
 	Label     []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
 	Gauge     *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
 	Counter   *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
 	Summary   *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
 	Untyped   *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
 	Histogram *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
-	TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
+	TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *Metric) Reset()         { *m = Metric{} }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (*Metric) ProtoMessage()    {}
+func (*Metric) Descriptor() ([]byte, []int) {
+	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
|
||||||
|
}
|
||||||
|
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Metric) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Metric.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Metric) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Metric.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Metric) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Metric.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Metric proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *Metric) GetLabel() []*LabelPair {
|
func (m *Metric) GetLabel() []*LabelPair {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
type MetricFamily struct {
|
type MetricFamily struct {
|
||||||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||||
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
|
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
|
||||||
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
|
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
|
||||||
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
|
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
|
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
|
||||||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
|
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
|
||||||
func (*MetricFamily) ProtoMessage() {}
|
func (*MetricFamily) ProtoMessage() {}
|
||||||
|
func (*MetricFamily) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
|
||||||
|
}
|
||||||
|
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *MetricFamily) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_MetricFamily.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *MetricFamily) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_MetricFamily.Size(m)
|
||||||
|
}
|
||||||
|
func (m *MetricFamily) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_MetricFamily.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *MetricFamily) GetName() string {
|
func (m *MetricFamily) GetName() string {
|
||||||
if m != nil && m.Name != nil {
|
if m != nil && m.Name != nil {
|
||||||
|
@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric {
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
|
||||||
|
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
|
||||||
|
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
|
||||||
|
proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
|
||||||
|
proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
|
||||||
|
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
|
||||||
|
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
|
||||||
|
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
|
||||||
|
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
|
||||||
|
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
|
||||||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
|
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
|
||||||
|
|
||||||
|
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
|
||||||
|
// 591 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
|
||||||
|
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
|
||||||
|
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
|
||||||
|
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
|
||||||
|
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
|
||||||
|
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
|
||||||
|
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
|
||||||
|
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
|
||||||
|
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
|
||||||
|
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
|
||||||
|
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
|
||||||
|
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
|
||||||
|
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
|
||||||
|
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
|
||||||
|
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
|
||||||
|
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
|
||||||
|
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
|
||||||
|
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
|
||||||
|
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
|
||||||
|
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
|
||||||
|
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
|
||||||
|
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
|
||||||
|
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
|
||||||
|
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
|
||||||
|
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
|
||||||
|
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
|
||||||
|
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
|
||||||
|
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
|
||||||
|
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
|
||||||
|
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
|
||||||
|
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
|
||||||
|
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
|
||||||
|
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
|
||||||
|
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
|
||||||
|
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
|
||||||
|
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
|
||||||
|
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
4 vendor/github.com/prometheus/common/expfmt/decode.go generated vendored
@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
 }
 
 // ExtractSamples builds a slice of samples from the provided metric
-// families. If an error occurrs during sample extraction, it continues to
+// families. If an error occurs during sample extraction, it continues to
 // extract from the remaining metric families. The returned error is the last
-// error that has occured.
+// error that has occurred.
 func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
 	var (
 		all model.Vector
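The comment fix above also documents ExtractSamples' contract: extraction keeps going when one family fails and only the last error is reported. A minimal usage sketch, assuming the vendored expfmt, model and client_model packages at the versions pinned in go.mod; the http_requests_total family is an invented example:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// One well-formed counter family; a malformed family passed in the same
	// call would be skipped and surface only as the (last) returned error.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, mf)
	fmt.Println(samples, err)
}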
2 vendor/github.com/prometheus/common/expfmt/expfmt.go generated vendored
@ -26,7 +26,7 @@ const (
 
 	// The Content-Type values for the different wire protocols.
 	FmtUnknown      Format = `<unknown>`
-	FmtText         Format = `text/plain; version=` + TextVersion
+	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
 	FmtProtoText    Format = ProtoFmt + ` encoding=text`
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
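The FmtText change appends charset=utf-8 to the advertised Content-Type. A sketch of where these Format constants typically surface, assuming the vendored expfmt Negotiate/NewEncoder API; the handler name and the nil families slice are illustrative only:

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// metricsHandler serves the given families, honouring the client's Accept
// header and advertising the negotiated Content-Type (for FmtText that is
// now the text format version plus "; charset=utf-8").
func metricsHandler(families []*dto.MetricFamily) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		format := expfmt.Negotiate(r.Header) // falls back to FmtText
		w.Header().Set("Content-Type", string(format))
		enc := expfmt.NewEncoder(w, format)
		for _, mf := range families {
			if err := enc.Encode(mf); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	}
}

func main() {
	http.Handle("/metrics", metricsHandler(nil))
	http.ListenAndServe(":9100", nil)
}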
385 vendor/github.com/prometheus/common/expfmt/text_create.go generated vendored
@ -14,13 +14,45 @@
 package expfmt
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"math"
+	"strconv"
 	"strings"
+	"sync"
+
+	"github.com/prometheus/common/model"
 
 	dto "github.com/prometheus/client_model/go"
-	"github.com/prometheus/common/model"
 )
 
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+	io.Writer
+	WriteRune(r rune) (n int, err error)
+	WriteString(s string) (n int, err error)
+	WriteByte(c byte) error
+}
+
+const (
+	initialBufSize    = 512
+	initialNumBufSize = 24
+)
+
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+		},
+	}
+	numBufPool = sync.Pool{
+		New: func() interface{} {
+			b := make([]byte, 0, initialNumBufSize)
+			return &b
+		},
+	}
+)
+
 // MetricFamilyToText converts a MetricFamily proto message into text format and
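The new enhancedWriter interface and the two sync.Pools carry most of this rewrite: the encoder upgrades the destination writer when it already provides WriteString and WriteByte, and otherwise stages output in a pooled bytes.Buffer that is flushed to the destination with a single Write. A self-contained sketch of that pattern outside expfmt; the names stringWriter and writeGreeting are invented for illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

// stringWriter plays the role of expfmt's unexported enhancedWriter.
type stringWriter interface {
	io.Writer
	WriteString(s string) (int, error)
}

var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// writeGreeting upgrades out to a stringWriter if possible; otherwise it
// borrows a buffer from the pool and copies it to out in one Write at the end.
func writeGreeting(out io.Writer, name string) (written int, err error) {
	w, ok := out.(stringWriter)
	if !ok {
		b := bufPool.Get().(*bytes.Buffer)
		b.Reset()
		w = b
		defer func() {
			n, werr := out.Write(b.Bytes())
			written = n // report only the bytes that actually reached out
			if err == nil {
				err = werr
			}
			bufPool.Put(b)
		}()
	}
	var n int
	n, err = w.WriteString("# greeting\n")
	written += n
	if err != nil {
		return
	}
	n, err = w.WriteString("hello " + name + "\n")
	written += n
	return
}

func main() {
	n, err := writeGreeting(os.Stdout, "prometheus")
	fmt.Println(n, err)
}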
@ -32,37 +64,92 @@ import (
|
||||||
// will result in invalid text format output.
|
// will result in invalid text format output.
|
||||||
//
|
//
|
||||||
// This method fulfills the type 'prometheus.encoder'.
|
// This method fulfills the type 'prometheus.encoder'.
|
||||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||||
var written int
|
|
||||||
|
|
||||||
// Fail-fast checks.
|
// Fail-fast checks.
|
||||||
if len(in.Metric) == 0 {
|
if len(in.Metric) == 0 {
|
||||||
return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
|
return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
|
||||||
}
|
}
|
||||||
name := in.GetName()
|
name := in.GetName()
|
||||||
if name == "" {
|
if name == "" {
|
||||||
return written, fmt.Errorf("MetricFamily has no name: %s", in)
|
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Try the interface upgrade. If it doesn't work, we'll use a
|
||||||
|
// bytes.Buffer from the sync.Pool and write out its content to out in a
|
||||||
|
// single go in the end.
|
||||||
|
w, ok := out.(enhancedWriter)
|
||||||
|
if !ok {
|
||||||
|
b := bufPool.Get().(*bytes.Buffer)
|
||||||
|
b.Reset()
|
||||||
|
w = b
|
||||||
|
defer func() {
|
||||||
|
bWritten, bErr := out.Write(b.Bytes())
|
||||||
|
written = bWritten
|
||||||
|
if err == nil {
|
||||||
|
err = bErr
|
||||||
|
}
|
||||||
|
bufPool.Put(b)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
var n int
|
||||||
|
|
||||||
// Comments, first HELP, then TYPE.
|
// Comments, first HELP, then TYPE.
|
||||||
if in.Help != nil {
|
if in.Help != nil {
|
||||||
n, err := fmt.Fprintf(
|
n, err = w.WriteString("# HELP ")
|
||||||
out, "# HELP %s %s\n",
|
|
||||||
name, escapeString(*in.Help, false),
|
|
||||||
)
|
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return
|
||||||
|
}
|
||||||
|
n, err = w.WriteString(name)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = writeEscapedString(w, *in.Help, false)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = w.WriteByte('\n')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
metricType := in.GetType()
|
n, err = w.WriteString("# TYPE ")
|
||||||
n, err := fmt.Fprintf(
|
|
||||||
out, "# TYPE %s %s\n",
|
|
||||||
name, strings.ToLower(metricType.String()),
|
|
||||||
)
|
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return
|
||||||
|
}
|
||||||
|
n, err = w.WriteString(name)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
metricType := in.GetType()
|
||||||
|
switch metricType {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
n, err = w.WriteString(" counter\n")
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
n, err = w.WriteString(" gauge\n")
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
n, err = w.WriteString(" summary\n")
|
||||||
|
case dto.MetricType_UNTYPED:
|
||||||
|
n, err = w.WriteString(" untyped\n")
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
n, err = w.WriteString(" histogram\n")
|
||||||
|
default:
|
||||||
|
return written, fmt.Errorf("unknown metric type %s", metricType.String())
|
||||||
|
}
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finally the samples, one line for each.
|
// Finally the samples, one line for each.
|
||||||
|
@ -75,9 +162,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name, metric, "", "",
|
w, name, "", metric, "", 0,
|
||||||
metric.Counter.GetValue(),
|
metric.Counter.GetValue(),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
case dto.MetricType_GAUGE:
|
case dto.MetricType_GAUGE:
|
||||||
if metric.Gauge == nil {
|
if metric.Gauge == nil {
|
||||||
|
@ -86,9 +172,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name, metric, "", "",
|
w, name, "", metric, "", 0,
|
||||||
metric.Gauge.GetValue(),
|
metric.Gauge.GetValue(),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
case dto.MetricType_UNTYPED:
|
case dto.MetricType_UNTYPED:
|
||||||
if metric.Untyped == nil {
|
if metric.Untyped == nil {
|
||||||
|
@ -97,9 +182,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name, metric, "", "",
|
w, name, "", metric, "", 0,
|
||||||
metric.Untyped.GetValue(),
|
metric.Untyped.GetValue(),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
case dto.MetricType_SUMMARY:
|
case dto.MetricType_SUMMARY:
|
||||||
if metric.Summary == nil {
|
if metric.Summary == nil {
|
||||||
|
@ -109,29 +193,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
}
|
}
|
||||||
for _, q := range metric.Summary.Quantile {
|
for _, q := range metric.Summary.Quantile {
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name, metric,
|
w, name, "", metric,
|
||||||
model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
|
model.QuantileLabel, q.GetQuantile(),
|
||||||
q.GetValue(),
|
q.GetValue(),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name+"_sum", metric, "", "",
|
w, name, "_sum", metric, "", 0,
|
||||||
metric.Summary.GetSampleSum(),
|
metric.Summary.GetSampleSum(),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
written += n
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name+"_count", metric, "", "",
|
w, name, "_count", metric, "", 0,
|
||||||
float64(metric.Summary.GetSampleCount()),
|
float64(metric.Summary.GetSampleCount()),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
case dto.MetricType_HISTOGRAM:
|
case dto.MetricType_HISTOGRAM:
|
||||||
if metric.Histogram == nil {
|
if metric.Histogram == nil {
|
||||||
|
@ -140,46 +221,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
infSeen := false
|
infSeen := false
|
||||||
for _, q := range metric.Histogram.Bucket {
|
for _, b := range metric.Histogram.Bucket {
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name+"_bucket", metric,
|
w, name, "_bucket", metric,
|
||||||
model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
|
model.BucketLabel, b.GetUpperBound(),
|
||||||
float64(q.GetCumulativeCount()),
|
float64(b.GetCumulativeCount()),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return
|
||||||
}
|
}
|
||||||
if math.IsInf(q.GetUpperBound(), +1) {
|
if math.IsInf(b.GetUpperBound(), +1) {
|
||||||
infSeen = true
|
infSeen = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !infSeen {
|
if !infSeen {
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name+"_bucket", metric,
|
w, name, "_bucket", metric,
|
||||||
model.BucketLabel, "+Inf",
|
model.BucketLabel, math.Inf(+1),
|
||||||
float64(metric.Histogram.GetSampleCount()),
|
float64(metric.Histogram.GetSampleCount()),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
written += n
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name+"_sum", metric, "", "",
|
w, name, "_sum", metric, "", 0,
|
||||||
metric.Histogram.GetSampleSum(),
|
metric.Histogram.GetSampleSum(),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
written += n
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
n, err = writeSample(
|
n, err = writeSample(
|
||||||
name+"_count", metric, "", "",
|
w, name, "_count", metric, "", 0,
|
||||||
float64(metric.Histogram.GetSampleCount()),
|
float64(metric.Histogram.GetSampleCount()),
|
||||||
out,
|
|
||||||
)
|
)
|
||||||
default:
|
default:
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
|
@ -188,116 +265,204 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
}
|
}
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return written, nil
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeSample writes a single sample in text format to out, given the metric
|
// writeSample writes a single sample in text format to w, given the metric
|
||||||
// name, the metric proto message itself, optionally an additional label name
|
// name, the metric proto message itself, optionally an additional label name
|
||||||
// and value (use empty strings if not required), and the value. The function
|
// with a float64 value (use empty string as label name if not required), and
|
||||||
// returns the number of bytes written and any error encountered.
|
// the value. The function returns the number of bytes written and any error
|
||||||
|
// encountered.
|
||||||
func writeSample(
|
func writeSample(
|
||||||
name string,
|
w enhancedWriter,
|
||||||
|
name, suffix string,
|
||||||
metric *dto.Metric,
|
metric *dto.Metric,
|
||||||
additionalLabelName, additionalLabelValue string,
|
additionalLabelName string, additionalLabelValue float64,
|
||||||
value float64,
|
value float64,
|
||||||
out io.Writer,
|
|
||||||
) (int, error) {
|
) (int, error) {
|
||||||
var written int
|
var written int
|
||||||
n, err := fmt.Fprint(out, name)
|
n, err := w.WriteString(name)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
n, err = labelPairsToText(
|
if suffix != "" {
|
||||||
metric.Label,
|
n, err = w.WriteString(suffix)
|
||||||
additionalLabelName, additionalLabelValue,
|
|
||||||
out,
|
|
||||||
)
|
|
||||||
written += n
|
|
||||||
if err != nil {
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
n, err = fmt.Fprintf(out, " %v", value)
|
|
||||||
written += n
|
|
||||||
if err != nil {
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
if metric.TimestampMs != nil {
|
|
||||||
n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
|
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
n, err = out.Write([]byte{'\n'})
|
n, err = writeLabelPairs(
|
||||||
|
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||||
|
)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeFloat(w, value)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
if metric.TimestampMs != nil {
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeInt(w, *metric.TimestampMs)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = w.WriteByte('\n')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
return written, nil
|
return written, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// labelPairsToText converts a slice of LabelPair proto messages plus the
|
// writeLabelPairs converts a slice of LabelPair proto messages plus the
|
||||||
// explicitly given additional label pair into text formatted as required by the
|
// explicitly given additional label pair into text formatted as required by the
|
||||||
// text format and writes it to 'out'. An empty slice in combination with an
|
// text format and writes it to 'w'. An empty slice in combination with an empty
|
||||||
// empty string 'additionalLabelName' results in nothing being
|
// string 'additionalLabelName' results in nothing being written. Otherwise, the
|
||||||
// written. Otherwise, the label pairs are written, escaped as required by the
|
// label pairs are written, escaped as required by the text format, and enclosed
|
||||||
// text format, and enclosed in '{...}'. The function returns the number of
|
// in '{...}'. The function returns the number of bytes written and any error
|
||||||
// bytes written and any error encountered.
|
// encountered.
|
||||||
func labelPairsToText(
|
func writeLabelPairs(
|
||||||
|
w enhancedWriter,
|
||||||
in []*dto.LabelPair,
|
in []*dto.LabelPair,
|
||||||
additionalLabelName, additionalLabelValue string,
|
additionalLabelName string, additionalLabelValue float64,
|
||||||
out io.Writer,
|
|
||||||
) (int, error) {
|
) (int, error) {
|
||||||
if len(in) == 0 && additionalLabelName == "" {
|
if len(in) == 0 && additionalLabelName == "" {
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
var written int
|
var (
|
||||||
separator := '{'
|
written int
|
||||||
|
separator byte = '{'
|
||||||
|
)
|
||||||
for _, lp := range in {
|
for _, lp := range in {
|
||||||
n, err := fmt.Fprintf(
|
err := w.WriteByte(separator)
|
||||||
out, `%c%s="%s"`,
|
written++
|
||||||
separator, lp.GetName(), escapeString(lp.GetValue(), true),
|
if err != nil {
|
||||||
)
|
return written, err
|
||||||
|
}
|
||||||
|
n, err := w.WriteString(lp.GetName())
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
|
n, err = w.WriteString(`="`)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeEscapedString(w, lp.GetValue(), true)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = w.WriteByte('"')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
separator = ','
|
separator = ','
|
||||||
}
|
}
|
||||||
if additionalLabelName != "" {
|
if additionalLabelName != "" {
|
||||||
n, err := fmt.Fprintf(
|
err := w.WriteByte(separator)
|
||||||
out, `%c%s="%s"`,
|
written++
|
||||||
separator, additionalLabelName,
|
if err != nil {
|
||||||
escapeString(additionalLabelValue, true),
|
return written, err
|
||||||
)
|
}
|
||||||
|
n, err := w.WriteString(additionalLabelName)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
|
n, err = w.WriteString(`="`)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeFloat(w, additionalLabelValue)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = w.WriteByte('"')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
n, err := out.Write([]byte{'}'})
|
err := w.WriteByte('}')
|
||||||
written += n
|
written++
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
return written, nil
|
return written, nil
|
||||||
}
|
}
|
||||||
 
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
 var (
-	escape                = strings.NewReplacer("\\", `\\`, "\n", `\n`)
-	escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
 )
 
-// escapeString replaces '\' by '\\', new line character by '\n', and - if
-// includeDoubleQuote is true - '"' by '\"'.
-func escapeString(v string, includeDoubleQuote bool) string {
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
 	if includeDoubleQuote {
-		return escapeWithDoubleQuote.Replace(v)
+		return quotedEscaper.WriteString(w, v)
+	} else {
+		return escaper.WriteString(w, v)
 	}
-	return escape.Replace(v)
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return 1, w.WriteByte('1')
+	case f == 0:
+		return 1, w.WriteByte('0')
+	case f == -1:
+		return w.WriteString("-1")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendInt((*bp)[:0], i, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
 }
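writeFloat and writeInt above sidestep fmt's reflection-based formatting by appending into a pooled scratch slice with strconv. A standalone sketch of the same trick; scratchPool and formatFloat are illustrative names, not the vendored identifiers:

package main

import (
	"fmt"
	"math"
	"strconv"
	"sync"
)

// scratchPool hands out small byte slices that are reused across calls,
// mirroring the numBufPool idea above.
var scratchPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 24)
		return &b
	},
}

// formatFloat short-circuits a few very common values and otherwise uses
// strconv.AppendFloat on a pooled slice instead of fmt.Sprint.
func formatFloat(f float64) string {
	switch {
	case f == 0:
		return "0"
	case f == 1:
		return "1"
	case math.IsInf(f, +1):
		return "+Inf"
	}
	bp := scratchPool.Get().(*[]byte)
	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
	s := string(*bp)
	scratchPool.Put(bp)
	return s
}

func main() {
	fmt.Println(formatFloat(0), formatFloat(0.1234), formatFloat(math.Inf(1)))
}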
6 vendor/github.com/prometheus/common/expfmt/text_parse.go generated vendored
@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
 		}
 		return p.readingValue
 	default:
-		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
 		return nil
 	}
 }
@ -556,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() {
 // byte considered is the byte already read (now in p.currentByte). The first
 // newline byte encountered is still copied into p.currentByte, but not into
 // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
 func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 	p.currentToken.Reset()
 	escaped := false
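text_parse.go is the read side of the text format touched above. A short sketch of driving the parser, assuming the exported TextParser.TextToMetricFamilies API at the vendored version:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# TYPE http_requests_total counter
http_requests_total{path="/metrics",code="200"} 42
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		fmt.Println(name, len(mf.Metric), "metric(s)")
	}
}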
9 vendor/github.com/prometheus/common/log/log.go generated vendored
@ -56,11 +56,11 @@ func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {
 // To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
 func AddFlags(a *kingpin.Application) {
 	s := loggerSettings{}
-	kingpin.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
+	a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
 		Default(origLogger.Level.String()).
 		StringVar(&s.level)
 	defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"}
-	kingpin.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
+	a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
 		Default(defaultFormat.String()).
 		StringVar(&s.format)
 	a.Action(s.apply)
@ -345,6 +345,11 @@ func Fatalf(format string, args ...interface{}) {
 	baseLogger.sourced().Fatalf(format, args...)
 }
 
+// AddHook adds hook to Prometheus' original logger.
+func AddHook(hook logrus.Hook) {
+	origLogger.Hooks.Add(hook)
+}
+
 type errorLogWriter struct{}
 
 func (errorLogWriter) Write(b []byte) (int, error) {
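The first log.go hunk moves flag registration from the global kingpin.CommandLine onto the Application passed to AddFlags, and the second adds AddHook. A sketch of how a caller wires this up, assuming the vendored kingpin.v2 and prometheus/common/log packages; the application name is made up:

package main

import (
	"github.com/prometheus/common/log"
	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	// Flags now land on this Application rather than on kingpin.CommandLine,
	// which is what the a.Flag(...) change above is about.
	app := kingpin.New("example_exporter", "demo for prometheus/common/log flags")
	log.AddFlags(app)
	kingpin.MustParse(app.Parse([]string{"--log.level=debug"}))
	log.Debugln("debug logging is now enabled")
}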
4 vendor/github.com/prometheus/common/model/silence.go generated vendored
@ -59,8 +59,8 @@ func (m *Matcher) Validate() error {
 	return nil
 }
 
-// Silence defines the representation of a silence definiton
-// in the Prometheus eco-system.
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
 type Silence struct {
 	ID uint64 `json:"id,omitempty"`
5 vendor/github.com/prometheus/common/model/time.go generated vendored
@ -43,7 +43,7 @@ const (
 // (1970-01-01 00:00 UTC) excluding leap seconds.
 type Time int64
 
-// Interval describes and interval between two timestamps.
+// Interval describes an interval between two timestamps.
 type Interval struct {
 	Start, End Time
 }
@ -214,6 +214,9 @@ func (d Duration) String() string {
 		ms   = int64(time.Duration(d) / time.Millisecond)
 		unit = "ms"
 	)
+	if ms == 0 {
+		return "0s"
+	}
 	factors := map[string]int64{
 		"y": 1000 * 60 * 60 * 24 * 365,
 		"w": 1000 * 60 * 60 * 24 * 7,
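The Duration.String hunk adds an explicit zero case so that a zero duration renders as "0s". A small sketch of the resulting behaviour, assuming the vendored model package; the parse round trip is only illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// With the new early return a zero duration prints as "0s".
	fmt.Println(model.Duration(0).String())
	// Non-zero durations keep the old behaviour, e.g. 90 seconds prints as "90s".
	fmt.Println(model.Duration(90 * time.Second).String())
	// Round trip through the Prometheus duration syntax.
	if d, err := model.ParseDuration("5m"); err == nil {
		fmt.Println(time.Duration(d)) // 5m0s
	}
}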
4 vendor/github.com/prometheus/common/model/value.go generated vendored
@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
 }
 
 // Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The sematics of Value equality is defined by SampleValue.Equal.
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
 func (s *SamplePair) Equal(o *SamplePair) bool {
 	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
 }
@ -117,7 +117,7 @@ type Sample struct {
 }
 
 // Equal compares first the metrics, then the timestamp, then the value. The
-// sematics of value equality is defined by SampleValue.Equal.
+// semantics of value equality is defined by SampleValue.Equal.
 func (s *Sample) Equal(o *Sample) bool {
 	if s == o {
 		return true
1 vendor/github.com/prometheus/procfs/.gitignore generated vendored Normal file
@ -0,0 +1 @@
+/fixtures/
5 vendor/github.com/prometheus/procfs/.travis.yml generated vendored
@ -1,5 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.7.6
-  - 1.8.3
40 vendor/github.com/prometheus/procfs/Makefile generated vendored
@ -1,18 +1,30 @@
-ci: fmt lint test
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-fmt:
-	! gofmt -l *.go | read nothing
-	go vet
+include Makefile.common
 
-lint:
-	go get github.com/golang/lint/golint
-	golint *.go
-
-test: sysfs/fixtures/.unpacked
-	go test -v ./...
-
-sysfs/fixtures/.unpacked: sysfs/fixtures.ttar
-	./ttar -C sysfs -x -f sysfs/fixtures.ttar
+%/.unpacked: %.ttar
+	./ttar -C $(dir $*) -x -f $*.ttar
 	touch $@
 
-.PHONY: fmt lint test ci
+update_fixtures: fixtures.ttar sysfs/fixtures.ttar
+
+%fixtures.ttar: %/fixtures
+	rm -v $(dir $*)fixtures/.unpacked
+	./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
+
+.PHONY: build
+build:
+
+.PHONY: test
+test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test
223 vendor/github.com/prometheus/procfs/Makefile.common generated vendored Normal file
@ -0,0 +1,223 @@
|
||||||
|
# Copyright 2018 The Prometheus Authors
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
# A common Makefile that includes rules to be reused in different prometheus projects.
|
||||||
|
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
|
||||||
|
|
||||||
|
# Example usage :
|
||||||
|
# Create the main Makefile in the root project directory.
|
||||||
|
# include Makefile.common
|
||||||
|
# customTarget:
|
||||||
|
# @echo ">> Running customTarget"
|
||||||
|
#
|
||||||
|
|
||||||
|
# Ensure GOBIN is not set during build so that promu is installed to the correct path
|
||||||
|
unexport GOBIN
|
||||||
|
|
||||||
|
GO ?= go
|
||||||
|
GOFMT ?= $(GO)fmt
|
||||||
|
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
|
||||||
|
GOOPTS ?=
|
||||||
|
|
||||||
|
GO_VERSION ?= $(shell $(GO) version)
|
||||||
|
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||||
|
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||||
|
|
||||||
|
unexport GOVENDOR
|
||||||
|
ifeq (, $(PRE_GO_111))
|
||||||
|
ifneq (,$(wildcard go.mod))
|
||||||
|
# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
|
||||||
|
GO111MODULE := on
|
||||||
|
|
||||||
|
ifneq (,$(wildcard vendor))
|
||||||
|
# Always use the local vendor/ directory to satisfy the dependencies.
|
||||||
|
GOOPTS := $(GOOPTS) -mod=vendor
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
else
|
||||||
|
ifneq (,$(wildcard go.mod))
|
||||||
|
ifneq (,$(wildcard vendor))
|
||||||
|
$(warning This repository requires Go >= 1.11 because of Go modules)
|
||||||
|
$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
|
||||||
|
endif
|
||||||
|
else
|
||||||
|
# This repository isn't using Go modules (yet).
|
||||||
|
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
|
||||||
|
endif
|
||||||
|
|
||||||
|
unexport GO111MODULE
|
||||||
|
endif
|
||||||
|
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||||
|
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
|
||||||
|
pkgs = ./...
|
||||||
|
|
||||||
|
GO_VERSION ?= $(shell $(GO) version)
|
||||||
|
GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION)))
|
||||||
|
|
||||||
|
PROMU_VERSION ?= 0.2.0
|
||||||
|
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
||||||
|
|
||||||
|
PREFIX ?= $(shell pwd)
|
||||||
|
BIN_DIR ?= $(shell pwd)
|
||||||
|
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
|
||||||
|
DOCKER_REPO ?= prom
|
||||||
|
|
||||||
|
.PHONY: all
|
||||||
|
all: precheck style staticcheck unused build test
|
||||||
|
|
||||||
|
# This rule is used to forward a target like "build" to "common-build". This
|
||||||
|
# allows a new "build" target to be defined in a Makefile which includes this
|
||||||
|
# one and override "common-build" without override warnings.
|
||||||
|
%: common-% ;
|
||||||
|
|
||||||
|
.PHONY: common-style
|
||||||
|
common-style:
|
||||||
|
@echo ">> checking code style"
|
||||||
|
@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
|
||||||
|
if [ -n "$${fmtRes}" ]; then \
|
||||||
|
echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
|
||||||
|
echo "Please ensure you are using $$($(GO) version) for formatting code."; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: common-check_license
|
||||||
|
common-check_license:
|
||||||
|
@echo ">> checking license header"
|
||||||
|
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
|
||||||
|
awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
|
||||||
|
done); \
|
||||||
|
if [ -n "$${licRes}" ]; then \
|
||||||
|
echo "license header checking failed:"; echo "$${licRes}"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: common-test-short
|
||||||
|
common-test-short:
|
||||||
|
@echo ">> running short tests"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
|
||||||
|
|
||||||
|
.PHONY: common-test
|
||||||
|
common-test:
|
||||||
|
@echo ">> running all tests"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs)
|
||||||
|
|
||||||
|
.PHONY: common-format
|
||||||
|
common-format:
|
||||||
|
@echo ">> formatting code"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs)
|
||||||
|
|
||||||
|
.PHONY: common-vet
|
||||||
|
common-vet:
|
||||||
|
@echo ">> vetting code"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
|
||||||
|
|
||||||
|
.PHONY: common-staticcheck
|
||||||
|
common-staticcheck: $(STATICCHECK)
|
||||||
|
@echo ">> running staticcheck"
|
||||||
|
ifdef GO111MODULE
|
||||||
|
GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
|
||||||
|
else
|
||||||
|
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: common-unused
|
||||||
|
common-unused: $(GOVENDOR)
|
||||||
|
ifdef GOVENDOR
|
||||||
|
@echo ">> running check for unused packages"
|
||||||
|
@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
|
||||||
|
else
|
||||||
|
ifdef GO111MODULE
|
||||||
|
@echo ">> running check for unused/missing packages in go.mod"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
|
||||||
|
@git diff --exit-code -- go.sum go.mod
|
||||||
|
ifneq (,$(wildcard vendor))
|
||||||
|
@echo ">> running check for unused packages in vendor/"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
|
||||||
|
@git diff --exit-code -- go.sum go.mod vendor/
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: common-build
|
||||||
|
common-build: promu
|
||||||
|
@echo ">> building binaries"
|
||||||
|
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
|
||||||
|
|
||||||
|
.PHONY: common-tarball
|
||||||
|
common-tarball: promu
|
||||||
|
@echo ">> building release tarball"
|
||||||
|
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
||||||
|
|
||||||
|
.PHONY: common-docker
|
||||||
|
common-docker:
|
||||||
|
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
|
||||||
|
|
||||||
|
.PHONY: common-docker-publish
|
||||||
|
common-docker-publish:
|
||||||
|
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
|
||||||
|
|
||||||
|
.PHONY: common-docker-tag-latest
|
||||||
|
common-docker-tag-latest:
|
||||||
|
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
|
||||||
|
|
||||||
|
.PHONY: promu
|
||||||
|
promu: $(PROMU)
|
||||||
|
|
||||||
|
$(PROMU):
|
||||||
|
curl -s -L $(PROMU_URL) | tar -xvz -C /tmp
|
||||||
|
mkdir -v -p $(FIRST_GOPATH)/bin
|
||||||
|
cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU)
|
||||||
|
|
||||||
|
.PHONY: proto
|
||||||
|
proto:
|
||||||
|
@echo ">> generating code from proto files"
|
||||||
|
@./scripts/genproto.sh
|
||||||
|
|
||||||
|
.PHONY: $(STATICCHECK)
|
||||||
|
$(STATICCHECK):
|
||||||
|
ifdef GO111MODULE
|
||||||
|
# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}.
|
||||||
|
# See https://github.com/golang/go/issues/27643.
|
||||||
|
# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
|
||||||
|
tmpModule=$$(mktemp -d 2>&1) && \
|
||||||
|
mkdir -p $${tmpModule}/staticcheck && \
|
||||||
|
cd "$${tmpModule}"/staticcheck && \
|
||||||
|
GO111MODULE=on $(GO) mod init example.com/staticcheck && \
|
||||||
|
GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
|
||||||
|
rm -rf $${tmpModule};
|
||||||
|
else
|
||||||
|
GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck
|
||||||
|
endif
|
||||||
|
|
||||||
|
ifdef GOVENDOR
|
||||||
|
.PHONY: $(GOVENDOR)
|
||||||
|
$(GOVENDOR):
|
||||||
|
GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: precheck
|
||||||
|
precheck::
|
||||||
|
|
||||||
|
define PRECHECK_COMMAND_template =
|
||||||
|
precheck:: $(1)_precheck
|
||||||
|
|
||||||
|
|
||||||
|
PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
|
||||||
|
.PHONY: $(1)_precheck
|
||||||
|
$(1)_precheck:
|
||||||
|
@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
|
||||||
|
echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
endef
|
2 vendor/github.com/prometheus/procfs/buddyinfo.go generated vendored
@ -62,7 +62,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
 	for scanner.Scan() {
 		var err error
 		line := scanner.Text()
-		parts := strings.Fields(string(line))
+		parts := strings.Fields(line)
 
 		if len(parts) < 4 {
 			return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
462 vendor/github.com/prometheus/procfs/fixtures.ttar generated vendored Normal file
@ -0,0 +1,462 @@
|
||||||
|
# Archive created by ttar -c -f fixtures.ttar fixtures/
|
||||||
|
Directory: fixtures
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26231
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/cmdline
|
||||||
|
Lines: 1
|
||||||
|
vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/comm
|
||||||
|
Lines: 1
|
||||||
|
vim
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/cwd
|
||||||
|
SymlinkTo: /usr/bin
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/exe
|
||||||
|
SymlinkTo: /usr/bin/vim
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26231/fd
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/fd/0
|
||||||
|
SymlinkTo: ../../symlinktargets/abc
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/fd/1
|
||||||
|
SymlinkTo: ../../symlinktargets/def
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/fd/10
|
||||||
|
SymlinkTo: ../../symlinktargets/xyz
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/fd/2
|
||||||
|
SymlinkTo: ../../symlinktargets/ghi
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/fd/3
|
||||||
|
SymlinkTo: ../../symlinktargets/uvw
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/io
|
||||||
|
Lines: 7
|
||||||
|
rchar: 750339
|
||||||
|
wchar: 818609
|
||||||
|
syscr: 7405
|
||||||
|
syscw: 5245
|
||||||
|
read_bytes: 1024
|
||||||
|
write_bytes: 2048
|
||||||
|
cancelled_write_bytes: -1024
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/limits
|
||||||
|
Lines: 17
|
||||||
|
Limit Soft Limit Hard Limit Units
|
||||||
|
Max cpu time unlimited unlimited seconds
|
||||||
|
Max file size unlimited unlimited bytes
|
||||||
|
Max data size unlimited unlimited bytes
|
||||||
|
Max stack size 8388608 unlimited bytes
|
||||||
|
Max core file size 0 unlimited bytes
|
||||||
|
Max resident set unlimited unlimited bytes
|
||||||
|
Max processes 62898 62898 processes
|
||||||
|
Max open files 2048 4096 files
|
||||||
|
Max locked memory 65536 65536 bytes
|
||||||
|
Max address space 8589934592 unlimited bytes
|
||||||
|
Max file locks unlimited unlimited locks
|
||||||
|
Max pending signals 62898 62898 signals
|
||||||
|
Max msgqueue size 819200 819200 bytes
|
||||||
|
Max nice priority 0 0
|
||||||
|
Max realtime priority 0 0
|
||||||
|
Max realtime timeout unlimited unlimited us
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/mountstats
|
||||||
|
Lines: 19
|
||||||
|
device rootfs mounted on / with fstype rootfs
|
||||||
|
device sysfs mounted on /sys with fstype sysfs
|
||||||
|
device proc mounted on /proc with fstype proc
|
||||||
|
device /dev/sda1 mounted on / with fstype ext4
|
||||||
|
device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
|
||||||
|
opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
|
||||||
|
age: 13968
|
||||||
|
caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
|
||||||
|
nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
|
||||||
|
sec: flavor=1,pseudoflavor=1
|
||||||
|
events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
|
||||||
|
bytes: 1207640230 0 0 0 1210214218 0 295483 0
|
||||||
|
RPC iostats version: 1.0 p/v: 100003/4 (nfs)
|
||||||
|
xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
|
||||||
|
per-op statistics
|
||||||
|
NULL: 0 0 0 0 0 0 0 0
|
||||||
|
READ: 1298 1298 0 207680 1210292152 6 79386 79407
|
||||||
|
WRITE: 0 0 0 0 0 0 0 0
|
||||||
|
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26231/net
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/net/dev
|
||||||
|
Lines: 4
|
||||||
|
Inter-| Receive | Transmit
|
||||||
|
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
|
||||||
|
lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26231/ns
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/ns/mnt
|
||||||
|
SymlinkTo: mnt:[4026531840]
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/ns/net
|
||||||
|
SymlinkTo: net:[4026531993]
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/root
|
||||||
|
SymlinkTo: /
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26231/stat
|
||||||
|
Lines: 1
|
||||||
|
26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26232
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/cmdline
|
||||||
|
Lines: 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/comm
|
||||||
|
Lines: 1
|
||||||
|
ata_sff
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/cwd
|
||||||
|
SymlinkTo: /does/not/exist
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26232/fd
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/fd/0
|
||||||
|
SymlinkTo: ../../symlinktargets/abc
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/fd/1
|
||||||
|
SymlinkTo: ../../symlinktargets/def
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/fd/2
|
||||||
|
SymlinkTo: ../../symlinktargets/ghi
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/fd/3
|
||||||
|
SymlinkTo: ../../symlinktargets/uvw
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/fd/4
|
||||||
|
SymlinkTo: ../../symlinktargets/xyz
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/limits
|
||||||
|
Lines: 17
|
||||||
|
Limit Soft Limit Hard Limit Units
|
||||||
|
Max cpu time unlimited unlimited seconds
|
||||||
|
Max file size unlimited unlimited bytes
|
||||||
|
Max data size unlimited unlimited bytes
|
||||||
|
Max stack size 8388608 unlimited bytes
|
||||||
|
Max core file size 0 unlimited bytes
|
||||||
|
Max resident set unlimited unlimited bytes
|
||||||
|
Max processes 29436 29436 processes
|
||||||
|
Max open files 1024 4096 files
|
||||||
|
Max locked memory 65536 65536 bytes
|
||||||
|
Max address space unlimited unlimited bytes
|
||||||
|
Max file locks unlimited unlimited locks
|
||||||
|
Max pending signals 29436 29436 signals
|
||||||
|
Max msgqueue size 819200 819200 bytes
|
||||||
|
Max nice priority 0 0
|
||||||
|
Max realtime priority 0 0
|
||||||
|
Max realtime timeout unlimited unlimited us
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/root
|
||||||
|
SymlinkTo: /does/not/exist
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26232/stat
|
||||||
|
Lines: 1
|
||||||
|
33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/26233
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/26233/cmdline
|
||||||
|
Lines: 1
|
||||||
|
com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/584
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/584/stat
|
||||||
|
Lines: 2
|
||||||
|
1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
|
||||||
|
#!/bin/cat /proc/self/stat
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/buddyinfo
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/buddyinfo/short
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/buddyinfo/short/buddyinfo
|
||||||
|
Lines: 3
|
||||||
|
Node 0, zone
|
||||||
|
Node 0, zone
|
||||||
|
Node 0, zone
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/buddyinfo/sizemismatch
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/buddyinfo/sizemismatch/buddyinfo
|
||||||
|
Lines: 3
|
||||||
|
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||||
|
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
|
||||||
|
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/buddyinfo/valid
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/buddyinfo/valid/buddyinfo
|
||||||
|
Lines: 3
|
||||||
|
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||||
|
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
|
||||||
|
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/fs
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/fs/xfs
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/fs/xfs/stat
|
||||||
|
Lines: 23
|
||||||
|
extent_alloc 92447 97589 92448 93751
|
||||||
|
abt 0 0 0 0
|
||||||
|
blk_map 1767055 188820 184891 92447 92448 2140766 0
|
||||||
|
bmbt 0 0 0 0
|
||||||
|
dir 185039 92447 92444 136422
|
||||||
|
trans 706 944304 0
|
||||||
|
ig 185045 58807 0 126238 0 33637 22
|
||||||
|
log 2883 113448 9 17360 739
|
||||||
|
push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
|
||||||
|
xstrat 92447 0
|
||||||
|
rw 107739 94045
|
||||||
|
attr 4 0 0 0
|
||||||
|
icluster 8677 7849 135802
|
||||||
|
vnodes 92601 0 0 0 92444 92444 92444 0
|
||||||
|
buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
|
||||||
|
abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
|
||||||
|
abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
|
||||||
|
bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
qm 0 0 0 0 0 0 0 0
|
||||||
|
xpc 399724544 92823103 86219234
|
||||||
|
debug 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/mdstat
|
||||||
|
Lines: 26
|
||||||
|
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
|
||||||
|
md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
|
||||||
|
5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
|
||||||
|
|
||||||
|
md127 : active raid1 sdi2[0] sdj2[1]
|
||||||
|
312319552 blocks [2/2] [UU]
|
||||||
|
|
||||||
|
md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
|
||||||
|
248896 blocks [2/2] [UU]
|
||||||
|
|
||||||
|
md4 : inactive raid1 sda3[0] sdb3[1]
|
||||||
|
4883648 blocks [2/2] [UU]
|
||||||
|
|
||||||
|
md6 : active raid1 sdb2[2] sda2[0]
|
||||||
|
195310144 blocks [2/1] [U_]
|
||||||
|
[=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
|
||||||
|
|
||||||
|
md8 : active raid1 sdb1[1] sda1[0]
|
||||||
|
195310144 blocks [2/2] [UU]
|
||||||
|
[=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
|
||||||
|
|
||||||
|
md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
|
||||||
|
7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
|
||||||
|
bitmap: 0/30 pages [0KB], 65536KB chunk
|
||||||
|
|
||||||
|
unused devices: <none>
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/net
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/net/dev
|
||||||
|
Lines: 6
|
||||||
|
Inter-| Receive | Transmit
|
||||||
|
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
|
||||||
|
vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
|
||||||
|
lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
|
||||||
|
docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
|
||||||
|
eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/net/ip_vs
|
||||||
|
Lines: 21
|
||||||
|
IP Virtual Server version 1.2.1 (size=4096)
|
||||||
|
Prot LocalAddress:Port Scheduler Flags
|
||||||
|
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
|
||||||
|
TCP C0A80016:0CEA wlc
|
||||||
|
-> C0A85216:0CEA Tunnel 100 248 2
|
||||||
|
-> C0A85318:0CEA Tunnel 100 248 2
|
||||||
|
-> C0A85315:0CEA Tunnel 100 248 1
|
||||||
|
TCP C0A80039:0CEA wlc
|
||||||
|
-> C0A85416:0CEA Tunnel 0 0 0
|
||||||
|
-> C0A85215:0CEA Tunnel 100 1499 0
|
||||||
|
-> C0A83215:0CEA Tunnel 100 1498 0
|
||||||
|
TCP C0A80037:0CEA wlc
|
||||||
|
-> C0A8321A:0CEA Tunnel 0 0 0
|
||||||
|
-> C0A83120:0CEA Tunnel 100 0 0
|
||||||
|
TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
|
||||||
|
-> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
|
||||||
|
-> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
|
||||||
|
-> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
|
||||||
|
FWM 10001000 wlc
|
||||||
|
-> C0A8321A:0CEA Route 0 0 1
|
||||||
|
-> C0A83215:0CEA Route 0 0 2
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/net/ip_vs_stats
|
||||||
|
Lines: 6
|
||||||
|
Total Incoming Outgoing Incoming Outgoing
|
||||||
|
Conns Packets Packets Bytes Bytes
|
||||||
|
16AA370 E33656E5 0 51D8C8883AB3 0
|
||||||
|
|
||||||
|
Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
|
||||||
|
4 1FB3C 0 1282A8F 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/net/rpc
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/net/rpc/nfs
|
||||||
|
Lines: 5
|
||||||
|
net 18628 0 18628 6
|
||||||
|
rpc 4329785 0 4338291
|
||||||
|
proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
|
||||||
|
proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
|
||||||
|
proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/net/rpc/nfsd
|
||||||
|
Lines: 11
|
||||||
|
rc 0 6 18622
|
||||||
|
fh 0 0 0 0 0
|
||||||
|
io 157286400 0
|
||||||
|
th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
|
||||||
|
ra 32 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
net 18628 0 18628 6
|
||||||
|
rpc 18628 0 0 0 0
|
||||||
|
proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
|
||||||
|
proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
|
||||||
|
proc4 2 2 10853
|
||||||
|
proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/net/xfrm_stat
|
||||||
|
Lines: 28
|
||||||
|
XfrmInError 1
|
||||||
|
XfrmInBufferError 2
|
||||||
|
XfrmInHdrError 4
|
||||||
|
XfrmInNoStates 3
|
||||||
|
XfrmInStateProtoError 40
|
||||||
|
XfrmInStateModeError 100
|
||||||
|
XfrmInStateSeqError 6000
|
||||||
|
XfrmInStateExpired 4
|
||||||
|
XfrmInStateMismatch 23451
|
||||||
|
XfrmInStateInvalid 55555
|
||||||
|
XfrmInTmplMismatch 51
|
||||||
|
XfrmInNoPols 65432
|
||||||
|
XfrmInPolBlock 100
|
||||||
|
XfrmInPolError 10000
|
||||||
|
XfrmOutError 1000000
|
||||||
|
XfrmOutBundleGenError 43321
|
||||||
|
XfrmOutBundleCheckError 555
|
||||||
|
XfrmOutNoStates 869
|
||||||
|
XfrmOutStateProtoError 4542
|
||||||
|
XfrmOutStateModeError 4
|
||||||
|
XfrmOutStateSeqError 543
|
||||||
|
XfrmOutStateExpired 565
|
||||||
|
XfrmOutPolBlock 43456
|
||||||
|
XfrmOutPolDead 7656
|
||||||
|
XfrmOutPolError 1454
|
||||||
|
XfrmFwdHdrError 6654
|
||||||
|
XfrmOutStateInvalid 28765
|
||||||
|
XfrmAcquireError 24532
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/self
|
||||||
|
SymlinkTo: 26231
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/stat
|
||||||
|
Lines: 16
|
||||||
|
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
|
||||||
|
cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
|
||||||
|
cpu1 47869 23 16474 1110787 591 0 46 0 0 0
|
||||||
|
cpu2 46504 36 15916 1112321 441 0 326 0 0 0
|
||||||
|
cpu3 47054 102 15683 1113230 533 0 60 0 0 0
|
||||||
|
cpu4 28413 25 10776 1140321 217 0 8 0 0 0
|
||||||
|
cpu5 29271 101 11586 1136270 672 0 30 0 0 0
|
||||||
|
cpu6 29152 36 10276 1139721 319 0 29 0 0 0
|
||||||
|
cpu7 29098 268 10164 1139282 555 0 31 0 0 0
|
||||||
|
intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
ctxt 38014093
|
||||||
|
btime 1418183276
|
||||||
|
processes 26442
|
||||||
|
procs_running 2
|
||||||
|
procs_blocked 1
|
||||||
|
softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Directory: fixtures/symlinktargets
|
||||||
|
Mode: 755
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/symlinktargets/README
|
||||||
|
Lines: 2
|
||||||
|
This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
|
||||||
|
They are otherwise ignored by the tests
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/symlinktargets/abc
|
||||||
|
Lines: 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/symlinktargets/def
|
||||||
|
Lines: 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/symlinktargets/ghi
|
||||||
|
Lines: 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/symlinktargets/uvw
|
||||||
|
Lines: 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/symlinktargets/xyz
|
||||||
|
Lines: 0
|
||||||
|
Mode: 644
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||||
|
Path: fixtures/.unpacked
|
||||||
|
Lines: 0
|
||||||
|
Mode: 664
|
||||||
|
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
36 vendor/github.com/prometheus/procfs/fs.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -5,6 +18,7 @@ import (
 	"os"
 	"path"
 
+	"github.com/prometheus/procfs/nfs"
 	"github.com/prometheus/procfs/xfs"
 )
 
@@ -44,3 +58,25 @@ func (fs FS) XFSStats() (*xfs.Stats, error) {
 
 	return xfs.ParseStats(f)
 }
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfsd"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseServerRPCStats(f)
+}
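The two accessors added above just open the corresponding files under the proc mount and delegate to the new nfs subpackage parsers. A minimal consumer sketch, assuming a standard /proc mount; the printed field names come from nfs/nfs.go further down, everything else is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// Parses /proc/net/rpc/nfs via the new nfs.ParseClientRPCStats.
	client, err := fs.NFSClientRPCStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client RPC calls:", client.ClientRPC.RPCCount)

	// Parses /proc/net/rpc/nfsd via the new nfs.ParseServerRPCStats.
	server, err := fs.NFSdServerRPCStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("nfsd reply cache hits:", server.ReplyCache.Hits)
}
```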
1 vendor/github.com/prometheus/procfs/go.mod generated vendored Normal file
@@ -0,0 +1 @@
+module github.com/prometheus/procfs
59 vendor/github.com/prometheus/procfs/internal/util/parse.go generated vendored Normal file
@@ -0,0 +1,59 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"io/ioutil"
+	"strconv"
+	"strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
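These helpers centralize the strconv loops that the individual procfs parsers would otherwise repeat. Since internal/util cannot be imported from outside the procfs module, the sketch below reproduces the ParseUint64s loop inline; the input is the reply-cache ("rc") line from the nfsd fixture above.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUint64s mirrors util.ParseUint64s from the vendored file above.
func parseUint64s(ss []string) ([]uint64, error) {
	us := make([]uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		us = append(us, u)
	}
	return us, nil
}

func main() {
	fields := strings.Fields("rc 0 6 18622")[1:] // drop the "rc" label
	values, err := parseUint64s(fields)
	if err != nil {
		panic(err)
	}
	fmt.Println(values) // [0 6 18622]
}
```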
45 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go generated vendored Normal file
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package util
+
+import (
+	"bytes"
+	"os"
+	"syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
+	// Go's ioutil.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	b := make([]byte, 128)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}
23 vendor/github.com/prometheus/procfs/ipvs.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -31,16 +44,16 @@ type IPVSStats struct {
 type IPVSBackendStatus struct {
 	// The local (virtual) IP address.
 	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
 	// The local (virtual) port.
 	LocalPort uint16
+	// The remote (real) port.
+	RemotePort uint16
 	// The local firewall mark
 	LocalMark string
 	// The transport protocol (TCP, UDP).
 	Proto string
-	// The remote (real) IP address.
-	RemoteAddress net.IP
-	// The remote (real) port.
-	RemotePort uint16
 	// The current number of active connections for this virtual/real address pair.
 	ActiveConn uint64
 	// The current number of inactive connections for this virtual/real address pair.
@@ -151,7 +164,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
 	)
 
 	for scanner.Scan() {
-		fields := strings.Fields(string(scanner.Text()))
+		fields := strings.Fields(scanner.Text())
 		if len(fields) == 0 {
 			continue
 		}
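parseIPVSBackendStatus now hands scanner.Text() straight to strings.Fields; the backend tokens it splits, as in the fixtures/net/ip_vs data above, encode the IPv4 address as eight hex digits and the port as four. A standalone sketch of decoding one such token in the IPv4 form (the bracketed IPv6 entries in the fixture would need separate handling); this illustrates the format and is not the library's own code:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
	"strings"
)

// decodeBackend splits a "C0A80016:0CEA"-style token into IP and port.
func decodeBackend(field string) (net.IP, uint16, error) {
	parts := strings.Split(field, ":")
	if len(parts) != 2 {
		return nil, 0, fmt.Errorf("unexpected field %q", field)
	}
	ip, err := hex.DecodeString(parts[0]) // 8 hex digits = 4 IPv4 bytes
	if err != nil {
		return nil, 0, err
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	if err != nil {
		return nil, 0, err
	}
	return net.IP(ip), uint16(port), nil
}

func main() {
	ip, port, err := decodeBackend("C0A80016:0CEA")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d\n", ip, port) // 192.168.0.22:3306
}
```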
13 vendor/github.com/prometheus/procfs/mdstat.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
64 vendor/github.com/prometheus/procfs/mountstats.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 // While implementing parsing of /proc/[pid]/mountstats, this blog was used
@@ -26,8 +39,11 @@ const (
 	statVersion10 = "1.0"
 	statVersion11 = "1.1"
 
-	fieldTransport10Len = 10
-	fieldTransport11Len = 13
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
 )
 
 // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -173,6 +189,8 @@ type NFSOperationStats struct {
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
 // responses.
 type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
 	// The local port used for the NFS mount.
 	Port uint64
 	// Number of times the client has had to establish a connection from scratch
@@ -347,7 +365,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 			return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
 		}
 
-		tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+		tstats, err := parseNFSTransportStats(ss[1:], statVersion)
 		if err != nil {
 			return nil, err
 		}
@@ -509,13 +527,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 // parseNFSTransportStats parses a NFSTransportStats line using an input set of
 // integer fields matched to a specific stats version.
 func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
 	switch statVersion {
 	case statVersion10:
-		if len(ss) != fieldTransport10Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
 		}
 	case statVersion11:
-		if len(ss) != fieldTransport11Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
 		}
 	default:
@@ -523,12 +561,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
-	// in a v1.0 response.
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
 	//
 	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
 	// only v1.0 stats are present.
 	// See: https://github.com/prometheus/node_exporter/issues/571.
-	ns := make([]uint64, fieldTransport11Len)
+	ns := make([]uint64, fieldTransport11TCPLen)
 	for i, s := range ss {
 		n, err := strconv.ParseUint(s, 10, 64)
 		if err != nil {
@@ -538,7 +577,18 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		ns[i] = n
 	}
 
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
 	return &NFSTransportStats{
+		Protocol: protocol,
 		Port:     ns[0],
 		Bind:     ns[1],
 		Connect:  ns[2],
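The subtle part of the UDP branch above is the splice that re-inserts the three transport fields a udp xprt line does not report (connection count, connect idle time, idle time) as zeros, so that the later ns[...] indices match the TCP layout. A self-contained demonstration of that exact expression with made-up values:

```go
package main

import "fmt"

func main() {
	// Hypothetical UDP xprt values: port, bind, then the remaining fields
	// (UDP has no connect count, connect idle time, or idle time).
	ns := []uint64{769, 0, 11, 12, 13, 14, 15}

	// Same expression as in parseNFSTransportStats for protocol == "udp":
	// keep the first two values, insert three zeros, then the rest.
	ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)

	fmt.Println(ns) // [769 0 0 0 0 11 12 13 14 15]
}
```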
216 vendor/github.com/prometheus/procfs/net_dev.go generated vendored Normal file
@@ -0,0 +1,216 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
|
||||||
|
type NetDevLine struct {
|
||||||
|
Name string `json:"name"` // The name of the interface.
|
||||||
|
RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
|
||||||
|
RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
|
||||||
|
RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
|
||||||
|
RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
|
||||||
|
RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
|
||||||
|
RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
|
||||||
|
RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
|
||||||
|
RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
|
||||||
|
TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
|
||||||
|
TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
|
||||||
|
TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
|
||||||
|
TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
|
||||||
|
TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
|
||||||
|
TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
|
||||||
|
TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
|
||||||
|
TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
|
||||||
|
// are interface names.
|
||||||
|
type NetDev map[string]NetDevLine
|
||||||
|
|
||||||
|
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
|
||||||
|
func NewNetDev() (NetDev, error) {
|
||||||
|
fs, err := NewFS(DefaultMountPoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.NewNetDev()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
|
||||||
|
func (fs FS) NewNetDev() (NetDev, error) {
|
||||||
|
return newNetDev(fs.Path("net/dev"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
|
||||||
|
func (p Proc) NewNetDev() (NetDev, error) {
|
||||||
|
return newNetDev(p.path("net/dev"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// newNetDev creates a new NetDev from the contents of the given file.
|
||||||
|
func newNetDev(file string) (NetDev, error) {
|
||||||
|
f, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
return NetDev{}, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
nd := NetDev{}
|
||||||
|
s := bufio.NewScanner(f)
|
||||||
|
for n := 0; s.Scan(); n++ {
|
||||||
|
// Skip the 2 header lines.
|
||||||
|
if n < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
line, err := nd.parseLine(s.Text())
|
||||||
|
if err != nil {
|
||||||
|
return nd, err
|
||||||
|
}
|
||||||
|
|
||||||
|
nd[line.Name] = *line
|
||||||
|
}
|
||||||
|
|
||||||
|
return nd, s.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseLine parses a single line from the /proc/net/dev file. Header lines
|
||||||
|
// must be filtered prior to calling this method.
|
||||||
|
func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
|
||||||
|
parts := strings.SplitN(rawLine, ":", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return nil, errors.New("invalid net/dev line, missing colon")
|
||||||
|
}
|
||||||
|
fields := strings.Fields(strings.TrimSpace(parts[1]))
|
||||||
|
|
||||||
|
var err error
|
||||||
|
line := &NetDevLine{}
|
||||||
|
|
||||||
|
// Interface Name
|
||||||
|
line.Name = strings.TrimSpace(parts[0])
|
||||||
|
if line.Name == "" {
|
||||||
|
return nil, errors.New("invalid net/dev line, empty interface name")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RX
|
||||||
|
line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TX
|
||||||
|
line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return line, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total aggregates the values across interfaces and returns a new NetDevLine.
|
||||||
|
// The Name field will be a sorted comma separated list of interface names.
|
||||||
|
func (nd NetDev) Total() NetDevLine {
|
||||||
|
total := NetDevLine{}
|
||||||
|
|
||||||
|
names := make([]string, 0, len(nd))
|
||||||
|
for _, ifc := range nd {
|
||||||
|
names = append(names, ifc.Name)
|
||||||
|
total.RxBytes += ifc.RxBytes
|
||||||
|
total.RxPackets += ifc.RxPackets
|
||||||
|
total.RxPackets += ifc.RxPackets
|
||||||
|
total.RxErrors += ifc.RxErrors
|
||||||
|
total.RxDropped += ifc.RxDropped
|
||||||
|
total.RxFIFO += ifc.RxFIFO
|
||||||
|
total.RxFrame += ifc.RxFrame
|
||||||
|
total.RxCompressed += ifc.RxCompressed
|
||||||
|
total.RxMulticast += ifc.RxMulticast
|
||||||
|
total.TxBytes += ifc.TxBytes
|
||||||
|
total.TxPackets += ifc.TxPackets
|
||||||
|
total.TxErrors += ifc.TxErrors
|
||||||
|
total.TxDropped += ifc.TxDropped
|
||||||
|
total.TxFIFO += ifc.TxFIFO
|
||||||
|
total.TxCollisions += ifc.TxCollisions
|
||||||
|
total.TxCarrier += ifc.TxCarrier
|
||||||
|
total.TxCompressed += ifc.TxCompressed
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
total.Name = strings.Join(names, ", ")
|
||||||
|
|
||||||
|
return total
|
||||||
|
}
|
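net_dev.go above models each /proc/net/dev row as a NetDevLine and the whole table as a NetDev map keyed by interface name, with Total() summing the counters across interfaces. A minimal consumer sketch; the printed output is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nd, err := procfs.NewNetDev() // reads /proc/net/dev
	if err != nil {
		log.Fatal(err)
	}

	for name, line := range nd {
		fmt.Printf("%s: rx_bytes=%d tx_bytes=%d\n", name, line.RxBytes, line.TxBytes)
	}

	total := nd.Total() // Name is a sorted, comma-separated interface list
	fmt.Printf("total (%s): rx_bytes=%d tx_bytes=%d\n", total.Name, total.RxBytes, total.TxBytes)
}
```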
263 vendor/github.com/prometheus/procfs/nfs/nfs.go generated vendored Normal file
@@ -0,0 +1,263 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package nfs implements parsing of /proc/net/rpc/nfsd.
|
||||||
|
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
|
||||||
|
package nfs
|
||||||
|
|
||||||
|
// ReplyCache models the "rc" line.
|
||||||
|
type ReplyCache struct {
|
||||||
|
Hits uint64
|
||||||
|
Misses uint64
|
||||||
|
NoCache uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileHandles models the "fh" line.
|
||||||
|
type FileHandles struct {
|
||||||
|
Stale uint64
|
||||||
|
TotalLookups uint64
|
||||||
|
AnonLookups uint64
|
||||||
|
DirNoCache uint64
|
||||||
|
NoDirNoCache uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// InputOutput models the "io" line.
|
||||||
|
type InputOutput struct {
|
||||||
|
Read uint64
|
||||||
|
Write uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Threads models the "th" line.
|
||||||
|
type Threads struct {
|
||||||
|
Threads uint64
|
||||||
|
FullCnt uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAheadCache models the "ra" line.
|
||||||
|
type ReadAheadCache struct {
|
||||||
|
CacheSize uint64
|
||||||
|
CacheHistogram []uint64
|
||||||
|
NotFound uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Network models the "net" line.
|
||||||
|
type Network struct {
|
||||||
|
NetCount uint64
|
||||||
|
UDPCount uint64
|
||||||
|
TCPCount uint64
|
||||||
|
TCPConnect uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRPC models the nfs "rpc" line.
|
||||||
|
type ClientRPC struct {
|
||||||
|
RPCCount uint64
|
||||||
|
Retransmissions uint64
|
||||||
|
AuthRefreshes uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerRPC models the nfsd "rpc" line.
|
||||||
|
type ServerRPC struct {
|
||||||
|
RPCCount uint64
|
||||||
|
BadCnt uint64
|
||||||
|
BadFmt uint64
|
||||||
|
BadAuth uint64
|
||||||
|
BadcInt uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// V2Stats models the "proc2" line.
|
||||||
|
type V2Stats struct {
|
||||||
|
Null uint64
|
||||||
|
GetAttr uint64
|
||||||
|
SetAttr uint64
|
||||||
|
Root uint64
|
||||||
|
Lookup uint64
|
||||||
|
ReadLink uint64
|
||||||
|
Read uint64
|
||||||
|
WrCache uint64
|
||||||
|
Write uint64
|
||||||
|
Create uint64
|
||||||
|
Remove uint64
|
||||||
|
Rename uint64
|
||||||
|
Link uint64
|
||||||
|
SymLink uint64
|
||||||
|
MkDir uint64
|
||||||
|
RmDir uint64
|
||||||
|
ReadDir uint64
|
||||||
|
FsStat uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// V3Stats models the "proc3" line.
|
||||||
|
type V3Stats struct {
|
||||||
|
Null uint64
|
||||||
|
GetAttr uint64
|
||||||
|
SetAttr uint64
|
||||||
|
Lookup uint64
|
||||||
|
Access uint64
|
||||||
|
ReadLink uint64
|
||||||
|
Read uint64
|
||||||
|
Write uint64
|
||||||
|
Create uint64
|
||||||
|
MkDir uint64
|
||||||
|
SymLink uint64
|
||||||
|
MkNod uint64
|
||||||
|
Remove uint64
|
||||||
|
RmDir uint64
|
||||||
|
Rename uint64
|
||||||
|
Link uint64
|
||||||
|
ReadDir uint64
|
||||||
|
ReadDirPlus uint64
|
||||||
|
FsStat uint64
|
||||||
|
FsInfo uint64
|
||||||
|
PathConf uint64
|
||||||
|
Commit uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientV4Stats models the nfs "proc4" line.
|
||||||
|
type ClientV4Stats struct {
|
||||||
|
Null uint64
|
||||||
|
Read uint64
|
||||||
|
Write uint64
|
||||||
|
Commit uint64
|
||||||
|
Open uint64
|
||||||
|
OpenConfirm uint64
|
||||||
|
OpenNoattr uint64
|
||||||
|
OpenDowngrade uint64
|
||||||
|
Close uint64
|
||||||
|
Setattr uint64
|
||||||
|
FsInfo uint64
|
||||||
|
Renew uint64
|
||||||
|
SetClientID uint64
|
||||||
|
SetClientIDConfirm uint64
|
||||||
|
Lock uint64
|
||||||
|
Lockt uint64
|
||||||
|
Locku uint64
|
||||||
|
Access uint64
|
||||||
|
Getattr uint64
|
||||||
|
Lookup uint64
|
||||||
|
LookupRoot uint64
|
||||||
|
Remove uint64
|
||||||
|
Rename uint64
|
||||||
|
Link uint64
|
||||||
|
Symlink uint64
|
||||||
|
Create uint64
|
||||||
|
Pathconf uint64
|
||||||
|
StatFs uint64
|
||||||
|
ReadLink uint64
|
||||||
|
ReadDir uint64
|
||||||
|
ServerCaps uint64
|
||||||
|
DelegReturn uint64
|
||||||
|
GetACL uint64
|
||||||
|
SetACL uint64
|
||||||
|
FsLocations uint64
|
||||||
|
ReleaseLockowner uint64
|
||||||
|
Secinfo uint64
|
||||||
|
FsidPresent uint64
|
||||||
|
ExchangeID uint64
|
||||||
|
CreateSession uint64
|
||||||
|
DestroySession uint64
|
||||||
|
Sequence uint64
|
||||||
|
GetLeaseTime uint64
|
||||||
|
ReclaimComplete uint64
|
||||||
|
LayoutGet uint64
|
||||||
|
GetDeviceInfo uint64
|
||||||
|
LayoutCommit uint64
|
||||||
|
LayoutReturn uint64
|
||||||
|
SecinfoNoName uint64
|
||||||
|
TestStateID uint64
|
||||||
|
FreeStateID uint64
|
||||||
|
GetDeviceList uint64
|
||||||
|
BindConnToSession uint64
|
||||||
|
DestroyClientID uint64
|
||||||
|
Seek uint64
|
||||||
|
Allocate uint64
|
||||||
|
DeAllocate uint64
|
||||||
|
LayoutStats uint64
|
||||||
|
Clone uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerV4Stats models the nfsd "proc4" line.
|
||||||
|
type ServerV4Stats struct {
|
||||||
|
Null uint64
|
||||||
|
Compound uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// V4Ops models the "proc4ops" line: NFSv4 operations
|
||||||
|
// Variable list, see:
|
||||||
|
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
|
||||||
|
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
|
||||||
|
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
|
||||||
|
type V4Ops struct {
|
||||||
|
//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
|
||||||
|
Op0Unused uint64
|
||||||
|
Op1Unused uint64
|
||||||
|
Op2Future uint64
|
||||||
|
Access uint64
|
||||||
|
Close uint64
|
||||||
|
Commit uint64
|
||||||
|
Create uint64
|
||||||
|
DelegPurge uint64
|
||||||
|
DelegReturn uint64
|
||||||
|
GetAttr uint64
|
||||||
|
GetFH uint64
|
||||||
|
Link uint64
|
||||||
|
Lock uint64
|
||||||
|
Lockt uint64
|
||||||
|
Locku uint64
|
||||||
|
Lookup uint64
|
||||||
|
LookupRoot uint64
|
||||||
|
Nverify uint64
|
||||||
|
Open uint64
|
||||||
|
OpenAttr uint64
|
||||||
|
OpenConfirm uint64
|
||||||
|
OpenDgrd uint64
|
||||||
|
PutFH uint64
|
||||||
|
PutPubFH uint64
|
||||||
|
PutRootFH uint64
|
||||||
|
Read uint64
|
||||||
|
ReadDir uint64
|
||||||
|
ReadLink uint64
|
||||||
|
Remove uint64
|
||||||
|
Rename uint64
|
||||||
|
Renew uint64
|
||||||
|
RestoreFH uint64
|
||||||
|
SaveFH uint64
|
||||||
|
SecInfo uint64
|
||||||
|
SetAttr uint64
|
||||||
|
Verify uint64
|
||||||
|
Write uint64
|
||||||
|
RelLockOwner uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
|
||||||
|
type ClientRPCStats struct {
|
||||||
|
Network Network
|
||||||
|
ClientRPC ClientRPC
|
||||||
|
V2Stats V2Stats
|
||||||
|
V3Stats V3Stats
|
||||||
|
ClientV4Stats ClientV4Stats
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
|
||||||
|
type ServerRPCStats struct {
|
||||||
|
ReplyCache ReplyCache
|
||||||
|
FileHandles FileHandles
|
||||||
|
InputOutput InputOutput
|
||||||
|
Threads Threads
|
||||||
|
ReadAheadCache ReadAheadCache
|
||||||
|
Network Network
|
||||||
|
ServerRPC ServerRPC
|
||||||
|
V2Stats V2Stats
|
||||||
|
V3Stats V3Stats
|
||||||
|
ServerV4Stats ServerV4Stats
|
||||||
|
V4Ops V4Ops
|
||||||
|
}
|
317 vendor/github.com/prometheus/procfs/nfs/parse.go generated vendored Normal file
@@ -0,0 +1,317 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
	"fmt"
)

func parseReplyCache(v []uint64) (ReplyCache, error) {
	if len(v) != 3 {
		return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
	}

	return ReplyCache{
		Hits: v[0],
		Misses: v[1],
		NoCache: v[2],
	}, nil
}

func parseFileHandles(v []uint64) (FileHandles, error) {
	if len(v) != 5 {
		return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
	}

	return FileHandles{
		Stale: v[0],
		TotalLookups: v[1],
		AnonLookups: v[2],
		DirNoCache: v[3],
		NoDirNoCache: v[4],
	}, nil
}

func parseInputOutput(v []uint64) (InputOutput, error) {
	if len(v) != 2 {
		return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
	}

	return InputOutput{
		Read: v[0],
		Write: v[1],
	}, nil
}

func parseThreads(v []uint64) (Threads, error) {
	if len(v) != 2 {
		return Threads{}, fmt.Errorf("invalid Threads line %q", v)
	}

	return Threads{
		Threads: v[0],
		FullCnt: v[1],
	}, nil
}

func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
	if len(v) != 12 {
		return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
	}

	return ReadAheadCache{
		CacheSize: v[0],
		CacheHistogram: v[1:11],
		NotFound: v[11],
	}, nil
}

func parseNetwork(v []uint64) (Network, error) {
	if len(v) != 4 {
		return Network{}, fmt.Errorf("invalid Network line %q", v)
	}

	return Network{
		NetCount: v[0],
		UDPCount: v[1],
		TCPCount: v[2],
		TCPConnect: v[3],
	}, nil
}

func parseServerRPC(v []uint64) (ServerRPC, error) {
	if len(v) != 5 {
		return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
	}

	return ServerRPC{
		RPCCount: v[0],
		BadCnt: v[1],
		BadFmt: v[2],
		BadAuth: v[3],
		BadcInt: v[4],
	}, nil
}

func parseClientRPC(v []uint64) (ClientRPC, error) {
	if len(v) != 3 {
		return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
	}

	return ClientRPC{
		RPCCount: v[0],
		Retransmissions: v[1],
		AuthRefreshes: v[2],
	}, nil
}

func parseV2Stats(v []uint64) (V2Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values || values != 18 {
		return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
	}

	return V2Stats{
		Null: v[1],
		GetAttr: v[2],
		SetAttr: v[3],
		Root: v[4],
		Lookup: v[5],
		ReadLink: v[6],
		Read: v[7],
		WrCache: v[8],
		Write: v[9],
		Create: v[10],
		Remove: v[11],
		Rename: v[12],
		Link: v[13],
		SymLink: v[14],
		MkDir: v[15],
		RmDir: v[16],
		ReadDir: v[17],
		FsStat: v[18],
	}, nil
}

func parseV3Stats(v []uint64) (V3Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values || values != 22 {
		return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
	}

	return V3Stats{
		Null: v[1],
		GetAttr: v[2],
		SetAttr: v[3],
		Lookup: v[4],
		Access: v[5],
		ReadLink: v[6],
		Read: v[7],
		Write: v[8],
		Create: v[9],
		MkDir: v[10],
		SymLink: v[11],
		MkNod: v[12],
		Remove: v[13],
		RmDir: v[14],
		Rename: v[15],
		Link: v[16],
		ReadDir: v[17],
		ReadDirPlus: v[18],
		FsStat: v[19],
		FsInfo: v[20],
		PathConf: v[21],
		Commit: v[22],
	}, nil
}

func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values {
		return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
	}

	// This function currently supports mapping 59 NFS v4 client stats. Older
	// kernels may emit fewer stats, so we must detect this and pad out the
	// values to match the expected slice size.
	if values < 59 {
		newValues := make([]uint64, 60)
		copy(newValues, v)
		v = newValues
	}

	return ClientV4Stats{
		Null: v[1],
		Read: v[2],
		Write: v[3],
		Commit: v[4],
		Open: v[5],
		OpenConfirm: v[6],
		OpenNoattr: v[7],
		OpenDowngrade: v[8],
		Close: v[9],
		Setattr: v[10],
		FsInfo: v[11],
		Renew: v[12],
		SetClientID: v[13],
		SetClientIDConfirm: v[14],
		Lock: v[15],
		Lockt: v[16],
		Locku: v[17],
		Access: v[18],
		Getattr: v[19],
		Lookup: v[20],
		LookupRoot: v[21],
		Remove: v[22],
		Rename: v[23],
		Link: v[24],
		Symlink: v[25],
		Create: v[26],
		Pathconf: v[27],
		StatFs: v[28],
		ReadLink: v[29],
		ReadDir: v[30],
		ServerCaps: v[31],
		DelegReturn: v[32],
		GetACL: v[33],
		SetACL: v[34],
		FsLocations: v[35],
		ReleaseLockowner: v[36],
		Secinfo: v[37],
		FsidPresent: v[38],
		ExchangeID: v[39],
		CreateSession: v[40],
		DestroySession: v[41],
		Sequence: v[42],
		GetLeaseTime: v[43],
		ReclaimComplete: v[44],
		LayoutGet: v[45],
		GetDeviceInfo: v[46],
		LayoutCommit: v[47],
		LayoutReturn: v[48],
		SecinfoNoName: v[49],
		TestStateID: v[50],
		FreeStateID: v[51],
		GetDeviceList: v[52],
		BindConnToSession: v[53],
		DestroyClientID: v[54],
		Seek: v[55],
		Allocate: v[56],
		DeAllocate: v[57],
		LayoutStats: v[58],
		Clone: v[59],
	}, nil
}

func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values || values != 2 {
		return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
	}

	return ServerV4Stats{
		Null: v[1],
		Compound: v[2],
	}, nil
}

func parseV4Ops(v []uint64) (V4Ops, error) {
	values := int(v[0])
	if len(v[1:]) != values || values < 39 {
		return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
	}

	stats := V4Ops{
		Op0Unused: v[1],
		Op1Unused: v[2],
		Op2Future: v[3],
		Access: v[4],
		Close: v[5],
		Commit: v[6],
		Create: v[7],
		DelegPurge: v[8],
		DelegReturn: v[9],
		GetAttr: v[10],
		GetFH: v[11],
		Link: v[12],
		Lock: v[13],
		Lockt: v[14],
		Locku: v[15],
		Lookup: v[16],
		LookupRoot: v[17],
		Nverify: v[18],
		Open: v[19],
		OpenAttr: v[20],
		OpenConfirm: v[21],
		OpenDgrd: v[22],
		PutFH: v[23],
		PutPubFH: v[24],
		PutRootFH: v[25],
		Read: v[26],
		ReadDir: v[27],
		ReadLink: v[28],
		Remove: v[29],
		Rename: v[30],
		Renew: v[31],
		RestoreFH: v[32],
		SaveFH: v[33],
		SecInfo: v[34],
		SetAttr: v[35],
		Verify: v[36],
		Write: v[37],
		RelLockOwner: v[38],
	}

	return stats, nil
}
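The per-version parsers above share one convention worth calling out: for the proc2, proc3, proc4, and proc4ops lines, the first number is a count of the values that follow, so each parser checks len(v[1:]) against v[0] before indexing. A minimal standalone sketch of that validation; the helper name and sample values here are illustrative, not part of the vendored package:

	package main

	import "fmt"

	// checkCountPrefix mirrors the length check used by parseV2Stats and friends:
	// v[0] holds the number of counters that follow it on the line.
	func checkCountPrefix(v []uint64, want int) error {
		values := int(v[0])
		if len(v[1:]) != values || values != want {
			return fmt.Errorf("invalid stats line %q", v)
		}
		return nil
	}

	func main() {
		// A well-formed "proc2"-style slice: 18 declared, 18 present.
		ok := append([]uint64{18}, make([]uint64, 18)...)
		fmt.Println(checkCountPrefix(ok, 18)) // <nil>

		// Count prefix disagrees with the payload length -> rejected.
		bad := []uint64{18, 1, 2, 3}
		fmt.Println(checkCountPrefix(bad, 18)) // error
	}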
67 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go generated vendored Normal file
@@ -0,0 +1,67 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
	"bufio"
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
	stats := &ClientRPCStats{}

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		parts := strings.Fields(scanner.Text())
		// require at least <key> <value>
		if len(parts) < 2 {
			return nil, fmt.Errorf("invalid NFS metric line %q", line)
		}

		values, err := util.ParseUint64s(parts[1:])
		if err != nil {
			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
		}

		switch metricLine := parts[0]; metricLine {
		case "net":
			stats.Network, err = parseNetwork(values)
		case "rpc":
			stats.ClientRPC, err = parseClientRPC(values)
		case "proc2":
			stats.V2Stats, err = parseV2Stats(values)
		case "proc3":
			stats.V3Stats, err = parseV3Stats(values)
		case "proc4":
			stats.ClientV4Stats, err = parseClientV4Stats(values)
		default:
			return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
		}
		if err != nil {
			return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error scanning NFS file: %s", err)
	}

	return stats, nil
}
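For orientation, a small usage sketch of the exported entry point above. It assumes the vendored import path github.com/prometheus/procfs/nfs and a Linux host whose NFS client exposes /proc/net/rpc/nfs; neither appears as an example in this change.

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/prometheus/procfs/nfs"
	)

	func main() {
		// /proc/net/rpc/nfs only exists when the NFS client code is active.
		f, err := os.Open("/proc/net/rpc/nfs")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		stats, err := nfs.ParseClientRPCStats(f)
		if err != nil {
			log.Fatal(err)
		}

		// A couple of counters from the structs defined earlier in this file set.
		fmt.Println("rpc calls:", stats.ClientRPC.RPCCount)
		fmt.Println("v3 reads:", stats.V3Stats.Read)
	}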
89 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go generated vendored Normal file
@@ -0,0 +1,89 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
	"bufio"
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
	stats := &ServerRPCStats{}

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		parts := strings.Fields(scanner.Text())
		// require at least <key> <value>
		if len(parts) < 2 {
			return nil, fmt.Errorf("invalid NFSd metric line %q", line)
		}
		label := parts[0]

		var values []uint64
		var err error
		if label == "th" {
			if len(parts) < 3 {
				return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
			}
			values, err = util.ParseUint64s(parts[1:3])
		} else {
			values, err = util.ParseUint64s(parts[1:])
		}
		if err != nil {
			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
		}

		switch metricLine := parts[0]; metricLine {
		case "rc":
			stats.ReplyCache, err = parseReplyCache(values)
		case "fh":
			stats.FileHandles, err = parseFileHandles(values)
		case "io":
			stats.InputOutput, err = parseInputOutput(values)
		case "th":
			stats.Threads, err = parseThreads(values)
		case "ra":
			stats.ReadAheadCache, err = parseReadAheadCache(values)
		case "net":
			stats.Network, err = parseNetwork(values)
		case "rpc":
			stats.ServerRPC, err = parseServerRPC(values)
		case "proc2":
			stats.V2Stats, err = parseV2Stats(values)
		case "proc3":
			stats.V3Stats, err = parseV3Stats(values)
		case "proc4":
			stats.ServerV4Stats, err = parseServerV4Stats(values)
		case "proc4ops":
			stats.V4Ops, err = parseV4Ops(values)
		default:
			return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
		}
		if err != nil {
			return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error scanning NFSd file: %s", err)
	}

	return stats, nil
}
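The server-side entry point can be exercised the same way. A sketch, again assuming the vendored import path and that /proc/net/rpc/nfsd is present (i.e. the host runs an NFS server); the printed fields are just examples:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/prometheus/procfs/nfs"
	)

	func main() {
		f, err := os.Open("/proc/net/rpc/nfsd")
		if err != nil {
			log.Fatal(err) // no nfsd stats on this host
		}
		defer f.Close()

		stats, err := nfs.ParseServerRPCStats(f)
		if err != nil {
			log.Fatal(err)
		}

		rc := stats.ReplyCache
		fmt.Printf("reply cache: hits=%d misses=%d nocache=%d\n", rc.Hits, rc.Misses, rc.NoCache)
		fmt.Println("nfsd threads:", stats.Threads.Threads)
	}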
36 vendor/github.com/prometheus/procfs/proc.go generated vendored
@@ -1,6 +1,20 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
+	"bytes"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -113,7 +127,7 @@ func (p Proc) CmdLine() ([]string, error) {
 		return []string{}, nil
 	}
 
-	return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+	return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
 }
 
 // Comm returns the command name of a process.
@@ -142,6 +156,26 @@ func (p Proc) Executable() (string, error) {
 	return exe, err
 }
 
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+	wd, err := os.Readlink(p.path("cwd"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot)
+func (p Proc) RootDir() (string, error) {
+	rdir, err := os.Readlink(p.path("root"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return rdir, err
+}
+
 // FileDescriptors returns the currently open file descriptors of a process.
 func (p Proc) FileDescriptors() ([]uintptr, error) {
 	names, err := p.fileDescriptors()
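The two accessors added here, Cwd and RootDir, resolve the cwd and root symlinks under /proc/<pid>. A usage sketch; it assumes procfs.Self(), which the procfs package provides elsewhere and which is not part of this diff:

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		p, err := procfs.Self() // Proc for the current process (assumed helper)
		if err != nil {
			log.Fatal(err)
		}

		cwd, err := p.Cwd()
		if err != nil {
			log.Fatal(err)
		}
		root, err := p.RootDir()
		if err != nil {
			log.Fatal(err)
		}

		// Both return "" (and no error) when the /proc entry has already vanished.
		fmt.Println("cwd:", cwd)
		fmt.Println("root:", root)
	}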
18 vendor/github.com/prometheus/procfs/proc_io.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -47,9 +60,6 @@ func (p Proc) NewIO() (ProcIO, error) {
 
 	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
 		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
-	if err != nil {
-		return pio, err
-	}
 
-	return pio, nil
+	return pio, err
 }
51 vendor/github.com/prometheus/procfs/proc_limits.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -13,46 +26,46 @@ import (
 // http://man7.org/linux/man-pages/man2/getrlimit.2.html.
 type ProcLimits struct {
 	// CPU time limit in seconds.
-	CPUTime int
+	CPUTime int64
 	// Maximum size of files that the process may create.
-	FileSize int
+	FileSize int64
 	// Maximum size of the process's data segment (initialized data,
 	// uninitialized data, and heap).
-	DataSize int
+	DataSize int64
 	// Maximum size of the process stack in bytes.
-	StackSize int
+	StackSize int64
 	// Maximum size of a core file.
-	CoreFileSize int
+	CoreFileSize int64
 	// Limit of the process's resident set in pages.
-	ResidentSet int
+	ResidentSet int64
 	// Maximum number of processes that can be created for the real user ID of
 	// the calling process.
-	Processes int
+	Processes int64
 	// Value one greater than the maximum file descriptor number that can be
 	// opened by this process.
-	OpenFiles int
+	OpenFiles int64
 	// Maximum number of bytes of memory that may be locked into RAM.
-	LockedMemory int
+	LockedMemory int64
 	// Maximum size of the process's virtual memory address space in bytes.
-	AddressSpace int
+	AddressSpace int64
 	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
 	// this process may establish.
-	FileLocks int
+	FileLocks int64
 	// Limit of signals that may be queued for the real user ID of the calling
 	// process.
-	PendingSignals int
+	PendingSignals int64
 	// Limit on the number of bytes that can be allocated for POSIX message
 	// queues for the real user ID of the calling process.
-	MsqqueueSize int
+	MsqqueueSize int64
 	// Limit of the nice priority set using setpriority(2) or nice(2).
-	NicePriority int
+	NicePriority int64
 	// Limit of the real-time priority set using sched_setscheduler(2) or
 	// sched_setparam(2).
-	RealtimePriority int
+	RealtimePriority int64
 	// Limit (in microseconds) on the amount of CPU time that a process
 	// scheduled under a real-time scheduling policy may consume without making
 	// a blocking system call.
-	RealtimeTimeout int
+	RealtimeTimeout int64
 }
 
 const (
@@ -125,13 +138,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
 	return l, s.Err()
 }
 
-func parseInt(s string) (int, error) {
+func parseInt(s string) (int64, error) {
 	if s == limitsUnlimited {
 		return -1, nil
 	}
-	i, err := strconv.ParseInt(s, 10, 32)
+	i, err := strconv.ParseInt(s, 10, 64)
 	if err != nil {
 		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
 	}
-	return int(i), nil
+	return i, nil
 }
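The switch from int to int64 (and from a 32-bit to a 64-bit ParseInt) matters because several of these limits are byte counts that routinely exceed 2^31-1 on 64-bit hosts. A small sketch of the difference, using an illustrative address-space value rather than anything taken from this change:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		// e.g. a "Max address space" limit of roughly 64 GiB.
		s := "68719476736"

		if _, err := strconv.ParseInt(s, 10, 32); err != nil {
			fmt.Println("32-bit parse:", err) // value out of range
		}

		v, err := strconv.ParseInt(s, 10, 64)
		fmt.Println("64-bit parse:", v, err) // 68719476736 <nil>
	}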
68 vendor/github.com/prometheus/procfs/proc_ns.go generated vendored Normal file
@@ -0,0 +1,68 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// Namespace represents a single namespace of a process.
type Namespace struct {
	Type string  // Namespace type.
	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
}

// Namespaces contains all of the namespaces that the process is contained in.
type Namespaces map[string]Namespace

// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
// process is a member.
func (p Proc) NewNamespaces() (Namespaces, error) {
	d, err := os.Open(p.path("ns"))
	if err != nil {
		return nil, err
	}
	defer d.Close()

	names, err := d.Readdirnames(-1)
	if err != nil {
		return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
	}

	ns := make(Namespaces, len(names))
	for _, name := range names {
		target, err := os.Readlink(p.path("ns", name))
		if err != nil {
			return nil, err
		}

		fields := strings.SplitN(target, ":", 2)
		if len(fields) != 2 {
			return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
		}

		typ := fields[0]
		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
		if err != nil {
			return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
		}

		ns[name] = Namespace{typ, uint32(inode)}
	}

	return ns, nil
}
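A usage sketch for the new namespace accessor; as above it assumes procfs.Self() from the surrounding package and a kernel that exposes /proc/<pid>/ns:

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		p, err := procfs.Self()
		if err != nil {
			log.Fatal(err)
		}

		ns, err := p.NewNamespaces()
		if err != nil {
			log.Fatal(err)
		}

		// Processes sharing e.g. a network namespace report the same inode here.
		for name, n := range ns {
			fmt.Printf("%-8s type=%s inode=%d\n", name, n.Type, n.Inode)
		}
	}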
|
13
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
13
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
|
@ -1,3 +1,16 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
13 vendor/github.com/prometheus/procfs/stat.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
133 vendor/github.com/prometheus/procfs/ttar generated vendored
@@ -1,11 +1,26 @@
 #!/usr/bin/env bash
 
 # Purpose: plain text tar format
 # Limitations: - only suitable for text files, directories, and symlinks
 #              - stores only filename, content, and mode
 #              - not designed for untrusted input
+#
 # Note: must work with bash version 3.2 (macOS)
 
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 set -o errexit -o nounset
 
 # Sanitize environment (for instance, standard sorting of glob matches)
@@ -13,6 +28,55 @@ export LC_ALL=C
 
 path=""
 CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'EOF', r'\EOF', line)
+    line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+    line = re.sub('\x00', r'NULLBYTE', line)
+    sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+    line = re.sub(r'([^\\])EOF', r'\1', line)
+    line = re.sub(r'\\EOF', 'EOF', line)
+    sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+    if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+        echo "WARNING sed unable to handle null bytes, using Python (slow)."
+        if ! which python >/dev/null; then
+            echo "ERROR Python not found. Aborting."
+            exit 2
+        fi
+        USE_PYTHON=1
+    fi
+}
+
+#------------------------------------------------------------------------------
 
 function usage {
     bname=$(basename "$0")
@@ -23,6 +87,7 @@ Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
 
 Options:
         -C <DIR>           (change directory)
+        -v                 (verbose)
 
 Example: Change to sysfs directory, create ttar file from fixtures directory
         $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
@@ -45,6 +110,8 @@ function set_cmd {
     CMD=$1
 }
 
+unset VERBOSE
+
 while getopts :cf:htxvC: opt; do
     case $opt in
         c)
@@ -142,8 +209,37 @@ function extract {
         fi
         while IFS= read -r line; do
             line_no=$(( line_no + 1 ))
+            local eof_without_newline
             if [ "$size" -gt 0 ]; then
-                echo "$line" >> "$path"
+                if [[ "$line" =~ [^\\]EOF ]]; then
+                    # An EOF not preceeded by a backslash indicates that the line
+                    # does not end with a newline
+                    eof_without_newline=1
+                else
+                    eof_without_newline=0
+                fi
+                # Replace NULLBYTE with null byte if at beginning of line
+                # Replace NULLBYTE with null byte unless preceeded by backslash
+                # Remove one backslash in front of NULLBYTE (if any)
+                # Remove EOF unless preceeded by backslash
+                # Remove one backslash in front of EOF
+                if [ $USE_PYTHON -eq 1 ]; then
+                    echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+                else
+                    # The repeated pattern makes up for sed's lack of negative
+                    # lookbehind assertions (for consecutive null bytes).
+                    echo -n "$line" | \
+                        sed -e 's/^NULLBYTE/\x0/g;
+                                s/\([^\\]\)NULLBYTE/\1\x0/g;
+                                s/\([^\\]\)NULLBYTE/\1\x0/g;
+                                s/\\NULLBYTE/NULLBYTE/g;
+                                s/\([^\\]\)EOF/\1/g;
+                                s/\\EOF/EOF/g;
+                        ' >> "$path"
+                fi
+                if [[ "$eof_without_newline" -eq 0 ]]; then
+                    echo >> "$path"
+                fi
                 size=$(( size - 1 ))
                 continue
             fi
@@ -187,11 +283,14 @@ function get_mode {
     local mfile=$1
     if [ -z "${STAT_OPTION:-}" ]; then
         if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+            # GNU stat
             STAT_OPTION='-c'
             STAT_FORMAT='%a'
         else
+            # BSD stat
             STAT_OPTION='-f'
-            STAT_FORMAT='%A'
+            # Octal output, user/group/other (omit file type, sticky bit)
+            STAT_FORMAT='%OLp'
         fi
     fi
     stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
@@ -200,6 +299,7 @@ function get_mode {
 function _create {
     shopt -s nullglob
     local mode
+    local eof_without_newline
     while (( "$#" )); do
         file=$1
         if [ -L "$file" ]; then
@@ -223,8 +323,30 @@ function _create {
         elif [ -f "$file" ]; then
             echo "Path: $file"
             lines=$(wc -l "$file"|awk '{print $1}')
+            eof_without_newline=0
+            if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+                    [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+                eof_without_newline=1
+                lines=$((lines+1))
+            fi
             echo "Lines: $lines"
-            cat "$file"
+            # Add backslash in front of EOF
+            # Add backslash in front of NULLBYTE
+            # Replace null byte with NULLBYTE
+            if [ $USE_PYTHON -eq 1 ]; then
+                < "$file" python -c "$PYTHON_CREATE_FILTER"
+            else
+                < "$file" \
+                    sed 's/EOF/\\EOF/g;
+                         s/NULLBYTE/\\NULLBYTE/g;
+                         s/\x0/NULLBYTE/g;
+                         '
+            fi
+            if [[ "$eof_without_newline" -eq 1 ]]; then
+                # Finish line with EOF to indicate that the original line did
+                # not end with a linefeed
+                echo "EOF"
+            fi
             mode=$(get_mode "$file")
             echo "Mode: $mode"
             vecho "$mode $file"
@@ -249,9 +371,12 @@ function create {
         rm "$ttar_file"
     fi
     exec > "$ttar_file"
+    echo "# Archive created by ttar $ARG_STRING"
     _create "$@"
 }
 
+test_environment
+
 if [ -n "${CDIR:-}" ]; then
     if [[ "$ARCHIVE" != /* ]]; then
         # Relative path: preserve the archive's location before changing
2 vendor/github.com/prometheus/procfs/xfrm.go generated vendored
@@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
 
 		if len(fields) != 2 {
 			return XfrmStat{}, fmt.Errorf(
-				"couldnt parse %s line %s", file.Name(), s.Text())
+				"couldn't parse %s line %s", file.Name(), s.Text())
 		}
 
 		name := fields[0]
37 vendor/github.com/prometheus/procfs/xfs/parse.go generated vendored
@@ -17,8 +17,9 @@ import (
 	"bufio"
 	"fmt"
 	"io"
-	"strconv"
 	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
 )
 
 // ParseStats parses a Stats from an input io.Reader, using the format
@@ -68,7 +69,7 @@ func ParseStats(r io.Reader) (*Stats, error) {
 
 		// Extended precision counters are uint64 values.
 		if label == fieldXpc {
-			us, err := parseUint64s(ss[1:])
+			us, err := util.ParseUint64s(ss[1:])
 			if err != nil {
 				return nil, err
 			}
@@ -82,7 +83,7 @@ func ParseStats(r io.Reader) (*Stats, error) {
 		}
 
 		// All other counters are uint32 values.
-		us, err := parseUint32s(ss[1:])
+		us, err := util.ParseUint32s(ss[1:])
 		if err != nil {
 			return nil, err
 		}
@@ -327,33 +328,3 @@ func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
 		ReadBytes: us[2],
 	}, nil
 }
-
-// parseUint32s parses a slice of strings into a slice of uint32s.
-func parseUint32s(ss []string) ([]uint32, error) {
-	us := make([]uint32, 0, len(ss))
-	for _, s := range ss {
-		u, err := strconv.ParseUint(s, 10, 32)
-		if err != nil {
-			return nil, err
-		}
-
-		us = append(us, uint32(u))
-	}
-
-	return us, nil
-}
-
-// parseUint64s parses a slice of strings into a slice of uint64s.
-func parseUint64s(ss []string) ([]uint64, error) {
-	us := make([]uint64, 0, len(ss))
-	for _, s := range ss {
-		u, err := strconv.ParseUint(s, 10, 64)
-		if err != nil {
-			return nil, err
-		}
-
-		us = append(us, u)
-	}
-
-	return us, nil
-}
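The local helpers removed here are superseded by the shared internal/util versions used throughout this change; behaviorally both sides perform the same string-slice-to-integer-slice conversion. A standalone sketch equivalent to the deleted parseUint64s, for reference (names and sample input are illustrative only):

	package main

	import (
		"fmt"
		"strconv"
	)

	// parseUint64s converts a slice of decimal strings into uint64 values,
	// failing on the first string that does not parse.
	func parseUint64s(ss []string) ([]uint64, error) {
		us := make([]uint64, 0, len(ss))
		for _, s := range ss {
			u, err := strconv.ParseUint(s, 10, 64)
			if err != nil {
				return nil, err
			}
			us = append(us, u)
		}
		return us, nil
	}

	func main() {
		us, err := parseUint64s([]string{"4", "8", "15"})
		fmt.Println(us, err) // [4 8 15] <nil>
	}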
15 vendor/modules.txt vendored
@@ -3,27 +3,30 @@ github.com/alecthomas/template
 github.com/alecthomas/template/parse
 # github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf
 github.com/alecthomas/units
-# github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a
+# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973
 github.com/beorn7/perks/quantile
 # github.com/golang/protobuf v1.2.0
 github.com/golang/protobuf/proto
 # github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607
 github.com/howeyc/fsnotify
-# github.com/matttproud/golang_protobuf_extensions v1.0.0
+# github.com/matttproud/golang_protobuf_extensions v1.0.1
 github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/prometheus/client_golang v0.8.0
+# github.com/prometheus/client_golang v0.9.2
 github.com/prometheus/client_golang/prometheus
-# github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612
+github.com/prometheus/client_golang/prometheus/internal
+# github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.0.0-20170731114204-61f87aac8082
+# github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 github.com/prometheus/common/log
 github.com/prometheus/common/model
 github.com/prometheus/common/version
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
-# github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8
+# github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
 github.com/prometheus/procfs
+github.com/prometheus/procfs/nfs
 github.com/prometheus/procfs/xfs
+github.com/prometheus/procfs/internal/util
 # github.com/sirupsen/logrus v1.0.3
 github.com/sirupsen/logrus
 # golang.org/x/crypto v0.0.0-20170825220121-81e90905daef