Mirror of https://github.com/prometheus/statsd_exporter.git, synced 2024-12-24 22:40:29 +00:00

Merge pull request #237 from prometheus/beorn7/modules

Update prometheus/client_golang to v1.0.0

Commit: f2d3b9eb79
229 changed files with 15112 additions and 5114 deletions
24 go.mod

@@ -1,29 +1,13 @@
 module github.com/prometheus/statsd_exporter
 
 require (
-	github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 // indirect
-	github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 // indirect
-	github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 // indirect
-	github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect
-	github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/hashicorp/golang-lru v0.5.1
 	github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607
 	github.com/kr/pretty v0.1.0 // indirect
-	github.com/mattn/go-isatty v0.0.4 // indirect
-	github.com/onsi/ginkgo v1.7.0 // indirect
-	github.com/onsi/gomega v1.4.3 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v0.9.2
-	github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
-	github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
-	github.com/sergi/go-diff v1.0.0 // indirect
-	github.com/sirupsen/logrus v1.0.3 // indirect
-	github.com/stretchr/testify v1.2.2 // indirect
-	golang.org/x/crypto v0.0.0-20170825220121-81e90905daef // indirect
-	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
-	gopkg.in/alecthomas/kingpin.v2 v2.2.5
+	github.com/prometheus/client_golang v1.0.0
+	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
+	github.com/prometheus/common v0.4.1
+	gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
-	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
 	gopkg.in/yaml.v2 v2.2.1
 )
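Not part of the diff: a minimal, illustrative Go sketch of exporter-style code that builds against the bumped github.com/prometheus/client_golang v1.0.0 dependency. The metric name, help text, and listen address below are placeholders chosen for this example, not values taken from statsd_exporter.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// eventsTotal is a placeholder counter registered through promauto,
// which is available in client_golang v1.0.0.
var eventsTotal = promauto.NewCounter(prometheus.CounterOpts{
    Name: "example_events_total",
    Help: "Example counter for the client_golang v1.0.0 API.",
})

func main() {
    eventsTotal.Inc()
    // Expose the default registry over HTTP, as exporters typically do.
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":9102", nil))
}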
91 go.sum

@@ -1,82 +1,77 @@
-github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U=
-github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI=
-github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo=
-github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0=
-github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 h1:GDQdwm/gAcJcLAKQQZGOJ4knlw+7rfEQQcmwTbt4p5E=
-github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607 h1:+7wvV++11s0Okyl1dekihkIiCIYDz+Qk2LvxAShINU4=
 github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
-github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sirupsen/logrus v1.0.3 h1:B5C/igNWoiULof20pKfY4VntcIPqKuwEmoLZrabbUrc=
-github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/crypto v0.0.0-20170825220121-81e90905daef h1:R8ubLIilYRXIXpgjOg2l/ECVs3HzVKIjJEhxSsQ91u4=
-golang.org/x/crypto v0.0.0-20170825220121-81e90905daef/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.5 h1:qskSCq465uEvC3oGocwvZNsO3RF3SpLVLumOAhL0bXo=
-gopkg.in/alecthomas/kingpin.v2 v2.2.5/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
1 vendor/github.com/golang/protobuf/proto/decode.go generated vendored

@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
     if b&0x80 == 0 {
         goto done
     }
-    // x -= 0x80 << 63 // Always zero.
 
     return 0, errOverflow
 
63 vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored Normal file

@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+    return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+    return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+    return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+    return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
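For context (not part of the diff): the stubs in the new deprecated.go keep the old package-level MessageSet and Stats entry points compiling, but they no longer do any work. A short sketch of what a caller now observes, based only on the stub bodies shown above:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    // The legacy helper still exists, but since this version it only
    // returns an error instead of encoding anything.
    if _, err := proto.MarshalMessageSet(nil); err != nil {
        fmt.Println(err) // proto: not implemented
    }

    // GetStats is a stub as well; it always returns a zero-valued Stats.
    fmt.Printf("%+v\n", proto.GetStats())
}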
3 vendor/github.com/golang/protobuf/proto/equal.go generated vendored

@@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
             return false
         }
 
-        m1, m2 := e1.value, e2.value
+        m1 := extensionAsLegacyType(e1.value)
+        m2 := extensionAsLegacyType(e2.value)
 
         if m1 == nil && m2 == nil {
             // Both have only encoded form.
78 vendor/github.com/golang/protobuf/proto/extensions.go generated vendored

@@ -185,9 +185,25 @@ type Extension struct {
     // extension will have only enc set. When such an extension is
     // accessed using GetExtension (or GetExtensions) desc and value
     // will be set.
     desc *ExtensionDesc
+
+    // value is a concrete value for the extension field. Let the type of
+    // desc.ExtensionType be the "API type" and the type of Extension.value
+    // be the "storage type". The API type and storage type are the same except:
+    //   * For scalars (except []byte), the API type uses *T,
+    //   while the storage type uses T.
+    //   * For repeated fields, the API type uses []T, while the storage type
+    //   uses *[]T.
+    //
+    // The reason for the divergence is so that the storage type more naturally
+    // matches what is expected of when retrieving the values through the
+    // protobuf reflection APIs.
+    //
+    // The value may only be populated if desc is also populated.
     value interface{}
-    enc []byte
+
+    // enc is the raw bytes for the extension field.
+    enc []byte
 }
 
 // SetRawExtension is for testing only.
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
             // descriptors with the same field number.
             return nil, errors.New("proto: descriptor conflict")
         }
-        return e.value, nil
+        return extensionAsLegacyType(e.value), nil
     }
 
     if extension.ExtensionType == nil {
@@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
 
     // Remember the decoded version and drop the encoded version.
     // That way it is safe to mutate what we return.
-    e.value = v
+    e.value = extensionAsStorageType(v)
     e.desc = extension
     e.enc = nil
     emap[extension.Field] = e
-    return e.value, nil
+    return extensionAsLegacyType(e.value), nil
 }
 
 // defaultExtensionValue returns the default value for extension.
@@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
     }
     typ := reflect.TypeOf(extension.ExtensionType)
     if typ != reflect.TypeOf(value) {
-        return errors.New("proto: bad extension value type")
+        return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
     }
     // nil extension values need to be caught early, because the
     // encoder can't distinguish an ErrNil due to a nil extension
@@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
     }
 
     extmap := epb.extensionsWrite()
-    extmap[extension.Field] = Extension{desc: extension, value: value}
+    extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
     return nil
 }
 
@@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
 func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
     return extensionMaps[reflect.TypeOf(pb).Elem()]
 }
+
+// extensionAsLegacyType converts an value in the storage type as the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+    switch rv := reflect.ValueOf(v); rv.Kind() {
+    case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+        // Represent primitive types as a pointer to the value.
+        rv2 := reflect.New(rv.Type())
+        rv2.Elem().Set(rv)
+        v = rv2.Interface()
+    case reflect.Ptr:
+        // Represent slice types as the value itself.
+        switch rv.Type().Elem().Kind() {
+        case reflect.Slice:
+            if rv.IsNil() {
+                v = reflect.Zero(rv.Type().Elem()).Interface()
+            } else {
+                v = rv.Elem().Interface()
+            }
+        }
+    }
+    return v
+}
+
+// extensionAsStorageType converts an value in the API type as the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+    switch rv := reflect.ValueOf(v); rv.Kind() {
+    case reflect.Ptr:
+        // Represent slice types as the value itself.
+        switch rv.Type().Elem().Kind() {
+        case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+            if rv.IsNil() {
+                v = reflect.Zero(rv.Type().Elem()).Interface()
+            } else {
+                v = rv.Elem().Interface()
+            }
+        }
+    case reflect.Slice:
+        // Represent slice types as a pointer to the value.
+        if rv.Type().Elem().Kind() != reflect.Uint8 {
+            rv2 := reflect.New(rv.Type())
+            rv2.Elem().Set(rv)
+            v = rv2.Interface()
+        }
+    }
+    return v
+}
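The extensionAsLegacyType/extensionAsStorageType pair added above implements the API-type versus storage-type rule described in the new Extension.value comment. The following standalone sketch illustrates the scalar case only; asStorageType and asLegacyType are names invented for this illustration, not the vendored helpers, and it assumes the behaviour documented in the diff:

package main

import (
    "fmt"
    "reflect"
)

// asStorageType mirrors, in simplified form, how a scalar extension value
// moves from the API type (*T) to the storage type (T).
func asStorageType(v interface{}) interface{} {
    rv := reflect.ValueOf(v)
    if rv.Kind() == reflect.Ptr && !rv.IsNil() {
        switch rv.Type().Elem().Kind() {
        case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32,
            reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
            return rv.Elem().Interface() // store the dereferenced scalar
        }
    }
    return v
}

// asLegacyType is the inverse direction: wrap a stored scalar back into *T,
// which is the shape GetExtension hands back to callers.
func asLegacyType(v interface{}) interface{} {
    rv := reflect.ValueOf(v)
    switch rv.Kind() {
    case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32,
        reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
        p := reflect.New(rv.Type())
        p.Elem().Set(rv)
        return p.Interface()
    }
    return v
}

func main() {
    n := int32(42)
    stored := asStorageType(&n) // int32
    api := asLegacyType(stored) // *int32 again
    fmt.Printf("%T %T %d\n", stored, api, *api.(*int32))
}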
38 vendor/github.com/golang/protobuf/proto/lib.go generated vendored

@@ -341,26 +341,6 @@ type Message interface {
     ProtoMessage()
 }
 
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
-    Emalloc uint64 // mallocs in encode
-    Dmalloc uint64 // mallocs in decode
-    Encode  uint64 // number of encodes
-    Decode  uint64 // number of decodes
-    Chit    uint64 // number of cache hits
-    Cmiss   uint64 // number of cache misses
-    Size    uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
 // A Buffer is a buffer manager for marshaling and unmarshaling
 // protocol buffers. It may be reused between invocations to
 // reduce memory usage. It is not necessary to use a Buffer;
@@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
     return false
 }
 
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+    // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+    // to assert that that code is compatible with this version of the proto package.
+    ProtoPackageIsVersion3 = true
 
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+    // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+    // to assert that that code is compatible with this version of the proto package.
+    ProtoPackageIsVersion2 = true
+
+    // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+    // to assert that that code is compatible with this version of the proto package.
+    ProtoPackageIsVersion1 = true
+)
 
 // InternalMessageInfo is a type used internally by generated .pb.go files.
 // This type is not intended to be used by non-generated code.
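As context for the new constant block: generated .pb.go files reference one of these constants so that compiling them against an older proto runtime fails early. A typical, illustrative generated-code assertion is shown below; examplepb is a made-up package name, and the exact line protoc-gen-go emits may differ by version:

package examplepb

import proto "github.com/golang/protobuf/proto"

// Compile-time assertion of runtime compatibility: this only builds if the
// vendored proto package defines ProtoPackageIsVersion3.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package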
137 vendor/github.com/golang/protobuf/proto/message_set.go generated vendored

@@ -36,13 +36,7 @@ package proto
 */
 
 import (
-    "bytes"
-    "encoding/json"
     "errors"
-    "fmt"
-    "reflect"
-    "sort"
-    "sync"
 )
 
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
     return buf[i+1:]
 }
 
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-    return marshalMessageSet(exts, false)
-}
-
-// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
-    switch exts := exts.(type) {
-    case *XXX_InternalExtensions:
-        var u marshalInfo
-        siz := u.sizeMessageSet(exts)
-        b := make([]byte, 0, siz)
-        return u.appendMessageSet(b, exts, deterministic)
-
-    case map[int32]Extension:
-        // This is an old-style extension map.
-        // Wrap it in a new-style XXX_InternalExtensions.
-        ie := XXX_InternalExtensions{
-            p: &struct {
-                mu           sync.Mutex
-                extensionMap map[int32]Extension
-            }{
-                extensionMap: exts,
-            },
-        }
-
-        var u marshalInfo
-        siz := u.sizeMessageSet(&ie)
-        b := make([]byte, 0, siz)
-        return u.appendMessageSet(b, &ie, deterministic)
-
-    default:
-        return nil, errors.New("proto: not an extension map")
-    }
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
     var m map[int32]Extension
     switch exts := exts.(type) {
     case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
         }
     }
     return nil
 }
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-    var m map[int32]Extension
-    switch exts := exts.(type) {
-    case *XXX_InternalExtensions:
-        var mu sync.Locker
-        m, mu = exts.extensionsRead()
-        if m != nil {
-            // Keep the extensions map locked until we're done marshaling to prevent
-            // races between marshaling and unmarshaling the lazily-{en,de}coded
-            // values.
-            mu.Lock()
-            defer mu.Unlock()
-        }
-    case map[int32]Extension:
-        m = exts
-    default:
-        return nil, errors.New("proto: not an extension map")
-    }
-    var b bytes.Buffer
-    b.WriteByte('{')
-
-    // Process the map in key order for deterministic output.
-    ids := make([]int32, 0, len(m))
-    for id := range m {
-        ids = append(ids, id)
-    }
-    sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
-    for i, id := range ids {
-        ext := m[id]
-        msd, ok := messageSetMap[id]
-        if !ok {
-            // Unknown type; we can't render it, so skip it.
-            continue
-        }
-
-        if i > 0 && b.Len() > 1 {
-            b.WriteByte(',')
-        }
-
-        fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
-        x := ext.value
-        if x == nil {
-            x = reflect.New(msd.t.Elem()).Interface()
-            if err := Unmarshal(ext.enc, x.(Message)); err != nil {
-                return nil, err
-            }
-        }
-        d, err := json.Marshal(x)
-        if err != nil {
-            return nil, err
-        }
-        b.Write(d)
-    }
-    b.WriteByte('}')
-    return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
-    // Common-case fast path.
-    if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
-        return nil
-    }
-
-    // This is fairly tricky, and it's not clear that it is needed.
-    return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
-    t    reflect.Type // pointer to struct
-    name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
-    messageSetMap[fieldNum] = messageSetDesc{
-        t:    reflect.TypeOf(m),
-        name: name,
-    }
-}
5 vendor/github.com/golang/protobuf/proto/pointer_reflect.go generated vendored

@@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
 
 // toAddrPointer converts an interface to a pointer that points to
 // the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
     v := reflect.ValueOf(*i)
     u := reflect.New(v.Type())
     u.Elem().Set(v)
+    if deref {
+        u = u.Elem()
+    }
     return pointer{v: u}
 }
 
15 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go generated vendored

@@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
 
 // toAddrPointer converts an interface to a pointer that points to
 // the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
     // Super-tricky - read or get the address of data word of interface value.
     if isptr {
         // The interface is of pointer type, thus it is a direct interface.
         // The data word is the pointer data itself. We take its address.
-        return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+        p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+    } else {
+        // The interface is not of pointer type. The data word is the pointer
+        // to the data.
+        p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
     }
-    // The interface is not of pointer type. The data word is the pointer
-    // to the data.
-    return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+    if deref {
+        p.p = *(*unsafe.Pointer)(p.p)
+    }
+    return p
 }
 
 // valToPointer converts v to a pointer. v must be of pointer type.
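Both pointer implementations above gain the same deref step. A standalone sketch of the idea, using safe reflection only (derefOnce is a name invented for this illustration): when a repeated extension is stored as *[]T, the marshaler needs the []T one indirection down.

package main

import (
    "fmt"
    "reflect"
)

// derefOnce follows a pointer one level, mirroring what the new deref flag
// asks toAddrPointer to do for values stored as *[]T.
func derefOnce(v interface{}) interface{} {
    rv := reflect.ValueOf(v)
    if rv.Kind() == reflect.Ptr && !rv.IsNil() {
        return rv.Elem().Interface()
    }
    return v
}

func main() {
    stored := &[]int32{1, 2, 3}           // storage type of a repeated extension
    fmt.Printf("%T\n", derefOnce(stored)) // []int32
}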
31 vendor/github.com/golang/protobuf/proto/properties.go generated vendored

@@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
     sprop, ok := propertiesMap[t]
     propertiesMu.RUnlock()
     if ok {
-        if collectStats {
-            stats.Chit++
-        }
         return sprop
     }
 
@@ -346,17 +343,20 @@
     return sprop
 }
 
+type (
+    oneofFuncsIface interface {
+        XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+    }
+    oneofWrappersIface interface {
+        XXX_OneofWrappers() []interface{}
+    }
+)
+
 // getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
     if prop, ok := propertiesMap[t]; ok {
-        if collectStats {
-            stats.Chit++
-        }
         return prop
     }
-    if collectStats {
-        stats.Cmiss++
-    }
 
     prop := new(StructProperties)
     // in case of recursive protos, fill this in now.
@@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
     // Re-order prop.order.
     sort.Sort(prop)
 
-    type oneofMessage interface {
-        XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+    var oots []interface{}
+    switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+    case oneofFuncsIface:
+        _, _, _, oots = m.XXX_OneofFuncs()
+    case oneofWrappersIface:
+        oots = m.XXX_OneofWrappers()
     }
-    if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
-        var oots []interface{}
-        _, _, _, oots = om.XXX_OneofFuncs()
-
+    if len(oots) > 0 {
         // Interpret oneof metadata.
         prop.OneofTypes = make(map[string]*OneofProperties)
         for _, oot := range oots {
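The oneofFuncsIface/oneofWrappersIface pair introduced above lets the package discover oneof wrapper types from messages generated by either an old protoc-gen-go (XXX_OneofFuncs) or a newer one (XXX_OneofWrappers). A simplified, self-contained sketch of that dual lookup follows; the interfaces here are stand-ins with trimmed signatures, not the vendored definitions:

package main

import "fmt"

// Trimmed stand-ins for the two interfaces checked by the vendored code.
type oneofFuncsIface interface {
    XXX_OneofFuncs() (marshal, unmarshal, size interface{}, wrappers []interface{})
}

type oneofWrappersIface interface {
    XXX_OneofWrappers() []interface{}
}

// oneofWrappers returns the oneof wrapper types regardless of which
// generated-code vintage the message comes from.
func oneofWrappers(m interface{}) []interface{} {
    switch m := m.(type) {
    case oneofFuncsIface:
        _, _, _, wrappers := m.XXX_OneofFuncs()
        return wrappers
    case oneofWrappersIface:
        return m.XXX_OneofWrappers()
    }
    return nil
}

// newStyleMsg mimics a message emitted by a newer protoc-gen-go.
type newStyleMsg struct{}

func (newStyleMsg) XXX_OneofWrappers() []interface{} {
    return []interface{}{(*struct{ Name string })(nil)}
}

func main() {
    fmt.Println(len(oneofWrappers(newStyleMsg{}))) // 1
}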
45 vendor/github.com/golang/protobuf/proto/table_marshal.go generated vendored

@@ -87,6 +87,7 @@ type marshalElemInfo struct {
     sizer     sizer
     marshaler marshaler
     isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+    deref     bool // dereference the pointer before operating on it; implies isptr
 }
 
 var (
@@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
 
     // get oneof implementers
     var oneofImplementers []interface{}
-    if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+    switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+    case oneofFuncsIface:
         _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+    case oneofWrappersIface:
+        oneofImplementers = m.XXX_OneofWrappers()
     }
 
     n := t.NumField()
@@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
         panic("tag is not an integer")
     }
     wt := wiretype(tags[0])
+    if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+        t = t.Elem()
+    }
     sizer, marshaler := typeMarshaler(t, tags, false, false)
+    var deref bool
+    if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+        t = reflect.PtrTo(t)
+        deref = true
+    }
     e = &marshalElemInfo{
         wiretag:   uint64(tag)<<3 | wt,
         tagsize:   SizeVarint(uint64(tag) << 3),
         sizer:     sizer,
         marshaler: marshaler,
         isptr:     t.Kind() == reflect.Ptr,
+        deref:     deref,
     }
 
     // update cache
@@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
 
 func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
     fi.field = toField(f)
-    fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+    fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
     fi.isPointer = true
     fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
     fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
     }
 }
 
-type oneofMessage interface {
-    XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
 // wiretype returns the wire encoding of the type.
 func wiretype(encoding string) uint64 {
     switch encoding {
@@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
         for _, k := range m.MapKeys() {
             ki := k.Interface()
             vi := m.MapIndex(k).Interface()
-            kaddr := toAddrPointer(&ki, false)    // pointer to key
-            vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+            kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+            vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
             siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
             n += siz + SizeVarint(uint64(siz)) + tagsize
         }
@@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
         for _, k := range keys {
             ki := k.Interface()
             vi := m.MapIndex(k).Interface()
-            kaddr := toAddrPointer(&ki, false)    // pointer to key
-            vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+            kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+            vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
             b = appendVarint(b, tag)
             siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
             b = appendVarint(b, uint64(siz))
@@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
         // the last time this function was called.
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         n += ei.sizer(p, ei.tagsize)
     }
     mu.Unlock()
@@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
         if !nerr.Merge(err) {
             return b, err
@@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
         if !nerr.Merge(err) {
             return b, err
@@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         n += ei.sizer(p, 1) // message, tag = 3 (size=1)
     }
     mu.Unlock()
@@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
         if !nerr.Merge(err) {
             return b, err
@@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
         b = append(b, 1<<3|WireEndGroup)
         if !nerr.Merge(err) {
@@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         n += ei.sizer(p, ei.tagsize)
     }
     return n
@@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
 
         ei := u.getExtElemInfo(e.desc)
         v := e.value
-        p := toAddrPointer(&v, ei.isptr)
+        p := toAddrPointer(&v, ei.isptr, ei.deref)
         b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
         if !nerr.Merge(err) {
             return b, err
74
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
74
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
|
@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
||||||
u.computeUnmarshalInfo()
|
u.computeUnmarshalInfo()
|
||||||
}
|
}
|
||||||
if u.isMessageSet {
|
if u.isMessageSet {
|
||||||
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||||
}
|
}
|
||||||
var reqMask uint64 // bitmask of required fields we've seen.
|
var reqMask uint64 // bitmask of required fields we've seen.
|
||||||
var errLater error
|
var errLater error
|
||||||
|
@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find any types associated with oneof fields.
|
// Find any types associated with oneof fields.
|
||||||
// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
|
var oneofImplementers []interface{}
|
||||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
|
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||||
if fn.IsValid() {
|
case oneofFuncsIface:
|
||||||
res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
|
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||||
for i := res.Len() - 1; i >= 0; i-- {
|
case oneofWrappersIface:
|
||||||
v := res.Index(i) // interface{}
|
oneofImplementers = m.XXX_OneofWrappers()
|
||||||
tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
|
}
|
||||||
typ := tptr.Elem() // Msg_X
|
for _, v := range oneofImplementers {
|
||||||
|
tptr := reflect.TypeOf(v) // *Msg_X
|
||||||
|
typ := tptr.Elem() // Msg_X
|
||||||
|
|
||||||
f := typ.Field(0) // oneof implementers have one field
|
f := typ.Field(0) // oneof implementers have one field
|
||||||
baseUnmarshal := fieldUnmarshaler(&f)
|
baseUnmarshal := fieldUnmarshaler(&f)
|
||||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||||
fieldNum, err := strconv.Atoi(tags[1])
|
fieldNum, err := strconv.Atoi(tags[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic("protobuf tag field not an integer: " + tags[1])
|
panic("protobuf tag field not an integer: " + tags[1])
|
||||||
}
|
}
|
||||||
var name string
|
var name string
|
||||||
for _, tag := range tags {
|
for _, tag := range tags {
|
||||||
if strings.HasPrefix(tag, "name=") {
|
if strings.HasPrefix(tag, "name=") {
|
||||||
name = strings.TrimPrefix(tag, "name=")
|
name = strings.TrimPrefix(tag, "name=")
|
||||||
break
|
break
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the oneof field that this struct implements.
|
|
||||||
// Might take O(n^2) to process all of the oneofs, but who cares.
|
|
||||||
for _, of := range oneofFields {
|
|
||||||
if tptr.Implements(of.ityp) {
|
|
||||||
// We have found the corresponding interface for this struct.
|
|
||||||
// That lets us know where this struct should be stored
|
|
||||||
// when we encounter it during unmarshaling.
|
|
||||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
|
||||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Find the oneof field that this struct implements.
|
||||||
|
// Might take O(n^2) to process all of the oneofs, but who cares.
|
||||||
|
for _, of := range oneofFields {
|
||||||
|
if tptr.Implements(of.ityp) {
|
||||||
|
// We have found the corresponding interface for this struct.
|
||||||
|
// That lets us know where this struct should be stored
|
||||||
|
// when we encounter it during unmarshaling.
|
||||||
|
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||||
|
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get extension ranges, if any.
|
// Get extension ranges, if any.
|
||||||
fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||||
if fn.IsValid() {
|
if fn.IsValid() {
|
||||||
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
|
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
|
||||||
panic("a message with extensions, but no extensions field in " + t.Name())
|
panic("a message with extensions, but no extensions field in " + t.Name())
|
||||||
|
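The change above swaps the reflection lookup of the generated XXX_OneofFuncs method for a type switch over two small interfaces (oneofFuncsIface and oneofWrappersIface in the vendored code), so message types generated by newer protoc-gen-go that only provide XXX_OneofWrappers are still recognized. A minimal standalone sketch of that detection pattern follows; the interface definition and sample message here are assumptions for illustration, not the vendored declarations.

```go
package main

import (
	"fmt"
	"reflect"
)

// Assumed shape of the newer generated method; the vendored package declares
// its own richer equivalents (oneofFuncsIface, oneofWrappersIface).
type oneofWrappersIface interface {
	XXX_OneofWrappers() []interface{}
}

// oneofWrappers returns the oneof wrapper types of the message type t, or nil
// if the type does not expose the newer XXX_OneofWrappers method.
func oneofWrappers(t reflect.Type) []interface{} {
	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
	case oneofWrappersIface:
		return m.XXX_OneofWrappers()
	default:
		return nil
	}
}

func main() {
	type plainMsg struct{ Name string }
	// A plain struct implements neither interface, so no wrappers are found.
	fmt.Println(oneofWrappers(reflect.TypeOf(plainMsg{})))
}
```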
@@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
 // If there is an error, it returns 0,0.
 func decodeVarint(b []byte) (uint64, int) {
 	var x, y uint64
-	if len(b) <= 0 {
+	if len(b) == 0 {
 		goto bad
 	}
 	x = uint64(b[0])
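The decodeVarint fix above replaces the always-equivalent `len(b) <= 0` test with the clearer `len(b) == 0`. For readers unfamiliar with the encoding, here is a simplified base-128 varint decoder; it is a sketch, not the vendored implementation, which is heavily unrolled for speed.

```go
package main

import "fmt"

// decodeVarint reads a base-128 varint from b and returns the decoded value
// and the number of bytes consumed; it returns (0, 0) on empty or unterminated
// input. Each byte carries 7 payload bits; a clear high bit marks the last byte.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	v, n := decodeVarint([]byte{0xac, 0x02}) // 300 encoded as a protobuf varint
	fmt.Println(v, n)                        // 300 2
}
```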
9
vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
generated
vendored
Normal file
9
vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
(The MIT License)
|
||||||
|
|
||||||
|
Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
40 vendor/github.com/konsorten/go-windows-terminal-sequences/README.md (generated, vendored, new file)
|
@ -0,0 +1,40 @@
|
||||||
|
# Windows Terminal Sequences
|
||||||
|
|
||||||
|
This library allow for enabling Windows terminal color support for Go.
|
||||||
|
|
||||||
|
See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
sequences "github.com/konsorten/go-windows-terminal-sequences"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## Authors
|
||||||
|
|
||||||
|
The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
|
||||||
|
|
||||||
|
We thank all the authors who provided code to this library:
|
||||||
|
|
||||||
|
* Felix Kollmann
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
(The MIT License)
|
||||||
|
|
||||||
|
Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
1 vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod (generated, vendored, new file)

@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
36 vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go (generated, vendored, new file)
|
@ -0,0 +1,36 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package sequences
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
|
||||||
|
setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
|
||||||
|
)
|
||||||
|
|
||||||
|
func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
|
||||||
|
const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
|
||||||
|
|
||||||
|
var mode uint32
|
||||||
|
err := syscall.GetConsoleMode(syscall.Stdout, &mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if enable {
|
||||||
|
mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
||||||
|
} else {
|
||||||
|
mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
||||||
|
}
|
||||||
|
|
||||||
|
ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
|
||||||
|
if ret == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
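The README example earlier in this diff discards the returned error; a slightly fuller usage sketch of the function added above (Windows-only, same import path as this vendor tree):

```go
// +build windows

package main

import (
	"log"
	"syscall"

	sequences "github.com/konsorten/go-windows-terminal-sequences"
)

func main() {
	// Ask the Windows console to interpret ANSI/VT escape sequences on stdout.
	if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
		log.Fatalf("enabling virtual terminal processing: %v", err)
	}
	log.Println("\x1b[32mcolored output works\x1b[0m")
}
```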
29 vendor/github.com/prometheus/client_golang/prometheus/build_info.go (generated, vendored, new file)
|
@ -0,0 +1,29 @@
|
||||||
|
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build go1.12
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import "runtime/debug"
|
||||||
|
|
||||||
|
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
|
||||||
|
func readBuildInfo() (path, version, sum string) {
|
||||||
|
path, version, sum = "unknown", "unknown", "unknown"
|
||||||
|
if bi, ok := debug.ReadBuildInfo(); ok {
|
||||||
|
path = bi.Main.Path
|
||||||
|
version = bi.Main.Version
|
||||||
|
sum = bi.Main.Sum
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
22 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go (generated, vendored, new file)
|
@ -0,0 +1,22 @@
|
||||||
|
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build !go1.12
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
|
||||||
|
// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
|
||||||
|
func readBuildInfo() (path, version, sum string) {
|
||||||
|
return "unknown", "unknown", "unknown"
|
||||||
|
}
|
2 vendor/github.com/prometheus/client_golang/prometheus/collector.go (generated, vendored)

@@ -79,7 +79,7 @@ type Collector interface {
 // of the Describe method. If a Collector sometimes collects no metrics at all
 // (for example vectors like CounterVec, GaugeVec, etc., which only collect
 // metrics after a metric with a fully specified label set has been accessed),
-// it might even get registered as an unchecked Collecter (cf. the Register
+// it might even get registered as an unchecked Collector (cf. the Register
 // method of the Registerer interface). Hence, only use this shortcut
 // implementation of Describe if you are certain to fulfill the contract.
 //
7 vendor/github.com/prometheus/client_golang/prometheus/doc.go (generated, vendored)

@@ -122,13 +122,13 @@
 // the Collect method. The Describe method has to return separate Desc
 // instances, representative of the “throw-away” metrics to be created later.
 // NewDesc comes in handy to create those Desc instances. Alternatively, you
-// could return no Desc at all, which will marke the Collector “unchecked”. No
+// could return no Desc at all, which will mark the Collector “unchecked”. No
-// checks are porformed at registration time, but metric consistency will still
+// checks are performed at registration time, but metric consistency will still
 // be ensured at scrape time, i.e. any inconsistencies will lead to scrape
 // errors. Thus, with unchecked Collectors, the responsibility to not collect
 // metrics that lead to inconsistencies in the total scrape result lies with the
 // implementer of the Collector. While this is not a desirable state, it is
-// sometimes necessary. The typical use case is a situatios where the exact
+// sometimes necessary. The typical use case is a situation where the exact
 // metrics to be returned by a Collector cannot be predicted at registration
 // time, but the implementer has sufficient knowledge of the whole system to
 // guarantee metric consistency.

@@ -183,7 +183,6 @@
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
 // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-// (The top-level functions in the prometheus package are deprecated.)
 //
 // Pushing to the Pushgateway
 //
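The doc.go passage above describes “unchecked” Collectors: if Describe sends no Desc, registration performs no consistency checks and problems only surface at scrape time. A minimal sketch of such a collector, using a hypothetical metric name that is not part of the vendored code:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// dynamicCollector reports metrics whose descriptors are unknown at registration time.
type dynamicCollector struct{}

// Describe sends no Desc at all, which marks the collector as "unchecked".
func (dynamicCollector) Describe(chan<- *prometheus.Desc) {}

// Collect builds metrics on the fly; consistency is only checked at scrape time.
func (dynamicCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("example_dynamic_value", "A value only known at scrape time.", nil, nil),
		prometheus.GaugeValue, 42,
	)
}

func main() {
	prometheus.MustRegister(dynamicCollector{})
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```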
121 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (generated, vendored)
|
@ -14,9 +14,9 @@
|
||||||
package prometheus
|
package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -26,16 +26,41 @@ type goCollector struct {
|
||||||
gcDesc *Desc
|
gcDesc *Desc
|
||||||
goInfoDesc *Desc
|
goInfoDesc *Desc
|
||||||
|
|
||||||
// metrics to describe and collect
|
// ms... are memstats related.
|
||||||
metrics memStatsMetrics
|
msLast *runtime.MemStats // Previously collected memstats.
|
||||||
|
msLastTimestamp time.Time
|
||||||
|
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
|
||||||
|
msMetrics memStatsMetrics
|
||||||
|
msRead func(*runtime.MemStats) // For mocking in tests.
|
||||||
|
msMaxWait time.Duration // Wait time for fresh memstats.
|
||||||
|
msMaxAge time.Duration // Maximum allowed age of old memstats.
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGoCollector returns a collector which exports metrics about the current Go
|
// NewGoCollector returns a collector that exports metrics about the current Go
|
||||||
// process. This includes memory stats. To collect those, runtime.ReadMemStats
|
// process. This includes memory stats. To collect those, runtime.ReadMemStats
|
||||||
// is called. This causes a stop-the-world, which is very short with Go1.9+
|
// is called. This requires to “stop the world”, which usually only happens for
|
||||||
// (~25µs). However, with older Go versions, the stop-the-world duration depends
|
// garbage collection (GC). Take the following implications into account when
|
||||||
// on the heap size and can be quite significant (~1.7 ms/GiB as per
|
// deciding whether to use the Go collector:
|
||||||
|
//
|
||||||
|
// 1. The performance impact of stopping the world is the more relevant the more
|
||||||
|
// frequently metrics are collected. However, with Go1.9 or later the
|
||||||
|
// stop-the-world time per metrics collection is very short (~25µs) so that the
|
||||||
|
// performance impact will only matter in rare cases. However, with older Go
|
||||||
|
// versions, the stop-the-world duration depends on the heap size and can be
|
||||||
|
// quite significant (~1.7 ms/GiB as per
|
||||||
// https://go-review.googlesource.com/c/go/+/34937).
|
// https://go-review.googlesource.com/c/go/+/34937).
|
||||||
|
//
|
||||||
|
// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
|
||||||
|
// metrics collection happens to coincide with GC, it will only complete after
|
||||||
|
// GC has finished. Usually, GC is fast enough to not cause problems. However,
|
||||||
|
// with a very large heap, GC might take multiple seconds, which is enough to
|
||||||
|
// cause scrape timeouts in common setups. To avoid this problem, the Go
|
||||||
|
// collector will use the memstats from a previous collection if
|
||||||
|
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
|
||||||
|
// collected memstats, or their collection is more than 5m ago, the collection
|
||||||
|
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
|
||||||
|
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
|
||||||
|
// issue.)
|
||||||
func NewGoCollector() Collector {
|
func NewGoCollector() Collector {
|
||||||
return &goCollector{
|
return &goCollector{
|
||||||
goroutinesDesc: NewDesc(
|
goroutinesDesc: NewDesc(
|
||||||
|
@ -54,7 +79,11 @@ func NewGoCollector() Collector {
|
||||||
"go_info",
|
"go_info",
|
||||||
"Information about the Go environment.",
|
"Information about the Go environment.",
|
||||||
nil, Labels{"version": runtime.Version()}),
|
nil, Labels{"version": runtime.Version()}),
|
||||||
metrics: memStatsMetrics{
|
msLast: &runtime.MemStats{},
|
||||||
|
msRead: runtime.ReadMemStats,
|
||||||
|
msMaxWait: time.Second,
|
||||||
|
msMaxAge: 5 * time.Minute,
|
||||||
|
msMetrics: memStatsMetrics{
|
||||||
{
|
{
|
||||||
desc: NewDesc(
|
desc: NewDesc(
|
||||||
memstatNamespace("alloc_bytes"),
|
memstatNamespace("alloc_bytes"),
|
||||||
|
@ -253,7 +282,7 @@ func NewGoCollector() Collector {
|
||||||
}
|
}
|
||||||
|
|
||||||
func memstatNamespace(s string) string {
|
func memstatNamespace(s string) string {
|
||||||
return fmt.Sprintf("go_memstats_%s", s)
|
return "go_memstats_" + s
|
||||||
}
|
}
|
||||||
|
|
||||||
// Describe returns all descriptions of the collector.
|
// Describe returns all descriptions of the collector.
|
||||||
|
@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||||
ch <- c.threadsDesc
|
ch <- c.threadsDesc
|
||||||
ch <- c.gcDesc
|
ch <- c.gcDesc
|
||||||
ch <- c.goInfoDesc
|
ch <- c.goInfoDesc
|
||||||
for _, i := range c.metrics {
|
for _, i := range c.msMetrics {
|
||||||
ch <- i.desc
|
ch <- i.desc
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect returns the current state of all metrics of the collector.
|
// Collect returns the current state of all metrics of the collector.
|
||||||
func (c *goCollector) Collect(ch chan<- Metric) {
|
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||||
|
var (
|
||||||
|
ms = &runtime.MemStats{}
|
||||||
|
done = make(chan struct{})
|
||||||
|
)
|
||||||
|
// Start reading memstats first as it might take a while.
|
||||||
|
go func() {
|
||||||
|
c.msRead(ms)
|
||||||
|
c.msMtx.Lock()
|
||||||
|
c.msLast = ms
|
||||||
|
c.msLastTimestamp = time.Now()
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
|
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
|
||||||
n, _ := runtime.ThreadCreateProfile(nil)
|
n, _ := runtime.ThreadCreateProfile(nil)
|
||||||
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
|
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
|
||||||
|
@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) {
|
||||||
|
|
||||||
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
|
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
|
||||||
|
|
||||||
ms := &runtime.MemStats{}
|
timer := time.NewTimer(c.msMaxWait)
|
||||||
runtime.ReadMemStats(ms)
|
select {
|
||||||
for _, i := range c.metrics {
|
case <-done: // Our own ReadMemStats succeeded in time. Use it.
|
||||||
|
timer.Stop() // Important for high collection frequencies to not pile up timers.
|
||||||
|
c.msCollect(ch, ms)
|
||||||
|
return
|
||||||
|
case <-timer.C: // Time out, use last memstats if possible. Continue below.
|
||||||
|
}
|
||||||
|
c.msMtx.Lock()
|
||||||
|
if time.Since(c.msLastTimestamp) < c.msMaxAge {
|
||||||
|
// Last memstats are recent enough. Collect from them under the lock.
|
||||||
|
c.msCollect(ch, c.msLast)
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// If we are here, the last memstats are too old or don't exist. We have
|
||||||
|
// to wait until our own ReadMemStats finally completes. For that to
|
||||||
|
// happen, we have to release the lock.
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
<-done
|
||||||
|
c.msCollect(ch, ms)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
|
||||||
|
for _, i := range c.msMetrics {
|
||||||
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
|
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -299,3 +364,33 @@ type memStatsMetrics []struct {
|
||||||
eval func(*runtime.MemStats) float64
|
eval func(*runtime.MemStats) float64
|
||||||
valType ValueType
|
valType ValueType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewBuildInfoCollector returns a collector collecting a single metric
|
||||||
|
// "go_build_info" with the constant value 1 and three labels "path", "version",
|
||||||
|
// and "checksum". Their label values contain the main module path, version, and
|
||||||
|
// checksum, respectively. The labels will only have meaningful values if the
|
||||||
|
// binary is built with Go module support and from source code retrieved from
|
||||||
|
// the source repository (rather than the local file system). This is usually
|
||||||
|
// accomplished by building from outside of GOPATH, specifying the full address
|
||||||
|
// of the main package, e.g. "GO111MODULE=on go run
|
||||||
|
// github.com/prometheus/client_golang/examples/random". If built without Go
|
||||||
|
// module support, all label values will be "unknown". If built with Go module
|
||||||
|
// support but using the source code from the local file system, the "path" will
|
||||||
|
// be set appropriately, but "checksum" will be empty and "version" will be
|
||||||
|
// "(devel)".
|
||||||
|
//
|
||||||
|
// This collector uses only the build information for the main module. See
|
||||||
|
// https://github.com/povilasv/prommod for an example of a collector for the
|
||||||
|
// module dependencies.
|
||||||
|
func NewBuildInfoCollector() Collector {
|
||||||
|
path, version, sum := readBuildInfo()
|
||||||
|
c := &selfCollector{MustNewConstMetric(
|
||||||
|
NewDesc(
|
||||||
|
"go_build_info",
|
||||||
|
"Build information about the main Go module.",
|
||||||
|
nil, Labels{"path": path, "version": version, "checksum": sum},
|
||||||
|
),
|
||||||
|
GaugeValue, 1)}
|
||||||
|
c.init(c.self)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
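The collector changes above (memstats reads with a timeout and the new build info collector) do not change how the collectors are wired up. A short usage sketch with a custom registry; the default registry already includes a Go collector, so explicit registration like this is only needed when gathering from a registry of your own:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewGoCollector(),        // goroutines, threads, GC, memstats
		prometheus.NewBuildInfoCollector(), // go_build_info{path, version, checksum}
	)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}
```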
116 vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored)
|
@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Finally we know the final length of h.upperBounds and can make counts
|
// Finally we know the final length of h.upperBounds and can make buckets
|
||||||
// for both states:
|
// for both counts:
|
||||||
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
|
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
|
||||||
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
|
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
|
||||||
|
|
||||||
|
@ -224,18 +224,21 @@ type histogramCounts struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type histogram struct {
|
type histogram struct {
|
||||||
// countAndHotIdx is a complicated one. For lock-free yet atomic
|
// countAndHotIdx enables lock-free writes with use of atomic updates.
|
||||||
// observations, we need to save the total count of observations again,
|
// The most significant bit is the hot index [0 or 1] of the count field
|
||||||
// combined with the index of the currently-hot counts struct, so that
|
// below. Observe calls update the hot one. All remaining bits count the
|
||||||
// we can perform the operation on both values atomically. The least
|
// number of Observe calls. Observe starts by incrementing this counter,
|
||||||
// significant bit defines the hot counts struct. The remaining 63 bits
|
// and finish by incrementing the count field in the respective
|
||||||
// represent the total count of observations. This happens under the
|
// histogramCounts, as a marker for completion.
|
||||||
// assumption that the 63bit count will never overflow. Rationale: An
|
|
||||||
// observations takes about 30ns. Let's assume it could happen in
|
|
||||||
// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
|
|
||||||
// which is about 3000 years.
|
|
||||||
//
|
//
|
||||||
// This has to be first in the struct for 64bit alignment. See
|
// Calls of the Write method (which are non-mutating reads from the
|
||||||
|
// perspective of the histogram) swap the hot–cold under the writeMtx
|
||||||
|
// lock. A cooldown is awaited (while locked) by comparing the number of
|
||||||
|
// observations with the initiation count. Once they match, then the
|
||||||
|
// last observation on the now cool one has completed. All cool fields must
|
||||||
|
// be merged into the new hot before releasing writeMtx.
|
||||||
|
//
|
||||||
|
// Fields with atomic access first! See alignment constraint:
|
||||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
countAndHotIdx uint64
|
countAndHotIdx uint64
|
||||||
|
|
||||||
|
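The packed countAndHotIdx counter described in the comment above can be illustrated in isolation: the most significant bit selects the hot counts slot, the low 63 bits count observations, and a single atomic add serves both purposes. A standalone sketch, not the vendored implementation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// Observe: bump the observation count in the low 63 bits and learn
	// which counts slot is currently hot from the top bit.
	n := atomic.AddUint64(&countAndHotIdx, 1)
	fmt.Println("hot slot:", n>>63, "observations:", n&((1<<63)-1))

	// Write: flip the most significant bit to swap hot and cold slots
	// without disturbing the observation count.
	n = atomic.AddUint64(&countAndHotIdx, 1<<63)
	fmt.Println("hot slot:", n>>63, "cold slot:", (^n)>>63, "observations:", n&((1<<63)-1))
}
```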
@ -243,16 +246,14 @@ type histogram struct {
|
||||||
desc *Desc
|
desc *Desc
|
||||||
writeMtx sync.Mutex // Only used in the Write method.
|
writeMtx sync.Mutex // Only used in the Write method.
|
||||||
|
|
||||||
upperBounds []float64
|
|
||||||
|
|
||||||
// Two counts, one is "hot" for lock-free observations, the other is
|
// Two counts, one is "hot" for lock-free observations, the other is
|
||||||
// "cold" for writing out a dto.Metric. It has to be an array of
|
// "cold" for writing out a dto.Metric. It has to be an array of
|
||||||
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
||||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
||||||
counts [2]*histogramCounts
|
counts [2]*histogramCounts
|
||||||
hotIdx int // Index of currently-hot counts. Only used within Write.
|
|
||||||
|
|
||||||
labelPairs []*dto.LabelPair
|
upperBounds []float64
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *histogram) Desc() *Desc {
|
func (h *histogram) Desc() *Desc {
|
||||||
|
@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) {
|
||||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
||||||
i := sort.SearchFloat64s(h.upperBounds, v)
|
i := sort.SearchFloat64s(h.upperBounds, v)
|
||||||
|
|
||||||
// We increment h.countAndHotIdx by 2 so that the counter in the upper
|
// We increment h.countAndHotIdx so that the counter in the lower
|
||||||
// 63 bits gets incremented by 1. At the same time, we get the new value
|
// 63 bits gets incremented. At the same time, we get the new value
|
||||||
// back, which we can use to find the currently-hot counts.
|
// back, which we can use to find the currently-hot counts.
|
||||||
n := atomic.AddUint64(&h.countAndHotIdx, 2)
|
n := atomic.AddUint64(&h.countAndHotIdx, 1)
|
||||||
hotCounts := h.counts[n%2]
|
hotCounts := h.counts[n>>63]
|
||||||
|
|
||||||
if i < len(h.upperBounds) {
|
if i < len(h.upperBounds) {
|
||||||
atomic.AddUint64(&hotCounts.buckets[i], 1)
|
atomic.AddUint64(&hotCounts.buckets[i], 1)
|
||||||
|
@ -293,72 +294,43 @@ func (h *histogram) Observe(v float64) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *histogram) Write(out *dto.Metric) error {
|
func (h *histogram) Write(out *dto.Metric) error {
|
||||||
var (
|
// For simplicity, we protect this whole method by a mutex. It is not in
|
||||||
his = &dto.Histogram{}
|
// the hot path, i.e. Observe is called much more often than Write. The
|
||||||
buckets = make([]*dto.Bucket, len(h.upperBounds))
|
// complication of making Write lock-free isn't worth it, if possible at
|
||||||
hotCounts, coldCounts *histogramCounts
|
// all.
|
||||||
count uint64
|
|
||||||
)
|
|
||||||
|
|
||||||
// For simplicity, we mutex the rest of this method. It is not in the
|
|
||||||
// hot path, i.e. Observe is called much more often than Write. The
|
|
||||||
// complication of making Write lock-free isn't worth it.
|
|
||||||
h.writeMtx.Lock()
|
h.writeMtx.Lock()
|
||||||
defer h.writeMtx.Unlock()
|
defer h.writeMtx.Unlock()
|
||||||
|
|
||||||
// This is a bit arcane, which is why the following spells out this if
|
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
|
||||||
// clause in English:
|
// without touching the count bits. See the struct comments for a full
|
||||||
//
|
// description of the algorithm.
|
||||||
// If the currently-hot counts struct is #0, we atomically increment
|
n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
|
||||||
// h.countAndHotIdx by 1 so that from now on Observe will use the counts
|
// count is contained unchanged in the lower 63 bits.
|
||||||
// struct #1. Furthermore, the atomic increment gives us the new value,
|
count := n & ((1 << 63) - 1)
|
||||||
// which, in its most significant 63 bits, tells us the count of
|
// The most significant bit tells us which counts is hot. The complement
|
||||||
// observations done so far up to and including currently ongoing
|
// is thus the cold one.
|
||||||
// observations still using the counts struct just changed from hot to
|
hotCounts := h.counts[n>>63]
|
||||||
// cold. To have a normal uint64 for the count, we bitshift by 1 and
|
coldCounts := h.counts[(^n)>>63]
|
||||||
// save the result in count. We also set h.hotIdx to 1 for the next
|
|
||||||
// Write call, and we will refer to counts #1 as hotCounts and to counts
|
|
||||||
// #0 as coldCounts.
|
|
||||||
//
|
|
||||||
// If the currently-hot counts struct is #1, we do the corresponding
|
|
||||||
// things the other way round. We have to _decrement_ h.countAndHotIdx
|
|
||||||
// (which is a bit arcane in itself, as we have to express -1 with an
|
|
||||||
// unsigned int...).
|
|
||||||
if h.hotIdx == 0 {
|
|
||||||
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
|
|
||||||
h.hotIdx = 1
|
|
||||||
hotCounts = h.counts[1]
|
|
||||||
coldCounts = h.counts[0]
|
|
||||||
} else {
|
|
||||||
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
|
|
||||||
h.hotIdx = 0
|
|
||||||
hotCounts = h.counts[0]
|
|
||||||
coldCounts = h.counts[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now we have to wait for the now-declared-cold counts to actually cool
|
// Await cooldown.
|
||||||
// down, i.e. wait for all observations still using it to finish. That's
|
for count != atomic.LoadUint64(&coldCounts.count) {
|
||||||
// the case once the count in the cold counts struct is the same as the
|
|
||||||
// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
|
|
||||||
for {
|
|
||||||
if count == atomic.LoadUint64(&coldCounts.count) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
runtime.Gosched() // Let observations get work done.
|
runtime.Gosched() // Let observations get work done.
|
||||||
}
|
}
|
||||||
|
|
||||||
his.SampleCount = proto.Uint64(count)
|
his := &dto.Histogram{
|
||||||
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
|
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
|
||||||
|
SampleCount: proto.Uint64(count),
|
||||||
|
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
|
||||||
|
}
|
||||||
var cumCount uint64
|
var cumCount uint64
|
||||||
for i, upperBound := range h.upperBounds {
|
for i, upperBound := range h.upperBounds {
|
||||||
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
|
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
|
||||||
buckets[i] = &dto.Bucket{
|
his.Bucket[i] = &dto.Bucket{
|
||||||
CumulativeCount: proto.Uint64(cumCount),
|
CumulativeCount: proto.Uint64(cumCount),
|
||||||
UpperBound: proto.Float64(upperBound),
|
UpperBound: proto.Float64(upperBound),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
his.Bucket = buckets
|
|
||||||
out.Histogram = his
|
out.Histogram = his
|
||||||
out.Label = h.labelPairs
|
out.Label = h.labelPairs
|
||||||
|
|
||||||
|
|
504 vendor/github.com/prometheus/client_golang/prometheus/http.go (generated, vendored)
|
@ -1,504 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"compress/gzip"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
|
||||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
|
||||||
// related should live. The functions here are just for avoiding
|
|
||||||
// breakage. Everything is deprecated.
|
|
||||||
|
|
||||||
const (
|
|
||||||
contentTypeHeader = "Content-Type"
|
|
||||||
contentLengthHeader = "Content-Length"
|
|
||||||
contentEncodingHeader = "Content-Encoding"
|
|
||||||
acceptEncodingHeader = "Accept-Encoding"
|
|
||||||
)
|
|
||||||
|
|
||||||
var gzipPool = sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return gzip.NewWriter(nil)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
|
||||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
|
||||||
// name).
|
|
||||||
//
|
|
||||||
// Deprecated: Please note the issues described in the doc comment of
|
|
||||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
|
|
||||||
func Handler() http.Handler {
|
|
||||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
|
||||||
}
|
|
||||||
|
|
||||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
|
||||||
//
|
|
||||||
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
|
|
||||||
// instead. See there for further documentation.
|
|
||||||
func UninstrumentedHandler() http.Handler {
|
|
||||||
return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
|
|
||||||
mfs, err := DefaultGatherer.Gather()
|
|
||||||
if err != nil {
|
|
||||||
httpError(rsp, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
contentType := expfmt.Negotiate(req.Header)
|
|
||||||
header := rsp.Header()
|
|
||||||
header.Set(contentTypeHeader, string(contentType))
|
|
||||||
|
|
||||||
w := io.Writer(rsp)
|
|
||||||
if gzipAccepted(req.Header) {
|
|
||||||
header.Set(contentEncodingHeader, "gzip")
|
|
||||||
gz := gzipPool.Get().(*gzip.Writer)
|
|
||||||
defer gzipPool.Put(gz)
|
|
||||||
|
|
||||||
gz.Reset(w)
|
|
||||||
defer gz.Close()
|
|
||||||
|
|
||||||
w = gz
|
|
||||||
}
|
|
||||||
|
|
||||||
enc := expfmt.NewEncoder(w, contentType)
|
|
||||||
|
|
||||||
for _, mf := range mfs {
|
|
||||||
if err := enc.Encode(mf); err != nil {
|
|
||||||
httpError(rsp, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
var instLabels = []string{"method", "code"}
|
|
||||||
|
|
||||||
type nower interface {
|
|
||||||
Now() time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type nowFunc func() time.Time
|
|
||||||
|
|
||||||
func (n nowFunc) Now() time.Time {
|
|
||||||
return n()
|
|
||||||
}
|
|
||||||
|
|
||||||
var now nower = nowFunc(func() time.Time {
|
|
||||||
return time.Now()
|
|
||||||
})
|
|
||||||
|
|
||||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
|
||||||
// registers four metric collectors (if not already done) and reports HTTP
|
|
||||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
|
||||||
// (CounterVec), http_request_duration_microseconds (Summary),
|
|
||||||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
|
||||||
// has a constant label named "handler" with the provided handlerName as
|
|
||||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
|
||||||
// (label name "method") and HTTP status code (label name "code").
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
|
|
||||||
// package promhttp instead. The issues are the following: (1) It uses Summaries
|
|
||||||
// rather than Histograms. Summaries are not useful if aggregation across
|
|
||||||
// multiple instances is required. (2) It uses microseconds as unit, which is
|
|
||||||
// deprecated and should be replaced by seconds. (3) The size of the request is
|
|
||||||
// calculated in a separate goroutine. Since this calculator requires access to
|
|
||||||
// the request header, it creates a race with any writes to the header performed
|
|
||||||
// during request handling. httputil.ReverseProxy is a prominent example for a
|
|
||||||
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
|
|
||||||
// https://github.com/prometheus/client_golang/issues/272.
|
|
||||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
|
||||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
|
||||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
|
||||||
// issues).
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
|
||||||
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
|
||||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
|
||||||
return InstrumentHandlerFuncWithOpts(
|
|
||||||
SummaryOpts{
|
|
||||||
Subsystem: "http",
|
|
||||||
ConstLabels: Labels{"handler": handlerName},
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
handlerFunc,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
|
||||||
// issues) but provides more flexibility (at the cost of a more complex call
|
|
||||||
// syntax). As InstrumentHandler, this function registers four metric
|
|
||||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
|
||||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
|
||||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
|
||||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
|
||||||
// help string. The names of the variable labels of the http_requests_total
|
|
||||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
|
||||||
//
|
|
||||||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
|
||||||
// behavior of InstrumentHandler:
|
|
||||||
//
|
|
||||||
// prometheus.InstrumentHandlerWithOpts(
|
|
||||||
// prometheus.SummaryOpts{
|
|
||||||
// Subsystem: "http",
|
|
||||||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
|
||||||
// },
|
|
||||||
// handler,
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
|
||||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
|
||||||
// and all its fields are set to the equally named fields in the provided
|
|
||||||
// SummaryOpts.
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
|
||||||
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
|
||||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
|
||||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
|
||||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
|
||||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
|
||||||
// SummaryOpts are used.
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
|
||||||
// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
|
||||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
|
||||||
reqCnt := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Namespace: opts.Namespace,
|
|
||||||
Subsystem: opts.Subsystem,
|
|
||||||
Name: "requests_total",
|
|
||||||
Help: "Total number of HTTP requests made.",
|
|
||||||
ConstLabels: opts.ConstLabels,
|
|
||||||
},
|
|
||||||
instLabels,
|
|
||||||
)
|
|
||||||
if err := Register(reqCnt); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqCnt = are.ExistingCollector.(*CounterVec)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "request_duration_microseconds"
|
|
||||||
opts.Help = "The HTTP request latencies in microseconds."
|
|
||||||
reqDur := NewSummary(opts)
|
|
||||||
if err := Register(reqDur); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqDur = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "request_size_bytes"
|
|
||||||
opts.Help = "The HTTP request sizes in bytes."
|
|
||||||
reqSz := NewSummary(opts)
|
|
||||||
if err := Register(reqSz); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqSz = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "response_size_bytes"
|
|
||||||
opts.Help = "The HTTP response sizes in bytes."
|
|
||||||
resSz := NewSummary(opts)
|
|
||||||
if err := Register(resSz); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
resSz = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
|
||||||
out := computeApproximateRequestSize(r)
|
|
||||||
|
|
||||||
_, cn := w.(http.CloseNotifier)
|
|
||||||
_, fl := w.(http.Flusher)
|
|
||||||
_, hj := w.(http.Hijacker)
|
|
||||||
_, rf := w.(io.ReaderFrom)
|
|
||||||
var rw http.ResponseWriter
|
|
||||||
if cn && fl && hj && rf {
|
|
||||||
rw = &fancyResponseWriterDelegator{delegate}
|
|
||||||
} else {
|
|
||||||
rw = delegate
|
|
||||||
}
|
|
||||||
handlerFunc(rw, r)
|
|
||||||
|
|
||||||
elapsed := float64(time.Since(now)) / float64(time.Microsecond)
|
|
||||||
|
|
||||||
method := sanitizeMethod(r.Method)
|
|
||||||
code := sanitizeCode(delegate.status)
|
|
||||||
reqCnt.WithLabelValues(method, code).Inc()
|
|
||||||
reqDur.Observe(elapsed)
|
|
||||||
resSz.Observe(float64(delegate.written))
|
|
||||||
reqSz.Observe(float64(<-out))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeApproximateRequestSize(r *http.Request) <-chan int {
|
|
||||||
// Get URL length in current goroutine for avoiding a race condition.
|
|
||||||
// HandlerFunc that runs in parallel may modify the URL.
|
|
||||||
s := 0
|
|
||||||
if r.URL != nil {
|
|
||||||
s += len(r.URL.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make(chan int, 1)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
s += len(r.Method)
|
|
||||||
s += len(r.Proto)
|
|
||||||
for name, values := range r.Header {
|
|
||||||
s += len(name)
|
|
||||||
for _, value := range values {
|
|
||||||
s += len(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s += len(r.Host)
|
|
||||||
|
|
||||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
|
||||||
|
|
||||||
if r.ContentLength != -1 {
|
|
||||||
s += int(r.ContentLength)
|
|
||||||
}
|
|
||||||
out <- s
|
|
||||||
close(out)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
type responseWriterDelegator struct {
|
|
||||||
http.ResponseWriter
|
|
||||||
|
|
||||||
status int
|
|
||||||
written int64
|
|
||||||
wroteHeader bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
|
||||||
r.status = code
|
|
||||||
r.wroteHeader = true
|
|
||||||
r.ResponseWriter.WriteHeader(code)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
|
||||||
if !r.wroteHeader {
|
|
||||||
r.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
n, err := r.ResponseWriter.Write(b)
|
|
||||||
r.written += int64(n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
type fancyResponseWriterDelegator struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
|
|
||||||
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) Flush() {
|
|
||||||
f.ResponseWriter.(http.Flusher).Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
|
||||||
return f.ResponseWriter.(http.Hijacker).Hijack()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
|
|
||||||
if !f.wroteHeader {
|
|
||||||
f.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
|
|
||||||
f.written += n
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func sanitizeMethod(m string) string {
|
|
||||||
switch m {
|
|
||||||
case "GET", "get":
|
|
||||||
return "get"
|
|
||||||
case "PUT", "put":
|
|
||||||
return "put"
|
|
||||||
case "HEAD", "head":
|
|
||||||
return "head"
|
|
||||||
case "POST", "post":
|
|
||||||
return "post"
|
|
||||||
case "DELETE", "delete":
|
|
||||||
return "delete"
|
|
||||||
case "CONNECT", "connect":
|
|
||||||
return "connect"
|
|
||||||
case "OPTIONS", "options":
|
|
||||||
return "options"
|
|
||||||
case "NOTIFY", "notify":
|
|
||||||
return "notify"
|
|
||||||
default:
|
|
||||||
return strings.ToLower(m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sanitizeCode(s int) string {
|
|
||||||
switch s {
|
|
||||||
case 100:
|
|
||||||
return "100"
|
|
||||||
case 101:
|
|
||||||
return "101"
|
|
||||||
|
|
||||||
case 200:
|
|
||||||
return "200"
|
|
||||||
case 201:
|
|
||||||
return "201"
|
|
||||||
case 202:
|
|
||||||
return "202"
|
|
||||||
case 203:
|
|
||||||
return "203"
|
|
||||||
case 204:
|
|
||||||
return "204"
|
|
||||||
case 205:
|
|
||||||
return "205"
|
|
||||||
case 206:
|
|
||||||
return "206"
|
|
||||||
|
|
||||||
case 300:
|
|
||||||
return "300"
|
|
||||||
case 301:
|
|
||||||
return "301"
|
|
||||||
case 302:
|
|
||||||
return "302"
|
|
||||||
case 304:
|
|
||||||
return "304"
|
|
||||||
case 305:
|
|
||||||
return "305"
|
|
||||||
case 307:
|
|
||||||
return "307"
|
|
||||||
|
|
||||||
case 400:
|
|
||||||
return "400"
|
|
||||||
case 401:
|
|
||||||
return "401"
|
|
||||||
case 402:
|
|
||||||
return "402"
|
|
||||||
case 403:
|
|
||||||
return "403"
|
|
||||||
case 404:
|
|
||||||
return "404"
|
|
||||||
case 405:
|
|
||||||
return "405"
|
|
||||||
case 406:
|
|
||||||
return "406"
|
|
||||||
case 407:
|
|
||||||
return "407"
|
|
||||||
case 408:
|
|
||||||
return "408"
|
|
||||||
case 409:
|
|
||||||
return "409"
|
|
||||||
case 410:
|
|
||||||
return "410"
|
|
||||||
case 411:
|
|
||||||
return "411"
|
|
||||||
case 412:
|
|
||||||
return "412"
|
|
||||||
case 413:
|
|
||||||
return "413"
|
|
||||||
case 414:
|
|
||||||
return "414"
|
|
||||||
case 415:
|
|
||||||
return "415"
|
|
||||||
case 416:
|
|
||||||
return "416"
|
|
||||||
case 417:
|
|
||||||
return "417"
|
|
||||||
case 418:
|
|
||||||
return "418"
|
|
||||||
|
|
||||||
case 500:
|
|
||||||
return "500"
|
|
||||||
case 501:
|
|
||||||
return "501"
|
|
||||||
case 502:
|
|
||||||
return "502"
|
|
||||||
case 503:
|
|
||||||
return "503"
|
|
||||||
case 504:
|
|
||||||
return "504"
|
|
||||||
case 505:
|
|
||||||
return "505"
|
|
||||||
|
|
||||||
case 428:
|
|
||||||
return "428"
|
|
||||||
case 429:
|
|
||||||
return "429"
|
|
||||||
case 431:
|
|
||||||
return "431"
|
|
||||||
case 511:
|
|
||||||
return "511"
|
|
||||||
|
|
||||||
default:
|
|
||||||
return strconv.Itoa(s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
|
||||||
func gzipAccepted(header http.Header) bool {
|
|
||||||
a := header.Get(acceptEncodingHeader)
|
|
||||||
parts := strings.Split(a, ",")
|
|
||||||
for _, part := range parts {
|
|
||||||
part = strings.TrimSpace(part)
|
|
||||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpError removes any content-encoding header and then calls http.Error with
|
|
||||||
// the provided error and http.StatusInternalServerErrer. Error contents is
|
|
||||||
// supposed to be uncompressed plain text. However, same as with a plain
|
|
||||||
// http.Error, any header settings will be void if the header has already been
|
|
||||||
// sent. The error message will still be written to the writer, but it will
|
|
||||||
// probably be of limited use.
|
|
||||||
func httpError(rsp http.ResponseWriter, err error) {
|
|
||||||
rsp.Header().Del(contentEncodingHeader)
|
|
||||||
http.Error(
|
|
||||||
rsp,
|
|
||||||
"An error has occurred while serving metrics:\n\n"+err.Error(),
|
|
||||||
http.StatusInternalServerError,
|
|
||||||
)
|
|
||||||
}
|
|
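The 504 deleted lines above are the long-deprecated InstrumentHandler family; their doc comments already pointed at the promhttp package as the replacement. A sketch of the equivalent instrumentation with promhttp middleware (the metric name is illustrative, not mandated by the library):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Request durations in seconds, partitioned by status code and method,
	// replacing the deprecated microsecond Summaries.
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Duration of HTTP requests.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	http.Handle("/", promhttp.InstrumentHandlerDuration(duration, hello))
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```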
61 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored)
|
@ -16,8 +16,6 @@ package prometheus
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/prometheus/procfs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type processCollector struct {
|
type processCollector struct {
|
||||||
|
@ -59,20 +57,9 @@ type ProcessCollectorOpts struct {
|
||||||
// collector for the current process with an empty namespace string and no error
|
// collector for the current process with an empty namespace string and no error
|
||||||
// reporting.
|
// reporting.
|
||||||
//
|
//
|
||||||
// Currently, the collector depends on a Linux-style proc filesystem and
|
// The collector only works on operating systems with a Linux-style proc
|
||||||
// therefore only exports metrics for Linux.
|
// filesystem and on Microsoft Windows. On other operating systems, it will not
|
||||||
//
|
// collect any metrics.
|
||||||
// Note: An older version of this function had the following signature:
|
|
||||||
//
|
|
||||||
// NewProcessCollector(pid int, namespace string) Collector
|
|
||||||
//
|
|
||||||
// Most commonly, it was called as
|
|
||||||
//
|
|
||||||
// NewProcessCollector(os.Getpid(), "")
|
|
||||||
//
|
|
||||||
// The following call of the current version is equivalent to the above:
|
|
||||||
//
|
|
||||||
// NewProcessCollector(ProcessCollectorOpts{})
|
|
||||||
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||||
ns := ""
|
ns := ""
|
||||||
if len(opts.Namespace) > 0 {
|
if len(opts.Namespace) > 0 {
|
||||||
|
@ -126,7 +113,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set up process metric collection if supported by the runtime.
|
// Set up process metric collection if supported by the runtime.
|
||||||
if _, err := procfs.NewStat(); err == nil {
|
if canCollectProcess() {
|
||||||
c.collectFn = c.processCollect
|
c.collectFn = c.processCollect
|
||||||
} else {
|
} else {
|
||||||
c.collectFn = func(ch chan<- Metric) {
|
c.collectFn = func(ch chan<- Metric) {
|
||||||
|
@ -153,46 +140,6 @@ func (c *processCollector) Collect(ch chan<- Metric) {
|
||||||
c.collectFn(ch)
|
c.collectFn(ch)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
|
||||||
pid, err := c.pidFn()
|
|
||||||
if err != nil {
|
|
||||||
c.reportError(ch, nil, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err := procfs.NewProc(pid)
|
|
||||||
if err != nil {
|
|
||||||
c.reportError(ch, nil, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if stat, err := p.NewStat(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
|
|
||||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
|
|
||||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
|
||||||
if startTime, err := stat.StartTime(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
|
||||||
} else {
|
|
||||||
c.reportError(ch, c.startTime, err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
c.reportError(ch, nil, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
|
||||||
} else {
|
|
||||||
c.reportError(ch, c.openFDs, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if limits, err := p.NewLimits(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
|
||||||
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
|
|
||||||
} else {
|
|
||||||
c.reportError(ch, nil, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||||
if !c.reportErrors {
|
if !c.reportErrors {
|
||||||
return
|
return
|
||||||
|
|
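The removed comment block documented the old constructor signature; the current one takes an options struct. A minimal sketch of registering the process collector with the signature shown above (the registry and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Current constructor: options struct instead of (pid, namespace).
	// An empty ProcessCollectorOpts collects for the current process with no
	// namespace, matching the old NewProcessCollector(os.Getpid(), "") call.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":9090", nil)
}
```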
65 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go generated vendored Normal file

@@ -0,0 +1,65 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package prometheus
+
+import (
+	"github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+	_, err := procfs.NewDefaultFS()
+	return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	if stat, err := p.Stat(); err == nil {
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+		if startTime, err := stat.StartTime(); err == nil {
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if limits, err := p.Limits(); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}
112 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go generated vendored Normal file

@@ -0,0 +1,112 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+	return true
+}
+
+var (
+	modpsapi    = syscall.NewLazyDLL("psapi.dll")
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
+	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
+	_                          uint32
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uint64
+	WorkingSetSize             uint64
+	QuotaPeakPagedPoolUsage    uint64
+	QuotaPagedPoolUsage        uint64
+	QuotaPeakNonPagedPoolUsage uint64
+	QuotaNonPagedPoolUsage     uint64
+	PagefileUsage              uint64
+	PeakPagefileUsage          uint64
+	PrivateUsage               uint64
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+	mem := processMemoryCounters{}
+	r1, _, err := procGetProcessMemoryInfo.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&mem)),
+		uintptr(unsafe.Sizeof(mem)),
+	)
+	if r1 != 1 {
+		return mem, err
+	} else {
+		return mem, nil
+	}
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+	var count uint32
+	r1, _, err := procGetProcessHandleCount.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&count)),
+	)
+	if r1 != 1 {
+		return 0, err
+	} else {
+		return count, nil
+	}
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	h, err := windows.GetCurrentProcess()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	var startTime, exitTime, kernelTime, userTime windows.Filetime
+	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+	mem, err := getProcessMemoryInfo(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+	handles, err := getProcessHandleCount(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
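The Windows collector converts FILETIME values (64-bit counts of 100-nanosecond ticks, split into 32-bit high and low words) into seconds. A small standalone check of the same arithmetic used by fileTimeToSeconds above (the local function below redeclares it with plain uint32 arguments purely for illustration):

```go
package main

import "fmt"

// Mirror of the fileTimeToSeconds arithmetic: seconds = (high<<32 + low) / 1e7,
// since each tick is 100 ns and 1e7 ticks make one second.
func fileTimeToSeconds(high, low uint32) float64 {
	return float64(uint64(high)<<32+uint64(low)) / 1e7
}

func main() {
	fmt.Println(fileTimeToSeconds(0, 10000000)) // 1 second of CPU time
	// high=1, low=0 is 2^32 = 4294967296 ticks, i.e. about 429.4967296 seconds.
	fmt.Println(fileTimeToSeconds(1, 0))
}
```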
160 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go generated vendored

@@ -38,7 +38,6 @@ type delegator interface {
 type responseWriterDelegator struct {
 	http.ResponseWriter
 
-	handler, method string
 	status      int
 	written     int64
 	wroteHeader bool

@@ -75,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator }
 type flusherDelegator struct{ *responseWriterDelegator }
 type hijackerDelegator struct{ *responseWriterDelegator }
 type readerFromDelegator struct{ *responseWriterDelegator }
+type pusherDelegator struct{ *responseWriterDelegator }
 
 func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
 func (d flusherDelegator) Flush() {

@@ -93,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	d.written += n
 	return n, err
 }
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
 
 var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
 

@@ -196,4 +201,157 @@ func init() {
 			http.CloseNotifier
 		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
 	}
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, pusherDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, pusherDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, pusherDelegator{d}, readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
 }
181 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go generated vendored

@@ -1,181 +0,0 @@
// Copyright 2017 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package promhttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
type pusherDelegator struct{ *responseWriterDelegator }
|
|
||||||
|
|
||||||
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
|
||||||
return d.ResponseWriter.(http.Pusher).Push(target, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
|
||||||
return pusherDelegator{d}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
|
||||||
d := &responseWriterDelegator{
|
|
||||||
ResponseWriter: w,
|
|
||||||
observeWriteHeader: observeWriteHeaderFunc,
|
|
||||||
}
|
|
||||||
|
|
||||||
id := 0
|
|
||||||
if _, ok := w.(http.CloseNotifier); ok {
|
|
||||||
id += closeNotifier
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Flusher); ok {
|
|
||||||
id += flusher
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Hijacker); ok {
|
|
||||||
id += hijacker
|
|
||||||
}
|
|
||||||
if _, ok := w.(io.ReaderFrom); ok {
|
|
||||||
id += readerFrom
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Pusher); ok {
|
|
||||||
id += pusher
|
|
||||||
}
|
|
||||||
|
|
||||||
return pickDelegator[id](d)
|
|
||||||
}
|
|
44 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go generated vendored

@@ -1,44 +0,0 @@
// Copyright 2017 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build !go1.8
|
|
||||||
|
|
||||||
package promhttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
|
||||||
d := &responseWriterDelegator{
|
|
||||||
ResponseWriter: w,
|
|
||||||
observeWriteHeader: observeWriteHeaderFunc,
|
|
||||||
}
|
|
||||||
|
|
||||||
id := 0
|
|
||||||
if _, ok := w.(http.CloseNotifier); ok {
|
|
||||||
id += closeNotifier
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Flusher); ok {
|
|
||||||
id += flusher
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Hijacker); ok {
|
|
||||||
id += hijacker
|
|
||||||
}
|
|
||||||
if _, ok := w.(io.ReaderFrom); ok {
|
|
||||||
id += readerFrom
|
|
||||||
}
|
|
||||||
|
|
||||||
return pickDelegator[id](d)
|
|
||||||
}
|
|
48 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go generated vendored

@@ -47,7 +47,6 @@ import (
 
 const (
 	contentTypeHeader     = "Content-Type"
-	contentLengthHeader   = "Content-Length"
 	contentEncodingHeader = "Content-Encoding"
 	acceptEncodingHeader  = "Accept-Encoding"
 )

@@ -85,10 +84,32 @@ func Handler() http.Handler {
 // instrumentation. Use the InstrumentMetricHandler function to apply the same
 // kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	var inFlightSem chan struct{}
+	var (
+		inFlightSem chan struct{}
+		errCnt      = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "promhttp_metric_handler_errors_total",
+				Help: "Total number of internal errors encountered by the promhttp metric handler.",
+			},
+			[]string{"cause"},
+		)
+	)
+
 	if opts.MaxRequestsInFlight > 0 {
 		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
 	}
+	if opts.Registry != nil {
+		// Initialize all possibilites that can occur below.
+		errCnt.WithLabelValues("gathering")
+		errCnt.WithLabelValues("encoding")
+		if err := opts.Registry.Register(errCnt); err != nil {
+			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+			} else {
+				panic(err)
+			}
+		}
+	}
 
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if inFlightSem != nil {

@@ -107,6 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 			if opts.ErrorLog != nil {
 				opts.ErrorLog.Println("error gathering metrics:", err)
 			}
+			errCnt.WithLabelValues("gathering").Inc()
 			switch opts.ErrorHandling {
 			case PanicOnError:
 				panic(err)

@@ -147,6 +169,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				if opts.ErrorLog != nil {
 					opts.ErrorLog.Println("error encoding and sending metric family:", err)
 				}
+				errCnt.WithLabelValues("encoding").Inc()
 				switch opts.ErrorHandling {
 				case PanicOnError:
 					panic(err)

@@ -237,9 +260,12 @@ const (
 	// Ignore errors and try to serve as many metrics as possible. However,
 	// if no metrics can be served, serve an HTTP status code 500 and the
 	// last error message in the body. Only use this in deliberate "best
-	// effort" metrics collection scenarios. It is recommended to at least
-	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
-	// errors completely.
+	// effort" metrics collection scenarios. In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
 	ContinueOnError
 	// Panic upon the first error encountered (useful for "crash only" apps).
 	PanicOnError

@@ -262,6 +288,18 @@ type HandlerOpts struct {
 	// logged regardless of the configured ErrorHandling provided ErrorLog
 	// is not nil.
 	ErrorHandling HandlerErrorHandling
+	// If Registry is not nil, it is used to register a metric
+	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+	// failed registration causes a panic. Note that this error counter is
+	// different from the instrumentation you get from the various
+	// InstrumentHandler... helpers. It counts errors that don't necessarily
+	// result in a non-2xx HTTP status code. There are two typical cases:
+	// (1) Encoding errors that only happen after streaming of the HTTP body
+	// has already started (and the status code 200 has been sent). This
+	// should only happen with custom collectors. (2) Collection errors with
+	// no effect on the HTTP status code because ErrorHandling is set to
+	// ContinueOnError.
+	Registry prometheus.Registerer
 	// If DisableCompression is true, the handler will never compress the
 	// response, even if requested by the client.
 	DisableCompression bool
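Wiring the new HandlerOpts.Registry field makes the handler self-report its internal failures, so ContinueOnError no longer hides them completely. A minimal sketch using only names introduced in the hunks above (the registry setup and address are illustrative):

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// With Registry set, the handler registers and exposes
	// promhttp_metric_handler_errors_total{cause="gathering"|"encoding"}.
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		Registry:      reg,
		ErrorHandling: promhttp.ContinueOnError,
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
	})

	http.Handle("/metrics", h)
	http.ListenAndServe(":9090", nil)
}
```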
122 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go generated vendored

@@ -14,7 +14,9 @@
 package promhttp
 
 import (
+	"crypto/tls"
 	"net/http"
+	"net/http/httptrace"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"

@@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		return resp, err
 	})
 }
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately buckets Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
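A minimal sketch of using the trace instrumentation added above: only the hooks that are set get called, and each receives seconds since the request started. The metric name, buckets, and target URL below are illustrative.

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	traceLatency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_client_trace_seconds",
		Help:    "Seconds from request start to selected httptrace events.",
		Buckets: prometheus.DefBuckets,
	}, []string{"event"})
	prometheus.MustRegister(traceLatency)

	trace := &promhttp.InstrumentTrace{
		DNSDone:          func(t float64) { traceLatency.WithLabelValues("dns_done").Observe(t) },
		GotConn:          func(t float64) { traceLatency.WithLabelValues("got_conn").Observe(t) },
		TLSHandshakeDone: func(t float64) { traceLatency.WithLabelValues("tls_done").Observe(t) },
	}

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	if resp, err := client.Get("http://example.org"); err == nil {
		resp.Body.Close()
	}
}
```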
144 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go generated vendored

@@ -1,144 +0,0 @@
// Copyright 2017 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package promhttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptrace"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
|
||||||
// httptrace.ClientTrace hook functions. Each function is passed a float64
|
|
||||||
// representing the time in seconds since the start of the http request. A user
|
|
||||||
// may choose to use separately buckets Histograms, or implement custom
|
|
||||||
// instance labels on a per function basis.
|
|
||||||
type InstrumentTrace struct {
|
|
||||||
GotConn func(float64)
|
|
||||||
PutIdleConn func(float64)
|
|
||||||
GotFirstResponseByte func(float64)
|
|
||||||
Got100Continue func(float64)
|
|
||||||
DNSStart func(float64)
|
|
||||||
DNSDone func(float64)
|
|
||||||
ConnectStart func(float64)
|
|
||||||
ConnectDone func(float64)
|
|
||||||
TLSHandshakeStart func(float64)
|
|
||||||
TLSHandshakeDone func(float64)
|
|
||||||
WroteHeaders func(float64)
|
|
||||||
Wait100Continue func(float64)
|
|
||||||
WroteRequest func(float64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentRoundTripperTrace is a middleware that wraps the provided
|
|
||||||
// RoundTripper and reports times to hook functions provided in the
|
|
||||||
// InstrumentTrace struct. Hook functions that are not present in the provided
|
|
||||||
// InstrumentTrace struct are ignored. Times reported to the hook functions are
|
|
||||||
// time since the start of the request. Only with Go1.9+, those times are
|
|
||||||
// guaranteed to never be negative. (Earlier Go versions are not using a
|
|
||||||
// monotonic clock.) Note that partitioning of Histograms is expensive and
|
|
||||||
// should be used judiciously.
|
|
||||||
//
|
|
||||||
// For hook functions that receive an error as an argument, no observations are
|
|
||||||
// made in the event of a non-nil error value.
|
|
||||||
//
|
|
||||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
|
||||||
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
|
||||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
trace := &httptrace.ClientTrace{
|
|
||||||
GotConn: func(_ httptrace.GotConnInfo) {
|
|
||||||
if it.GotConn != nil {
|
|
||||||
it.GotConn(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
PutIdleConn: func(err error) {
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if it.PutIdleConn != nil {
|
|
||||||
it.PutIdleConn(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
DNSStart: func(_ httptrace.DNSStartInfo) {
|
|
||||||
if it.DNSStart != nil {
|
|
||||||
it.DNSStart(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
DNSDone: func(_ httptrace.DNSDoneInfo) {
|
|
||||||
if it.DNSDone != nil {
|
|
||||||
it.DNSDone(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
ConnectStart: func(_, _ string) {
|
|
||||||
if it.ConnectStart != nil {
|
|
||||||
it.ConnectStart(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
ConnectDone: func(_, _ string, err error) {
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if it.ConnectDone != nil {
|
|
||||||
it.ConnectDone(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
GotFirstResponseByte: func() {
|
|
||||||
if it.GotFirstResponseByte != nil {
|
|
||||||
it.GotFirstResponseByte(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Got100Continue: func() {
|
|
||||||
if it.Got100Continue != nil {
|
|
||||||
it.Got100Continue(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
TLSHandshakeStart: func() {
|
|
||||||
if it.TLSHandshakeStart != nil {
|
|
||||||
it.TLSHandshakeStart(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if it.TLSHandshakeDone != nil {
|
|
||||||
it.TLSHandshakeDone(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
WroteHeaders: func() {
|
|
||||||
if it.WroteHeaders != nil {
|
|
||||||
it.WroteHeaders(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Wait100Continue: func() {
|
|
||||||
if it.Wait100Continue != nil {
|
|
||||||
it.Wait100Continue(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
WroteRequest: func(_ httptrace.WroteRequestInfo) {
|
|
||||||
if it.WroteRequest != nil {
|
|
||||||
it.WroteRequest(time.Since(start).Seconds())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
|
|
||||||
|
|
||||||
return next.RoundTrip(r)
|
|
||||||
})
|
|
||||||
}
|
|
16 vendor/github.com/prometheus/client_golang/prometheus/registry.go generated vendored

@@ -325,9 +325,17 @@ func (r *Registry) Register(c Collector) error {
 		return nil
 	}
 	if existing, exists := r.collectorsByID[collectorID]; exists {
-		return AlreadyRegisteredError{
-			ExistingCollector: existing,
-			NewCollector:      c,
+		switch e := existing.(type) {
+		case *wrappingCollector:
+			return AlreadyRegisteredError{
+				ExistingCollector: e.unwrapRecursively(),
+				NewCollector:      c,
+			}
+		default:
+			return AlreadyRegisteredError{
+				ExistingCollector: e,
+				NewCollector:      c,
+			}
 		}
 	}
 	// If the collectorID is new, but at least one of the descs existed

@@ -680,7 +688,7 @@ func processMetric(
 // Gatherers is a slice of Gatherer instances that implements the Gatherer
 // interface itself. Its Gather method calls Gather on all Gatherers in the
 // slice in order and returns the merged results. Errors returned from the
-// Gather calles are all returned in a flattened MultiError. Duplicate and
+// Gather calls are all returned in a flattened MultiError. Duplicate and
 // inconsistent Metrics are skipped (first occurrence in slice order wins) and
 // reported in the returned error.
 //
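The Register change above affects the common "register or reuse" pattern: when the already-registered collector came from a wrapping (label-adding) registerer, ExistingCollector is now unwrapped, so type assertions on it keep working. A sketch of that pattern (the registerOrReuse helper name is mine, not part of the library):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

// If an identical collector is already registered, adopt it instead of failing.
func registerOrReuse(reg prometheus.Registerer, c prometheus.Counter) prometheus.Counter {
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector.(prometheus.Counter)
		}
		panic(err)
	}
	return c
}

func main() {
	reg := prometheus.NewRegistry()
	first := registerOrReuse(reg, prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"}))
	second := registerOrReuse(reg, prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"}))
	_ = first
	_ = second // same underlying collector as first
}
```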
152
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
152
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
|
@ -16,8 +16,10 @@ package prometheus
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/beorn7/perks/quantile"
|
"github.com/beorn7/perks/quantile"
|
||||||
|
@ -37,7 +39,7 @@ const quantileLabel = "quantile"
|
||||||
// A typical use-case is the observation of request latencies. By default, a
|
// A typical use-case is the observation of request latencies. By default, a
|
||||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||||
// as rank estimations. However, the default behavior will change in the
|
// as rank estimations. However, the default behavior will change in the
|
||||||
// upcoming v0.10 of the library. There will be no rank estimations at all by
|
// upcoming v1.0.0 of the library. There will be no rank estimations at all by
|
||||||
// default. For a sane transition, it is recommended to set the desired rank
|
// default. For a sane transition, it is recommended to set the desired rank
|
||||||
// estimations explicitly.
|
// estimations explicitly.
|
||||||
//
|
//
|
||||||
|
@ -56,16 +58,8 @@ type Summary interface {
|
||||||
Observe(float64)
|
Observe(float64)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefObjectives are the default Summary quantile values.
|
var errQuantileLabelNotAllowed = fmt.Errorf(
|
||||||
//
|
"%q is not allowed as label name in summaries", quantileLabel,
|
||||||
// Deprecated: DefObjectives will not be used as the default objectives in
|
|
||||||
// v0.10 of the library. The default Summary will have no quantiles then.
|
|
||||||
var (
|
|
||||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
|
||||||
|
|
||||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
|
||||||
"%q is not allowed as label name in summaries", quantileLabel,
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Default values for SummaryOpts.
|
// Default values for SummaryOpts.
|
||||||
|
@ -84,7 +78,7 @@ const (
|
||||||
// mandatory to set Name to a non-empty string. While all other fields are
|
// mandatory to set Name to a non-empty string. While all other fields are
|
||||||
// optional and can safely be left at their zero value, it is recommended to set
|
// optional and can safely be left at their zero value, it is recommended to set
|
||||||
// a help string and to explicitly set the Objectives field to the desired value
|
// a help string and to explicitly set the Objectives field to the desired value
|
||||||
// as the default value will change in the upcoming v0.10 of the library.
|
// as the default value will change in the upcoming v1.0.0 of the library.
|
||||||
type SummaryOpts struct {
|
type SummaryOpts struct {
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||||
// name of the Summary (created by joining these components with
|
// name of the Summary (created by joining these components with
|
||||||
|
@ -121,13 +115,8 @@ type SummaryOpts struct {
|
||||||
// Objectives defines the quantile rank estimates with their respective
|
// Objectives defines the quantile rank estimates with their respective
|
||||||
// absolute error. If Objectives[q] = e, then the value reported for q
|
// absolute error. If Objectives[q] = e, then the value reported for q
|
||||||
// will be the φ-quantile value for some φ between q-e and q+e. The
|
// will be the φ-quantile value for some φ between q-e and q+e. The
|
||||||
// default value is DefObjectives. It is used if Objectives is left at
|
// default value is an empty map, resulting in a summary without
|
||||||
// its zero value (i.e. nil). To create a Summary without Objectives,
|
// quantiles.
|
||||||
// set it to an empty map (i.e. map[float64]float64{}).
|
|
||||||
//
|
|
||||||
// Deprecated: Note that the current value of DefObjectives is
|
|
||||||
// deprecated. It will be replaced by an empty map in v0.10 of the
|
|
||||||
// library. Please explicitly set Objectives to the desired value.
|
|
||||||
Objectives map[float64]float64
|
Objectives map[float64]float64
|
||||||
|
|
||||||
// MaxAge defines the duration for which an observation stays relevant
|
// MaxAge defines the duration for which an observation stays relevant
|
||||||
|
@ -151,7 +140,7 @@ type SummaryOpts struct {
|
||||||
BufCap uint32
|
BufCap uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
// Problem with the sliding-window decay algorithm... The Merge method of
|
||||||
// perk/quantile is actually not working as advertised - and it might be
|
// perk/quantile is actually not working as advertised - and it might be
|
||||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||||
// summaries in the first place. To avoid using Merge, we are currently adding
|
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||||
|
@ -196,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.Objectives == nil {
|
if opts.Objectives == nil {
|
||||||
opts.Objectives = DefObjectives
|
opts.Objectives = map[float64]float64{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.MaxAge < 0 {
|
if opts.MaxAge < 0 {
|
||||||
|
@ -214,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||||
opts.BufCap = DefBufCap
|
opts.BufCap = DefBufCap
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(opts.Objectives) == 0 {
|
||||||
|
// Use the lock-free implementation of a Summary without objectives.
|
||||||
|
s := &noObjectivesSummary{
|
||||||
|
desc: desc,
|
||||||
|
labelPairs: makeLabelPairs(desc, labelValues),
|
||||||
|
counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
|
||||||
|
}
|
||||||
|
s.init(s) // Init self-collection.
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
s := &summary{
|
s := &summary{
|
||||||
desc: desc,
|
desc: desc,
|
||||||
|
|
||||||
|
@ -382,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type summaryCounts struct {
|
||||||
|
// sumBits contains the bits of the float64 representing the sum of all
|
||||||
|
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot–cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	return nil
+}
+
 type quantSort []*dto.Quantile
 
 func (s quantSort) Len() int {
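As a standalone illustration of the technique used in the Observe method above (not part of the vendored diff), here is a minimal sketch of atomically adding a float64 that is stored as a uint64 bit pattern, using only the standard library:

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// atomicAddFloat adds v to the float64 stored (as raw bits) in *bits using a
// compare-and-swap loop, the same pattern the summary uses for its sumBits field.
func atomicAddFloat(bits *uint64, v float64) {
	for {
		oldBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomicAddFloat(&sumBits, 0.5)
		}()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // 50
}
```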
vendor/github.com/prometheus/client_golang/prometheus/wrap.go (generated, vendored, 21 lines changed)
@@ -32,6 +32,12 @@ import (
 // WrapRegistererWith provides a way to add fixed labels to a subset of
 // Collectors. It should not be used to add fixed labels to all metrics exposed.
 //
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
 // The Collector example demonstrates a use of WrapRegistererWith.
 func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 	return &wrappingRegisterer{
@@ -54,6 +60,12 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 // (see NewGoCollector) and the process collector (see NewProcessCollector). (In
 // fact, those metrics are already prefixed with “go_” or “process_”,
 // respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
 func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
 	return &wrappingRegisterer{
 		wrappedRegisterer: reg,
@@ -123,6 +135,15 @@ func (c *wrappingCollector) Describe(ch chan<- *Desc) {
 	}
 }
 
+func (c *wrappingCollector) unwrapRecursively() Collector {
+	switch wc := c.wrappedCollector.(type) {
+	case *wrappingCollector:
+		return wc.unwrapRecursively()
+	default:
+		return wc
+	}
+}
+
 type wrappingMetric struct {
 	wrappedMetric Metric
 	prefix        string
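The doc comments above describe how the wrapping Registerer behaves. A minimal usage sketch (illustrative only; the metric name and label are made up) that adds a fixed label and a name prefix to a single collector:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Metrics registered through `wrapped` get the fixed label subsystem="cache"
	// and the name prefix "myapp_"; other registrations on `reg` are untouched.
	wrapped := prometheus.WrapRegistererWithPrefix("myapp_",
		prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "cache"}, reg))

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total",
		Help: "Cache hits.",
	})
	wrapped.MustRegister(hits)
	hits.Inc()

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println(mfs[0].GetName()) // myapp_hits_total
}
```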
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go (generated, vendored, 6 lines changed)
@@ -1,12 +1,12 @@
 /*
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
 HTTP Content-Type Autonegotiation.
 
 The functions in this package implement the behaviour specified in
 http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
 
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
 met:
vendor/github.com/prometheus/common/model/metric.go (generated, vendored, 1 line changed)
@@ -21,7 +21,6 @@ import (
 )
 
 var (
-	separator = []byte{0}
 	// MetricNameRE is a regular expression matching valid metric
 	// names. Note that the IsValidMetricName function performs the same
 	// check but faster than a match with this regular expression.
vendor/github.com/prometheus/common/model/time.go (generated, vendored, 8 lines changed)
@@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 			return err
 		}
 
-		*t = Time(v + va)
+		// If the value was something like -0.1 the negative is lost in the
+		// parsing because of the leading zero, this ensures that we capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}
 
 	default:
 		return fmt.Errorf("invalid time %q", string(b))
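For context on the fix above, a standalone sketch (illustrative, not from the vendored code) of why the sign check is needed: when a value such as "-0.1" is split on the decimal point, the integer part parses to 0 and the leading minus is lost unless the original string is inspected.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	p := strings.Split("-0.1", ".")
	v, _ := strconv.ParseInt(p[0], 10, 64) // "-0" parses to 0; the sign is gone
	fmt.Println(v, len(p[0]) > 0 && p[0][0] == '-')
	// Output: 0 true
}
```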
vendor/github.com/prometheus/procfs/.golangci.yml (generated, vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
+# Run only staticcheck for now. Additional linters will be enabled one-by-one.
+linters:
+  enable:
+  - staticcheck
+  - govet
+  disable-all: true
vendor/github.com/prometheus/procfs/MAINTAINERS.md (generated, vendored, 3 lines changed)
@@ -1 +1,2 @@
-* Tobias Schmidt <tobidt@gmail.com>
+* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
+* Paul Gier <pgier@redhat.com> @pgier
vendor/github.com/prometheus/procfs/Makefile (generated, vendored, 11 lines changed)
@@ -14,17 +14,16 @@
 include Makefile.common
 
 %/.unpacked: %.ttar
+	@echo ">> extracting fixtures"
 	./ttar -C $(dir $*) -x -f $*.ttar
 	touch $@
 
-update_fixtures: fixtures.ttar sysfs/fixtures.ttar
-
-%fixtures.ttar: %/fixtures
-	rm -v $(dir $*)fixtures/.unpacked
-	./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
+update_fixtures:
+	rm -vf fixtures/.unpacked
+	./ttar -c -f fixtures.ttar fixtures/
 
 .PHONY: build
 build:
 
 .PHONY: test
-test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test
+test: fixtures/.unpacked common-test
vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored, 137 lines changed)
@@ -29,12 +29,15 @@ GO ?= go
 GOFMT ?= $(GO)fmt
 FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
 GOOPTS ?=
+GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
+GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
 
 GO_VERSION ?= $(shell $(GO) version)
 GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
 
-unexport GOVENDOR
+GOVENDOR :=
+GO111MODULE :=
 ifeq (, $(PRE_GO_111))
 	ifneq (,$(wildcard go.mod))
 		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
@@ -55,32 +58,57 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$(
 	# This repository isn't using Go modules (yet).
 	GOVENDOR := $(FIRST_GOPATH)/bin/govendor
 	endif
 
-	unexport GO111MODULE
 endif
 PROMU := $(FIRST_GOPATH)/bin/promu
-STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
 pkgs = ./...
 
-GO_VERSION ?= $(shell $(GO) version)
-GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION)))
+ifeq (arm, $(GOHOSTARCH))
+	GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
+	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
+else
+	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
+endif
 
-PROMU_VERSION ?= 0.2.0
+PROMU_VERSION ?= 0.4.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+GOLANGCI_LINT :=
+GOLANGCI_LINT_OPTS ?=
+GOLANGCI_LINT_VERSION ?= v1.16.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+# windows isn't included here because of the path separator being different.
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
+	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+	endif
+endif
 
 PREFIX ?= $(shell pwd)
 BIN_DIR ?= $(shell pwd)
 DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
 DOCKER_REPO ?= prom
 
-.PHONY: all
-all: precheck style staticcheck unused build test
+DOCKER_ARCHS ?= amd64
+
+BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
+PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
+TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+
+ifeq ($(GOHOSTARCH),amd64)
+	ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
+		# Only supported on amd64
+		test-flags := -race
+	endif
+endif
 
 # This rule is used to forward a target like "build" to "common-build". This
 # allows a new "build" target to be defined in a Makefile which includes this
 # one and override "common-build" without override warnings.
 %: common-% ;
 
+.PHONY: common-all
+common-all: precheck style check_license lint unused build test
+
 .PHONY: common-style
 common-style:
 	@echo ">> checking code style"
@@ -102,6 +130,15 @@ common-check_license:
 		exit 1; \
 	fi
 
+.PHONY: common-deps
+common-deps:
+	@echo ">> getting dependencies"
+ifdef GO111MODULE
+	GO111MODULE=$(GO111MODULE) $(GO) mod download
+else
+	$(GO) get $(GOOPTS) -t ./...
+endif
+
 .PHONY: common-test-short
 common-test-short:
 	@echo ">> running short tests"
@@ -110,26 +147,35 @@ common-test-short:
 .PHONY: common-test
 common-test:
 	@echo ">> running all tests"
-	GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs)
+	GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
 
 .PHONY: common-format
 common-format:
 	@echo ">> formatting code"
-	GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs)
+	GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
 
 .PHONY: common-vet
 common-vet:
 	@echo ">> vetting code"
 	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
 
-.PHONY: common-staticcheck
-common-staticcheck: $(STATICCHECK)
-	@echo ">> running staticcheck"
+.PHONY: common-lint
+common-lint: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint"
 ifdef GO111MODULE
-	GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
+# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
+# Otherwise staticcheck might fail randomly for some reason not yet explained.
+	GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+	GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 else
-	$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
+	$(GOLANGCI_LINT) run $(pkgs)
 endif
+endif
+
+# For backward-compatibility.
+.PHONY: common-staticcheck
+common-staticcheck: lint
 
 .PHONY: common-unused
 common-unused: $(GOVENDOR)
@@ -140,8 +186,9 @@ else
 ifdef GO111MODULE
 	@echo ">> running check for unused/missing packages in go.mod"
 	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
+ifeq (,$(wildcard vendor))
 	@git diff --exit-code -- go.sum go.mod
-ifneq (,$(wildcard vendor))
+else
 	@echo ">> running check for unused packages in vendor/"
 	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
 	@git diff --exit-code -- go.sum go.mod vendor/
@@ -159,45 +206,48 @@ common-tarball: promu
 	@echo ">> building release tarball"
 	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
 
-.PHONY: common-docker
-common-docker:
-	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
+.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
+common-docker: $(BUILD_DOCKER_ARCHS)
+$(BUILD_DOCKER_ARCHS): common-docker-%:
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+		--build-arg ARCH="$*" \
+		--build-arg OS="linux" \
+		.
 
-.PHONY: common-docker-publish
-common-docker-publish:
-	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
+common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
+$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
 
-.PHONY: common-docker-tag-latest
-common-docker-tag-latest:
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
+.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
+common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
+$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+
+.PHONY: common-docker-manifest
+common-docker-manifest:
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
 
 .PHONY: promu
 promu: $(PROMU)
 
 $(PROMU):
-	curl -s -L $(PROMU_URL) | tar -xvz -C /tmp
-	mkdir -v -p $(FIRST_GOPATH)/bin
-	cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU)
+	$(eval PROMU_TMP := $(shell mktemp -d))
+	curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
+	mkdir -p $(FIRST_GOPATH)/bin
+	cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
+	rm -r $(PROMU_TMP)
 
 .PHONY: proto
 proto:
	@echo ">> generating code from proto files"
 	@./scripts/genproto.sh
 
-.PHONY: $(STATICCHECK)
-$(STATICCHECK):
-ifdef GO111MODULE
-# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}.
-# See https://github.com/golang/go/issues/27643.
-# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
-	tmpModule=$$(mktemp -d 2>&1) && \
-	mkdir -p $${tmpModule}/staticcheck && \
-	cd "$${tmpModule}"/staticcheck && \
-	GO111MODULE=on $(GO) mod init example.com/staticcheck && \
-	GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
-	rm -rf $${tmpModule};
-else
-	GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+ifdef GOLANGCI_LINT
+$(GOLANGCI_LINT):
+	mkdir -p $(FIRST_GOPATH)/bin
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 endif
 
 ifdef GOVENDOR
@@ -212,7 +262,6 @@ precheck::
 define PRECHECK_COMMAND_template =
 precheck:: $(1)_precheck
-
 
 PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
 .PHONY: $(1)_precheck
 $(1)_precheck:
vendor/github.com/prometheus/procfs/README.md (generated, vendored, 44 lines changed)
@@ -1,7 +1,7 @@
 # procfs
 
 This procfs package provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
+metrics from the pseudo-filesystems /proc and /sys.
 
 *WARNING*: This package is a work in progress. Its API may still break in
 backwards-incompatible ways without warnings. Use it at your own risk.
@@ -9,3 +9,45 @@ backwards-incompatible ways without warnings. Use it at your own risk.
 [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
 [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from
+`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
+Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+fs, err := blockdevice.NewFS("/proc", "/sys")
+stats, err := fs.ProcDiskstats()
+```
+
+## Building and Testing
+
+The procfs library is normally built as part of another application. However, when making
+changes to the library, the `make test` command can be used to run the API test suite.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing. To add/update the test fixtures, first
+ensure the `fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `fixtures` directory. When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory. And finally, verify the changes using
+`git diff fixtures.ttar`.
vendor/github.com/prometheus/procfs/buddyinfo.go (generated, vendored, 14 lines changed)
@@ -31,19 +31,9 @@ type BuddyInfo struct {
 	Sizes []float64
 }
 
-// NewBuddyInfo reads the buddyinfo statistics.
-func NewBuddyInfo() ([]BuddyInfo, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewBuddyInfo()
-}
-
 // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
-	file, err := os.Open(fs.Path("buddyinfo"))
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.proc.Path("buddyinfo"))
 	if err != nil {
 		return nil, err
 	}
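A minimal migration sketch for the API change above (illustrative; it assumes the procfs package at the revision vendored here): the package-level constructor is gone, so callers construct an FS first and call the renamed method on it.

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// Before: bi, err := procfs.NewBuddyInfo()
	// After: construct the FS explicitly, then call the renamed method.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	bi, err := fs.BuddyInfo()
	if err != nil {
		panic(err)
	}
	fmt.Println("zones:", len(bi))
}
```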
vendor/github.com/prometheus/procfs/fixtures.ttar (generated, vendored, 1572 lines changed)
File diff suppressed because it is too large
vendor/github.com/prometheus/procfs/fs.go (generated, vendored, 73 lines changed)
@@ -14,69 +14,30 @@
 package procfs
 
 import (
-	"fmt"
-	"os"
-	"path"
-
-	"github.com/prometheus/procfs/nfs"
-	"github.com/prometheus/procfs/xfs"
+	"github.com/prometheus/procfs/internal/fs"
 )
 
-// FS represents the pseudo-filesystem proc, which provides an interface to
+// FS represents the pseudo-filesystem sys, which provides an interface to
 // kernel data structures.
-type FS string
+type FS struct {
+	proc fs.FS
+}
 
 // DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = "/proc"
+const DefaultMountPoint = fs.DefaultProcMountPoint
 
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+	return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
 func NewFS(mountPoint string) (FS, error) {
-	info, err := os.Stat(mountPoint)
+	fs, err := fs.NewFS(mountPoint)
 	if err != nil {
-		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+		return FS{}, err
 	}
-	if !info.IsDir() {
-		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
-	}
-
-	return FS(mountPoint), nil
-}
-
-// Path returns the path of the given subsystem relative to the procfs root.
-func (fs FS) Path(p ...string) string {
-	return path.Join(append([]string{string(fs)}, p...)...)
-}
-
-// XFSStats retrieves XFS filesystem runtime statistics.
-func (fs FS) XFSStats() (*xfs.Stats, error) {
-	f, err := os.Open(fs.Path("fs/xfs/stat"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return xfs.ParseStats(f)
-}
-
-// NFSClientRPCStats retrieves NFS client RPC statistics.
-func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
-	f, err := os.Open(fs.Path("net/rpc/nfs"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return nfs.ParseClientRPCStats(f)
-}
-
-// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
-func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
-	f, err := os.Open(fs.Path("net/rpc/nfsd"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return nfs.ParseServerRPCStats(f)
+	return FS{fs}, nil
 }
vendor/github.com/prometheus/procfs/go.mod (generated, vendored, 2 lines changed)
@@ -1 +1,3 @@
 module github.com/prometheus/procfs
+
+require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
vendor/github.com/prometheus/procfs/go.sum (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
vendor/github.com/prometheus/procfs/internal/fs/fs.go (generated, vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// DefaultProcMountPoint is the common mount point of the proc filesystem.
+	DefaultProcMountPoint = "/proc"
+
+	// DefaultSysMountPoint is the common mount point of the sys filesystem.
+	DefaultSysMountPoint = "/sys"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+	return filepath.Join(append([]string{string(fs)}, p...)...)
+}
vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored, 59 lines removed)
@@ -1,59 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
-	"io/ioutil"
-	"strconv"
-	"strings"
-)
-
-// ParseUint32s parses a slice of strings into a slice of uint32s.
-func ParseUint32s(ss []string) ([]uint32, error) {
-	us := make([]uint32, 0, len(ss))
-	for _, s := range ss {
-		u, err := strconv.ParseUint(s, 10, 32)
-		if err != nil {
-			return nil, err
-		}
-
-		us = append(us, uint32(u))
-	}
-
-	return us, nil
-}
-
-// ParseUint64s parses a slice of strings into a slice of uint64s.
-func ParseUint64s(ss []string) ([]uint64, error) {
-	us := make([]uint64, 0, len(ss))
-	for _, s := range ss {
-		u, err := strconv.ParseUint(s, 10, 64)
-		if err != nil {
-			return nil, err
-		}
-
-		us = append(us, u)
-	}
-
-	return us, nil
-}
-
-// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
-func ReadUintFromFile(path string) (uint64, error) {
-	data, err := ioutil.ReadFile(path)
-	if err != nil {
-		return 0, err
-	}
-	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go (generated, vendored, 45 lines removed)
@@ -1,45 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package util
-
-import (
-	"bytes"
-	"os"
-	"syscall"
-)
-
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
-// https://github.com/prometheus/node_exporter/pull/728/files
-func SysReadFile(file string) (string, error) {
-	f, err := os.Open(file)
-	if err != nil {
-		return "", err
-	}
-	defer f.Close()
-
-	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
-	// Go's ioutil.ReadFile implementation to poll forever.
-	//
-	// Since we either want to read data or bail immediately, do the simplest
-	// possible read using syscall directly.
-	b := make([]byte, 128)
-	n, err := syscall.Read(int(f.Fd()), b)
-	if err != nil {
-		return "", err
-	}
-
-	return string(bytes.TrimSpace(b[:n])), nil
-}
vendor/github.com/prometheus/procfs/ipvs.go (generated, vendored, 32 lines changed)
@@ -62,19 +62,9 @@ type IPVSBackendStatus struct {
 	Weight uint64
 }
 
-// NewIPVSStats reads the IPVS statistics.
-func NewIPVSStats() (IPVSStats, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return IPVSStats{}, err
-	}
-
-	return fs.NewIPVSStats()
-}
-
-// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) NewIPVSStats() (IPVSStats, error) {
-	file, err := os.Open(fs.Path("net/ip_vs_stats"))
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+	file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
 	if err != nil {
 		return IPVSStats{}, err
 	}
@@ -131,19 +121,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
 	return stats, nil
 }
 
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return []IPVSBackendStatus{}, err
-	}
-
-	return fs.NewIPVSBackendStatus()
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-	file, err := os.Open(fs.Path("net/ip_vs"))
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	file, err := os.Open(fs.proc.Path("net/ip_vs"))
 	if err != nil {
 		return nil, err
 	}
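The same constructor-to-method move applies to the IPVS readers. An illustrative sketch (not from the vendored code) using the NewDefaultFS helper introduced in fs.go above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // equivalent to procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	stats, err := fs.IPVSStats()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", stats)
}
```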
vendor/github.com/prometheus/procfs/mdstat.go (generated, vendored, 74 lines changed)
@@ -42,64 +42,64 @@ type MDStat struct {
 	BlocksSynced int64
 }
 
-// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
-func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
-	mdStatusFilePath := fs.Path("mdstat")
-	content, err := ioutil.ReadFile(mdStatusFilePath)
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
 	if err != nil {
-		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
 	}
+	mdstat, err := parseMDStat(data)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+	}
+	return mdstat, nil
+}
 
-	mdStates := []MDStat{}
-	lines := strings.Split(string(content), "\n")
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdstatData []byte) ([]MDStat, error) {
+	mdStats := []MDStat{}
+	lines := strings.Split(string(mdstatData), "\n")
 	for i, l := range lines {
-		if l == "" {
-			continue
-		}
-		if l[0] == ' ' {
-			continue
-		}
-		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+		if strings.TrimSpace(l) == "" || l[0] == ' ' ||
+			strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
 			continue
 		}
 
-		mainLine := strings.Split(l, " ")
-		if len(mainLine) < 3 {
-			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+		deviceFields := strings.Fields(l)
+		if len(deviceFields) < 3 {
+			return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l)
 		}
-		mdName := mainLine[0]
-		activityState := mainLine[2]
+		mdName := deviceFields[0]
+		activityState := deviceFields[2]
 
 		if len(lines) <= i+3 {
-			return mdStates, fmt.Errorf(
-				"error parsing %s: too few lines for md device %s",
-				mdStatusFilePath,
-				mdName,
-			)
+			return mdStats, fmt.Errorf("missing lines for md device %s", mdName)
 		}
 
-		active, total, size, err := evalStatusline(lines[i+1])
+		active, total, size, err := evalStatusLine(lines[i+1])
 		if err != nil {
-			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+			return nil, err
 		}
 
-		// j is the line number of the syncing-line.
-		j := i + 2
+		syncLineIdx := i + 2
 		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
-			j = i + 3
+			syncLineIdx++
 		}
 
-		// If device is syncing at the moment, get the number of currently
+		// If device is recovering/syncing at the moment, get the number of currently
 		// synced bytes, otherwise that number equals the size of the device.
 		syncedBlocks := size
-		if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
-			syncedBlocks, err = evalBuildline(lines[j])
+		if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") {
+			syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
 			if err != nil {
-				return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+				return nil, err
 			}
 		}
 
-		mdStates = append(mdStates, MDStat{
+		mdStats = append(mdStats, MDStat{
 			Name:          mdName,
 			ActivityState: activityState,
 			DisksActive:   active,
@@ -109,10 +109,10 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
 		})
 	}
 
-	return mdStates, nil
+	return mdStats, nil
 }
 
-func evalStatusline(statusline string) (active, total, size int64, err error) {
+func evalStatusLine(statusline string) (active, total, size int64, err error) {
 	matches := statuslineRE.FindStringSubmatch(statusline)
 	if len(matches) != 4 {
 		return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
@@ -136,7 +136,7 @@ func evalStatusline(statusline string) (active, total, size int64, err error) {
 	return active, total, size, nil
 }
 
-func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) {
 	matches := buildlineRE.FindStringSubmatch(buildline)
 	if len(matches) != 2 {
 		return 0, fmt.Errorf("unexpected buildline: %s", buildline)
vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored, 43 lines changed)
@@ -69,6 +69,8 @@ type MountStats interface {
 type MountStatsNFS struct {
 	// The version of statistics provided.
 	StatVersion string
+	// The mount options of the NFS mount.
+	Opts map[string]string
 	// The age of the NFS mount.
 	Age time.Duration
 	// Statistics related to byte counters for various operations.
@@ -179,11 +181,11 @@ type NFSOperationStats struct {
 	// Number of bytes received for this operation, including RPC headers and payload.
 	BytesReceived uint64
 	// Duration all requests spent queued for transmission before they were sent.
-	CumulativeQueueTime time.Duration
+	CumulativeQueueMilliseconds uint64
 	// Duration it took to get a reply back after the request was transmitted.
-	CumulativeTotalResponseTime time.Duration
+	CumulativeTotalResponseMilliseconds uint64
 	// Duration from when a request was enqueued to when it was completely handled.
-	CumulativeTotalRequestTime time.Duration
+	CumulativeTotalRequestMilliseconds uint64
 }
 
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
@@ -202,7 +204,7 @@ type NFSTransportStats struct {
 	// spent waiting for connections to the server to be established.
 	ConnectIdleTime uint64
 	// Duration since the NFS mount last saw any RPC traffic.
-	IdleTime time.Duration
+	IdleTimeSeconds uint64
 	// Number of RPC requests for this mount sent to the NFS server.
 	Sends uint64
 	// Number of RPC responses for this mount received from the NFS server.
@@ -317,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) {
 func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 	// Field indicators for parsing specific types of data
 	const (
+		fieldOpts   = "opts:"
 		fieldAge    = "age:"
 		fieldBytes  = "bytes:"
 		fieldEvents = "events:"
@@ -338,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 		}
 
 		switch ss[0] {
+		case fieldOpts:
+			if stats.Opts == nil {
+				stats.Opts = map[string]string{}
+			}
+			for _, opt := range strings.Split(ss[1], ",") {
+				split := strings.Split(opt, "=")
+				if len(split) == 2 {
+					stats.Opts[split[0]] = split[1]
+				} else {
+					stats.Opts[opt] = ""
+				}
+			}
 		case fieldAge:
 			// Age integer is in seconds
 			d, err := time.ParseDuration(ss[1] + "s")
@@ -509,15 +524,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 		}
 
 		ops = append(ops, NFSOperationStats{
 			Operation:     strings.TrimSuffix(ss[0], ":"),
 			Requests:      ns[0],
 			Transmissions: ns[1],
 			MajorTimeouts: ns[2],
 			BytesSent:     ns[3],
 			BytesReceived: ns[4],
-			CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
-			CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
-			CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
+			CumulativeQueueMilliseconds:         ns[5],
+			CumulativeTotalResponseMilliseconds: ns[6],
+			CumulativeTotalRequestMilliseconds:  ns[7],
 		})
 	}
 
@@ -593,7 +608,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		Bind:            ns[1],
 		Connect:         ns[2],
 		ConnectIdleTime: ns[3],
-		IdleTime: time.Duration(ns[4]) * time.Second,
+		IdleTimeSeconds: ns[4],
 		Sends:             ns[5],
 		Receives:          ns[6],
 		BadTransactionIDs: ns[7],
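The new opts: branch above turns a comma-separated NFS option string into a map. A standalone sketch of that parsing (illustrative, with a made-up options string):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// key=value options become map entries; flag-style options map to "".
	opts := map[string]string{}
	for _, opt := range strings.Split("rw,vers=4.2,proto=tcp", ",") {
		if kv := strings.Split(opt, "="); len(kv) == 2 {
			opts[kv[0]] = kv[1]
		} else {
			opts[opt] = ""
		}
	}
	fmt.Println(opts) // map[proto:tcp rw: vers:4.2]
}
```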
vendor/github.com/prometheus/procfs/net_dev.go (generated, vendored, 38 lines changed)
@@ -47,23 +47,13 @@ type NetDevLine struct {
 // are interface names.
 type NetDev map[string]NetDevLine
 
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func NewNetDev() (NetDev, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewNetDev()
-}
-
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NewNetDev() (NetDev, error) {
-	return newNetDev(fs.Path("net/dev"))
-}
-
-// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NewNetDev() (NetDev, error) {
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+	return newNetDev(fs.proc.Path("net/dev"))
+}
+
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
 	return newNetDev(p.path("net/dev"))
 }
 
@@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) {
 	}
 	defer f.Close()
 
-	nd := NetDev{}
+	netDev := NetDev{}
 	s := bufio.NewScanner(f)
 	for n := 0; s.Scan(); n++ {
 		// Skip the 2 header lines.
@@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) {
 			continue
 		}
 
-		line, err := nd.parseLine(s.Text())
+		line, err := netDev.parseLine(s.Text())
 		if err != nil {
-			return nd, err
+			return netDev, err
 		}
 
-		nd[line.Name] = *line
+		netDev[line.Name] = *line
 	}
 
-	return nd, s.Err()
+	return netDev, s.Err()
 }
 
 // parseLine parses a single line from the /proc/net/dev file. Header lines
 // must be filtered prior to calling this method.
-func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
 	parts := strings.SplitN(rawLine, ":", 2)
 	if len(parts) != 2 {
 		return nil, errors.New("invalid net/dev line, missing colon")
@@ -185,11 +175,11 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
 
 // Total aggregates the values across interfaces and returns a new NetDevLine.
 // The Name field will be a sorted comma separated list of interface names.
-func (nd NetDev) Total() NetDevLine {
+func (netDev NetDev) Total() NetDevLine {
 	total := NetDevLine{}
 
-	names := make([]string, 0, len(nd))
-	for _, ifc := range nd {
+	names := make([]string, 0, len(netDev))
+	for _, ifc := range netDev {
 		names = append(names, ifc.Name)
 		total.RxBytes += ifc.RxBytes
 		total.RxPackets += ifc.RxPackets
275 vendor/github.com/prometheus/procfs/net_unix.go generated vendored Normal file
@@ -0,0 +1,275 @@
New file: parsing of /proc/net/unix. For the proc file format details, see
https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 and
https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
It adds the NetUnix and NetUnixLine types (kernel pointer, ref count, protocol, flags,
type, state, inode and optional path per socket), the constructors NewNetUnix,
fs.NewNetUnix, NewNetUnixByPath and NewNetUnixByReader, and String methods for the
type, flags and state fields.
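A minimal usage sketch (not part of this commit) of the /proc/net/unix API added above, assuming a Linux host with /proc mounted; it only uses the NewNetUnix constructor and NetUnixLine fields defined in the new file.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewNetUnix reads /proc/net/unix from the default mount point.
	nu, err := procfs.NewNetUnix()
	if err != nil {
		log.Fatal(err)
	}

	// Each row is one Unix domain socket; Type and State print
	// human-readable names via their String methods.
	for _, row := range nu.Rows {
		fmt.Printf("%s %s %s %s\n", row.KernelPtr, row.Type, row.State, row.Path)
	}
}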
263 vendor/github.com/prometheus/procfs/nfs/nfs.go generated vendored
@@ -1,263 +0,0 @@
Deleted: the nfs package's data model for /proc/net/rpc/nfs and /proc/net/rpc/nfsd
(ReplyCache, FileHandles, InputOutput, Threads, ReadAheadCache, Network, ClientRPC,
ServerRPC, V2Stats, V3Stats, ClientV4Stats, ServerV4Stats, V4Ops, ClientRPCStats and
ServerRPCStats).
317 vendor/github.com/prometheus/procfs/nfs/parse.go generated vendored
@@ -1,317 +0,0 @@
Deleted: the nfs package's line parsers (parseReplyCache, parseFileHandles,
parseInputOutput, parseThreads, parseReadAheadCache, parseNetwork, parseServerRPC,
parseClientRPC, parseV2Stats, parseV3Stats, parseClientV4Stats, parseServerV4Stats
and parseV4Ops).
67 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go generated vendored
@@ -1,67 +0,0 @@
Deleted: ParseClientRPCStats, which read client NFS stats from /proc/net/rpc/nfs.
89 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go generated vendored
@@ -1,89 +0,0 @@
Deleted: ParseServerRPCStats, which read server NFS stats from /proc/net/rpc/nfsd.
27 vendor/github.com/prometheus/procfs/proc.go generated vendored
@@ -20,6 +20,8 @@ import (
 	"os"
 	"strconv"
 	"strings"
+
+	"github.com/prometheus/procfs/internal/fs"
 )
 
 // Proc provides information about a running process.
@@ -27,7 +29,7 @@ type Proc struct {
 	// The process ID.
 	PID int
 
-	fs FS
+	fs fs.FS
 }
 
 // Procs represents a list of Proc structs.
@@ -52,7 +54,7 @@ func NewProc(pid int) (Proc, error) {
 	if err != nil {
 		return Proc{}, err
 	}
-	return fs.NewProc(pid)
+	return fs.Proc(pid)
 }
 
 // AllProcs returns a list of all currently available processes under /proc.
@@ -66,28 +68,35 @@ func AllProcs() (Procs, error) {
 
 // Self returns a process for the current process.
 func (fs FS) Self() (Proc, error) {
-	p, err := os.Readlink(fs.Path("self"))
+	p, err := os.Readlink(fs.proc.Path("self"))
 	if err != nil {
 		return Proc{}, err
 	}
-	pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+	pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
 	if err != nil {
 		return Proc{}, err
 	}
-	return fs.NewProc(pid)
+	return fs.Proc(pid)
 }
 
 // NewProc returns a process for the given pid.
+//
+// Deprecated: use fs.Proc() instead
 func (fs FS) NewProc(pid int) (Proc, error) {
-	if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+	return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
+	if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
 		return Proc{}, err
 	}
-	return Proc{PID: pid, fs: fs}, nil
+	return Proc{PID: pid, fs: fs.proc}, nil
 }
 
 // AllProcs returns a list of all currently available processes.
 func (fs FS) AllProcs() (Procs, error) {
-	d, err := os.Open(fs.Path())
+	d, err := os.Open(fs.proc.Path())
 	if err != nil {
 		return Procs{}, err
 	}
@@ -104,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
 		if err != nil {
 			continue
 		}
-		p = append(p, Proc{PID: int(pid), fs: fs})
+		p = append(p, Proc{PID: int(pid), fs: fs.proc})
 	}
 
 	return p, nil
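A minimal migration sketch (not part of this commit) for the accessor rename above, where fs.Proc replaces the deprecated fs.NewProc; it assumes a Linux host with /proc mounted.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	// Package-level NewProc is unchanged and resolves a pid to a Proc.
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pid:", p.PID)

	// At the FS level, fs.Proc(pid) replaces the deprecated fs.NewProc(pid).
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fs.Proc(p.PID); err != nil {
		log.Fatal(err)
	}
}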
4 vendor/github.com/prometheus/procfs/proc_io.go generated vendored
@@ -39,8 +39,8 @@ type ProcIO struct {
 	CancelledWriteBytes int64
 }
 
-// NewIO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) NewIO() (ProcIO, error) {
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
 	pio := ProcIO{}
 
 	f, err := os.Open(p.path("io"))
7 vendor/github.com/prometheus/procfs/proc_limits.go generated vendored
@@ -78,7 +78,14 @@ var (
 )
 
 // NewLimits returns the current soft limits of the process.
+//
+// Deprecated: use p.Limits() instead
 func (p Proc) NewLimits() (ProcLimits, error) {
+	return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
 	f, err := os.Open(p.path("limits"))
 	if err != nil {
 		return ProcLimits{}, err
4 vendor/github.com/prometheus/procfs/proc_ns.go generated vendored
@@ -29,9 +29,9 @@ type Namespace struct {
 // Namespaces contains all of the namespaces that the process is contained in.
 type Namespaces map[string]Namespace
 
-// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
 // process is a member.
-func (p Proc) NewNamespaces() (Namespaces, error) {
+func (p Proc) Namespaces() (Namespaces, error) {
 	d, err := os.Open(p.path("ns"))
 	if err != nil {
 		return nil, err
101 vendor/github.com/prometheus/procfs/proc_psi.go generated vendored Normal file
@@ -0,0 +1,101 @@
New file: PSI (pressure stall information) support. The PSI / pressure interface is
described at
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt.
Each resource (cpu, io, memory, ...) is exposed as a single file; each file may contain
up to two lines, one for "some" pressure and one for "full" pressure, each with several
averages (over n seconds) and a total in µs, e.g.:
    some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
    full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
The file adds the PSILine and PSIStats types, fs.PSIStatsForResource and parsePSIStats;
lines with unknown prefixes are ignored so that future measurement types do not cause
errors.
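A minimal usage sketch (not part of this commit) of the new PSI accessor, assuming a kernel built with PSI support so that /proc/pressure exists.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// "cpu", "memory" and "io" are the resources documented for /proc/pressure.
	stats, err := fs.PSIStatsForResource("cpu")
	if err != nil {
		log.Fatal(err)
	}

	// Some is the share of time in which at least some tasks are stalled;
	// stats.Full may be nil when the resource has no "full" line.
	if stats.Some != nil {
		fmt.Printf("avg10=%f total=%dus\n", stats.Some.Avg10, stats.Some.Total)
	}
}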
20 vendor/github.com/prometheus/procfs/proc_stat.go generated vendored
@@ -18,6 +18,8 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+
+	"github.com/prometheus/procfs/internal/fs"
 )
 
 // Originally, this USER_HZ value was dynamically retrieved via a sysconf call
@@ -95,15 +97,22 @@ type ProcStat struct {
 	// in clock ticks.
 	Starttime uint64
 	// Virtual memory size in bytes.
-	VSize int
+	VSize uint
 	// Resident set size in pages.
 	RSS int
 
-	fs FS
+	proc fs.FS
 }
 
 // NewStat returns the current status information of the process.
+//
+// Deprecated: use NewStat() instead
 func (p Proc) NewStat() (ProcStat, error) {
+	return p.Stat()
+}
+
+// Stat returns the current status information of the process.
+func (p Proc) Stat() (ProcStat, error) {
 	f, err := os.Open(p.path("stat"))
 	if err != nil {
 		return ProcStat{}, err
@@ -118,7 +127,7 @@ func (p Proc) NewStat() (ProcStat, error) {
 	var (
 		ignore int
 
-		s = ProcStat{PID: p.PID, fs: p.fs}
+		s = ProcStat{PID: p.PID, proc: p.fs}
 		l = bytes.Index(data, []byte("("))
 		r = bytes.LastIndex(data, []byte(")"))
 	)
@@ -164,7 +173,7 @@ func (p Proc) NewStat() (ProcStat, error) {
 }
 
 // VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() int {
+func (s ProcStat) VirtualMemory() uint {
 	return s.VSize
 }
 
@@ -175,7 +184,8 @@ func (s ProcStat) ResidentMemory() int {
 
 // StartTime returns the unix timestamp of the process in seconds.
 func (s ProcStat) StartTime() (float64, error) {
-	stat, err := s.fs.NewStat()
+	fs := FS{proc: s.proc}
+	stat, err := fs.Stat()
 	if err != nil {
 		return 0, err
 	}
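A minimal usage sketch (not part of this commit) of the renamed Proc.Stat accessor and the uint-typed VirtualMemory above, assuming a Linux host with /proc mounted.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}

	// Stat replaces the deprecated NewStat; VirtualMemory now returns a uint.
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("vsize bytes:", stat.VirtualMemory())
	fmt.Println("rss pages:", stat.RSS)

	// StartTime converts the start time in clock ticks to a unix timestamp.
	start, err := stat.StartTime()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started at:", start)
}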
162 vendor/github.com/prometheus/procfs/proc_status.go generated vendored Normal file
@@ -0,0 +1,162 @@
New file: the ProcStatus type, read from /proc/[pid]/status by Proc.NewStatus. It exposes
the process name, memory figures converted from kB to bytes (VmPeak, VmSize, VmLck, VmPin,
VmHWM, VmRSS, RssAnon, RssFile, RssShmem, VmData, VmStk, VmExe, VmLib, VmPTE, VmPMD,
VmSwap, HugetlbPages) and the voluntary/non-voluntary context switch counters, plus a
TotalCtxtSwitches helper.
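A minimal usage sketch (not part of this commit) of the ProcStatus accessor added above, assuming a Linux host with /proc mounted.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}

	// NewStatus parses /proc/<pid>/status; memory fields are in bytes.
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("name:", status.Name)
	fmt.Println("VmRSS bytes:", status.VmRSS)
	fmt.Println("context switches:", status.TotalCtxtSwitches())
}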
40 vendor/github.com/prometheus/procfs/stat.go generated vendored
@@ -20,6 +20,8 @@ import (
 	"os"
 	"strconv"
 	"strings"
+
+	"github.com/prometheus/procfs/internal/fs"
 )
 
 // CPUStat shows how much time the cpu spend in various stages.
@@ -78,16 +80,6 @@ type Stat struct {
 	SoftIRQ SoftIRQStat
 }
 
-// NewStat returns kernel/system statistics read from /proc/stat.
-func NewStat() (Stat, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return Stat{}, err
-	}
-
-	return fs.NewStat()
-}
-
 // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
 func parseCPUStat(line string) (CPUStat, int64, error) {
 	cpuStat := CPUStat{}
@@ -149,11 +141,31 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
 	return softIRQStat, total, nil
 }
 
-// NewStat returns an information about current kernel/system statistics.
-func (fs FS) NewStat() (Stat, error) {
-	// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func NewStat() (Stat, error) {
+	fs, err := NewFS(fs.DefaultProcMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+	return fs.Stat()
+}
 
-	f, err := os.Open(fs.Path("stat"))
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func (fs FS) NewStat() (Stat, error) {
+	return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+
+	f, err := os.Open(fs.proc.Path("stat"))
 	if err != nil {
 		return Stat{}, err
 	}
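A minimal usage sketch (not part of this commit) of the FS-level Stat accessor that replaces the deprecated package-level NewStat, assuming a Linux host with /proc mounted.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// fs.Stat replaces the deprecated procfs.NewStat and fs.NewStat wrappers.
	s, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", s.SoftIRQ)
}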
42 vendor/github.com/prometheus/procfs/ttar generated vendored
@@ -86,8 +86,10 @@ Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
        $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
 
 Options:
        -C <DIR>           (change directory)
        -v                 (verbose)
+       --recursive-unlink (recursively delete existing directory if path
+                           collides with file or directory to extract)
 
 Example: Change to sysfs directory, create ttar file from fixtures directory
        $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
@@ -111,8 +113,9 @@ function set_cmd {
 }
 
 unset VERBOSE
+unset RECURSIVE_UNLINK
 
-while getopts :cf:htxvC: opt; do
+while getopts :cf:-:htxvC: opt; do
     case $opt in
         c)
             set_cmd "create"
@@ -136,6 +139,18 @@ while getopts :cf:htxvC: opt; do
         C)
             CDIR=$OPTARG
             ;;
+        -)
+            case $OPTARG in
+                recursive-unlink)
+                    RECURSIVE_UNLINK="yes"
+                    ;;
+                *)
+                    echo -e "Error: invalid option -$OPTARG"
+                    echo
+                    usage 1
+                    ;;
+            esac
+            ;;
         *)
             echo >&2 "ERROR: invalid option -$OPTARG"
             echo
@@ -212,16 +227,16 @@ function extract {
     local eof_without_newline
     if [ "$size" -gt 0 ]; then
         if [[ "$line" =~ [^\\]EOF ]]; then
-            # An EOF not preceeded by a backslash indicates that the line
+            # An EOF not preceded by a backslash indicates that the line
             # does not end with a newline
             eof_without_newline=1
         else
             eof_without_newline=0
         fi
         # Replace NULLBYTE with null byte if at beginning of line
-        # Replace NULLBYTE with null byte unless preceeded by backslash
+        # Replace NULLBYTE with null byte unless preceded by backslash
         # Remove one backslash in front of NULLBYTE (if any)
-        # Remove EOF unless preceeded by backslash
+        # Remove EOF unless preceded by backslash
         # Remove one backslash in front of EOF
         if [ $USE_PYTHON -eq 1 ]; then
             echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
@@ -245,7 +260,16 @@ function extract {
         fi
         if [[ $line =~ ^Path:\ (.*)$ ]]; then
             path=${BASH_REMATCH[1]}
-            if [ -e "$path" ] || [ -L "$path" ]; then
+            if [ -L "$path" ]; then
+                rm "$path"
+            elif [ -d "$path" ]; then
+                if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
+                    rm -r "$path"
+                else
+                    # Safe because symlinks to directories are dealt with above
+                    rmdir "$path"
+                fi
+            elif [ -e "$path" ]; then
                 rm "$path"
             fi
         elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
@@ -338,8 +362,8 @@ function _create {
         else
             < "$file" \
                 sed 's/EOF/\\EOF/g;
                     s/NULLBYTE/\\NULLBYTE/g;
                     s/\x0/NULLBYTE/g;
                     '
         fi
         if [[ "$eof_without_newline" -eq 1 ]]; then
2	vendor/github.com/prometheus/procfs/xfrm.go (generated, vendored)

@@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) {
 
 // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
 func (fs FS) NewXfrmStat() (XfrmStat, error) {
-	file, err := os.Open(fs.Path("net/xfrm_stat"))
+	file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
 	if err != nil {
 		return XfrmStat{}, err
 	}
330	vendor/github.com/prometheus/procfs/xfs/parse.go (generated, vendored)

@@ -1,330 +0,0 @@
The vendored file is removed in its entirety. It contained the package xfs parser ParseStats(r io.Reader) (*Stats, error), which read the whitespace-separated "label value ..." lines of /proc/fs/xfs/stat, plus the per-counter-group helpers extentAllocationStats, btreeStats, blockMappingStats, directoryOperationStats, transactionStats, inodeOperationStats, logOperationStats, readWriteStats, attributeOperationStats, inodeClusteringStats, vnodeStats, bufferStats and extendedPrecisionStats, each validating the expected number of uint32 (or uint64 for the extended precision counters) values for its group.
163	vendor/github.com/prometheus/procfs/xfs/xfs.go (generated, vendored)

@@ -1,163 +0,0 @@
The vendored file is removed in its entirety. It declared the package xfs Stats struct, parsed from /proc/fs/xfs/stat, and its counter groups: ExtentAllocationStats, BTreeStats, BlockMappingStats, DirectoryOperationStats, TransactionStats, InodeOperationStats, LogOperationStats, ReadWriteStats, AttributeOperationStats, InodeClusteringStats, VnodeStats, BufferStats and ExtendedPrecisionStats. Most counters are uint32 (matching xfs_stats.h in the kernel); the extended precision counters are uint64.
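Since both xfs files disappear from the vendor tree in this commit, the following sketch only illustrates how the removed API was called; it no longer compiles against the updated vendored procfs. The import path, ParseStats signature, and the ReadWrite field names are taken from the deleted sources summarized above.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/xfs" // removed from the vendor tree by this commit
)

func main() {
	// ParseStats consumed the "label value ..." lines of /proc/fs/xfs/stat
	// and filled the Stats struct that xfs.go used to define.
	f, err := os.Open("/proc/fs/xfs/stat")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stats, err := xfs.ParseStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("xfs reads=%d writes=%d\n", stats.ReadWrite.Read, stats.ReadWrite.Write)
}
```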
1	vendor/github.com/sirupsen/logrus/.gitignore (generated, vendored)

@@ -1 +1,2 @@
 logrus
+vendor
60	vendor/github.com/sirupsen/logrus/.travis.yml (generated, vendored)

@@ -1,15 +1,51 @@
 language: go
-go:
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - tip
 env:
   - GOMAXPROCS=4 GORACE=halt_on_error=1
-install:
-  - go get github.com/stretchr/testify/assert
-  - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
-  - go get golang.org/x/sys/unix
-  - go get golang.org/x/sys/windows
-script:
-  - go test -race -v ./...
+matrix:
+  include:
+    - go: 1.10.x
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v ./...
+    - go: 1.11.x
+      env: GO111MODULE=on
+      install:
+        - go mod download
+      script:
+        - go test -race -v ./...
+    - go: 1.11.x
+      env: GO111MODULE=off
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v ./...
+    - go: 1.10.x
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v -tags appengine ./...
+    - go: 1.11.x
+      env: GO111MODULE=on
+      install:
+        - go mod download
+      script:
+        - go test -race -v -tags appengine ./...
+    - go: 1.11.x
+      env: GO111MODULE=off
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v -tags appengine ./...
52	vendor/github.com/sirupsen/logrus/CHANGELOG.md (generated, vendored)

@@ -1,3 +1,55 @@
+# 1.2.0
+
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra new line at the end of message in text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of go module
+  * an indent configuration for the json formatter
+  * output colour support for windows
+  * the field sort function is now configurable for text formatter
+  * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater
+
+# 1.0.6
+
+This new release introduces:
+  * a new api WithTime which allows to easily force the time of the log entry
+    which is mostly useful for logger wrapper
+  * a fix reverting the immutability of the entry given as parameter to the hooks
+    a new configuration field of the json formatter in order to put all the fields
+    in a nested dictionnary
+  * a new SetOutput method in the Logger
+  * a new configuration of the textformatter to configure the name of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
 # 1.0.3
 
 * Replace example files with testable examples
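The changelog above names the logrus APIs that this vendor bump pulls in (Trace level, SetReportCaller, WithTime, IsLevelEnabled). A minimal, hedged usage sketch follows; the function names are taken from the vendored changelog and diffs, not from statsd_exporter's own code, so treat it as an illustration rather than the exporter's logging setup.

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Trace sits below Debug in the level ordering introduced in logrus 1.2.0.
	log.SetLevel(log.TraceLevel)

	// Report the calling file/line/function as an extra field.
	log.SetReportCaller(true)

	if log.IsLevelEnabled(log.TraceLevel) {
		log.Trace("very low-level detail")
	}

	// WithTime forces the entry timestamp, useful for logger wrappers.
	log.WithTime(time.Date(2019, 6, 1, 0, 0, 0, 0, time.UTC)).
		Info("entry with a fixed timestamp")
}
```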
94	vendor/github.com/sirupsen/logrus/README.md (generated, vendored)

@@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased
 time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-exit status 1
 ```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+	log.SetFormatter(&log.TextFormatter{
+		DisableColors: true,
+		FullTimestamp: true,
+	})
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
 #### Case-sensitivity
 
@@ -220,7 +251,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
 ```go
 import (
   log "github.com/sirupsen/logrus"
-  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
   logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
   "log/syslog"
 )
@@ -241,60 +272,15 @@ func init() {
 ```
 Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
 
-| Hook | Description |
-| ----- | ----------- |
[45 further table rows are removed, one per third-party hook: Airbrake "legacy", Airbrake, Amazon Kinesis, Amqp-Hook, Bugsnag, DeferPanic, Discordrus, ElasticSearch, Firehose, Fluentd, Go-Slack, Graylog, Hiprus, Honeybadger, InfluxDB, Influxus, Journalhook, KafkaLogrus, LFShook, Logentries, Logentrus, Logmatic.io, Logrusly, Logstash, Mail, Mattermost, Mongodb, NATS-Hook, Octokit, Papertrail, PostgreSQL, Pushover, Raygun, Redis-Hook, Rollrus, Scribe, Sentry, Slackrus, Stackdriver, Sumorus, Syslog, Syslog TLS, TraceView, Typetalk, logz.io, SQS-Hook — replaced by the wiki link below]
+A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
 
 #### Level logging
 
-Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
 
 ```go
+log.Trace("Something very low level.")
 log.Debug("Useful debugging information.")
 log.Info("Something noteworthy happened!")
 log.Warn("You should probably take a look at this.")
@@ -366,13 +352,15 @@ The built-in logging formatters are:
   field to `true`. To force no colored output even if there is a TTY set the
   `DisableColors` field to `true`. For Windows, see
   [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
   * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
 * `logrus.JSONFormatter`. Logs fields as JSON.
   * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
 
 Third party logging formatters:
 
-* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine.
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
 * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
 * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
 * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo!
@@ -489,7 +477,7 @@ logrus.RegisterExitHandler(handler)
 
 #### Thread safety
 
-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
 If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
 
 Situation when locking is not needed includes:
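The README excerpt above mentions three TextFormatter options: DisableColors, FullTimestamp, and the newly documented DisableLevelTruncation. A small sketch combining them, assuming only the field names shown in that excerpt:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Field names follow the README excerpt above; DisableLevelTruncation
	// only exists in the newer logrus version being vendored here.
	log.SetFormatter(&log.TextFormatter{
		DisableColors:          true,
		FullTimestamp:          true,
		DisableLevelTruncation: true,
	})

	log.WithField("animal", "walrus").Info("formatter configured")
}
```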
234	vendor/github.com/sirupsen/logrus/entry.go (generated, vendored)

@@ -4,11 +4,30 @@ import (
 	"bytes"
 	"fmt"
 	"os"
+	"reflect"
+	"runtime"
+	"strings"
 	"sync"
 	"time"
 )
 
-var bufferPool *sync.Pool
+var (
+	bufferPool *sync.Pool
+
+	// qualified package name, cached at first use
+	logrusPackage string
+
+	// Positions in the call stack when tracing to report the calling method
+	minimumCallerDepth int
+
+	// Used for caller information initialisation
+	callerInitOnce sync.Once
+)
+
+const (
+	maximumCallerDepth int = 25
+	knownLogrusFrames  int = 4
+)
 
 func init() {
 	bufferPool = &sync.Pool{
@@ -16,15 +35,18 @@ func init() {
 			return new(bytes.Buffer)
 		},
 	}
+
+	// start at the bottom of the stack before the package-name cache is primed
+	minimumCallerDepth = 1
 }
 
 // Defines the key when adding errors using WithError.
 var ErrorKey = "error"
 
 // An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
-// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
-// passed around as much as you wish to avoid field duplication.
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
 type Entry struct {
 	Logger *Logger
 
@@ -34,22 +56,28 @@ type Entry struct {
 	// Time at which the log entry was created
 	Time time.Time
 
-	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
 	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
 	Level Level
 
-	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	// Calling method, with package name
+	Caller *runtime.Frame
+
+	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
 	Message string
 
-	// When formatter is called in entry.log(), an Buffer may be set to entry
+	// When formatter is called in entry.log(), a Buffer may be set to entry
 	Buffer *bytes.Buffer
+
+	// err may contain a field formatting error
+	err string
 }
 
 func NewEntry(logger *Logger) *Entry {
 	return &Entry{
 		Logger: logger,
-		// Default is three fields, give a little extra room
-		Data: make(Fields, 5),
+		// Default is three fields, plus one optional. Give a little extra room.
+		Data: make(Fields, 6),
 	}
 }
 
@@ -80,43 +108,106 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
 	for k, v := range entry.Data {
 		data[k] = v
 	}
+	var field_err string
 	for k, v := range fields {
-		data[k] = v
+		if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
+			field_err = fmt.Sprintf("can not add field %q", k)
+			if entry.err != "" {
+				field_err = entry.err + ", " + field_err
+			}
+		} else {
+			data[k] = v
+		}
 	}
-	return &Entry{Logger: entry.Logger, Data: data}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be to be a better way...
+func getPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+
+	return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+	// Restrict the lookback frames to avoid runaway lookups
+	pcs := make([]uintptr, maximumCallerDepth)
+	depth := runtime.Callers(minimumCallerDepth, pcs)
+	frames := runtime.CallersFrames(pcs[:depth])
+
+	// cache this package's fully-qualified name
+	callerInitOnce.Do(func() {
+		logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
+
+		// now that we have the cache, we can skip a minimum count of known-logrus functions
+		// XXX this is dubious, the number of frames may vary store an entry in a logger interface
+		minimumCallerDepth = knownLogrusFrames
+	})
+
+	for f, again := frames.Next(); again; f, again = frames.Next() {
+		pkg := getPackageName(f.Function)
+
+		// If the caller isn't part of this package, we're done
+		if pkg != logrusPackage {
+			return &f
+		}
+	}
+
+	// if we got here, we failed to find the caller's context
+	return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+	return entry.Logger != nil &&
+		entry.Logger.ReportCaller &&
+		entry.Caller != nil
 }
 
 // This function is not declared with a pointer value because otherwise
 // race conditions will occur when using multiple goroutines
 func (entry Entry) log(level Level, msg string) {
 	var buffer *bytes.Buffer
-	entry.Time = time.Now()
+
+	// Default to now, but allow users to override if they want.
+	//
+	// We don't have to worry about polluting future calls to Entry#log()
+	// with this assignment because this function is declared with a
+	// non-pointer receiver.
+	if entry.Time.IsZero() {
+		entry.Time = time.Now()
+	}
+
 	entry.Level = level
 	entry.Message = msg
-	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
-		entry.Logger.mu.Lock()
-		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
-		entry.Logger.mu.Unlock()
-	}
+	if entry.Logger.ReportCaller {
+		entry.Caller = getCaller()
+	}
+
+	entry.fireHooks()
+
 	buffer = bufferPool.Get().(*bytes.Buffer)
 	buffer.Reset()
 	defer bufferPool.Put(buffer)
 	entry.Buffer = buffer
-	serialized, err := entry.Logger.Formatter.Format(&entry)
+
+	entry.write()
+
 	entry.Buffer = nil
-	if err != nil {
-		entry.Logger.mu.Lock()
-		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
-		entry.Logger.mu.Unlock()
-	} else {
-		entry.Logger.mu.Lock()
-		_, err = entry.Logger.Out.Write(serialized)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
-		}
-		entry.Logger.mu.Unlock()
-	}
 
 	// To avoid Entry#log() returning a value that only would make sense for
 	// panic() to use in Entry#Panic(), we avoid the allocation by checking
@@ -126,8 +217,37 @@ func (entry Entry) log(level Level, msg string) {
 	}
 }
 
+func (entry *Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+	}
+}
+
+func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(TraceLevel) {
+		entry.log(TraceLevel, fmt.Sprint(args...))
+	}
+}
+
 func (entry *Entry) Debug(args ...interface{}) {
-	if entry.Logger.level() >= DebugLevel {
+	if entry.Logger.IsLevelEnabled(DebugLevel) {
 		entry.log(DebugLevel, fmt.Sprint(args...))
 	}
 }
@@ -137,13 +257,13 @@ func (entry *Entry) Print(args ...interface{}) {
 }
 
 func (entry *Entry) Info(args ...interface{}) {
-	if entry.Logger.level() >= InfoLevel {
+	if entry.Logger.IsLevelEnabled(InfoLevel) {
 		entry.log(InfoLevel, fmt.Sprint(args...))
 	}
 }
 
 func (entry *Entry) Warn(args ...interface{}) {
-	if entry.Logger.level() >= WarnLevel {
+	if entry.Logger.IsLevelEnabled(WarnLevel) {
 		entry.log(WarnLevel, fmt.Sprint(args...))
 	}
 }
@@ -153,20 +273,20 @@ func (entry *Entry) Warning(args ...interface{}) {
 }
 
 func (entry *Entry) Error(args ...interface{}) {
-	if entry.Logger.level() >= ErrorLevel {
+	if entry.Logger.IsLevelEnabled(ErrorLevel) {
 		entry.log(ErrorLevel, fmt.Sprint(args...))
 	}
 }
 
 func (entry *Entry) Fatal(args ...interface{}) {
-	if entry.Logger.level() >= FatalLevel {
+	if entry.Logger.IsLevelEnabled(FatalLevel) {
 		entry.log(FatalLevel, fmt.Sprint(args...))
 	}
-	Exit(1)
+	entry.Logger.Exit(1)
 }
 
 func (entry *Entry) Panic(args ...interface{}) {
-	if entry.Logger.level() >= PanicLevel {
+	if entry.Logger.IsLevelEnabled(PanicLevel) {
 		entry.log(PanicLevel, fmt.Sprint(args...))
 	}
 	panic(fmt.Sprint(args...))
@@ -174,14 +294,20 @@ func (entry *Entry) Panic(args ...interface{}) {
 
 // Entry Printf family functions
 
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(TraceLevel) {
+		entry.Trace(fmt.Sprintf(format, args...))
+	}
+}
+
 func (entry *Entry) Debugf(format string, args ...interface{}) {
-	if entry.Logger.level() >= DebugLevel {
+	if entry.Logger.IsLevelEnabled(DebugLevel) {
 		entry.Debug(fmt.Sprintf(format, args...))
 	}
 }
 
 func (entry *Entry) Infof(format string, args ...interface{}) {
-	if entry.Logger.level() >= InfoLevel {
+	if entry.Logger.IsLevelEnabled(InfoLevel) {
 		entry.Info(fmt.Sprintf(format, args...))
 	}
 }
@@ -191,7 +317,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
 }
 
 func (entry *Entry) Warnf(format string, args ...interface{}) {
-	if entry.Logger.level() >= WarnLevel {
+	if entry.Logger.IsLevelEnabled(WarnLevel) {
 		entry.Warn(fmt.Sprintf(format, args...))
 	}
 }
@@ -201,34 +327,40 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
 }
 
 func (entry *Entry) Errorf(format string, args ...interface{}) {
-	if entry.Logger.level() >= ErrorLevel {
+	if entry.Logger.IsLevelEnabled(ErrorLevel) {
 		entry.Error(fmt.Sprintf(format, args...))
 	}
 }
 
 func (entry *Entry) Fatalf(format string, args ...interface{}) {
-	if entry.Logger.level() >= FatalLevel {
+	if entry.Logger.IsLevelEnabled(FatalLevel) {
 		entry.Fatal(fmt.Sprintf(format, args...))
 	}
-	Exit(1)
+	entry.Logger.Exit(1)
 }
 
 func (entry *Entry) Panicf(format string, args ...interface{}) {
-	if entry.Logger.level() >= PanicLevel {
+	if entry.Logger.IsLevelEnabled(PanicLevel) {
 		entry.Panic(fmt.Sprintf(format, args...))
 	}
 }
 
 // Entry Println family functions
 
+func (entry *Entry) Traceln(args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(TraceLevel) {
+		entry.Trace(entry.sprintlnn(args...))
+	}
+}
+
 func (entry *Entry) Debugln(args ...interface{}) {
-	if entry.Logger.level() >= DebugLevel {
+	if entry.Logger.IsLevelEnabled(DebugLevel) {
 		entry.Debug(entry.sprintlnn(args...))
 	}
 }
 
 func (entry *Entry) Infoln(args ...interface{}) {
-	if entry.Logger.level() >= InfoLevel {
+	if entry.Logger.IsLevelEnabled(InfoLevel) {
 		entry.Info(entry.sprintlnn(args...))
 	}
 }
@@ -238,7 +370,7 @@ func (entry *Entry) Println(args ...interface{}) {
 }
 
 func (entry *Entry) Warnln(args ...interface{}) {
-	if entry.Logger.level() >= WarnLevel {
+	if entry.Logger.IsLevelEnabled(WarnLevel) {
 		entry.Warn(entry.sprintlnn(args...))
 	}
 }
@@ -248,20 +380,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
 }
 
 func (entry *Entry) Errorln(args ...interface{}) {
-	if entry.Logger.level() >= ErrorLevel {
+	if entry.Logger.IsLevelEnabled(ErrorLevel) {
 		entry.Error(entry.sprintlnn(args...))
 	}
 }
 
 func (entry *Entry) Fatalln(args ...interface{}) {
-	if entry.Logger.level() >= FatalLevel {
+	if entry.Logger.IsLevelEnabled(FatalLevel) {
 		entry.Fatal(entry.sprintlnn(args...))
 	}
-	Exit(1)
+	entry.Logger.Exit(1)
}
 
 func (entry *Entry) Panicln(args ...interface{}) {
-	if entry.Logger.level() >= PanicLevel {
+	if entry.Logger.IsLevelEnabled(PanicLevel) {
 		entry.Panic(entry.sprintlnn(args...))
 	}
 }
60  vendor/github.com/sirupsen/logrus/exported.go  (generated, vendored)

@@ -2,6 +2,7 @@ package logrus
 import (
 	"io"
+	"time"
 )

@@ -15,37 +16,38 @@ func StandardLogger() *Logger {
 // SetOutput sets the standard logger output.
 func SetOutput(out io.Writer) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Out = out
+	std.SetOutput(out)
 }

 // SetFormatter sets the standard logger formatter.
 func SetFormatter(formatter Formatter) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Formatter = formatter
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
 }

 // SetLevel sets the standard logger level.
 func SetLevel(level Level) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
 	std.SetLevel(level)
 }

 // GetLevel returns the standard logger level.
 func GetLevel() Level {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	return std.level()
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
 }

 // AddHook adds a hook to the standard logger hooks.
 func AddHook(hook Hook) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Hooks.Add(hook)
+	std.AddHook(hook)
 }

 // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.

@@ -72,6 +74,20 @@ func WithFields(fields Fields) *Entry {
 	return std.WithFields(fields)
 }

+// WithTime creats an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+	return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+	std.Trace(args...)
+}
+
 // Debug logs a message at level Debug on the standard logger.
 func Debug(args ...interface{}) {
 	std.Debug(args...)

@@ -107,11 +123,16 @@ func Panic(args ...interface{}) {
-// Fatal logs a message at level Fatal on the standard logger.
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatal(args ...interface{}) {
 	std.Fatal(args...)
 }

+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+	std.Tracef(format, args...)
+}
+
 // Debugf logs a message at level Debug on the standard logger.
 func Debugf(format string, args ...interface{}) {
 	std.Debugf(format, args...)

@@ -147,11 +168,16 @@ func Panicf(format string, args ...interface{}) {
-// Fatalf logs a message at level Fatal on the standard logger.
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatalf(format string, args ...interface{}) {
 	std.Fatalf(format, args...)
 }

+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
 // Debugln logs a message at level Debug on the standard logger.
 func Debugln(args ...interface{}) {
 	std.Debugln(args...)

@@ -187,7 +213,7 @@ func Panicln(args ...interface{}) {
-// Fatalln logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatalln(args ...interface{}) {
 	std.Fatalln(args...)
 }
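To make the reworked standard-logger surface above concrete, here is a minimal, hedged sketch of how the new package-level helpers (SetReportCaller, IsLevelEnabled, Trace, WithTime) fit together; it is illustrative only and not code from this commit:

package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetLevel(log.TraceLevel) // TraceLevel is added elsewhere in this update
	log.SetReportCaller(true)    // adds caller (func/file) information to entries

	if log.IsLevelEnabled(log.TraceLevel) {
		log.Trace("tracing is enabled")
	}

	// WithTime overrides the entry timestamp, e.g. when replaying buffered events.
	log.WithTime(time.Now().Add(-time.Minute)).Info("backdated entry")
}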
51  vendor/github.com/sirupsen/logrus/formatter.go  (generated, vendored)

@@ -2,7 +2,16 @@ package logrus
 import "time"

-const defaultTimestampFormat = time.RFC3339
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)

 // The Formatter interface is used to implement a custom Formatter. It takes an
 // `Entry`. It exposes all the fields, including the default ones:

@@ -18,7 +27,7 @@ type Formatter interface {
 	Format(*Entry) ([]byte, error)
 }

-// This is to not silently overwrite `time`, `msg` and `level` fields when
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
 // dumping it. If this code wasn't there doing:
 //
 // logrus.WithField("level", 1).Info("hello")

@@ -30,16 +39,40 @@ type Formatter interface {
 // It's not exported because it's still using Data in an opinionated way. It's to
 // avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
-	if t, ok := data["time"]; ok {
-		data["fields.time"] = t
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
 	}

-	if m, ok := data["msg"]; ok {
-		data["fields.msg"] = m
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
 	}

-	if l, ok := data["level"]; ok {
-		data["fields.level"] = l
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
 	}
 }
11  vendor/github.com/sirupsen/logrus/go.mod  (generated, vendored, new file)

@@ -0,0 +1,11 @@
+module github.com/sirupsen/logrus
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.1
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.1.1 // indirect
+	github.com/stretchr/testify v1.2.2
+	golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
+	golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
+)
15  vendor/github.com/sirupsen/logrus/go.sum  (generated, vendored, new file)

@@ -0,0 +1,15 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
54  vendor/github.com/sirupsen/logrus/json_formatter.go  (generated, vendored)

@@ -1,6 +1,7 @@
 package logrus

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 )

@@ -10,13 +11,6 @@ type fieldKey string
 // FieldMap allows customization of the key names for default fields.
 type FieldMap map[fieldKey]string

-// Default key names for the default fields
-const (
-	FieldKeyMsg   = "msg"
-	FieldKeyLevel = "level"
-	FieldKeyTime  = "time"
-)
-
 func (f FieldMap) resolve(key fieldKey) string {
 	if k, ok := f[key]; ok {
 		return k

@@ -33,21 +27,28 @@ type JSONFormatter struct {
 	// DisableTimestamp allows disabling automatic timestamps in output
 	DisableTimestamp bool

+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
 	// FieldMap allows users to customize the names of keys for default fields.
 	// As an example:
 	// formatter := &JSONFormatter{
 	//   	FieldMap: FieldMap{
 	// 		 FieldKeyTime:  "@timestamp",
 	// 		 FieldKeyLevel: "@level",
 	// 		 FieldKeyMsg:   "@message",
+	// 		 FieldKeyFunc:  "@caller",
 	//    },
 	// }
 	FieldMap FieldMap
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
 }

 // Format renders a single log entry
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
-	data := make(Fields, len(entry.Data)+3)
+	data := make(Fields, len(entry.Data)+4)
 	for k, v := range entry.Data {
 		switch v := v.(type) {
 		case error:

@@ -58,22 +59,47 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 			data[k] = v
 		}
 	}
-	prefixFieldClashes(data)
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
 		timestampFormat = defaultTimestampFormat
 	}

+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
 	if !f.DisableTimestamp {
 		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
 	}
 	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
 	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
+		data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+	}

-	serialized, err := json.Marshal(data)
-	if err != nil {
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
 		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
 	}
-	return append(serialized, '\n'), nil
+
+	return b.Bytes(), nil
 }
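The JSONFormatter options added above (DataKey, PrettyPrint, and the FieldKeyFunc/FieldKeyFile keys) can be combined roughly as follows; this is a small usage sketch, not part of the vendored code:

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{
		DataKey:     "fields", // nest user-supplied fields under one key
		PrettyPrint: true,     // indent the encoded JSON
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})
	log.WithField("component", "statsd_exporter").Info("mapping reloaded")
}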
158  vendor/github.com/sirupsen/logrus/logger.go  (generated, vendored)

@@ -5,12 +5,13 @@ import (
 	"sync"
 	"sync/atomic"
+	"time"
 )

 type Logger struct {
 	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
 	// file, or leave it default which is `os.Stderr`. You can also set this to
-	// something more adventorous, such as logging to Kafka.
+	// something more adventurous, such as logging to Kafka.
 	Out io.Writer

@@ -23,6 +24,10 @@ type Logger struct {
 	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool

@@ -31,8 +36,12 @@ type Logger struct {
 	// Reusable empty entry
 	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
 }

+type exitFunc func(int)
+
 type MutexWrap struct {
 	lock     sync.Mutex
 	disabled bool

@@ -68,10 +77,12 @@ func (mw *MutexWrap) Disable() {
 func New() *Logger {
 	return &Logger{
 		Out:       os.Stderr,
 		Formatter: new(TextFormatter),
 		Hooks:     make(LevelHooks),
 		Level:     InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
 	}
 }

@@ -84,11 +95,12 @@ func (logger *Logger) newEntry() *Entry {
 func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
 	logger.entryPool.Put(entry)
 }

 // Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
 // If you want multiple fields, use `WithFields`.

@@ -112,8 +124,23 @@ func (logger *Logger) WithError(err error) *Entry {
 	return entry.WithError(err)
 }

+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	if logger.IsLevelEnabled(TraceLevel) {
+		entry := logger.newEntry()
+		entry.Tracef(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
 func (logger *Logger) Debugf(format string, args ...interface{}) {
-	if logger.level() >= DebugLevel {
+	if logger.IsLevelEnabled(DebugLevel) {
 func (logger *Logger) Infof(format string, args ...interface{}) {
-	if logger.level() >= InfoLevel {
+	if logger.IsLevelEnabled(InfoLevel) {
 func (logger *Logger) Warnf(format string, args ...interface{}) {
-	if logger.level() >= WarnLevel {
+	if logger.IsLevelEnabled(WarnLevel) {
 func (logger *Logger) Warningf(format string, args ...interface{}) {
-	if logger.level() >= WarnLevel {
+	if logger.IsLevelEnabled(WarnLevel) {
 func (logger *Logger) Errorf(format string, args ...interface{}) {
-	if logger.level() >= ErrorLevel {
+	if logger.IsLevelEnabled(ErrorLevel) {

 func (logger *Logger) Fatalf(format string, args ...interface{}) {
-	if logger.level() >= FatalLevel {
+	if logger.IsLevelEnabled(FatalLevel) {
 		entry := logger.newEntry()
 		entry.Fatalf(format, args...)
 		logger.releaseEntry(entry)
 	}
-	Exit(1)
+	logger.Exit(1)
 }

 func (logger *Logger) Panicf(format string, args ...interface{}) {
-	if logger.level() >= PanicLevel {
+	if logger.IsLevelEnabled(PanicLevel) {

+func (logger *Logger) Trace(args ...interface{}) {
+	if logger.IsLevelEnabled(TraceLevel) {
+		entry := logger.newEntry()
+		entry.Trace(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
 func (logger *Logger) Debug(args ...interface{}) {
-	if logger.level() >= DebugLevel {
+	if logger.IsLevelEnabled(DebugLevel) {
 func (logger *Logger) Info(args ...interface{}) {
-	if logger.level() >= InfoLevel {
+	if logger.IsLevelEnabled(InfoLevel) {
 func (logger *Logger) Warn(args ...interface{}) {
-	if logger.level() >= WarnLevel {
+	if logger.IsLevelEnabled(WarnLevel) {
 func (logger *Logger) Warning(args ...interface{}) {
-	if logger.level() >= WarnLevel {
+	if logger.IsLevelEnabled(WarnLevel) {
 func (logger *Logger) Error(args ...interface{}) {
-	if logger.level() >= ErrorLevel {
+	if logger.IsLevelEnabled(ErrorLevel) {

 func (logger *Logger) Fatal(args ...interface{}) {
-	if logger.level() >= FatalLevel {
+	if logger.IsLevelEnabled(FatalLevel) {
 		entry := logger.newEntry()
 		entry.Fatal(args...)
 		logger.releaseEntry(entry)
 	}
-	Exit(1)
+	logger.Exit(1)
 }

 func (logger *Logger) Panic(args ...interface{}) {
-	if logger.level() >= PanicLevel {
+	if logger.IsLevelEnabled(PanicLevel) {

+func (logger *Logger) Traceln(args ...interface{}) {
+	if logger.IsLevelEnabled(TraceLevel) {
+		entry := logger.newEntry()
+		entry.Traceln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
 func (logger *Logger) Debugln(args ...interface{}) {
-	if logger.level() >= DebugLevel {
+	if logger.IsLevelEnabled(DebugLevel) {
 func (logger *Logger) Infoln(args ...interface{}) {
-	if logger.level() >= InfoLevel {
+	if logger.IsLevelEnabled(InfoLevel) {
 func (logger *Logger) Warnln(args ...interface{}) {
-	if logger.level() >= WarnLevel {
+	if logger.IsLevelEnabled(WarnLevel) {
 func (logger *Logger) Warningln(args ...interface{}) {
-	if logger.level() >= WarnLevel {
+	if logger.IsLevelEnabled(WarnLevel) {
 func (logger *Logger) Errorln(args ...interface{}) {
-	if logger.level() >= ErrorLevel {
+	if logger.IsLevelEnabled(ErrorLevel) {

 func (logger *Logger) Fatalln(args ...interface{}) {
-	if logger.level() >= FatalLevel {
+	if logger.IsLevelEnabled(FatalLevel) {
 		entry := logger.newEntry()
 		entry.Fatalln(args...)
 		logger.releaseEntry(entry)
 	}
-	Exit(1)
+	logger.Exit(1)
 }

 func (logger *Logger) Panicln(args ...interface{}) {
-	if logger.level() >= PanicLevel {
+	if logger.IsLevelEnabled(PanicLevel) {

+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
 //When file is opened with appending mode, it's safe to
 //write concurrently to a file (within 4k message on Linux).
 //In these cases user can choose to disable the lock.

@@ -312,6 +363,53 @@ func (logger *Logger) level() Level {
 	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
 }

+// SetLevel sets the logger level.
 func (logger *Logger) SetLevel(level Level) {
 	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
 }

+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
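The new ExitFunc field and Logger.Exit method above let callers intercept the exit that Fatal triggers, which is useful in tests; a hedged sketch under those assumptions, not code from this commit:

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	exitCode := -1
	logger.ExitFunc = func(code int) { exitCode = code } // capture instead of os.Exit

	logger.Fatal("simulated fatal") // runs exit handlers, then calls ExitFunc(1)
	fmt.Println("captured exit code:", exitCode)
}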
37  vendor/github.com/sirupsen/logrus/logrus.go  (generated, vendored)

@@ -15,6 +15,8 @@ type Level uint32
 // Convert the Level to a string. E.g. PanicLevel becomes "panic".
 func (level Level) String() string {
 	switch level {
+	case TraceLevel:
+		return "trace"
 	case DebugLevel:
 		return "debug"
 	case InfoLevel:

@@ -47,12 +49,26 @@ func ParseLevel(lvl string) (Level, error) {
 	case "debug":
 		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
 	}

 	var l Level
 	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
 }

+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = Level(l)
+
+	return nil
+}
+
 // A constant exposing all logging levels
 var AllLevels = []Level{
 	PanicLevel,

@@ -61,6 +77,7 @@ var AllLevels = []Level{
 	InfoLevel,
 	DebugLevel,
+	TraceLevel,
 }

@@ -69,7 +86,7 @@ const (
 	PanicLevel Level = iota
-	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
 	// logging level is set to Panic.
 	FatalLevel

@@ -82,6 +99,8 @@ const (
 	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
 	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than the Debug.
+	TraceLevel
 )

 // Won't compile if StdLogger can't be realized by a log.Logger

@@ -140,4 +159,20 @@ type FieldLogger interface {
 	Fatalln(args ...interface{})
 	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
 }
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistancy. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
+}
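Because Level now implements encoding.TextUnmarshaler (UnmarshalText above) and knows about TraceLevel, a log level can be decoded straight from configuration text; a brief sketch assuming only the vendored API shown here:

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	lvl, err := logrus.ParseLevel("trace")
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // trace

	var fromText logrus.Level
	if err := fromText.UnmarshalText([]byte("debug")); err != nil {
		panic(err)
	}
	fmt.Println(fromText) // debug
}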
10  vendor/github.com/sirupsen/logrus/terminal_bsd.go  (generated, vendored)

@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TIOCGETA
-
-type Termios unix.Termios
11  vendor/github.com/sirupsen/logrus/terminal_check_appengine.go  (generated, vendored, new file)

@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
11  vendor/github.com/sirupsen/logrus/terminal_check_js.go  (generated, vendored, new file)

@@ -0,0 +1,11 @@
+// +build js
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
19  vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go  (generated, vendored, new file)

@@ -0,0 +1,19 @@
+// +build !appengine,!js,!windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return terminal.IsTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
20  vendor/github.com/sirupsen/logrus/terminal_check_windows.go  (generated, vendored, new file)

@@ -0,0 +1,20 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		var mode uint32
+		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+		return err == nil
+	default:
+		return false
+	}
+}
14  vendor/github.com/sirupsen/logrus/terminal_linux.go  (generated, vendored)

@@ -1,14 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-
-type Termios unix.Termios
8  vendor/github.com/sirupsen/logrus/terminal_notwindows.go  (generated, vendored, new file)

@@ -0,0 +1,8 @@
+// +build !windows
+
+package logrus
+
+import "io"
+
+func initTerminal(w io.Writer) {
+}
18  vendor/github.com/sirupsen/logrus/terminal_windows.go  (generated, vendored, new file)

@@ -0,0 +1,18 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+	switch v := w.(type) {
+	case *os.File:
+		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+	}
+}
146  vendor/github.com/sirupsen/logrus/text_formatter.go  (generated, vendored)

@@ -3,14 +3,11 @@ package logrus
 import (
 	"bytes"
 	"fmt"
-	"io"
 	"os"
 	"sort"
 	"strings"
 	"sync"
 	"time"
-
-	"golang.org/x/crypto/ssh/terminal"
 )

@@ -24,6 +21,7 @@ const (
 var (
 	baseTimestamp time.Time
+	emptyFieldMap FieldMap
 )

@@ -38,6 +36,9 @@ type TextFormatter struct {
 	// Force disabling colors.
 	DisableColors bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool

@@ -54,69 +55,132 @@ type TextFormatter struct {
 	DisableSorting bool
+
+	// The keys sorting function, when uninitialized it uses sort.Strings.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool

 	// QuoteEmptyFields will wrap empty fields in quotes if true
 	QuoteEmptyFields bool

 	// Whether the logger's out is to a terminal
 	isTerminal bool

-	sync.Once
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:  "@message"}}
+	FieldMap FieldMap
+
+	terminalInitOnce sync.Once
 }

 func (f *TextFormatter) init(entry *Entry) {
 	if entry.Logger != nil {
-		f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+
+		if f.isTerminal {
+			initTerminal(entry.Logger.Out)
+		}
 	}
 }

-func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
-	switch v := w.(type) {
-	case *os.File:
-		return terminal.IsTerminal(int(v.Fd()))
-	default:
-		return false
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || f.isTerminal
+
+	if f.EnvironmentOverrideColors {
+		if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+			isColored = true
+		} else if ok && force == "0" {
+			isColored = false
+		} else if os.Getenv("CLICOLOR") == "0" {
+			isColored = false
+		}
 	}
+
+	return isColored && !f.DisableColors
 }

 // Format renders a single log entry
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-	var b *bytes.Buffer
+	prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller())
+
 	keys := make([]string, 0, len(entry.Data))
 	for k := range entry.Data {
 		keys = append(keys, k)
 	}

-	if !f.DisableSorting {
-		sort.Strings(keys)
+	fixedKeys := make([]string, 0, 4+len(entry.Data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
 	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		fixedKeys = append(fixedKeys,
+			f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+	}

+	var b *bytes.Buffer
 	if entry.Buffer != nil {
 		b = entry.Buffer
 	} else {
 		b = &bytes.Buffer{}
 	}

-	prefixFieldClashes(entry.Data)
-
-	f.Do(func() { f.init(entry) })
-
-	isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+	f.terminalInitOnce.Do(func() { f.init(entry) })

 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
 		timestampFormat = defaultTimestampFormat
 	}
-	if isColored {
+	if f.isColored() {
 		f.printColored(b, entry, keys, timestampFormat)
 	} else {
-		if !f.DisableTimestamp {
-			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
-		}
-		f.appendKeyValue(b, "level", entry.Level.String())
-		if entry.Message != "" {
-			f.appendKeyValue(b, "msg", entry.Message)
-		}
-		for _, key := range keys {
-			f.appendKeyValue(b, key, entry.Data[key])
+		for _, key := range fixedKeys {
+			var value interface{}
+			switch {
+			case key == f.FieldMap.resolve(FieldKeyTime):
+				value = entry.Time.Format(timestampFormat)
+			case key == f.FieldMap.resolve(FieldKeyLevel):
+				value = entry.Level.String()
+			case key == f.FieldMap.resolve(FieldKeyMsg):
+				value = entry.Message
+			case key == f.FieldMap.resolve(FieldKeyLogrusError):
+				value = entry.err
+			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+				value = entry.Caller.Function
+			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+				value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+			default:
+				value = entry.Data[key]
+			}
+			f.appendKeyValue(b, key, value)
 		}
 	}

@@ -127,7 +191,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
 func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
 	var levelColor int
 	switch entry.Level {
-	case DebugLevel:
+	case DebugLevel, TraceLevel:
 		levelColor = gray
 	case WarnLevel:
 		levelColor = yellow

@@ -137,14 +201,28 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
 		levelColor = blue
 	}

-	levelText := strings.ToUpper(entry.Level.String())[0:4]
+	levelText := strings.ToUpper(entry.Level.String())
+	if !f.DisableLevelTruncation {
+		levelText = levelText[0:4]
+	}
+
+	// Remove a single newline if it already exists in the message to keep
+	// the behavior of logrus text_formatter the same as the stdlib log package
+	entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+	caller := ""
+
+	if entry.HasCaller() {
+		caller = fmt.Sprintf("%s:%d %s()",
+			entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
+	}

 	if f.DisableTimestamp {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
 	} else if !f.FullTimestamp {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
 	} else {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
 	}
 	for _, k := range keys {
 		v := entry.Data[k]
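The TextFormatter gains SortingFunc, DisableLevelTruncation and EnvironmentOverrideColors above; a hedged configuration sketch (illustrative, not code from this repository):

package main

import (
	"sort"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetFormatter(&logrus.TextFormatter{
		FullTimestamp:             true,
		DisableLevelTruncation:    true, // print the full level name in colored output, not the 4-char form
		EnvironmentOverrideColors: true, // honour CLICOLOR / CLICOLOR_FORCE
		SortingFunc: func(keys []string) { // custom ordering of the emitted keys
			sort.Sort(sort.Reverse(sort.StringSlice(keys)))
		},
	})
	logrus.WithField("component", "exporter").Warn("mapping cache is cold")
}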
2  vendor/github.com/sirupsen/logrus/writer.go  (generated, vendored)

@@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
 	var printFunc func(args ...interface{})

 	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
 	case DebugLevel:
 		printFunc = entry.Debug
 	case InfoLevel:
2  vendor/golang.org/x/crypto/ssh/terminal/terminal.go  (generated, vendored)

@@ -617,7 +617,7 @@ func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
 		if _, err = w.Write(crlf); err != nil {
 			return n, err
 		}
-		n += 1
+		n++
 		buf = buf[1:]
 	}
 }
71 vendor/golang.org/x/crypto/ssh/terminal/util.go generated vendored

@@ -17,44 +17,41 @@
 package terminal // import "golang.org/x/crypto/ssh/terminal"
 
 import (
-	"syscall"
-	"unsafe"
-
 	"golang.org/x/sys/unix"
 )
 
 // State contains the state of a terminal.
 type State struct {
-	termios syscall.Termios
+	termios unix.Termios
 }
 
 // IsTerminal returns true if the given file descriptor is a terminal.
 func IsTerminal(fd int) bool {
-	var termios syscall.Termios
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-	return err == 0
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
 }
 
 // MakeRaw put the terminal connected to the given file descriptor into raw
 // mode and returns the previous state of the terminal so that it can be
 // restored.
 func MakeRaw(fd int) (*State, error) {
-	var oldState State
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+	termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	if err != nil {
 		return nil, err
 	}
 
-	newState := oldState.termios
+	oldState := State{termios: *termios}
 
 	// This attempts to replicate the behaviour documented for cfmakeraw in
 	// the termios(3) manpage.
-	newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
-	newState.Oflag &^= syscall.OPOST
-	newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
-	newState.Cflag &^= syscall.CSIZE | syscall.PARENB
-	newState.Cflag |= syscall.CS8
-	newState.Cc[unix.VMIN] = 1
-	newState.Cc[unix.VTIME] = 0
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+	termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+	termios.Oflag &^= unix.OPOST
+	termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+	termios.Cflag &^= unix.CSIZE | unix.PARENB
+	termios.Cflag |= unix.CS8
+	termios.Cc[unix.VMIN] = 1
+	termios.Cc[unix.VTIME] = 0
+	if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil {
 		return nil, err
 	}
 
@@ -64,60 +61,54 @@ func MakeRaw(fd int) (*State, error) {
 // GetState returns the current state of a terminal which may be useful to
 // restore the terminal after a signal.
 func GetState(fd int) (*State, error) {
-	var oldState State
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+	termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	if err != nil {
 		return nil, err
 	}
 
-	return &oldState, nil
+	return &State{termios: *termios}, nil
 }
 
 // Restore restores the terminal connected to the given file descriptor to a
 // previous state.
 func Restore(fd int, state *State) error {
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 {
-		return err
-	}
-	return nil
+	return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios)
 }
 
 // GetSize returns the dimensions of the given terminal.
 func GetSize(fd int) (width, height int, err error) {
-	var dimensions [4]uint16
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
+	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+	if err != nil {
 		return -1, -1, err
 	}
-	return int(dimensions[1]), int(dimensions[0]), nil
+	return int(ws.Col), int(ws.Row), nil
 }
 
 // passwordReader is an io.Reader that reads from a specific file descriptor.
 type passwordReader int
 
 func (r passwordReader) Read(buf []byte) (int, error) {
-	return syscall.Read(int(r), buf)
+	return unix.Read(int(r), buf)
}
 
 // ReadPassword reads a line of input from a terminal without local echo. This
 // is commonly used for inputting passwords and other sensitive data. The slice
 // returned does not include the \n.
 func ReadPassword(fd int) ([]byte, error) {
-	var oldState syscall.Termios
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
+	termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	if err != nil {
 		return nil, err
 	}
 
-	newState := oldState
-	newState.Lflag &^= syscall.ECHO
-	newState.Lflag |= syscall.ICANON | syscall.ISIG
-	newState.Iflag |= syscall.ICRNL
-	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+	newState := *termios
+	newState.Lflag &^= unix.ECHO
+	newState.Lflag |= unix.ICANON | unix.ISIG
+	newState.Iflag |= unix.ICRNL
+	if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil {
 		return nil, err
 	}
 
-	defer func() {
-		syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
-	}()
+	defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
 
 	return readPasswordLine(passwordReader(fd))
 }
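The util.go rewrite swaps raw syscall.Syscall6 ioctls for the typed golang.org/x/sys/unix wrappers, but the exported API is unchanged. A minimal sketch of that public surface, assuming it runs on a Unix terminal (this example is not part of the diff):

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !terminal.IsTerminal(fd) {
		fmt.Println("stdin is not a terminal")
		return
	}

	// MakeRaw saves the current termios state (now via unix.IoctlGetTermios)
	// and switches to raw mode; Restore puts the saved state back.
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer terminal.Restore(fd, oldState)

	if w, h, err := terminal.GetSize(fd); err == nil {
		fmt.Printf("terminal is %d columns by %d rows\r\n", w, h)
	}
}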
36 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go generated vendored

@@ -14,7 +14,7 @@ import (
 
 // State contains the state of a terminal.
 type State struct {
-	state *unix.Termios
+	termios unix.Termios
 }
 
 // IsTerminal returns true if the given file descriptor is a terminal.
@@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) {
 // restored.
 // see http://cr.illumos.org/~webrev/andy_js/1060/
 func MakeRaw(fd int) (*State, error) {
-	oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
 	if err != nil {
 		return nil, err
 	}
-	oldTermios := *oldTermiosPtr
 
-	newTermios := oldTermios
-	newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
-	newTermios.Oflag &^= syscall.OPOST
-	newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
-	newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB
-	newTermios.Cflag |= syscall.CS8
-	newTermios.Cc[unix.VMIN] = 1
-	newTermios.Cc[unix.VTIME] = 0
+	oldState := State{termios: *termios}
 
-	if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
+	termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+	termios.Oflag &^= unix.OPOST
+	termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+	termios.Cflag &^= unix.CSIZE | unix.PARENB
+	termios.Cflag |= unix.CS8
+	termios.Cc[unix.VMIN] = 1
+	termios.Cc[unix.VTIME] = 0
+
+	if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
 		return nil, err
 	}
 
-	return &State{
-		state: oldTermiosPtr,
-	}, nil
+	return &oldState, nil
 }
 
 // Restore restores the terminal connected to the given file descriptor to a
 // previous state.
 func Restore(fd int, oldState *State) error {
-	return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state)
+	return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
 }
 
 // GetState returns the current state of a terminal which may be useful to
 // restore the terminal after a signal.
 func GetState(fd int) (*State, error) {
-	oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
 	if err != nil {
 		return nil, err
 	}
 
-	return &State{
-		state: oldTermiosPtr,
-	}, nil
+	return &State{termios: *termios}, nil
 }
 
 // GetSize returns the dimensions of the given terminal.
23 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go generated vendored

@@ -17,6 +17,8 @@
 package terminal
 
 import (
+	"os"
+
 	"golang.org/x/sys/windows"
 )
 
@@ -71,13 +73,6 @@ func GetSize(fd int) (width, height int, err error) {
 	return int(info.Size.X), int(info.Size.Y), nil
 }
 
-// passwordReader is an io.Reader that reads from a specific Windows HANDLE.
-type passwordReader int
-
-func (r passwordReader) Read(buf []byte) (int, error) {
-	return windows.Read(windows.Handle(r), buf)
-}
-
 // ReadPassword reads a line of input from a terminal without local echo. This
 // is commonly used for inputting passwords and other sensitive data. The slice
 // returned does not include the \n.
@@ -94,9 +89,15 @@ func ReadPassword(fd int) ([]byte, error) {
 		return nil, err
 	}
 
-	defer func() {
-		windows.SetConsoleMode(windows.Handle(fd), old)
-	}()
+	defer windows.SetConsoleMode(windows.Handle(fd), old)
 
-	return readPasswordLine(passwordReader(fd))
+	var h windows.Handle
+	p, _ := windows.GetCurrentProcess()
+	if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil {
+		return nil, err
+	}
+
+	f := os.NewFile(uintptr(h), "stdin")
+	defer f.Close()
+	return readPasswordLine(f)
 }
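On Windows, ReadPassword no longer reads through the removed passwordReader type; it duplicates the console handle, wraps it in an *os.File, and reads the line from that. Callers are unaffected. A hedged, cross-platform usage sketch (not part of the diff):

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Print("Password: ")
	// Echo is disabled while the secret is typed, and the previous console
	// mode is restored before ReadPassword returns.
	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Println()
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes (not echoed)\n", len(pw))
}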
17 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s generated vendored Normal file

@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+	JMP	syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+	JMP	syscall·rawSyscall6(SB)
Some files were not shown because too many files have changed in this diff.