mirror of
https://github.com/prometheus/statsd_exporter.git
synced 2025-10-02 13:39:44 +00:00
Update vendor github.com/prometheus/procfs
This commit is contained in:
parent
115cb0e157
commit
3f4f12eb1f
16 changed files with 1889 additions and 55 deletions
20
vendor/github.com/prometheus/procfs/AUTHORS.md
generated
vendored
20
vendor/github.com/prometheus/procfs/AUTHORS.md
generated
vendored
|
@ -1,20 +0,0 @@
|
||||||
The Prometheus project was started by Matt T. Proud (emeritus) and
|
|
||||||
Julius Volz in 2012.
|
|
||||||
|
|
||||||
Maintainers of this repository:
|
|
||||||
|
|
||||||
* Tobias Schmidt <ts@soundcloud.com>
|
|
||||||
|
|
||||||
The following individuals have contributed code to this repository
|
|
||||||
(listed in alphabetical order):
|
|
||||||
|
|
||||||
* Armen Baghumian <abaghumian@noggin.com.au>
|
|
||||||
* Bjoern Rabenstein <beorn@soundcloud.com>
|
|
||||||
* David Cournapeau <cournape@gmail.com>
|
|
||||||
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
|
|
||||||
* Jonas Große Sundrup <cherti@letopolis.de>
|
|
||||||
* Julius Volz <julius.volz@gmail.com>
|
|
||||||
* Matthias Rampke <mr@soundcloud.com>
|
|
||||||
* Nicky Gerritsen <nicky@streamone.nl>
|
|
||||||
* Rémi Audebert <contact@halfr.net>
|
|
||||||
* Tobias Schmidt <tobidt@gmail.com>
|
|
6
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
6
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
|
@ -2,9 +2,9 @@
|
||||||
|
|
||||||
Prometheus uses GitHub to manage reviews of pull requests.
|
Prometheus uses GitHub to manage reviews of pull requests.
|
||||||
|
|
||||||
* If you have a trivial fix or improvement, go ahead and create a pull
|
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||||
request, addressing (with `@...`) one or more of the maintainers
|
addressing (with `@...`) the maintainer of this repository (see
|
||||||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||||
|
|
||||||
* If you plan to do something more involved, first discuss your ideas
|
* If you plan to do something more involved, first discuss your ideas
|
||||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||||
|
|
1
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
* Tobias Schmidt <tobidt@gmail.com>
|
16
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
16
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
|
@ -1,6 +1,18 @@
|
||||||
ci:
|
ci: fmt lint test
|
||||||
|
|
||||||
|
fmt:
|
||||||
! gofmt -l *.go | read nothing
|
! gofmt -l *.go | read nothing
|
||||||
go vet
|
go vet
|
||||||
go test -v ./...
|
|
||||||
|
lint:
|
||||||
go get github.com/golang/lint/golint
|
go get github.com/golang/lint/golint
|
||||||
golint *.go
|
golint *.go
|
||||||
|
|
||||||
|
test: sysfs/fixtures/.unpacked
|
||||||
|
go test -v ./...
|
||||||
|
|
||||||
|
sysfs/fixtures/.unpacked: sysfs/fixtures.ttar
|
||||||
|
./ttar -C sysfs -x -f sysfs/fixtures.ttar
|
||||||
|
touch $@
|
||||||
|
|
||||||
|
.PHONY: fmt lint test ci
|
||||||
|
|
1
vendor/github.com/prometheus/procfs/README.md
generated
vendored
1
vendor/github.com/prometheus/procfs/README.md
generated
vendored
|
@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk.
|
||||||
|
|
||||||
[](https://godoc.org/github.com/prometheus/procfs)
|
[](https://godoc.org/github.com/prometheus/procfs)
|
||||||
[](https://travis-ci.org/prometheus/procfs)
|
[](https://travis-ci.org/prometheus/procfs)
|
||||||
|
[](https://goreportcard.com/report/github.com/prometheus/procfs)
|
||||||
|
|
95
vendor/github.com/prometheus/procfs/buddyinfo.go
generated
vendored
Normal file
95
vendor/github.com/prometheus/procfs/buddyinfo.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A BuddyInfo is the details parsed from /proc/buddyinfo.
|
||||||
|
// The data is comprised of an array of free fragments of each size.
|
||||||
|
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
|
||||||
|
type BuddyInfo struct {
|
||||||
|
Node string
|
||||||
|
Zone string
|
||||||
|
Sizes []float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuddyInfo reads the buddyinfo statistics.
|
||||||
|
func NewBuddyInfo() ([]BuddyInfo, error) {
|
||||||
|
fs, err := NewFS(DefaultMountPoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.NewBuddyInfo()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
|
||||||
|
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
|
||||||
|
file, err := os.Open(fs.Path("buddyinfo"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
return parseBuddyInfo(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
||||||
|
var (
|
||||||
|
buddyInfo = []BuddyInfo{}
|
||||||
|
scanner = bufio.NewScanner(r)
|
||||||
|
bucketCount = -1
|
||||||
|
)
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
var err error
|
||||||
|
line := scanner.Text()
|
||||||
|
parts := strings.Fields(string(line))
|
||||||
|
|
||||||
|
if len(parts) < 4 {
|
||||||
|
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
|
||||||
|
}
|
||||||
|
|
||||||
|
node := strings.TrimRight(parts[1], ",")
|
||||||
|
zone := strings.TrimRight(parts[3], ",")
|
||||||
|
arraySize := len(parts[4:])
|
||||||
|
|
||||||
|
if bucketCount == -1 {
|
||||||
|
bucketCount = arraySize
|
||||||
|
} else {
|
||||||
|
if bucketCount != arraySize {
|
||||||
|
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sizes := make([]float64, arraySize)
|
||||||
|
for i := 0; i < arraySize; i++ {
|
||||||
|
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
|
||||||
|
}
|
||||||
|
|
||||||
|
return buddyInfo, scanner.Err()
|
||||||
|
}
|
13
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
13
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
|
@ -4,6 +4,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/xfs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FS represents the pseudo-filesystem proc, which provides an interface to
|
// FS represents the pseudo-filesystem proc, which provides an interface to
|
||||||
|
@ -31,3 +33,14 @@ func NewFS(mountPoint string) (FS, error) {
|
||||||
func (fs FS) Path(p ...string) string {
|
func (fs FS) Path(p ...string) string {
|
||||||
return path.Join(append([]string{string(fs)}, p...)...)
|
return path.Join(append([]string{string(fs)}, p...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// XFSStats retrieves XFS filesystem runtime statistics.
|
||||||
|
func (fs FS) XFSStats() (*xfs.Stats, error) {
|
||||||
|
f, err := os.Open(fs.Path("fs/xfs/stat"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
return xfs.ParseStats(f)
|
||||||
|
}
|
||||||
|
|
44
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
44
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
|
@ -33,6 +33,8 @@ type IPVSBackendStatus struct {
|
||||||
LocalAddress net.IP
|
LocalAddress net.IP
|
||||||
// The local (virtual) port.
|
// The local (virtual) port.
|
||||||
LocalPort uint16
|
LocalPort uint16
|
||||||
|
// The local firewall mark
|
||||||
|
LocalMark string
|
||||||
// The transport protocol (TCP, UDP).
|
// The transport protocol (TCP, UDP).
|
||||||
Proto string
|
Proto string
|
||||||
// The remote (real) IP address.
|
// The remote (real) IP address.
|
||||||
|
@ -142,6 +144,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||||
status []IPVSBackendStatus
|
status []IPVSBackendStatus
|
||||||
scanner = bufio.NewScanner(file)
|
scanner = bufio.NewScanner(file)
|
||||||
proto string
|
proto string
|
||||||
|
localMark string
|
||||||
localAddress net.IP
|
localAddress net.IP
|
||||||
localPort uint16
|
localPort uint16
|
||||||
err error
|
err error
|
||||||
|
@ -160,10 +163,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
proto = fields[0]
|
proto = fields[0]
|
||||||
|
localMark = ""
|
||||||
localAddress, localPort, err = parseIPPort(fields[1])
|
localAddress, localPort, err = parseIPPort(fields[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
case fields[0] == "FWM":
|
||||||
|
if len(fields) < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
proto = fields[0]
|
||||||
|
localMark = fields[1]
|
||||||
|
localAddress = nil
|
||||||
|
localPort = 0
|
||||||
case fields[0] == "->":
|
case fields[0] == "->":
|
||||||
if len(fields) < 6 {
|
if len(fields) < 6 {
|
||||||
continue
|
continue
|
||||||
|
@ -187,6 +199,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||||
status = append(status, IPVSBackendStatus{
|
status = append(status, IPVSBackendStatus{
|
||||||
LocalAddress: localAddress,
|
LocalAddress: localAddress,
|
||||||
LocalPort: localPort,
|
LocalPort: localPort,
|
||||||
|
LocalMark: localMark,
|
||||||
RemoteAddress: remoteAddress,
|
RemoteAddress: remoteAddress,
|
||||||
RemotePort: remotePort,
|
RemotePort: remotePort,
|
||||||
Proto: proto,
|
Proto: proto,
|
||||||
|
@ -200,22 +213,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseIPPort(s string) (net.IP, uint16, error) {
|
func parseIPPort(s string) (net.IP, uint16, error) {
|
||||||
tmp := strings.SplitN(s, ":", 2)
|
var (
|
||||||
|
ip net.IP
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
if len(tmp) != 2 {
|
switch len(s) {
|
||||||
return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
|
case 13:
|
||||||
}
|
ip, err = hex.DecodeString(s[0:8])
|
||||||
|
|
||||||
if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
|
|
||||||
return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, err := hex.DecodeString(tmp[0])
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
case 46:
|
||||||
|
ip = net.ParseIP(s[1:40])
|
||||||
|
if ip == nil {
|
||||||
|
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
port, err := strconv.ParseUint(tmp[1], 16, 16)
|
portString := s[len(s)-4:]
|
||||||
|
if len(portString) != 4 {
|
||||||
|
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
|
||||||
|
}
|
||||||
|
port, err := strconv.ParseUint(portString, 16, 16)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
556
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
Normal file
556
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
Normal file
|
@ -0,0 +1,556 @@
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
|
||||||
|
// heavily as a reference:
|
||||||
|
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
|
||||||
|
//
|
||||||
|
// Special thanks to Chris Siebenmann for all of his posts explaining the
|
||||||
|
// various statistics available for NFS.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Constants shared between multiple functions.
|
||||||
|
const (
|
||||||
|
deviceEntryLen = 8
|
||||||
|
|
||||||
|
fieldBytesLen = 8
|
||||||
|
fieldEventsLen = 27
|
||||||
|
|
||||||
|
statVersion10 = "1.0"
|
||||||
|
statVersion11 = "1.1"
|
||||||
|
|
||||||
|
fieldTransport10Len = 10
|
||||||
|
fieldTransport11Len = 13
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
|
||||||
|
type Mount struct {
|
||||||
|
// Name of the device.
|
||||||
|
Device string
|
||||||
|
// The mount point of the device.
|
||||||
|
Mount string
|
||||||
|
// The filesystem type used by the device.
|
||||||
|
Type string
|
||||||
|
// If available additional statistics related to this Mount.
|
||||||
|
// Use a type assertion to determine if additional statistics are available.
|
||||||
|
Stats MountStats
|
||||||
|
}
|
||||||
|
|
||||||
|
// A MountStats is a type which contains detailed statistics for a specific
|
||||||
|
// type of Mount.
|
||||||
|
type MountStats interface {
|
||||||
|
mountStats()
|
||||||
|
}
|
||||||
|
|
||||||
|
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
|
||||||
|
type MountStatsNFS struct {
|
||||||
|
// The version of statistics provided.
|
||||||
|
StatVersion string
|
||||||
|
// The age of the NFS mount.
|
||||||
|
Age time.Duration
|
||||||
|
// Statistics related to byte counters for various operations.
|
||||||
|
Bytes NFSBytesStats
|
||||||
|
// Statistics related to various NFS event occurrences.
|
||||||
|
Events NFSEventsStats
|
||||||
|
// Statistics broken down by filesystem operation.
|
||||||
|
Operations []NFSOperationStats
|
||||||
|
// Statistics about the NFS RPC transport.
|
||||||
|
Transport NFSTransportStats
|
||||||
|
}
|
||||||
|
|
||||||
|
// mountStats implements MountStats.
|
||||||
|
func (m MountStatsNFS) mountStats() {}
|
||||||
|
|
||||||
|
// A NFSBytesStats contains statistics about the number of bytes read and written
|
||||||
|
// by an NFS client to and from an NFS server.
|
||||||
|
type NFSBytesStats struct {
|
||||||
|
// Number of bytes read using the read() syscall.
|
||||||
|
Read uint64
|
||||||
|
// Number of bytes written using the write() syscall.
|
||||||
|
Write uint64
|
||||||
|
// Number of bytes read using the read() syscall in O_DIRECT mode.
|
||||||
|
DirectRead uint64
|
||||||
|
// Number of bytes written using the write() syscall in O_DIRECT mode.
|
||||||
|
DirectWrite uint64
|
||||||
|
// Number of bytes read from the NFS server, in total.
|
||||||
|
ReadTotal uint64
|
||||||
|
// Number of bytes written to the NFS server, in total.
|
||||||
|
WriteTotal uint64
|
||||||
|
// Number of pages read directly via mmap()'d files.
|
||||||
|
ReadPages uint64
|
||||||
|
// Number of pages written directly via mmap()'d files.
|
||||||
|
WritePages uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// A NFSEventsStats contains statistics about NFS event occurrences.
|
||||||
|
type NFSEventsStats struct {
|
||||||
|
// Number of times cached inode attributes are re-validated from the server.
|
||||||
|
InodeRevalidate uint64
|
||||||
|
// Number of times cached dentry nodes are re-validated from the server.
|
||||||
|
DnodeRevalidate uint64
|
||||||
|
// Number of times an inode cache is cleared.
|
||||||
|
DataInvalidate uint64
|
||||||
|
// Number of times cached inode attributes are invalidated.
|
||||||
|
AttributeInvalidate uint64
|
||||||
|
// Number of times files or directories have been open()'d.
|
||||||
|
VFSOpen uint64
|
||||||
|
// Number of times a directory lookup has occurred.
|
||||||
|
VFSLookup uint64
|
||||||
|
// Number of times permissions have been checked.
|
||||||
|
VFSAccess uint64
|
||||||
|
// Number of updates (and potential writes) to pages.
|
||||||
|
VFSUpdatePage uint64
|
||||||
|
// Number of pages read directly via mmap()'d files.
|
||||||
|
VFSReadPage uint64
|
||||||
|
// Number of times a group of pages have been read.
|
||||||
|
VFSReadPages uint64
|
||||||
|
// Number of pages written directly via mmap()'d files.
|
||||||
|
VFSWritePage uint64
|
||||||
|
// Number of times a group of pages have been written.
|
||||||
|
VFSWritePages uint64
|
||||||
|
// Number of times directory entries have been read with getdents().
|
||||||
|
VFSGetdents uint64
|
||||||
|
// Number of times attributes have been set on inodes.
|
||||||
|
VFSSetattr uint64
|
||||||
|
// Number of pending writes that have been forcefully flushed to the server.
|
||||||
|
VFSFlush uint64
|
||||||
|
// Number of times fsync() has been called on directories and files.
|
||||||
|
VFSFsync uint64
|
||||||
|
// Number of times locking has been attempted on a file.
|
||||||
|
VFSLock uint64
|
||||||
|
// Number of times files have been closed and released.
|
||||||
|
VFSFileRelease uint64
|
||||||
|
// Unknown. Possibly unused.
|
||||||
|
CongestionWait uint64
|
||||||
|
// Number of times files have been truncated.
|
||||||
|
Truncation uint64
|
||||||
|
// Number of times a file has been grown due to writes beyond its existing end.
|
||||||
|
WriteExtension uint64
|
||||||
|
// Number of times a file was removed while still open by another process.
|
||||||
|
SillyRename uint64
|
||||||
|
// Number of times the NFS server gave less data than expected while reading.
|
||||||
|
ShortRead uint64
|
||||||
|
// Number of times the NFS server wrote less data than expected while writing.
|
||||||
|
ShortWrite uint64
|
||||||
|
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
|
||||||
|
// offline storage.
|
||||||
|
JukeboxDelay uint64
|
||||||
|
// Number of NFS v4.1+ pNFS reads.
|
||||||
|
PNFSRead uint64
|
||||||
|
// Number of NFS v4.1+ pNFS writes.
|
||||||
|
PNFSWrite uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// A NFSOperationStats contains statistics for a single operation.
|
||||||
|
type NFSOperationStats struct {
|
||||||
|
// The name of the operation.
|
||||||
|
Operation string
|
||||||
|
// Number of requests performed for this operation.
|
||||||
|
Requests uint64
|
||||||
|
// Number of times an actual RPC request has been transmitted for this operation.
|
||||||
|
Transmissions uint64
|
||||||
|
// Number of times a request has had a major timeout.
|
||||||
|
MajorTimeouts uint64
|
||||||
|
// Number of bytes sent for this operation, including RPC headers and payload.
|
||||||
|
BytesSent uint64
|
||||||
|
// Number of bytes received for this operation, including RPC headers and payload.
|
||||||
|
BytesReceived uint64
|
||||||
|
// Duration all requests spent queued for transmission before they were sent.
|
||||||
|
CumulativeQueueTime time.Duration
|
||||||
|
// Duration it took to get a reply back after the request was transmitted.
|
||||||
|
CumulativeTotalResponseTime time.Duration
|
||||||
|
// Duration from when a request was enqueued to when it was completely handled.
|
||||||
|
CumulativeTotalRequestTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
|
||||||
|
// responses.
|
||||||
|
type NFSTransportStats struct {
|
||||||
|
// The local port used for the NFS mount.
|
||||||
|
Port uint64
|
||||||
|
// Number of times the client has had to establish a connection from scratch
|
||||||
|
// to the NFS server.
|
||||||
|
Bind uint64
|
||||||
|
// Number of times the client has made a TCP connection to the NFS server.
|
||||||
|
Connect uint64
|
||||||
|
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
|
||||||
|
// spent waiting for connections to the server to be established.
|
||||||
|
ConnectIdleTime uint64
|
||||||
|
// Duration since the NFS mount last saw any RPC traffic.
|
||||||
|
IdleTime time.Duration
|
||||||
|
// Number of RPC requests for this mount sent to the NFS server.
|
||||||
|
Sends uint64
|
||||||
|
// Number of RPC responses for this mount received from the NFS server.
|
||||||
|
Receives uint64
|
||||||
|
// Number of times the NFS server sent a response with a transaction ID
|
||||||
|
// unknown to this client.
|
||||||
|
BadTransactionIDs uint64
|
||||||
|
// A running counter, incremented on each request as the current difference
|
||||||
|
// ebetween sends and receives.
|
||||||
|
CumulativeActiveRequests uint64
|
||||||
|
// A running counter, incremented on each request by the current backlog
|
||||||
|
// queue size.
|
||||||
|
CumulativeBacklog uint64
|
||||||
|
|
||||||
|
// Stats below only available with stat version 1.1.
|
||||||
|
|
||||||
|
// Maximum number of simultaneously active RPC requests ever used.
|
||||||
|
MaximumRPCSlotsUsed uint64
|
||||||
|
// A running counter, incremented on each request as the current size of the
|
||||||
|
// sending queue.
|
||||||
|
CumulativeSendingQueue uint64
|
||||||
|
// A running counter, incremented on each request as the current size of the
|
||||||
|
// pending queue.
|
||||||
|
CumulativePendingQueue uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
|
||||||
|
// of Mount structures containing detailed information about each mount.
|
||||||
|
// If available, statistics for each mount are parsed as well.
|
||||||
|
func parseMountStats(r io.Reader) ([]*Mount, error) {
|
||||||
|
const (
|
||||||
|
device = "device"
|
||||||
|
statVersionPrefix = "statvers="
|
||||||
|
|
||||||
|
nfs3Type = "nfs"
|
||||||
|
nfs4Type = "nfs4"
|
||||||
|
)
|
||||||
|
|
||||||
|
var mounts []*Mount
|
||||||
|
|
||||||
|
s := bufio.NewScanner(r)
|
||||||
|
for s.Scan() {
|
||||||
|
// Only look for device entries in this function
|
||||||
|
ss := strings.Fields(string(s.Bytes()))
|
||||||
|
if len(ss) == 0 || ss[0] != device {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := parseMount(ss)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Does this mount also possess statistics information?
|
||||||
|
if len(ss) > deviceEntryLen {
|
||||||
|
// Only NFSv3 and v4 are supported for parsing statistics
|
||||||
|
if m.Type != nfs3Type && m.Type != nfs4Type {
|
||||||
|
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
|
||||||
|
|
||||||
|
stats, err := parseMountStatsNFS(s, statVersion)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Stats = stats
|
||||||
|
}
|
||||||
|
|
||||||
|
mounts = append(mounts, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
return mounts, s.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
|
||||||
|
// device [device] mounted on [mount] with fstype [type]
|
||||||
|
func parseMount(ss []string) (*Mount, error) {
|
||||||
|
if len(ss) < deviceEntryLen {
|
||||||
|
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for specific words appearing at specific indices to ensure
|
||||||
|
// the format is consistent with what we expect
|
||||||
|
format := []struct {
|
||||||
|
i int
|
||||||
|
s string
|
||||||
|
}{
|
||||||
|
{i: 0, s: "device"},
|
||||||
|
{i: 2, s: "mounted"},
|
||||||
|
{i: 3, s: "on"},
|
||||||
|
{i: 5, s: "with"},
|
||||||
|
{i: 6, s: "fstype"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range format {
|
||||||
|
if ss[f.i] != f.s {
|
||||||
|
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Mount{
|
||||||
|
Device: ss[1],
|
||||||
|
Mount: ss[4],
|
||||||
|
Type: ss[7],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
|
||||||
|
// related to NFS statistics.
|
||||||
|
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
|
||||||
|
// Field indicators for parsing specific types of data
|
||||||
|
const (
|
||||||
|
fieldAge = "age:"
|
||||||
|
fieldBytes = "bytes:"
|
||||||
|
fieldEvents = "events:"
|
||||||
|
fieldPerOpStats = "per-op"
|
||||||
|
fieldTransport = "xprt:"
|
||||||
|
)
|
||||||
|
|
||||||
|
stats := &MountStatsNFS{
|
||||||
|
StatVersion: statVersion,
|
||||||
|
}
|
||||||
|
|
||||||
|
for s.Scan() {
|
||||||
|
ss := strings.Fields(string(s.Bytes()))
|
||||||
|
if len(ss) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if len(ss) < 2 {
|
||||||
|
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ss[0] {
|
||||||
|
case fieldAge:
|
||||||
|
// Age integer is in seconds
|
||||||
|
d, err := time.ParseDuration(ss[1] + "s")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Age = d
|
||||||
|
case fieldBytes:
|
||||||
|
bstats, err := parseNFSBytesStats(ss[1:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Bytes = *bstats
|
||||||
|
case fieldEvents:
|
||||||
|
estats, err := parseNFSEventsStats(ss[1:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Events = *estats
|
||||||
|
case fieldTransport:
|
||||||
|
if len(ss) < 3 {
|
||||||
|
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Transport = *tstats
|
||||||
|
}
|
||||||
|
|
||||||
|
// When encountering "per-operation statistics", we must break this
|
||||||
|
// loop and parse them separately to ensure we can terminate parsing
|
||||||
|
// before reaching another device entry; hence why this 'if' statement
|
||||||
|
// is not just another switch case
|
||||||
|
if ss[0] == fieldPerOpStats {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// NFS per-operation stats appear last before the next device entry
|
||||||
|
perOpStats, err := parseNFSOperationStats(s)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.Operations = perOpStats
|
||||||
|
|
||||||
|
return stats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
|
||||||
|
// integer fields.
|
||||||
|
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
|
||||||
|
if len(ss) != fieldBytesLen {
|
||||||
|
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
ns := make([]uint64, 0, fieldBytesLen)
|
||||||
|
for _, s := range ss {
|
||||||
|
n, err := strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ns = append(ns, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &NFSBytesStats{
|
||||||
|
Read: ns[0],
|
||||||
|
Write: ns[1],
|
||||||
|
DirectRead: ns[2],
|
||||||
|
DirectWrite: ns[3],
|
||||||
|
ReadTotal: ns[4],
|
||||||
|
WriteTotal: ns[5],
|
||||||
|
ReadPages: ns[6],
|
||||||
|
WritePages: ns[7],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
|
||||||
|
// integer fields.
|
||||||
|
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
|
||||||
|
if len(ss) != fieldEventsLen {
|
||||||
|
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
ns := make([]uint64, 0, fieldEventsLen)
|
||||||
|
for _, s := range ss {
|
||||||
|
n, err := strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ns = append(ns, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &NFSEventsStats{
|
||||||
|
InodeRevalidate: ns[0],
|
||||||
|
DnodeRevalidate: ns[1],
|
||||||
|
DataInvalidate: ns[2],
|
||||||
|
AttributeInvalidate: ns[3],
|
||||||
|
VFSOpen: ns[4],
|
||||||
|
VFSLookup: ns[5],
|
||||||
|
VFSAccess: ns[6],
|
||||||
|
VFSUpdatePage: ns[7],
|
||||||
|
VFSReadPage: ns[8],
|
||||||
|
VFSReadPages: ns[9],
|
||||||
|
VFSWritePage: ns[10],
|
||||||
|
VFSWritePages: ns[11],
|
||||||
|
VFSGetdents: ns[12],
|
||||||
|
VFSSetattr: ns[13],
|
||||||
|
VFSFlush: ns[14],
|
||||||
|
VFSFsync: ns[15],
|
||||||
|
VFSLock: ns[16],
|
||||||
|
VFSFileRelease: ns[17],
|
||||||
|
CongestionWait: ns[18],
|
||||||
|
Truncation: ns[19],
|
||||||
|
WriteExtension: ns[20],
|
||||||
|
SillyRename: ns[21],
|
||||||
|
ShortRead: ns[22],
|
||||||
|
ShortWrite: ns[23],
|
||||||
|
JukeboxDelay: ns[24],
|
||||||
|
PNFSRead: ns[25],
|
||||||
|
PNFSWrite: ns[26],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
|
||||||
|
// additional information about per-operation statistics until an empty
|
||||||
|
// line is reached.
|
||||||
|
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||||
|
const (
|
||||||
|
// Number of expected fields in each per-operation statistics set
|
||||||
|
numFields = 9
|
||||||
|
)
|
||||||
|
|
||||||
|
var ops []NFSOperationStats
|
||||||
|
|
||||||
|
for s.Scan() {
|
||||||
|
ss := strings.Fields(string(s.Bytes()))
|
||||||
|
if len(ss) == 0 {
|
||||||
|
// Must break when reading a blank line after per-operation stats to
|
||||||
|
// enable top-level function to parse the next device entry
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ss) != numFields {
|
||||||
|
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip string operation name for integers
|
||||||
|
ns := make([]uint64, 0, numFields-1)
|
||||||
|
for _, st := range ss[1:] {
|
||||||
|
n, err := strconv.ParseUint(st, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ns = append(ns, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
ops = append(ops, NFSOperationStats{
|
||||||
|
Operation: strings.TrimSuffix(ss[0], ":"),
|
||||||
|
Requests: ns[0],
|
||||||
|
Transmissions: ns[1],
|
||||||
|
MajorTimeouts: ns[2],
|
||||||
|
BytesSent: ns[3],
|
||||||
|
BytesReceived: ns[4],
|
||||||
|
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
|
||||||
|
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
|
||||||
|
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return ops, s.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
|
||||||
|
// integer fields matched to a specific stats version.
|
||||||
|
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
|
||||||
|
switch statVersion {
|
||||||
|
case statVersion10:
|
||||||
|
if len(ss) != fieldTransport10Len {
|
||||||
|
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
|
||||||
|
}
|
||||||
|
case statVersion11:
|
||||||
|
if len(ss) != fieldTransport11Len {
|
||||||
|
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
||||||
|
// in a v1.0 response.
|
||||||
|
//
|
||||||
|
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
|
||||||
|
// only v1.0 stats are present.
|
||||||
|
// See: https://github.com/prometheus/node_exporter/issues/571.
|
||||||
|
ns := make([]uint64, fieldTransport11Len)
|
||||||
|
for i, s := range ss {
|
||||||
|
n, err := strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ns[i] = n
|
||||||
|
}
|
||||||
|
|
||||||
|
return &NFSTransportStats{
|
||||||
|
Port: ns[0],
|
||||||
|
Bind: ns[1],
|
||||||
|
Connect: ns[2],
|
||||||
|
ConnectIdleTime: ns[3],
|
||||||
|
IdleTime: time.Duration(ns[4]) * time.Second,
|
||||||
|
Sends: ns[5],
|
||||||
|
Receives: ns[6],
|
||||||
|
BadTransactionIDs: ns[7],
|
||||||
|
CumulativeActiveRequests: ns[8],
|
||||||
|
CumulativeBacklog: ns[9],
|
||||||
|
MaximumRPCSlotsUsed: ns[10],
|
||||||
|
CumulativeSendingQueue: ns[11],
|
||||||
|
CumulativePendingQueue: ns[12],
|
||||||
|
}, nil
|
||||||
|
}
|
12
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
12
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
|
@ -192,6 +192,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
|
||||||
return len(fds), nil
|
return len(fds), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MountStats retrieves statistics and configuration for mount points in a
|
||||||
|
// process's namespace.
|
||||||
|
func (p Proc) MountStats() ([]*Mount, error) {
|
||||||
|
f, err := os.Open(p.path("mountstats"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
return parseMountStats(f)
|
||||||
|
}
|
||||||
|
|
||||||
func (p Proc) fileDescriptors() ([]string, error) {
|
func (p Proc) fileDescriptors() ([]string, error) {
|
||||||
d, err := os.Open(p.path("fd"))
|
d, err := os.Open(p.path("fd"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
189
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
189
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
|
@ -3,15 +3,66 @@ package procfs
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// CPUStat shows how much time the cpu spend in various stages.
|
||||||
|
type CPUStat struct {
|
||||||
|
User float64
|
||||||
|
Nice float64
|
||||||
|
System float64
|
||||||
|
Idle float64
|
||||||
|
Iowait float64
|
||||||
|
IRQ float64
|
||||||
|
SoftIRQ float64
|
||||||
|
Steal float64
|
||||||
|
Guest float64
|
||||||
|
GuestNice float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
|
||||||
|
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
|
||||||
|
// It is possible to get per-cpu stats by reading /proc/softirqs
|
||||||
|
type SoftIRQStat struct {
|
||||||
|
Hi uint64
|
||||||
|
Timer uint64
|
||||||
|
NetTx uint64
|
||||||
|
NetRx uint64
|
||||||
|
Block uint64
|
||||||
|
BlockIoPoll uint64
|
||||||
|
Tasklet uint64
|
||||||
|
Sched uint64
|
||||||
|
Hrtimer uint64
|
||||||
|
Rcu uint64
|
||||||
|
}
|
||||||
|
|
||||||
// Stat represents kernel/system statistics.
|
// Stat represents kernel/system statistics.
|
||||||
type Stat struct {
|
type Stat struct {
|
||||||
// Boot time in seconds since the Epoch.
|
// Boot time in seconds since the Epoch.
|
||||||
BootTime int64
|
BootTime uint64
|
||||||
|
// Summed up cpu statistics.
|
||||||
|
CPUTotal CPUStat
|
||||||
|
// Per-CPU statistics.
|
||||||
|
CPU []CPUStat
|
||||||
|
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
|
||||||
|
IRQTotal uint64
|
||||||
|
// Number of times a numbered IRQ was triggered.
|
||||||
|
IRQ []uint64
|
||||||
|
// Number of times a context switch happened.
|
||||||
|
ContextSwitches uint64
|
||||||
|
// Number of times a process was created.
|
||||||
|
ProcessCreated uint64
|
||||||
|
// Number of processes currently running.
|
||||||
|
ProcessesRunning uint64
|
||||||
|
// Number of processes currently blocked (waiting for IO).
|
||||||
|
ProcessesBlocked uint64
|
||||||
|
// Number of times a softirq was scheduled.
|
||||||
|
SoftIRQTotal uint64
|
||||||
|
// Detailed softirq statistics.
|
||||||
|
SoftIRQ SoftIRQStat
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStat returns kernel/system statistics read from /proc/stat.
|
// NewStat returns kernel/system statistics read from /proc/stat.
|
||||||
|
@ -24,33 +75,145 @@ func NewStat() (Stat, error) {
|
||||||
return fs.NewStat()
|
return fs.NewStat()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
|
||||||
|
func parseCPUStat(line string) (CPUStat, int64, error) {
|
||||||
|
cpuStat := CPUStat{}
|
||||||
|
var cpu string
|
||||||
|
|
||||||
|
count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
|
||||||
|
&cpu,
|
||||||
|
&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
|
||||||
|
&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
|
||||||
|
&cpuStat.Guest, &cpuStat.GuestNice)
|
||||||
|
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
|
||||||
|
}
|
||||||
|
if count == 0 {
|
||||||
|
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
cpuStat.User /= userHZ
|
||||||
|
cpuStat.Nice /= userHZ
|
||||||
|
cpuStat.System /= userHZ
|
||||||
|
cpuStat.Idle /= userHZ
|
||||||
|
cpuStat.Iowait /= userHZ
|
||||||
|
cpuStat.IRQ /= userHZ
|
||||||
|
cpuStat.SoftIRQ /= userHZ
|
||||||
|
cpuStat.Steal /= userHZ
|
||||||
|
cpuStat.Guest /= userHZ
|
||||||
|
cpuStat.GuestNice /= userHZ
|
||||||
|
|
||||||
|
if cpu == "cpu" {
|
||||||
|
return cpuStat, -1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cpuStat, cpuID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse a softirq line.
|
||||||
|
func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
|
||||||
|
softIRQStat := SoftIRQStat{}
|
||||||
|
var total uint64
|
||||||
|
var prefix string
|
||||||
|
|
||||||
|
_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
|
||||||
|
&prefix, &total,
|
||||||
|
&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
|
||||||
|
&softIRQStat.Block, &softIRQStat.BlockIoPoll,
|
||||||
|
&softIRQStat.Tasklet, &softIRQStat.Sched,
|
||||||
|
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return softIRQStat, total, nil
|
||||||
|
}
|
||||||
|
|
||||||
// NewStat returns an information about current kernel/system statistics.
|
// NewStat returns an information about current kernel/system statistics.
|
||||||
func (fs FS) NewStat() (Stat, error) {
|
func (fs FS) NewStat() (Stat, error) {
|
||||||
|
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
|
||||||
|
|
||||||
f, err := os.Open(fs.Path("stat"))
|
f, err := os.Open(fs.Path("stat"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, err
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
s := bufio.NewScanner(f)
|
stat := Stat{}
|
||||||
for s.Scan() {
|
|
||||||
line := s.Text()
|
scanner := bufio.NewScanner(f)
|
||||||
if !strings.HasPrefix(line, "btime") {
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
parts := strings.Fields(scanner.Text())
|
||||||
|
// require at least <key> <value>
|
||||||
|
if len(parts) < 2 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fields := strings.Fields(line)
|
switch {
|
||||||
if len(fields) != 2 {
|
case parts[0] == "btime":
|
||||||
return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
|
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
|
||||||
}
|
}
|
||||||
i, err := strconv.ParseInt(fields[1], 10, 32)
|
case parts[0] == "intr":
|
||||||
|
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
|
||||||
|
}
|
||||||
|
numberedIRQs := parts[2:]
|
||||||
|
stat.IRQ = make([]uint64, len(numberedIRQs))
|
||||||
|
for i, count := range numberedIRQs {
|
||||||
|
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case parts[0] == "ctxt":
|
||||||
|
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
|
||||||
|
}
|
||||||
|
case parts[0] == "processes":
|
||||||
|
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
|
||||||
|
}
|
||||||
|
case parts[0] == "procs_running":
|
||||||
|
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
|
||||||
|
}
|
||||||
|
case parts[0] == "procs_blocked":
|
||||||
|
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
|
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
|
||||||
|
}
|
||||||
|
case parts[0] == "softirq":
|
||||||
|
softIRQStats, total, err := parseSoftIRQStat(line)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
return Stat{BootTime: i}, nil
|
stat.SoftIRQTotal = total
|
||||||
|
stat.SoftIRQ = softIRQStats
|
||||||
|
case strings.HasPrefix(parts[0], "cpu"):
|
||||||
|
cpuStat, cpuID, err := parseCPUStat(line)
|
||||||
|
if err != nil {
|
||||||
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
if err := s.Err(); err != nil {
|
if cpuID == -1 {
|
||||||
|
stat.CPUTotal = cpuStat
|
||||||
|
} else {
|
||||||
|
for int64(len(stat.CPU)) <= cpuID {
|
||||||
|
stat.CPU = append(stat.CPU, CPUStat{})
|
||||||
|
}
|
||||||
|
stat.CPU[cpuID] = cpuStat
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
|
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
|
return stat, nil
|
||||||
}
|
}
|
||||||
|
|
264
vendor/github.com/prometheus/procfs/ttar
generated
vendored
Executable file
264
vendor/github.com/prometheus/procfs/ttar
generated
vendored
Executable file
|
@ -0,0 +1,264 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Purpose: plain text tar format
|
||||||
|
# Limitations: - only suitable for text files, directories, and symlinks
|
||||||
|
# - stores only filename, content, and mode
|
||||||
|
# - not designed for untrusted input
|
||||||
|
|
||||||
|
# Note: must work with bash version 3.2 (macOS)
|
||||||
|
|
||||||
|
set -o errexit -o nounset
|
||||||
|
|
||||||
|
# Sanitize environment (for instance, standard sorting of glob matches)
|
||||||
|
export LC_ALL=C
|
||||||
|
|
||||||
|
path=""
|
||||||
|
CMD=""
|
||||||
|
|
||||||
|
function usage {
|
||||||
|
bname=$(basename "$0")
|
||||||
|
cat << USAGE
|
||||||
|
Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
|
||||||
|
$bname -t -f <ARCHIVE> (list archive contents)
|
||||||
|
$bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
|
||||||
|
|
||||||
|
Options:
|
||||||
|
-C <DIR> (change directory)
|
||||||
|
|
||||||
|
Example: Change to sysfs directory, create ttar file from fixtures directory
|
||||||
|
$bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
|
||||||
|
USAGE
|
||||||
|
exit "$1"
|
||||||
|
}
|
||||||
|
|
||||||
|
function vecho {
|
||||||
|
if [ "${VERBOSE:-}" == "yes" ]; then
|
||||||
|
echo >&7 "$@"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function set_cmd {
|
||||||
|
if [ -n "$CMD" ]; then
|
||||||
|
echo "ERROR: more than one command given"
|
||||||
|
echo
|
||||||
|
usage 2
|
||||||
|
fi
|
||||||
|
CMD=$1
|
||||||
|
}
|
||||||
|
|
||||||
|
while getopts :cf:htxvC: opt; do
|
||||||
|
case $opt in
|
||||||
|
c)
|
||||||
|
set_cmd "create"
|
||||||
|
;;
|
||||||
|
f)
|
||||||
|
ARCHIVE=$OPTARG
|
||||||
|
;;
|
||||||
|
h)
|
||||||
|
usage 0
|
||||||
|
;;
|
||||||
|
t)
|
||||||
|
set_cmd "list"
|
||||||
|
;;
|
||||||
|
x)
|
||||||
|
set_cmd "extract"
|
||||||
|
;;
|
||||||
|
v)
|
||||||
|
VERBOSE=yes
|
||||||
|
exec 7>&1
|
||||||
|
;;
|
||||||
|
C)
|
||||||
|
CDIR=$OPTARG
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo >&2 "ERROR: invalid option -$OPTARG"
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Remove processed options from arguments
|
||||||
|
shift $(( OPTIND - 1 ));
|
||||||
|
|
||||||
|
if [ "${CMD:-}" == "" ]; then
|
||||||
|
echo >&2 "ERROR: no command given"
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
elif [ "${ARCHIVE:-}" == "" ]; then
|
||||||
|
echo >&2 "ERROR: no archive name given"
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
function list {
|
||||||
|
local path=""
|
||||||
|
local size=0
|
||||||
|
local line_no=0
|
||||||
|
local ttar_file=$1
|
||||||
|
if [ -n "${2:-}" ]; then
|
||||||
|
echo >&2 "ERROR: too many arguments."
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
fi
|
||||||
|
if [ ! -e "$ttar_file" ]; then
|
||||||
|
echo >&2 "ERROR: file not found ($ttar_file)"
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
fi
|
||||||
|
while read -r line; do
|
||||||
|
line_no=$(( line_no + 1 ))
|
||||||
|
if [ $size -gt 0 ]; then
|
||||||
|
size=$(( size - 1 ))
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
if [[ $line =~ ^Path:\ (.*)$ ]]; then
|
||||||
|
path=${BASH_REMATCH[1]}
|
||||||
|
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
|
||||||
|
size=${BASH_REMATCH[1]}
|
||||||
|
echo "$path"
|
||||||
|
elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
|
||||||
|
path=${BASH_REMATCH[1]}
|
||||||
|
echo "$path/"
|
||||||
|
elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
|
||||||
|
echo "$path -> ${BASH_REMATCH[1]}"
|
||||||
|
fi
|
||||||
|
done < "$ttar_file"
|
||||||
|
}
|
||||||
|
|
||||||
|
function extract {
|
||||||
|
local path=""
|
||||||
|
local size=0
|
||||||
|
local line_no=0
|
||||||
|
local ttar_file=$1
|
||||||
|
if [ -n "${2:-}" ]; then
|
||||||
|
echo >&2 "ERROR: too many arguments."
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
fi
|
||||||
|
if [ ! -e "$ttar_file" ]; then
|
||||||
|
echo >&2 "ERROR: file not found ($ttar_file)"
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
fi
|
||||||
|
while IFS= read -r line; do
|
||||||
|
line_no=$(( line_no + 1 ))
|
||||||
|
if [ "$size" -gt 0 ]; then
|
||||||
|
echo "$line" >> "$path"
|
||||||
|
size=$(( size - 1 ))
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
if [[ $line =~ ^Path:\ (.*)$ ]]; then
|
||||||
|
path=${BASH_REMATCH[1]}
|
||||||
|
if [ -e "$path" ] || [ -L "$path" ]; then
|
||||||
|
rm "$path"
|
||||||
|
fi
|
||||||
|
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
|
||||||
|
size=${BASH_REMATCH[1]}
|
||||||
|
# Create file even if it is zero-length.
|
||||||
|
touch "$path"
|
||||||
|
vecho " $path"
|
||||||
|
elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
|
||||||
|
mode=${BASH_REMATCH[1]}
|
||||||
|
chmod "$mode" "$path"
|
||||||
|
vecho "$mode"
|
||||||
|
elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
|
||||||
|
path=${BASH_REMATCH[1]}
|
||||||
|
mkdir -p "$path"
|
||||||
|
vecho " $path/"
|
||||||
|
elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
|
||||||
|
ln -s "${BASH_REMATCH[1]}" "$path"
|
||||||
|
vecho " $path -> ${BASH_REMATCH[1]}"
|
||||||
|
elif [[ $line =~ ^# ]]; then
|
||||||
|
# Ignore comments between files
|
||||||
|
continue
|
||||||
|
else
|
||||||
|
echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done < "$ttar_file"
|
||||||
|
}
|
||||||
|
|
||||||
|
function div {
|
||||||
|
echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
|
||||||
|
"- - - - - -"
|
||||||
|
}
|
||||||
|
|
||||||
|
function get_mode {
|
||||||
|
local mfile=$1
|
||||||
|
if [ -z "${STAT_OPTION:-}" ]; then
|
||||||
|
if stat -c '%a' "$mfile" >/dev/null 2>&1; then
|
||||||
|
STAT_OPTION='-c'
|
||||||
|
STAT_FORMAT='%a'
|
||||||
|
else
|
||||||
|
STAT_OPTION='-f'
|
||||||
|
STAT_FORMAT='%A'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
|
||||||
|
}
|
||||||
|
|
||||||
|
function _create {
|
||||||
|
shopt -s nullglob
|
||||||
|
local mode
|
||||||
|
while (( "$#" )); do
|
||||||
|
file=$1
|
||||||
|
if [ -L "$file" ]; then
|
||||||
|
echo "Path: $file"
|
||||||
|
symlinkTo=$(readlink "$file")
|
||||||
|
echo "SymlinkTo: $symlinkTo"
|
||||||
|
vecho " $file -> $symlinkTo"
|
||||||
|
div
|
||||||
|
elif [ -d "$file" ]; then
|
||||||
|
# Strip trailing slash (if there is one)
|
||||||
|
file=${file%/}
|
||||||
|
echo "Directory: $file"
|
||||||
|
mode=$(get_mode "$file")
|
||||||
|
echo "Mode: $mode"
|
||||||
|
vecho "$mode $file/"
|
||||||
|
div
|
||||||
|
# Find all files and dirs, including hidden/dot files
|
||||||
|
for x in "$file/"{*,.[^.]*}; do
|
||||||
|
_create "$x"
|
||||||
|
done
|
||||||
|
elif [ -f "$file" ]; then
|
||||||
|
echo "Path: $file"
|
||||||
|
lines=$(wc -l "$file"|awk '{print $1}')
|
||||||
|
echo "Lines: $lines"
|
||||||
|
cat "$file"
|
||||||
|
mode=$(get_mode "$file")
|
||||||
|
echo "Mode: $mode"
|
||||||
|
vecho "$mode $file"
|
||||||
|
div
|
||||||
|
else
|
||||||
|
echo >&2 "ERROR: file not found ($file in $(pwd))"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
shift
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
function create {
|
||||||
|
ttar_file=$1
|
||||||
|
shift
|
||||||
|
if [ -z "${1:-}" ]; then
|
||||||
|
echo >&2 "ERROR: missing arguments."
|
||||||
|
echo
|
||||||
|
usage 1
|
||||||
|
fi
|
||||||
|
if [ -e "$ttar_file" ]; then
|
||||||
|
rm "$ttar_file"
|
||||||
|
fi
|
||||||
|
exec > "$ttar_file"
|
||||||
|
_create "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ -n "${CDIR:-}" ]; then
|
||||||
|
if [[ "$ARCHIVE" != /* ]]; then
|
||||||
|
# Relative path: preserve the archive's location before changing
|
||||||
|
# directory
|
||||||
|
ARCHIVE="$(pwd)/$ARCHIVE"
|
||||||
|
fi
|
||||||
|
cd "$CDIR"
|
||||||
|
fi
|
||||||
|
|
||||||
|
"$CMD" "$ARCHIVE" "$@"
|
187
vendor/github.com/prometheus/procfs/xfrm.go
generated
vendored
Normal file
187
vendor/github.com/prometheus/procfs/xfrm.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
||||||
|
// Copyright 2017 Prometheus Team
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// XfrmStat models the contents of /proc/net/xfrm_stat.
|
||||||
|
type XfrmStat struct {
|
||||||
|
// All errors which are not matched by other
|
||||||
|
XfrmInError int
|
||||||
|
// No buffer is left
|
||||||
|
XfrmInBufferError int
|
||||||
|
// Header Error
|
||||||
|
XfrmInHdrError int
|
||||||
|
// No state found
|
||||||
|
// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
|
||||||
|
XfrmInNoStates int
|
||||||
|
// Transformation protocol specific error
|
||||||
|
// e.g. SA Key is wrong
|
||||||
|
XfrmInStateProtoError int
|
||||||
|
// Transformation mode specific error
|
||||||
|
XfrmInStateModeError int
|
||||||
|
// Sequence error
|
||||||
|
// e.g. sequence number is out of window
|
||||||
|
XfrmInStateSeqError int
|
||||||
|
// State is expired
|
||||||
|
XfrmInStateExpired int
|
||||||
|
// State has mismatch option
|
||||||
|
// e.g. UDP encapsulation type is mismatched
|
||||||
|
XfrmInStateMismatch int
|
||||||
|
// State is invalid
|
||||||
|
XfrmInStateInvalid int
|
||||||
|
// No matching template for states
|
||||||
|
// e.g. Inbound SAs are correct but SP rule is wrong
|
||||||
|
XfrmInTmplMismatch int
|
||||||
|
// No policy is found for states
|
||||||
|
// e.g. Inbound SAs are correct but no SP is found
|
||||||
|
XfrmInNoPols int
|
||||||
|
// Policy discards
|
||||||
|
XfrmInPolBlock int
|
||||||
|
// Policy error
|
||||||
|
XfrmInPolError int
|
||||||
|
// All errors which are not matched by others
|
||||||
|
XfrmOutError int
|
||||||
|
// Bundle generation error
|
||||||
|
XfrmOutBundleGenError int
|
||||||
|
// Bundle check error
|
||||||
|
XfrmOutBundleCheckError int
|
||||||
|
// No state was found
|
||||||
|
XfrmOutNoStates int
|
||||||
|
// Transformation protocol specific error
|
||||||
|
XfrmOutStateProtoError int
|
||||||
|
// Transportation mode specific error
|
||||||
|
XfrmOutStateModeError int
|
||||||
|
// Sequence error
|
||||||
|
// i.e sequence number overflow
|
||||||
|
XfrmOutStateSeqError int
|
||||||
|
// State is expired
|
||||||
|
XfrmOutStateExpired int
|
||||||
|
// Policy discads
|
||||||
|
XfrmOutPolBlock int
|
||||||
|
// Policy is dead
|
||||||
|
XfrmOutPolDead int
|
||||||
|
// Policy Error
|
||||||
|
XfrmOutPolError int
|
||||||
|
XfrmFwdHdrError int
|
||||||
|
XfrmOutStateInvalid int
|
||||||
|
XfrmAcquireError int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewXfrmStat reads the xfrm_stat statistics.
|
||||||
|
func NewXfrmStat() (XfrmStat, error) {
|
||||||
|
fs, err := NewFS(DefaultMountPoint)
|
||||||
|
if err != nil {
|
||||||
|
return XfrmStat{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.NewXfrmStat()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
|
||||||
|
func (fs FS) NewXfrmStat() (XfrmStat, error) {
|
||||||
|
file, err := os.Open(fs.Path("net/xfrm_stat"))
|
||||||
|
if err != nil {
|
||||||
|
return XfrmStat{}, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
var (
|
||||||
|
x = XfrmStat{}
|
||||||
|
s = bufio.NewScanner(file)
|
||||||
|
)
|
||||||
|
|
||||||
|
for s.Scan() {
|
||||||
|
fields := strings.Fields(s.Text())
|
||||||
|
|
||||||
|
if len(fields) != 2 {
|
||||||
|
return XfrmStat{}, fmt.Errorf(
|
||||||
|
"couldnt parse %s line %s", file.Name(), s.Text())
|
||||||
|
}
|
||||||
|
|
||||||
|
name := fields[0]
|
||||||
|
value, err := strconv.Atoi(fields[1])
|
||||||
|
if err != nil {
|
||||||
|
return XfrmStat{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch name {
|
||||||
|
case "XfrmInError":
|
||||||
|
x.XfrmInError = value
|
||||||
|
case "XfrmInBufferError":
|
||||||
|
x.XfrmInBufferError = value
|
||||||
|
case "XfrmInHdrError":
|
||||||
|
x.XfrmInHdrError = value
|
||||||
|
case "XfrmInNoStates":
|
||||||
|
x.XfrmInNoStates = value
|
||||||
|
case "XfrmInStateProtoError":
|
||||||
|
x.XfrmInStateProtoError = value
|
||||||
|
case "XfrmInStateModeError":
|
||||||
|
x.XfrmInStateModeError = value
|
||||||
|
case "XfrmInStateSeqError":
|
||||||
|
x.XfrmInStateSeqError = value
|
||||||
|
case "XfrmInStateExpired":
|
||||||
|
x.XfrmInStateExpired = value
|
||||||
|
case "XfrmInStateInvalid":
|
||||||
|
x.XfrmInStateInvalid = value
|
||||||
|
case "XfrmInTmplMismatch":
|
||||||
|
x.XfrmInTmplMismatch = value
|
||||||
|
case "XfrmInNoPols":
|
||||||
|
x.XfrmInNoPols = value
|
||||||
|
case "XfrmInPolBlock":
|
||||||
|
x.XfrmInPolBlock = value
|
||||||
|
case "XfrmInPolError":
|
||||||
|
x.XfrmInPolError = value
|
||||||
|
case "XfrmOutError":
|
||||||
|
x.XfrmOutError = value
|
||||||
|
case "XfrmInStateMismatch":
|
||||||
|
x.XfrmInStateMismatch = value
|
||||||
|
case "XfrmOutBundleGenError":
|
||||||
|
x.XfrmOutBundleGenError = value
|
||||||
|
case "XfrmOutBundleCheckError":
|
||||||
|
x.XfrmOutBundleCheckError = value
|
||||||
|
case "XfrmOutNoStates":
|
||||||
|
x.XfrmOutNoStates = value
|
||||||
|
case "XfrmOutStateProtoError":
|
||||||
|
x.XfrmOutStateProtoError = value
|
||||||
|
case "XfrmOutStateModeError":
|
||||||
|
x.XfrmOutStateModeError = value
|
||||||
|
case "XfrmOutStateSeqError":
|
||||||
|
x.XfrmOutStateSeqError = value
|
||||||
|
case "XfrmOutStateExpired":
|
||||||
|
x.XfrmOutStateExpired = value
|
||||||
|
case "XfrmOutPolBlock":
|
||||||
|
x.XfrmOutPolBlock = value
|
||||||
|
case "XfrmOutPolDead":
|
||||||
|
x.XfrmOutPolDead = value
|
||||||
|
case "XfrmOutPolError":
|
||||||
|
x.XfrmOutPolError = value
|
||||||
|
case "XfrmFwdHdrError":
|
||||||
|
x.XfrmFwdHdrError = value
|
||||||
|
case "XfrmOutStateInvalid":
|
||||||
|
x.XfrmOutStateInvalid = value
|
||||||
|
case "XfrmAcquireError":
|
||||||
|
x.XfrmAcquireError = value
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return x, s.Err()
|
||||||
|
}
|
359
vendor/github.com/prometheus/procfs/xfs/parse.go
generated
vendored
Normal file
359
vendor/github.com/prometheus/procfs/xfs/parse.go
generated
vendored
Normal file
|
@ -0,0 +1,359 @@
|
||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package xfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseStats parses a Stats from an input io.Reader, using the format
|
||||||
|
// found in /proc/fs/xfs/stat.
|
||||||
|
func ParseStats(r io.Reader) (*Stats, error) {
|
||||||
|
const (
|
||||||
|
// Fields parsed into stats structures.
|
||||||
|
fieldExtentAlloc = "extent_alloc"
|
||||||
|
fieldAbt = "abt"
|
||||||
|
fieldBlkMap = "blk_map"
|
||||||
|
fieldBmbt = "bmbt"
|
||||||
|
fieldDir = "dir"
|
||||||
|
fieldTrans = "trans"
|
||||||
|
fieldIg = "ig"
|
||||||
|
fieldLog = "log"
|
||||||
|
fieldRw = "rw"
|
||||||
|
fieldAttr = "attr"
|
||||||
|
fieldIcluster = "icluster"
|
||||||
|
fieldVnodes = "vnodes"
|
||||||
|
fieldBuf = "buf"
|
||||||
|
fieldXpc = "xpc"
|
||||||
|
|
||||||
|
// Unimplemented at this time due to lack of documentation.
|
||||||
|
fieldPushAil = "push_ail"
|
||||||
|
fieldXstrat = "xstrat"
|
||||||
|
fieldAbtb2 = "abtb2"
|
||||||
|
fieldAbtc2 = "abtc2"
|
||||||
|
fieldBmbt2 = "bmbt2"
|
||||||
|
fieldIbt2 = "ibt2"
|
||||||
|
fieldFibt2 = "fibt2"
|
||||||
|
fieldQm = "qm"
|
||||||
|
fieldDebug = "debug"
|
||||||
|
)
|
||||||
|
|
||||||
|
var xfss Stats
|
||||||
|
|
||||||
|
s := bufio.NewScanner(r)
|
||||||
|
for s.Scan() {
|
||||||
|
// Expect at least a string label and a single integer value, ex:
|
||||||
|
// - abt 0
|
||||||
|
// - rw 1 2
|
||||||
|
ss := strings.Fields(string(s.Bytes()))
|
||||||
|
if len(ss) < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
label := ss[0]
|
||||||
|
|
||||||
|
// Extended precision counters are uint64 values.
|
||||||
|
if label == fieldXpc {
|
||||||
|
us, err := parseUint64s(ss[1:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// All other counters are uint32 values.
|
||||||
|
us, err := parseUint32s(ss[1:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch label {
|
||||||
|
case fieldExtentAlloc:
|
||||||
|
xfss.ExtentAllocation, err = extentAllocationStats(us)
|
||||||
|
case fieldAbt:
|
||||||
|
xfss.AllocationBTree, err = btreeStats(us)
|
||||||
|
case fieldBlkMap:
|
||||||
|
xfss.BlockMapping, err = blockMappingStats(us)
|
||||||
|
case fieldBmbt:
|
||||||
|
xfss.BlockMapBTree, err = btreeStats(us)
|
||||||
|
case fieldDir:
|
||||||
|
xfss.DirectoryOperation, err = directoryOperationStats(us)
|
||||||
|
case fieldTrans:
|
||||||
|
xfss.Transaction, err = transactionStats(us)
|
||||||
|
case fieldIg:
|
||||||
|
xfss.InodeOperation, err = inodeOperationStats(us)
|
||||||
|
case fieldLog:
|
||||||
|
xfss.LogOperation, err = logOperationStats(us)
|
||||||
|
case fieldRw:
|
||||||
|
xfss.ReadWrite, err = readWriteStats(us)
|
||||||
|
case fieldAttr:
|
||||||
|
xfss.AttributeOperation, err = attributeOperationStats(us)
|
||||||
|
case fieldIcluster:
|
||||||
|
xfss.InodeClustering, err = inodeClusteringStats(us)
|
||||||
|
case fieldVnodes:
|
||||||
|
xfss.Vnode, err = vnodeStats(us)
|
||||||
|
case fieldBuf:
|
||||||
|
xfss.Buffer, err = bufferStats(us)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &xfss, s.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
|
||||||
|
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
|
||||||
|
if l := len(us); l != 4 {
|
||||||
|
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ExtentAllocationStats{
|
||||||
|
ExtentsAllocated: us[0],
|
||||||
|
BlocksAllocated: us[1],
|
||||||
|
ExtentsFreed: us[2],
|
||||||
|
BlocksFreed: us[3],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// btreeStats builds a BTreeStats from a slice of uint32s.
|
||||||
|
func btreeStats(us []uint32) (BTreeStats, error) {
|
||||||
|
if l := len(us); l != 4 {
|
||||||
|
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
|
||||||
|
}
|
||||||
|
|
||||||
|
return BTreeStats{
|
||||||
|
Lookups: us[0],
|
||||||
|
Compares: us[1],
|
||||||
|
RecordsInserted: us[2],
|
||||||
|
RecordsDeleted: us[3],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// blockMappingStats builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
	if l := len(us); l != 7 {
		return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
	}

	return BlockMappingStats{
		Reads:                us[0],
		Writes:               us[1],
		Unmaps:               us[2],
		ExtentListInsertions: us[3],
		ExtentListDeletions:  us[4],
		ExtentListLookups:    us[5],
		ExtentListCompares:   us[6],
	}, nil
}
|
||||||
|
|
||||||
|
// directoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
	if l := len(us); l != 4 {
		return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
	}

	return DirectoryOperationStats{
		Lookups:  us[0],
		Creates:  us[1],
		Removes:  us[2],
		Getdents: us[3],
	}, nil
}
|
||||||
|
|
||||||
|
// transactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
	if l := len(us); l != 3 {
		return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
	}

	return TransactionStats{
		Sync:  us[0],
		Async: us[1],
		Empty: us[2],
	}, nil
}
|
||||||
|
|
||||||
|
// inodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
	if l := len(us); l != 7 {
		return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
	}

	return InodeOperationStats{
		Attempts:        us[0],
		Found:           us[1],
		Recycle:         us[2],
		Missed:          us[3],
		Duplicate:       us[4],
		Reclaims:        us[5],
		AttributeChange: us[6],
	}, nil
}
|
||||||
|
|
||||||
|
// logOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
	if l := len(us); l != 5 {
		return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
	}

	return LogOperationStats{
		Writes:            us[0],
		Blocks:            us[1],
		NoInternalBuffers: us[2],
		Force:             us[3],
		ForceSleep:        us[4],
	}, nil
}
|
||||||
|
|
||||||
|
// readWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
	if l := len(us); l != 2 {
		return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
	}

	return ReadWriteStats{
		Read:  us[0],
		Write: us[1],
	}, nil
}
|
||||||
|
|
||||||
|
// attributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
	if l := len(us); l != 4 {
		return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
	}

	return AttributeOperationStats{
		Get:    us[0],
		Set:    us[1],
		Remove: us[2],
		List:   us[3],
	}, nil
}
|
||||||
|
|
||||||
|
// inodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
	if l := len(us); l != 3 {
		return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
	}

	return InodeClusteringStats{
		Iflush:     us[0],
		Flush:      us[1],
		FlushInode: us[2],
	}, nil
}
|
||||||
|
|
||||||
|
// vnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
	// The attribute "Free" appears to not be available on older XFS
	// stats versions.  Therefore, 7 or 8 elements may appear in
	// this slice.
	l := len(us)
	if l != 7 && l != 8 {
		return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
	}

	s := VnodeStats{
		Active:   us[0],
		Allocate: us[1],
		Get:      us[2],
		Hold:     us[3],
		Release:  us[4],
		Reclaim:  us[5],
		Remove:   us[6],
	}

	// Skip adding free, unless it is present. The zero value will
	// be used in place of an actual count.
	if l == 7 {
		return s, nil
	}

	s.Free = us[7]
	return s, nil
}
|
||||||
|
|
||||||
|
// bufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
	if l := len(us); l != 9 {
		return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
	}

	return BufferStats{
		Get:             us[0],
		Create:          us[1],
		GetLocked:       us[2],
		GetLockedWaited: us[3],
		BusyLocked:      us[4],
		MissLocked:      us[5],
		PageRetries:     us[6],
		PageFound:       us[7],
		GetRead:         us[8],
	}, nil
}
|
||||||
|
|
||||||
|
// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
	if l := len(us); l != 3 {
		return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
	}

	return ExtendedPrecisionStats{
		FlushBytes: us[0],
		WriteBytes: us[1],
		ReadBytes:  us[2],
	}, nil
}
|
||||||
|
|
||||||
|
// parseUint32s parses a slice of strings into a slice of uint32s.
// An error from strconv.ParseUint aborts the whole parse.
func parseUint32s(ss []string) ([]uint32, error) {
	parsed := make([]uint32, 0, len(ss))
	for _, str := range ss {
		v, err := strconv.ParseUint(str, 10, 32)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, uint32(v))
	}
	return parsed, nil
}
|
||||||
|
|
||||||
|
// parseUint64s parses a slice of strings into a slice of uint64s.
// An error from strconv.ParseUint aborts the whole parse.
func parseUint64s(ss []string) ([]uint64, error) {
	parsed := make([]uint64, 0, len(ss))
	for _, str := range ss {
		v, err := strconv.ParseUint(str, 10, 64)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, v)
	}
	return parsed, nil
}
|
163
vendor/github.com/prometheus/procfs/xfs/xfs.go
generated
vendored
Normal file
163
vendor/github.com/prometheus/procfs/xfs/xfs.go
generated
vendored
Normal file
|
@ -0,0 +1,163 @@
|
||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package xfs provides access to statistics exposed by the XFS filesystem.
|
||||||
|
package xfs
|
||||||
|
|
||||||
|
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
	// The name of the filesystem used to source these statistics.
	// If empty, this indicates aggregated statistics for all XFS
	// filesystems on the host.
	Name string

	ExtentAllocation   ExtentAllocationStats
	AllocationBTree    BTreeStats
	BlockMapping       BlockMappingStats
	BlockMapBTree      BTreeStats
	DirectoryOperation DirectoryOperationStats
	Transaction        TransactionStats
	InodeOperation     InodeOperationStats
	LogOperation       LogOperationStats
	ReadWrite          ReadWriteStats
	AttributeOperation AttributeOperationStats
	InodeClustering    InodeClusteringStats
	Vnode              VnodeStats
	Buffer             BufferStats
	ExtendedPrecision  ExtendedPrecisionStats
}

// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
	ExtentsAllocated uint32
	BlocksAllocated  uint32
	ExtentsFreed     uint32
	BlocksFreed      uint32
}

// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
	Lookups         uint32
	Compares        uint32
	RecordsInserted uint32
	RecordsDeleted  uint32
}

// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
	Reads                uint32
	Writes               uint32
	Unmaps               uint32
	ExtentListInsertions uint32
	ExtentListDeletions  uint32
	ExtentListLookups    uint32
	ExtentListCompares   uint32
}

// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
	Lookups  uint32
	Creates  uint32
	Removes  uint32
	Getdents uint32
}

// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
	Sync  uint32
	Async uint32
	Empty uint32
}

// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
	Attempts        uint32
	Found           uint32
	Recycle         uint32
	Missed          uint32
	Duplicate       uint32
	Reclaims        uint32
	AttributeChange uint32
}

// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
	Writes            uint32
	Blocks            uint32
	NoInternalBuffers uint32
	Force             uint32
	ForceSleep        uint32
}

// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
	Read  uint32
	Write uint32
}

// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
	Get    uint32
	Set    uint32
	Remove uint32
	List   uint32
}

// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
	Iflush     uint32
	Flush      uint32
	FlushInode uint32
}

// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
	Active   uint32
	Allocate uint32
	Get      uint32
	Hold     uint32
	Release  uint32
	Reclaim  uint32
	Remove   uint32
	// Free is not available on older XFS stats versions; it is left
	// at its zero value when the source data does not report it.
	Free uint32
}

// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
	Get             uint32
	Create          uint32
	GetLocked       uint32
	GetLockedWaited uint32
	BusyLocked      uint32
	MissLocked      uint32
	PageRetries     uint32
	PageFound       uint32
	GetRead         uint32
}

// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
	FlushBytes uint64
	WriteBytes uint64
	ReadBytes  uint64
}
|
12
vendor/vendor.json
vendored
12
vendor/vendor.json
vendored
|
@ -97,10 +97,16 @@
|
||||||
"revisionTime": "2017-07-31T11:42:04Z"
|
"revisionTime": "2017-07-31T11:42:04Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "W218eJZPXJG783fUr/z6IaAZyes=",
|
"checksumSHA1": "ihxJIjxtbEYdQKwA0D0nRipj95I=",
|
||||||
"path": "github.com/prometheus/procfs",
|
"path": "github.com/prometheus/procfs",
|
||||||
"revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5",
|
"revision": "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2",
|
||||||
"revisionTime": "2016-04-11T19:08:41Z"
|
"revisionTime": "2017-07-03T10:12:42Z"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"checksumSHA1": "xCiFAAwVTrjsfZT1BIJQ3DgeNCY=",
|
||||||
|
"path": "github.com/prometheus/procfs/xfs",
|
||||||
|
"revision": "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2",
|
||||||
|
"revisionTime": "2017-07-03T10:12:42Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "BYvROBsiyAXK4sq6yhDe8RgT4LM=",
|
"checksumSHA1": "BYvROBsiyAXK4sq6yhDe8RgT4LM=",
|
||||||
|
|
Loading…
Reference in a new issue