forked from mirrors/statsd_exporter
Removed redundant files.
Signed-off-by: Frank Davidson <davidfr@americas.manulife.net>
Signed-off-by: Frank Davidson <ffdavidson@gmail.com>
parent 44d4daf599
commit 390e862252
3 changed files with 0 additions and 1122 deletions
546 exporter.go

@@ -1,546 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "bufio"
    "fmt"
    "io"
    "net"
    "os"
    "strconv"
    "strings"
    "time"
    "unicode/utf8"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
    "github.com/prometheus/client_golang/prometheus"

    "github.com/prometheus/statsd_exporter/pkg/clock"
    "github.com/prometheus/statsd_exporter/pkg/mapper"
)

const (
    defaultHelp = "Metric autogenerated by statsd_exporter."
    regErrF     = "Failed to update metric"
)

// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
// This allows incoming metrics to have inconsistent label sets
type uncheckedCollector struct {
    c prometheus.Collector
}

func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}
func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
    u.c.Collect(c)
}

type Exporter struct {
    mapper   *mapper.MetricMapper
    registry *registry
    logger   log.Logger
}

// Listen handles all events sent to the given channel sequentially. It
// terminates when the channel is closed.
func (b *Exporter) Listen(e <-chan Events) {
    removeStaleMetricsTicker := clock.NewTicker(time.Second)

    for {
        select {
        case <-removeStaleMetricsTicker.C:
            b.registry.removeStaleMetrics()
        case events, ok := <-e:
            if !ok {
                level.Debug(b.logger).Log("msg", "Channel is closed. Break out of Exporter.Listener.")
                removeStaleMetricsTicker.Stop()
                return
            }
            for _, event := range events {
                b.handleEvent(event)
            }
        }
    }
}

// handleEvent processes a single Event according to the configured mapping.
func (b *Exporter) handleEvent(event Event) {
    mapping, labels, present := b.mapper.GetMapping(event.MetricName(), event.MetricType())
    if mapping == nil {
        mapping = &mapper.MetricMapping{}
        if b.mapper.Defaults.Ttl != 0 {
            mapping.Ttl = b.mapper.Defaults.Ttl
        }
    }

    if mapping.Action == mapper.ActionTypeDrop {
        eventsActions.WithLabelValues("drop").Inc()
        return
    }

    help := defaultHelp
    if mapping.HelpText != "" {
        help = mapping.HelpText
    }

    metricName := ""
    prometheusLabels := event.Labels()
    if present {
        if mapping.Name == "" {
            level.Debug(b.logger).Log("msg", "The mapping generates an empty metric name", "metric_name", event.MetricName(), "match", mapping.Match)
            errorEventStats.WithLabelValues("empty_metric_name").Inc()
            return
        }
        metricName = mapper.EscapeMetricName(mapping.Name)
        for label, value := range labels {
            prometheusLabels[label] = value
        }
        eventsActions.WithLabelValues(string(mapping.Action)).Inc()
    } else {
        eventsUnmapped.Inc()
        metricName = mapper.EscapeMetricName(event.MetricName())
    }

    switch ev := event.(type) {
    case *CounterEvent:
        // We don't accept negative values for counters. Incrementing the counter with a negative number
        // will cause the exporter to panic. Instead we will warn and continue to the next event.
        if event.Value() < 0.0 {
            level.Debug(b.logger).Log("msg", "counter must be non-negative value", "metric", metricName, "event_value", event.Value())
            errorEventStats.WithLabelValues("illegal_negative_counter").Inc()
            return
        }

        counter, err := b.registry.getCounter(metricName, prometheusLabels, help, mapping)
        if err == nil {
            counter.Add(event.Value())
            eventStats.WithLabelValues("counter").Inc()
        } else {
            level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
            conflictingEventStats.WithLabelValues("counter").Inc()
        }

    case *GaugeEvent:
        gauge, err := b.registry.getGauge(metricName, prometheusLabels, help, mapping)

        if err == nil {
            if ev.relative {
                gauge.Add(event.Value())
            } else {
                gauge.Set(event.Value())
            }
            eventStats.WithLabelValues("gauge").Inc()
        } else {
            level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
            conflictingEventStats.WithLabelValues("gauge").Inc()
        }

    case *TimerEvent:
        t := mapper.TimerTypeDefault
        if mapping != nil {
            t = mapping.TimerType
        }
        if t == mapper.TimerTypeDefault {
            t = b.mapper.Defaults.TimerType
        }

        switch t {
        case mapper.TimerTypeHistogram:
            histogram, err := b.registry.getHistogram(metricName, prometheusLabels, help, mapping)
            if err == nil {
                histogram.Observe(event.Value() / 1000) // prometheus presumes seconds, statsd millisecond
                eventStats.WithLabelValues("timer").Inc()
            } else {
                level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
                conflictingEventStats.WithLabelValues("timer").Inc()
            }

        case mapper.TimerTypeDefault, mapper.TimerTypeSummary:
            summary, err := b.registry.getSummary(metricName, prometheusLabels, help, mapping)
            if err == nil {
                summary.Observe(event.Value() / 1000) // prometheus presumes seconds, statsd millisecond
                eventStats.WithLabelValues("timer").Inc()
            } else {
                level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
                conflictingEventStats.WithLabelValues("timer").Inc()
            }

        default:
            level.Error(b.logger).Log("msg", "unknown timer type", "type", t)
            os.Exit(1)
        }

    default:
        level.Debug(b.logger).Log("msg", "Unsupported event type")
        eventStats.WithLabelValues("illegal").Inc()
    }
}

func NewExporter(mapper *mapper.MetricMapper, logger log.Logger) *Exporter {
    return &Exporter{
        mapper:   mapper,
        registry: newRegistry(mapper),
        logger:   logger,
    }
}

func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (Event, error) {
    switch statType {
    case "c":
        return &CounterEvent{
            metricName: metric,
            value:      float64(value),
            labels:     labels,
        }, nil
    case "g":
        return &GaugeEvent{
            metricName: metric,
            value:      float64(value),
            relative:   relative,
            labels:     labels,
        }, nil
    case "ms", "h", "d":
        return &TimerEvent{
            metricName: metric,
            value:      float64(value),
            labels:     labels,
        }, nil
    case "s":
        return nil, fmt.Errorf("no support for StatsD sets")
    default:
        return nil, fmt.Errorf("bad stat type %s", statType)
    }
}

func parseTag(component, tag string, separator rune, labels map[string]string, logger log.Logger) {
    // Entirely empty tag is an error
    if len(tag) == 0 {
        tagErrors.Inc()
        level.Debug(logger).Log("msg", "Empty name tag", "component", component)
        return
    }

    for i, c := range tag {
        if c == separator {
            k := tag[:i]
            v := tag[i+1:]

            if len(k) == 0 || len(v) == 0 {
                // Empty key or value is an error
                tagErrors.Inc()
                level.Debug(logger).Log("msg", "Malformed name tag", "k", k, "v", v, "component", component)
            } else {
                labels[mapper.EscapeMetricName(k)] = v
            }
            return
        }
    }

    // Missing separator (no value) is an error
    tagErrors.Inc()
    level.Debug(logger).Log("msg", "Malformed name tag", "tag", tag, "component", component)
}

func parseNameTags(component string, labels map[string]string, logger log.Logger) {
    lastTagEndIndex := 0
    for i, c := range component {
        if c == ',' {
            tag := component[lastTagEndIndex:i]
            lastTagEndIndex = i + 1
            parseTag(component, tag, '=', labels, logger)
        }
    }

    // If we're not off the end of the string, add the last tag
    if lastTagEndIndex < len(component) {
        tag := component[lastTagEndIndex:]
        parseTag(component, tag, '=', labels, logger)
    }
}

func trimLeftHash(s string) string {
    if s != "" && s[0] == '#' {
        return s[1:]
    }
    return s
}

func parseDogStatsDTags(component string, labels map[string]string, logger log.Logger) {
    lastTagEndIndex := 0
    for i, c := range component {
        if c == ',' {
            tag := component[lastTagEndIndex:i]
            lastTagEndIndex = i + 1
            parseTag(component, trimLeftHash(tag), ':', labels, logger)
        }
    }

    // If we're not off the end of the string, add the last tag
    if lastTagEndIndex < len(component) {
        tag := component[lastTagEndIndex:]
        parseTag(component, trimLeftHash(tag), ':', labels, logger)
    }
}

func parseNameAndTags(name string, labels map[string]string, logger log.Logger) string {
    for i, c := range name {
        // `#` delimits start of tags by Librato
        // https://www.librato.com/docs/kb/collect/collection_agents/stastd/#stat-level-tags
        // `,` delimits start of tags by InfluxDB
        // https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd
        if c == '#' || c == ',' {
            parseNameTags(name[i+1:], labels, logger)
            return name[:i]
        }
    }
    return name
}

func lineToEvents(line string, logger log.Logger) Events {
    events := Events{}
    if line == "" {
        return events
    }

    elements := strings.SplitN(line, ":", 2)
    if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
        sampleErrors.WithLabelValues("malformed_line").Inc()
        level.Debug(logger).Log("msg", "Bad line from StatsD", "line", line)
        return events
    }

    labels := map[string]string{}
    metric := parseNameAndTags(elements[0], labels, logger)

    var samples []string
    if strings.Contains(elements[1], "|#") {
        // using DogStatsD tags

        // don't allow mixed tagging styles
        if len(labels) > 0 {
            sampleErrors.WithLabelValues("mixed_tagging_styles").Inc()
            level.Debug(logger).Log("msg", "Bad line (multiple tagging styles) from StatsD", "line", line)
            return events
        }

        // disable multi-metrics
        samples = elements[1:]
    } else {
        samples = strings.Split(elements[1], ":")
    }
samples:
    for _, sample := range samples {
        samplesReceived.Inc()
        components := strings.Split(sample, "|")
        samplingFactor := 1.0
        if len(components) < 2 || len(components) > 4 {
            sampleErrors.WithLabelValues("malformed_component").Inc()
            level.Debug(logger).Log("msg", "Bad component", "line", line)
            continue
        }
        valueStr, statType := components[0], components[1]

        var relative = false
        if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 {
            relative = true
        }

        value, err := strconv.ParseFloat(valueStr, 64)
        if err != nil {
            level.Debug(logger).Log("msg", "Bad value", "value", valueStr, "line", line)
            sampleErrors.WithLabelValues("malformed_value").Inc()
            continue
        }

        multiplyEvents := 1
        if len(components) >= 3 {
            for _, component := range components[2:] {
                if len(component) == 0 {
                    level.Debug(logger).Log("msg", "Empty component", "line", line)
                    sampleErrors.WithLabelValues("malformed_component").Inc()
                    continue samples
                }
            }

            for _, component := range components[2:] {
                switch component[0] {
                case '@':

                    samplingFactor, err = strconv.ParseFloat(component[1:], 64)
                    if err != nil {
                        level.Debug(logger).Log("msg", "Invalid sampling factor", "component", component[1:], "line", line)
                        sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
                    }
                    if samplingFactor == 0 {
                        samplingFactor = 1
                    }

                    if statType == "g" {
                        continue
                    } else if statType == "c" {
                        value /= samplingFactor
                    } else if statType == "ms" || statType == "h" || statType == "d" {
                        multiplyEvents = int(1 / samplingFactor)
                    }
                case '#':
                    parseDogStatsDTags(component[1:], labels, logger)
                default:
                    level.Debug(logger).Log("msg", "Invalid sampling factor or tag section", "component", components[2], "line", line)
                    sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
                    continue
                }
            }
        }

        if len(labels) > 0 {
            tagsReceived.Inc()
        }

        for i := 0; i < multiplyEvents; i++ {
            event, err := buildEvent(statType, metric, value, relative, labels)
            if err != nil {
                level.Debug(logger).Log("msg", "Error building event", "line", line, "error", err)
                sampleErrors.WithLabelValues("illegal_event").Inc()
                continue
            }
            events = append(events, event)
        }
    }
    return events
}

type StatsDUDPListener struct {
    conn         *net.UDPConn
    eventHandler eventHandler
    logger       log.Logger
}

func (l *StatsDUDPListener) SetEventHandler(eh eventHandler) {
    l.eventHandler = eh
}

func (l *StatsDUDPListener) Listen() {
    buf := make([]byte, 65535)
    for {
        n, _, err := l.conn.ReadFromUDP(buf)
        if err != nil {
            // https://github.com/golang/go/issues/4373
            // ignore net: errClosing error as it will occur during shutdown
            if strings.HasSuffix(err.Error(), "use of closed network connection") {
                return
            }
            level.Error(l.logger).Log("error", err)
            return
        }
        l.handlePacket(buf[0:n])
    }
}

func (l *StatsDUDPListener) handlePacket(packet []byte) {
    udpPackets.Inc()
    lines := strings.Split(string(packet), "\n")
    for _, line := range lines {
        linesReceived.Inc()
        l.eventHandler.queue(lineToEvents(line, l.logger))
    }
}

type StatsDTCPListener struct {
    conn         *net.TCPListener
    eventHandler eventHandler
    logger       log.Logger
}

func (l *StatsDTCPListener) SetEventHandler(eh eventHandler) {
    l.eventHandler = eh
}

func (l *StatsDTCPListener) Listen() {
    for {
        c, err := l.conn.AcceptTCP()
        if err != nil {
            // https://github.com/golang/go/issues/4373
            // ignore net: errClosing error as it will occur during shutdown
            if strings.HasSuffix(err.Error(), "use of closed network connection") {
                return
            }
            level.Error(l.logger).Log("msg", "AcceptTCP failed", "error", err)
            os.Exit(1)
        }
        go l.handleConn(c)
    }
}

func (l *StatsDTCPListener) handleConn(c *net.TCPConn) {
    defer c.Close()

    tcpConnections.Inc()

    r := bufio.NewReader(c)
    for {
        line, isPrefix, err := r.ReadLine()
        if err != nil {
            if err != io.EOF {
                tcpErrors.Inc()
                level.Debug(l.logger).Log("msg", "Read failed", "addr", c.RemoteAddr(), "error", err)
            }
            break
        }
        if isPrefix {
            tcpLineTooLong.Inc()
            level.Debug(l.logger).Log("msg", "Read failed: line too long", "addr", c.RemoteAddr())
            break
        }
        linesReceived.Inc()
        l.eventHandler.queue(lineToEvents(string(line), l.logger))
    }
}

type StatsDUnixgramListener struct {
    conn         *net.UnixConn
    eventHandler eventHandler
    logger       log.Logger
}

func (l *StatsDUnixgramListener) SetEventHandler(eh eventHandler) {
    l.eventHandler = eh
}

func (l *StatsDUnixgramListener) Listen() {
    buf := make([]byte, 65535)
    for {
        n, _, err := l.conn.ReadFromUnix(buf)
        if err != nil {
            // https://github.com/golang/go/issues/4373
            // ignore net: errClosing error as it will occur during shutdown
            if strings.HasSuffix(err.Error(), "use of closed network connection") {
                return
            }
            level.Error(l.logger).Log(err)
            os.Exit(1)
        }
        l.handlePacket(buf[:n])
    }
}

func (l *StatsDUnixgramListener) handlePacket(packet []byte) {
    unixgramPackets.Inc()
    lines := strings.Split(string(packet), "\n")
    for _, line := range lines {
        linesReceived.Inc()
        l.eventHandler.queue(lineToEvents(string(line), l.logger))
    }
}
416 registry.go

@@ -1,416 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "bytes"
    "fmt"
    "hash"
    "hash/fnv"
    "sort"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"
    "github.com/prometheus/statsd_exporter/pkg/clock"
    "github.com/prometheus/statsd_exporter/pkg/mapper"
)

type metricType int

const (
    CounterMetricType metricType = iota
    GaugeMetricType
    SummaryMetricType
    HistogramMetricType
)

type nameHash uint64
type valueHash uint64
type labelHash struct {
    // This is a hash over the label names
    names nameHash
    // This is a hash over the label names + label values
    values valueHash
}

type metricHolder interface{}

type registeredMetric struct {
    lastRegisteredAt time.Time
    labels           prometheus.Labels
    ttl              time.Duration
    metric           metricHolder
    vecKey           nameHash
}

type vectorHolder interface {
    Delete(label prometheus.Labels) bool
}

type vector struct {
    holder   vectorHolder
    refCount uint64
}

type metric struct {
    metricType metricType
    // Vectors key is the hash of the label names
    vectors map[nameHash]*vector
    // Metrics key is a hash of the label names + label values
    metrics map[valueHash]*registeredMetric
}

type registry struct {
    metrics map[string]metric
    mapper  *mapper.MetricMapper
    // The below value and label variables are allocated in the registry struct
    // so that we don't have to allocate them every time have to compute a label
    // hash.
    valueBuf, nameBuf bytes.Buffer
    hasher            hash.Hash64
}

func newRegistry(mapper *mapper.MetricMapper) *registry {
    return &registry{
        metrics: make(map[string]metric),
        mapper:  mapper,
        hasher:  fnv.New64a(),
    }
}

func (r *registry) metricConflicts(metricName string, metricType metricType) bool {
    vector, hasMetric := r.metrics[metricName]
    if !hasMetric {
        // No metric with this name exists
        return false
    }

    if vector.metricType == metricType {
        // We've found a copy of this metric with this type, but different
        // labels, so it's safe to create a new one.
        return false
    }

    // The metric exists, but it's of a different type than we're trying to
    // create.
    return true
}

func (r *registry) storeCounter(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.CounterVec, c prometheus.Counter, ttl time.Duration) {
    r.store(metricName, hash, labels, vec, c, CounterMetricType, ttl)
}

func (r *registry) storeGauge(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.GaugeVec, g prometheus.Counter, ttl time.Duration) {
    r.store(metricName, hash, labels, vec, g, GaugeMetricType, ttl)
}

func (r *registry) storeHistogram(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.HistogramVec, o prometheus.Observer, ttl time.Duration) {
    r.store(metricName, hash, labels, vec, o, HistogramMetricType, ttl)
}

func (r *registry) storeSummary(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.SummaryVec, o prometheus.Observer, ttl time.Duration) {
    r.store(metricName, hash, labels, vec, o, SummaryMetricType, ttl)
}

func (r *registry) store(metricName string, hash labelHash, labels prometheus.Labels, vh vectorHolder, mh metricHolder, metricType metricType, ttl time.Duration) {
    metric, hasMetric := r.metrics[metricName]
    if !hasMetric {
        metric.metricType = metricType
        metric.vectors = make(map[nameHash]*vector)
        metric.metrics = make(map[valueHash]*registeredMetric)

        r.metrics[metricName] = metric
    }

    v, ok := metric.vectors[hash.names]
    if !ok {
        v = &vector{holder: vh}
        metric.vectors[hash.names] = v
    }

    rm, ok := metric.metrics[hash.values]
    if !ok {
        rm = &registeredMetric{
            labels: labels,
            ttl:    ttl,
            metric: mh,
            vecKey: hash.names,
        }
        metric.metrics[hash.values] = rm
        v.refCount++
    }
    now := clock.Now()
    rm.lastRegisteredAt = now
    // Update ttl from mapping
    rm.ttl = ttl
}

func (r *registry) get(metricName string, hash labelHash, metricType metricType) (vectorHolder, metricHolder) {
    metric, hasMetric := r.metrics[metricName]

    if !hasMetric {
        return nil, nil
    }
    if metric.metricType != metricType {
        return nil, nil
    }

    rm, ok := metric.metrics[hash.values]
    if ok {
        now := clock.Now()
        rm.lastRegisteredAt = now
        return metric.vectors[hash.names].holder, rm.metric
    }

    vector, ok := metric.vectors[hash.names]
    if ok {
        return vector.holder, nil
    }

    return nil, nil
}

func (r *registry) getCounter(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Counter, error) {
    hash, labelNames := r.hashLabels(labels)
    vh, mh := r.get(metricName, hash, CounterMetricType)
    if mh != nil {
        return mh.(prometheus.Counter), nil
    }

    if r.metricConflicts(metricName, CounterMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }

    var counterVec *prometheus.CounterVec
    if vh == nil {
        metricsCount.WithLabelValues("counter").Inc()
        counterVec = prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: metricName,
            Help: help,
        }, labelNames)

        if err := prometheus.Register(uncheckedCollector{counterVec}); err != nil {
            return nil, err
        }
    } else {
        counterVec = vh.(*prometheus.CounterVec)
    }

    var counter prometheus.Counter
    var err error
    if counter, err = counterVec.GetMetricWith(labels); err != nil {
        return nil, err
    }
    r.storeCounter(metricName, hash, labels, counterVec, counter, mapping.Ttl)

    return counter, nil
}

func (r *registry) getGauge(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Gauge, error) {
    hash, labelNames := r.hashLabels(labels)
    vh, mh := r.get(metricName, hash, GaugeMetricType)
    if mh != nil {
        return mh.(prometheus.Gauge), nil
    }

    if r.metricConflicts(metricName, GaugeMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }

    var gaugeVec *prometheus.GaugeVec
    if vh == nil {
        metricsCount.WithLabelValues("gauge").Inc()
        gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{
            Name: metricName,
            Help: help,
        }, labelNames)

        if err := prometheus.Register(uncheckedCollector{gaugeVec}); err != nil {
            return nil, err
        }
    } else {
        gaugeVec = vh.(*prometheus.GaugeVec)
    }

    var gauge prometheus.Gauge
    var err error
    if gauge, err = gaugeVec.GetMetricWith(labels); err != nil {
        return nil, err
    }
    r.storeGauge(metricName, hash, labels, gaugeVec, gauge, mapping.Ttl)

    return gauge, nil
}

func (r *registry) getHistogram(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Observer, error) {
    hash, labelNames := r.hashLabels(labels)
    vh, mh := r.get(metricName, hash, HistogramMetricType)
    if mh != nil {
        return mh.(prometheus.Observer), nil
    }

    if r.metricConflicts(metricName, HistogramMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }
    if r.metricConflicts(metricName+"_sum", HistogramMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }
    if r.metricConflicts(metricName+"_count", HistogramMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }
    if r.metricConflicts(metricName+"_bucket", HistogramMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }

    var histogramVec *prometheus.HistogramVec
    if vh == nil {
        metricsCount.WithLabelValues("histogram").Inc()
        buckets := r.mapper.Defaults.Buckets
        if mapping.HistogramOptions != nil && len(mapping.HistogramOptions.Buckets) > 0 {
            buckets = mapping.HistogramOptions.Buckets
        }
        histogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name:    metricName,
            Help:    help,
            Buckets: buckets,
        }, labelNames)

        if err := prometheus.Register(uncheckedCollector{histogramVec}); err != nil {
            return nil, err
        }
    } else {
        histogramVec = vh.(*prometheus.HistogramVec)
    }

    var observer prometheus.Observer
    var err error
    if observer, err = histogramVec.GetMetricWith(labels); err != nil {
        return nil, err
    }
    r.storeHistogram(metricName, hash, labels, histogramVec, observer, mapping.Ttl)

    return observer, nil
}

func (r *registry) getSummary(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Observer, error) {
    hash, labelNames := r.hashLabels(labels)
    vh, mh := r.get(metricName, hash, SummaryMetricType)
    if mh != nil {
        return mh.(prometheus.Observer), nil
    }

    if r.metricConflicts(metricName, SummaryMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }
    if r.metricConflicts(metricName+"_sum", SummaryMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }
    if r.metricConflicts(metricName+"_count", SummaryMetricType) {
        return nil, fmt.Errorf("metric with name %s is already registered", metricName)
    }

    var summaryVec *prometheus.SummaryVec
    if vh == nil {
        metricsCount.WithLabelValues("summary").Inc()
        quantiles := r.mapper.Defaults.Quantiles
        if mapping != nil && mapping.SummaryOptions != nil && len(mapping.SummaryOptions.Quantiles) > 0 {
            quantiles = mapping.SummaryOptions.Quantiles
        }
        summaryOptions := mapper.SummaryOptions{}
        if mapping != nil && mapping.SummaryOptions != nil {
            summaryOptions = *mapping.SummaryOptions
        }
        objectives := make(map[float64]float64)
        for _, q := range quantiles {
            objectives[q.Quantile] = q.Error
        }
        // In the case of no mapping file, explicitly define the default quantiles
        if len(objectives) == 0 {
            objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
        }
        summaryVec = prometheus.NewSummaryVec(prometheus.SummaryOpts{
            Name:       metricName,
            Help:       help,
            Objectives: objectives,
            MaxAge:     summaryOptions.MaxAge,
            AgeBuckets: summaryOptions.AgeBuckets,
            BufCap:     summaryOptions.BufCap,
        }, labelNames)

        if err := prometheus.Register(uncheckedCollector{summaryVec}); err != nil {
            return nil, err
        }
    } else {
        summaryVec = vh.(*prometheus.SummaryVec)
    }

    var observer prometheus.Observer
    var err error
    if observer, err = summaryVec.GetMetricWith(labels); err != nil {
        return nil, err
    }
    r.storeSummary(metricName, hash, labels, summaryVec, observer, mapping.Ttl)

    return observer, nil
}

func (r *registry) removeStaleMetrics() {
    now := clock.Now()
    // delete timeseries with expired ttl
    for _, metric := range r.metrics {
        for hash, rm := range metric.metrics {
            if rm.ttl == 0 {
                continue
            }
            if rm.lastRegisteredAt.Add(rm.ttl).Before(now) {
                metric.vectors[rm.vecKey].holder.Delete(rm.labels)
                metric.vectors[rm.vecKey].refCount--
                delete(metric.metrics, hash)
            }
        }
    }
}

// Calculates a hash of both the label names and the label names and values.
func (r *registry) hashLabels(labels prometheus.Labels) (labelHash, []string) {
    r.hasher.Reset()
    r.nameBuf.Reset()
    r.valueBuf.Reset()
    labelNames := make([]string, 0, len(labels))

    for labelName := range labels {
        labelNames = append(labelNames, labelName)
    }
    sort.Strings(labelNames)

    r.valueBuf.WriteByte(model.SeparatorByte)
    for _, labelName := range labelNames {
        r.valueBuf.WriteString(labels[labelName])
        r.valueBuf.WriteByte(model.SeparatorByte)

        r.nameBuf.WriteString(labelName)
        r.nameBuf.WriteByte(model.SeparatorByte)
    }

    lh := labelHash{}
    r.hasher.Write(r.nameBuf.Bytes())
    lh.names = nameHash(r.hasher.Sum64())

    // Now add the values to the names we've already hashed.
    r.hasher.Write(r.valueBuf.Bytes())
    lh.values = valueHash(r.hasher.Sum64())

    return lh, labelNames
}
160 telemetry.go

@@ -1,160 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "github.com/prometheus/client_golang/prometheus"
)

var (
    eventStats = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "statsd_exporter_events_total",
            Help: "The total number of StatsD events seen.",
        },
        []string{"type"},
    )
    eventsFlushed = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_event_queue_flushed_total",
            Help: "Number of times events were flushed to exporter",
        },
    )
    eventsUnmapped = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "statsd_exporter_events_unmapped_total",
        Help: "The total number of StatsD events no mapping was found for.",
    })
    udpPackets = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_udp_packets_total",
            Help: "The total number of StatsD packets received over UDP.",
        },
    )
    tcpConnections = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_tcp_connections_total",
            Help: "The total number of TCP connections handled.",
        },
    )
    tcpErrors = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_tcp_connection_errors_total",
            Help: "The number of errors encountered reading from TCP.",
        },
    )
    tcpLineTooLong = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_tcp_too_long_lines_total",
            Help: "The number of lines discarded due to being too long.",
        },
    )
    unixgramPackets = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_unixgram_packets_total",
            Help: "The total number of StatsD packets received over Unixgram.",
        },
    )
    linesReceived = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_lines_total",
            Help: "The total number of StatsD lines received.",
        },
    )
    samplesReceived = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_samples_total",
            Help: "The total number of StatsD samples received.",
        },
    )
    sampleErrors = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "statsd_exporter_sample_errors_total",
            Help: "The total number of errors parsing StatsD samples.",
        },
        []string{"reason"},
    )
    tagsReceived = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_tags_total",
            Help: "The total number of DogStatsD tags processed.",
        },
    )
    tagErrors = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "statsd_exporter_tag_errors_total",
            Help: "The number of errors parsing DogStatsD tags.",
        },
    )
    configLoads = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "statsd_exporter_config_reloads_total",
            Help: "The number of configuration reloads.",
        },
        []string{"outcome"},
    )
    mappingsCount = prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "statsd_exporter_loaded_mappings",
        Help: "The current number of configured metric mappings.",
    })
    conflictingEventStats = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "statsd_exporter_events_conflict_total",
            Help: "The total number of StatsD events with conflicting names.",
        },
        []string{"type"},
    )
    errorEventStats = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "statsd_exporter_events_error_total",
            Help: "The total number of StatsD events discarded due to errors.",
        },
        []string{"reason"},
    )
    eventsActions = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "statsd_exporter_events_actions_total",
            Help: "The total number of StatsD events by action.",
        },
        []string{"action"},
    )
    metricsCount = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "statsd_exporter_metrics_total",
            Help: "The total number of metrics.",
        },
        []string{"type"},
    )
)

func init() {
    prometheus.MustRegister(eventStats)
    prometheus.MustRegister(eventsFlushed)
    prometheus.MustRegister(eventsUnmapped)
    prometheus.MustRegister(udpPackets)
    prometheus.MustRegister(tcpConnections)
    prometheus.MustRegister(tcpErrors)
    prometheus.MustRegister(tcpLineTooLong)
    prometheus.MustRegister(unixgramPackets)
    prometheus.MustRegister(linesReceived)
    prometheus.MustRegister(samplesReceived)
    prometheus.MustRegister(sampleErrors)
    prometheus.MustRegister(tagsReceived)
    prometheus.MustRegister(tagErrors)
    prometheus.MustRegister(configLoads)
    prometheus.MustRegister(mappingsCount)
    prometheus.MustRegister(conflictingEventStats)
    prometheus.MustRegister(errorEventStats)
    prometheus.MustRegister(eventsActions)
    prometheus.MustRegister(metricsCount)
}