// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"

	"github.com/prometheus/statsd_exporter/pkg/clock"
	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

const (
	defaultHelp = "Metric autogenerated by statsd_exporter."
	regErrF     = "Failed to update metric %q. Error: %s"
)

// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
// This allows incoming metrics to have inconsistent label sets.
type uncheckedCollector struct {
	c prometheus.Collector
}

func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}

func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
	u.c.Collect(c)
}
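
// Illustrative only: a minimal sketch of how such a wrapper might be
// registered, assuming a prometheus.Registerer named "reg" and an existing
// Collector named "c" (both hypothetical names here):
//
//	reg.MustRegister(uncheckedCollector{c: c})
//
// Because Describe reports no descriptors, the registry performs no
// consistency checks on the metrics emitted by Collect.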

type Exporter struct {
	mapper   *mapper.MetricMapper
	registry *registry
}

// escapeMetricName replaces invalid characters in the metric name with "_",
// and prepends an underscore if the name starts with a digit. Valid
// characters are a-z, A-Z, 0-9, and _.
func escapeMetricName(metricName string) string {
	metricLen := len(metricName)
	if metricLen == 0 {
		return ""
	}

	escaped := false
	var sb strings.Builder
	// If a metric starts with a digit, allocate the memory and prepend an
	// underscore.
	if metricName[0] >= '0' && metricName[0] <= '9' {
		escaped = true
		sb.Grow(metricLen + 1)
		sb.WriteByte('_')
	}

	// This is a character replacement method optimized for this limited
	// use case. It is much faster than using a regex.
	offset := 0
	for i, c := range metricName {
		// Seek forward, skipping valid characters until we find one that needs
		// to be replaced, then add all the characters we've seen so far to the
		// strings.Builder.
		if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') || (c == '_') {
			// Character is valid, so skip over it without doing anything.
		} else {
			if !escaped {
				// Up until now we've been lazy and avoided actually allocating
				// memory. Unfortunately we've now determined this string needs
				// escaping, so allocate the buffer for the whole string.
				escaped = true
				sb.Grow(metricLen)
			}
			sb.WriteString(metricName[offset:i])
			offset = i + utf8.RuneLen(c)
			sb.WriteByte('_')
		}
	}

	if !escaped {
		// This is the happy path where nothing had to be escaped, so we can
		// avoid doing anything.
		return metricName
	}

	if offset < metricLen {
		sb.WriteString(metricName[offset:])
	}

	return sb.String()
}
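
// A few illustrative input/output pairs, following directly from the rules
// above (a sketch, not an exhaustive specification):
//
//	escapeMetricName("foo.bar-baz") // "foo_bar_baz"
//	escapeMetricName("0counter")    // "_0counter"
//	escapeMetricName("already_ok")  // "already_ok" (returned unchanged, no allocation)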

// Listen handles all events sent to the given channel sequentially. It
// terminates when the channel is closed.
func (b *Exporter) Listen(e <-chan Events) {
	removeStaleMetricsTicker := clock.NewTicker(time.Second)

	for {
		select {
		case <-removeStaleMetricsTicker.C:
			b.registry.removeStaleMetrics()
		case events, ok := <-e:
			if !ok {
				log.Debug("Channel is closed. Break out of Exporter.Listener.")
				removeStaleMetricsTicker.Stop()
				return
			}
			for _, event := range events {
				b.handleEvent(event)
			}
		}
	}
}
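
// A minimal usage sketch (hypothetical wiring, not the exporter's actual
// startup code):
//
//	events := make(chan Events, 1024)
//	go NewExporter(metricMapper).Listen(events)
//	// ... the listeners below queue parsed Events onto the channel ...
//	close(events) // Listen drains the buffer and returns
//
// Here "metricMapper" stands in for a configured *mapper.MetricMapper.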

// handleEvent processes a single Event according to the configured mapping.
func (b *Exporter) handleEvent(event Event) {
	mapping, labels, present := b.mapper.GetMapping(event.MetricName(), event.MetricType())
	if mapping == nil {
		mapping = &mapper.MetricMapping{}
		if b.mapper.Defaults.Ttl != 0 {
			mapping.Ttl = b.mapper.Defaults.Ttl
		}
	}

	if mapping.Action == mapper.ActionTypeDrop {
		eventsActions.WithLabelValues("drop").Inc()
		return
	}

	help := defaultHelp
	if mapping.HelpText != "" {
		help = mapping.HelpText
	}

	metricName := ""
	prometheusLabels := event.Labels()
	if present {
		if mapping.Name == "" {
			log.Debugf("The mapping of '%s' for match '%s' generates an empty metric name", event.MetricName(), mapping.Match)
			errorEventStats.WithLabelValues("empty_metric_name").Inc()
			return
		}
		metricName = escapeMetricName(mapping.Name)
		for label, value := range labels {
			prometheusLabels[label] = value
		}
		eventsActions.WithLabelValues(string(mapping.Action)).Inc()
	} else {
		eventsUnmapped.Inc()
		metricName = escapeMetricName(event.MetricName())
	}

	switch ev := event.(type) {
	case *CounterEvent:
		// We don't accept negative values for counters. Incrementing the counter with a negative number
		// would cause the exporter to panic. Instead we log the value and continue to the next event.
		if event.Value() < 0.0 {
			log.Debugf("Counter %q is: '%f' (counter must be non-negative value)", metricName, event.Value())
			errorEventStats.WithLabelValues("illegal_negative_counter").Inc()
			return
		}
		counter, err := b.registry.getCounter(metricName, prometheusLabels, help, mapping)
		if err == nil {
			counter.Add(event.Value())
			eventStats.WithLabelValues("counter").Inc()
		} else {
			log.Debugf(regErrF, metricName, err)
			conflictingEventStats.WithLabelValues("counter").Inc()
		}

	case *GaugeEvent:
		gauge, err := b.registry.getGauge(metricName, prometheusLabels, help, mapping)

		if err == nil {
			if ev.relative {
				gauge.Add(event.Value())
			} else {
				gauge.Set(event.Value())
			}
			eventStats.WithLabelValues("gauge").Inc()
		} else {
			log.Debugf(regErrF, metricName, err)
			conflictingEventStats.WithLabelValues("gauge").Inc()
		}

	case *TimerEvent:
		t := mapper.TimerTypeDefault
		if mapping != nil {
			t = mapping.TimerType
		}
		if t == mapper.TimerTypeDefault {
			t = b.mapper.Defaults.TimerType
		}

		switch t {
		case mapper.TimerTypeHistogram:
			histogram, err := b.registry.getHistogram(metricName, prometheusLabels, help, mapping)
			if err == nil {
				histogram.Observe(event.Value() / 1000) // Prometheus presumes seconds, statsd uses milliseconds.
				eventStats.WithLabelValues("timer").Inc()
			} else {
				log.Debugf(regErrF, metricName, err)
				conflictingEventStats.WithLabelValues("timer").Inc()
			}

		case mapper.TimerTypeDefault, mapper.TimerTypeSummary:
			summary, err := b.registry.getSummary(metricName, prometheusLabels, help, mapping)
			if err == nil {
				summary.Observe(event.Value() / 1000) // Prometheus presumes seconds, statsd uses milliseconds.
				eventStats.WithLabelValues("timer").Inc()
			} else {
				log.Debugf(regErrF, metricName, err)
				conflictingEventStats.WithLabelValues("timer").Inc()
			}

		default:
			panic(fmt.Sprintf("unknown timer type '%s'", t))
		}

	default:
		log.Debugln("Unsupported event type")
		eventStats.WithLabelValues("illegal").Inc()
	}
}
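
// For reference, handleEvent's behaviour is driven by the mapping
// configuration loaded into b.mapper. An illustrative (not exhaustive)
// mapping entry in the statsd_exporter mapping format:
//
//	mappings:
//	- match: "test.dispatcher.*.*.*"
//	  name: "dispatcher_events_total"
//	  labels:
//	    processor: "$1"
//	    action: "$2"
//	    outcome: "$3"
//
// A statsd metric such as "test.dispatcher.FooProcessor.send.success" would
// then be exported as dispatcher_events_total with those three labels.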

func NewExporter(mapper *mapper.MetricMapper) *Exporter {
	return &Exporter{
		mapper:   mapper,
		registry: newRegistry(mapper),
	}
}

func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (Event, error) {
	switch statType {
	case "c":
		return &CounterEvent{
			metricName: metric,
			value:      float64(value),
			labels:     labels,
		}, nil
	case "g":
		return &GaugeEvent{
			metricName: metric,
			value:      float64(value),
			relative:   relative,
			labels:     labels,
		}, nil
	case "ms", "h", "d":
		return &TimerEvent{
			metricName: metric,
			value:      float64(value),
			labels:     labels,
		}, nil
	case "s":
		return nil, fmt.Errorf("no support for StatsD sets")
	default:
		return nil, fmt.Errorf("bad stat type %s", statType)
	}
}
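
// A sketch of the mapping from statsd type suffixes to event types, matching
// the switch above:
//
//	buildEvent("c", "requests", 1, false, nil)   // *CounterEvent
//	buildEvent("g", "temp", -2, true, nil)       // *GaugeEvent, relative (-2)
//	buildEvent("ms", "latency", 350, false, nil) // *TimerEvent ("h" and "d" behave the same)
//	buildEvent("s", "uniques", 1, false, nil)    // error: sets are unsupported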

func handleDogStatsDTagToKeyValue(labels map[string]string, component, tag string) {
	// Bail early if the tag is empty.
	if len(tag) == 0 {
		tagErrors.Inc()
		log.Debugf("Malformed or empty DogStatsD tag %s in component %s", tag, component)
		return
	}
	// Skip the leading '#' if present.
	if tag[0] == '#' {
		tag = tag[1:]
	}

	// Find the first colon and split the tag into key and value.
	var k, v string
	for i, c := range tag {
		if c == ':' {
			k = tag[0:i]
			v = tag[(i + 1):]
			break
		}
	}
	// If either part is empty, the tag had no colon or nothing on one side of
	// it; count the error and skip this tag.
	if len(k) == 0 || len(v) == 0 {
		tagErrors.Inc()
		log.Debugf("Malformed or empty DogStatsD tag %s in component %s", tag, component)
		return
	}

	labels[escapeMetricName(k)] = v

	return
}

func parseDogStatsDTagsToLabels(component string) map[string]string {
	labels := map[string]string{}
	tagsReceived.Inc()

	lastTagEndIndex := 0
	for i, c := range component {
		if c == ',' {
			tag := component[lastTagEndIndex:i]
			lastTagEndIndex = i + 1
			handleDogStatsDTagToKeyValue(labels, component, tag)
		}
	}

	// If we're not off the end of the string, add the last tag
	if lastTagEndIndex < len(component) {
		tag := component[lastTagEndIndex:]
		handleDogStatsDTagToKeyValue(labels, component, tag)
	}

	return labels
}
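
// An illustrative parse, assuming a DogStatsD-style tag section (the leading
// '#' has already been stripped by the caller in lineToEvents):
//
//	parseDogStatsDTagsToLabels("env:prod,region:us-east-1")
//	// => map[string]string{"env": "prod", "region": "us-east-1"}
//
// Keys pass through escapeMetricName, so a tag like "app.name:web" becomes
// the label "app_name".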

func lineToEvents(line string) Events {
	events := Events{}
	if line == "" {
		return events
	}

	elements := strings.SplitN(line, ":", 2)
	if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
		sampleErrors.WithLabelValues("malformed_line").Inc()
		log.Debugln("Bad line from StatsD:", line)
		return events
	}
	metric := elements[0]
	var samples []string
	if strings.Contains(elements[1], "|#") {
		// using datadog extensions, disable multi-metrics
		samples = elements[1:]
	} else {
		samples = strings.Split(elements[1], ":")
	}
samples:
	for _, sample := range samples {
		samplesReceived.Inc()
		components := strings.Split(sample, "|")
		samplingFactor := 1.0
		if len(components) < 2 || len(components) > 4 {
			sampleErrors.WithLabelValues("malformed_component").Inc()
			log.Debugln("Bad component on line:", line)
			continue
		}
		valueStr, statType := components[0], components[1]

		var relative = false
		if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 {
			relative = true
		}

		value, err := strconv.ParseFloat(valueStr, 64)
		if err != nil {
			log.Debugf("Bad value %s on line: %s", valueStr, line)
			sampleErrors.WithLabelValues("malformed_value").Inc()
			continue
		}

		multiplyEvents := 1
		labels := map[string]string{}
		if len(components) >= 3 {
			for _, component := range components[2:] {
				if len(component) == 0 {
					log.Debugln("Empty component on line: ", line)
					sampleErrors.WithLabelValues("malformed_component").Inc()
					continue samples
				}
			}

			for _, component := range components[2:] {
				switch component[0] {
				case '@':
					if statType != "c" && statType != "ms" {
						log.Debugln("Illegal sampling factor for non-counter metric on line", line)
						sampleErrors.WithLabelValues("illegal_sample_factor").Inc()
						continue
					}
					samplingFactor, err = strconv.ParseFloat(component[1:], 64)
					if err != nil {
						log.Debugf("Invalid sampling factor %s on line %s", component[1:], line)
						sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
					}
					if samplingFactor == 0 {
						samplingFactor = 1
					}

					if statType == "c" {
						value /= samplingFactor
					} else if statType == "ms" {
						multiplyEvents = int(1 / samplingFactor)
					}
				case '#':
					labels = parseDogStatsDTagsToLabels(component[1:])
				default:
					log.Debugf("Invalid sampling factor or tag section %s on line %s", components[2], line)
					sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
					continue
				}
			}
		}

		for i := 0; i < multiplyEvents; i++ {
			event, err := buildEvent(statType, metric, value, relative, labels)
			if err != nil {
				log.Debugf("Error building event on line %s: %s", line, err)
				sampleErrors.WithLabelValues("illegal_event").Inc()
				continue
			}
			events = append(events, event)
		}
	}
	return events
}
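
// A worked example of the parsing above (sketch only): the line
//
//	"api.latency:350|ms|@0.5|#env:prod"
//
// splits into metric "api.latency" and a single sample with value 350, type
// "ms", a sampling factor of 0.5 (so two TimerEvents are emitted to
// compensate), and the label env="prod".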

type StatsDUDPListener struct {
	conn         *net.UDPConn
	eventHandler eventHandler
}

func (l *StatsDUDPListener) SetEventHandler(eh eventHandler) {
	l.eventHandler = eh
}

func (l *StatsDUDPListener) Listen() {
	buf := make([]byte, 65535)
	for {
		n, _, err := l.conn.ReadFromUDP(buf)
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			log.Error(err)
			return
		}
		l.handlePacket(buf[0:n])
	}
}

func (l *StatsDUDPListener) handlePacket(packet []byte) {
	udpPackets.Inc()
	lines := strings.Split(string(packet), "\n")
	for _, line := range lines {
		linesReceived.Inc()
		l.eventHandler.queue(lineToEvents(line))
	}
}

type StatsDTCPListener struct {
	conn         *net.TCPListener
	eventHandler eventHandler
}

func (l *StatsDTCPListener) SetEventHandler(eh eventHandler) {
	l.eventHandler = eh
}

func (l *StatsDTCPListener) Listen() {
	for {
		c, err := l.conn.AcceptTCP()
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			log.Fatalf("AcceptTCP failed: %v", err)
		}
		go l.handleConn(c)
	}
}

func (l *StatsDTCPListener) handleConn(c *net.TCPConn) {
	defer c.Close()

	tcpConnections.Inc()

	r := bufio.NewReader(c)
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			if err != io.EOF {
				tcpErrors.Inc()
				log.Debugf("Read %s failed: %v", c.RemoteAddr(), err)
			}
			break
		}
		if isPrefix {
			tcpLineTooLong.Inc()
			log.Debugf("Read %s failed: line too long", c.RemoteAddr())
			break
		}
		linesReceived.Inc()
		l.eventHandler.queue(lineToEvents(string(line)))
	}
}

type StatsDUnixgramListener struct {
	conn         *net.UnixConn
	eventHandler eventHandler
}

func (l *StatsDUnixgramListener) SetEventHandler(eh eventHandler) {
	l.eventHandler = eh
}

func (l *StatsDUnixgramListener) Listen() {
	buf := make([]byte, 65535)
	for {
		n, _, err := l.conn.ReadFromUnix(buf)
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			log.Fatal(err)
		}
		l.handlePacket(buf[:n])
	}
}

func (l *StatsDUnixgramListener) handlePacket(packet []byte) {
	unixgramPackets.Inc()
	lines := strings.Split(string(packet), "\n")
	for _, line := range lines {
		linesReceived.Inc()
		l.eventHandler.queue(lineToEvents(string(line)))
	}
}