// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"os"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/statsd_exporter/pkg/clock"
	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

const (
	defaultHelp = "Metric autogenerated by statsd_exporter."
	regErrF     = "Failed to update metric"
)

// uncheckedCollector wraps a Collector but its Describe method yields no
// Desc. This allows incoming metrics to have inconsistent label sets.
type uncheckedCollector struct {
	c prometheus.Collector
}

func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}
func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
	u.c.Collect(c)
}
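
// Exporter consumes Events, applies the configured mapping rules, and keeps
// the resulting Prometheus metrics up to date in its registry. A minimal
// usage sketch (the mapper, logger, and event source come from elsewhere):
//
//	events := make(chan Events)
//	go NewExporter(m, logger).Listen(events)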
type Exporter struct {
	mapper   *mapper.MetricMapper
	registry *registry
	logger   log.Logger
}

// Listen handles all events sent to the given channel sequentially. It
// terminates when the channel is closed.
func (b *Exporter) Listen(e <-chan Events) {
	removeStaleMetricsTicker := clock.NewTicker(time.Second)

	for {
		select {
		case <-removeStaleMetricsTicker.C:
			b.registry.removeStaleMetrics()
		case events, ok := <-e:
			if !ok {
				level.Debug(b.logger).Log("msg", "Channel is closed. Break out of Exporter.Listener.")
				removeStaleMetricsTicker.Stop()
				return
			}
			for _, event := range events {
				b.handleEvent(event)
			}
		}
	}
}

// handleEvent processes a single Event according to the configured mapping.
func (b *Exporter) handleEvent(event Event) {
	mapping, labels, present := b.mapper.GetMapping(event.MetricName(), event.MetricType())
	if mapping == nil {
		mapping = &mapper.MetricMapping{}
		if b.mapper.Defaults.Ttl != 0 {
			mapping.Ttl = b.mapper.Defaults.Ttl
		}
	}

	if mapping.Action == mapper.ActionTypeDrop {
		eventsActions.WithLabelValues("drop").Inc()
		return
	}

	help := defaultHelp
	if mapping.HelpText != "" {
		help = mapping.HelpText
	}

	metricName := ""
	prometheusLabels := event.Labels()
	if present {
		if mapping.Name == "" {
			level.Debug(b.logger).Log("msg", "The mapping generates an empty metric name", "metric_name", event.MetricName(), "match", mapping.Match)
			errorEventStats.WithLabelValues("empty_metric_name").Inc()
			return
		}
		metricName = mapper.EscapeMetricName(mapping.Name)
		for label, value := range labels {
			prometheusLabels[label] = value
		}
		eventsActions.WithLabelValues(string(mapping.Action)).Inc()
	} else {
		eventsUnmapped.Inc()
		metricName = mapper.EscapeMetricName(event.MetricName())
	}

	switch ev := event.(type) {
	case *CounterEvent:
		// We don't accept negative values for counters. Incrementing the
		// counter with a negative number would cause the exporter to panic.
		// Instead we warn and continue to the next event.
		if event.Value() < 0.0 {
			level.Debug(b.logger).Log("msg", "counter must be non-negative value", "metric", metricName, "event_value", event.Value())
			errorEventStats.WithLabelValues("illegal_negative_counter").Inc()
			return
		}

		counter, err := b.registry.getCounter(metricName, prometheusLabels, help, mapping)
		if err == nil {
			counter.Add(event.Value())
			eventStats.WithLabelValues("counter").Inc()
		} else {
			level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
			conflictingEventStats.WithLabelValues("counter").Inc()
		}

	case *GaugeEvent:
		gauge, err := b.registry.getGauge(metricName, prometheusLabels, help, mapping)

		if err == nil {
			if ev.relative {
				gauge.Add(event.Value())
			} else {
				gauge.Set(event.Value())
			}
			eventStats.WithLabelValues("gauge").Inc()
		} else {
			level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
			conflictingEventStats.WithLabelValues("gauge").Inc()
		}

	case *TimerEvent:
		t := mapper.TimerTypeDefault
		if mapping != nil {
			t = mapping.TimerType
		}
		if t == mapper.TimerTypeDefault {
			t = b.mapper.Defaults.TimerType
		}

		switch t {
		case mapper.TimerTypeHistogram:
			histogram, err := b.registry.getHistogram(metricName, prometheusLabels, help, mapping)
			if err == nil {
				histogram.Observe(event.Value() / 1000) // Prometheus presumes seconds, statsd uses milliseconds.
				eventStats.WithLabelValues("timer").Inc()
			} else {
				level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
				conflictingEventStats.WithLabelValues("timer").Inc()
			}

		case mapper.TimerTypeDefault, mapper.TimerTypeSummary:
			summary, err := b.registry.getSummary(metricName, prometheusLabels, help, mapping)
			if err == nil {
				summary.Observe(event.Value() / 1000) // Prometheus presumes seconds, statsd uses milliseconds.
				eventStats.WithLabelValues("timer").Inc()
			} else {
				level.Debug(b.logger).Log("msg", regErrF, "metric", metricName, "error", err)
				conflictingEventStats.WithLabelValues("timer").Inc()
			}

		default:
			level.Error(b.logger).Log("msg", "unknown timer type", "type", t)
			os.Exit(1)
		}

	default:
		level.Debug(b.logger).Log("msg", "Unsupported event type")
		eventStats.WithLabelValues("illegal").Inc()
	}
}
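
// NewExporter creates an Exporter that maps and tracks metrics with the given
// mapper, backed by a fresh metric registry.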
func NewExporter(mapper *mapper.MetricMapper, logger log.Logger) *Exporter {
	return &Exporter{
		mapper:   mapper,
		registry: newRegistry(mapper),
		logger:   logger,
	}
}
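
// buildEvent constructs an Event from a parsed statsd sample. statType is the
// statsd type code: "c" (counter), "g" (gauge), "ms"/"h"/"d" (all treated as
// timers), or "s" (sets, which are not supported).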
func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (Event, error) {
	switch statType {
	case "c":
		return &CounterEvent{
			metricName: metric,
			value:      value,
			labels:     labels,
		}, nil
	case "g":
		return &GaugeEvent{
			metricName: metric,
			value:      value,
			relative:   relative,
			labels:     labels,
		}, nil
	case "ms", "h", "d":
		return &TimerEvent{
			metricName: metric,
			value:      value,
			labels:     labels,
		}, nil
	case "s":
		return nil, fmt.Errorf("no support for StatsD sets")
	default:
		return nil, fmt.Errorf("bad stat type %s", statType)
	}
}
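
// parseTag parses a single key/value tag (split on the given separator rune)
// into labels, counting and logging any malformed tag it encounters.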
func parseTag(component, tag string, separator rune, labels map[string]string, logger log.Logger) {
	// Entirely empty tag is an error
	if len(tag) == 0 {
		tagErrors.Inc()
		level.Debug(logger).Log("msg", "Empty name tag", "component", component)
		return
	}

	for i, c := range tag {
		if c == separator {
			k := tag[:i]
			v := tag[i+1:]

			if len(k) == 0 || len(v) == 0 {
				// Empty key or value is an error
				tagErrors.Inc()
				level.Debug(logger).Log("msg", "Malformed name tag", "k", k, "v", v, "component", component)
			} else {
				labels[mapper.EscapeMetricName(k)] = v
			}
			return
		}
	}

	// Missing separator (no value) is an error
	tagErrors.Inc()
	level.Debug(logger).Log("msg", "Malformed name tag", "tag", tag, "component", component)
}
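
// parseNameTags parses a comma-separated list of "key=value" tags embedded in
// a metric name (the Librato/InfluxDB styles).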
func parseNameTags(component string, labels map[string]string, logger log.Logger) {
	lastTagEndIndex := 0
	for i, c := range component {
		if c == ',' {
			tag := component[lastTagEndIndex:i]
			lastTagEndIndex = i + 1
			parseTag(component, tag, '=', labels, logger)
		}
	}

	// If we're not off the end of the string, add the last tag
	if lastTagEndIndex < len(component) {
		tag := component[lastTagEndIndex:]
		parseTag(component, tag, '=', labels, logger)
	}
}
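
// trimLeftHash strips a leading '#', which DogStatsD uses to introduce the
// tag section.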
func trimLeftHash(s string) string {
	if s != "" && s[0] == '#' {
		return s[1:]
	}
	return s
}
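
// parseDogStatsDTags parses a comma-separated list of DogStatsD-style
// "key:value" tags, e.g. "foo:bar,baz:quux".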
func parseDogStatsDTags(component string, labels map[string]string, logger log.Logger) {
	lastTagEndIndex := 0
	for i, c := range component {
		if c == ',' {
			tag := component[lastTagEndIndex:i]
			lastTagEndIndex = i + 1
			parseTag(component, trimLeftHash(tag), ':', labels, logger)
		}
	}

	// If we're not off the end of the string, add the last tag
	if lastTagEndIndex < len(component) {
		tag := component[lastTagEndIndex:]
		parseTag(component, trimLeftHash(tag), ':', labels, logger)
	}
}
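
// parseNameAndTags splits any tags embedded in a metric name into labels and
// returns the bare metric name.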
func parseNameAndTags(name string, labels map[string]string, logger log.Logger) string {
	for i, c := range name {
		// `#` delimits start of tags by Librato
		// https://www.librato.com/docs/kb/collect/collection_agents/stastd/#stat-level-tags
		// `,` delimits start of tags by InfluxDB
		// https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd
		if c == '#' || c == ',' {
			parseNameTags(name[i+1:], labels, logger)
			return name[:i]
		}
	}
	return name
}
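
// lineToEvents parses a single statsd line into zero or more Events. Lines
// have the general form "<name>:<value>|<type>[|@<sample rate>][|#<tags>]",
// e.g. "glork:320|ms|@0.1".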
func lineToEvents(line string, logger log.Logger) Events {
	events := Events{}
	if line == "" {
		return events
	}

	elements := strings.SplitN(line, ":", 2)
	if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
		sampleErrors.WithLabelValues("malformed_line").Inc()
		level.Debug(logger).Log("msg", "Bad line from StatsD", "line", line)
		return events
	}

	labels := map[string]string{}
	metric := parseNameAndTags(elements[0], labels, logger)

	var samples []string
	if strings.Contains(elements[1], "|#") {
		// using DogStatsD tags

		// don't allow mixed tagging styles
		if len(labels) > 0 {
			sampleErrors.WithLabelValues("mixed_tagging_styles").Inc()
			level.Debug(logger).Log("msg", "Bad line (multiple tagging styles) from StatsD", "line", line)
			return events
		}

		// disable multi-metrics
		samples = elements[1:]
	} else {
		samples = strings.Split(elements[1], ":")
	}

samples:
	for _, sample := range samples {
		samplesReceived.Inc()
		components := strings.Split(sample, "|")
		samplingFactor := 1.0
		if len(components) < 2 || len(components) > 4 {
			sampleErrors.WithLabelValues("malformed_component").Inc()
			level.Debug(logger).Log("msg", "Bad component", "line", line)
			continue
		}
		valueStr, statType := components[0], components[1]

		var relative = false
		if strings.HasPrefix(valueStr, "+") || strings.HasPrefix(valueStr, "-") {
			relative = true
		}

		value, err := strconv.ParseFloat(valueStr, 64)
		if err != nil {
			level.Debug(logger).Log("msg", "Bad value", "value", valueStr, "line", line)
			sampleErrors.WithLabelValues("malformed_value").Inc()
			continue
		}

		multiplyEvents := 1
		if len(components) >= 3 {
			for _, component := range components[2:] {
				if len(component) == 0 {
					level.Debug(logger).Log("msg", "Empty component", "line", line)
					sampleErrors.WithLabelValues("malformed_component").Inc()
					continue samples
				}
			}
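
			// Remaining components are either a sampling factor ("@<rate>") or
			// a DogStatsD tag section ("#<key>:<value>,..."). A sampled counter
			// is scaled up by 1/rate; a sampled timer is replayed 1/rate times
			// to preserve the weight of the distribution.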
			for _, component := range components[2:] {
				switch component[0] {
				case '@':
					samplingFactor, err = strconv.ParseFloat(component[1:], 64)
					if err != nil {
						level.Debug(logger).Log("msg", "Invalid sampling factor", "component", component[1:], "line", line)
						sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
					}
					if samplingFactor == 0 {
						samplingFactor = 1
					}

					if statType == "g" {
						continue
					} else if statType == "c" {
						value /= samplingFactor
					} else if statType == "ms" || statType == "h" || statType == "d" {
						multiplyEvents = int(1 / samplingFactor)
					}
				case '#':
					parseDogStatsDTags(component[1:], labels, logger)
				default:
					level.Debug(logger).Log("msg", "Invalid sampling factor or tag section", "component", components[2], "line", line)
					sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
					continue
				}
			}
		}

		if len(labels) > 0 {
			tagsReceived.Inc()
		}

		for i := 0; i < multiplyEvents; i++ {
			event, err := buildEvent(statType, metric, value, relative, labels)
			if err != nil {
				level.Debug(logger).Log("msg", "Error building event", "line", line, "error", err)
				sampleErrors.WithLabelValues("illegal_event").Inc()
				continue
			}
			events = append(events, event)
		}
	}
	return events
}
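
// StatsDUDPListener accepts statsd packets over UDP and queues the parsed
// events to its event handler.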
type StatsDUDPListener struct {
	conn         *net.UDPConn
	eventHandler eventHandler
	logger       log.Logger
}

func (l *StatsDUDPListener) SetEventHandler(eh eventHandler) {
	l.eventHandler = eh
}

func (l *StatsDUDPListener) Listen() {
	buf := make([]byte, 65535)
	for {
		n, _, err := l.conn.ReadFromUDP(buf)
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			level.Error(l.logger).Log("error", err)
			return
		}
		l.handlePacket(buf[0:n])
	}
}
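
// handlePacket splits a datagram into newline-separated statsd lines and
// queues the resulting events.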
func (l *StatsDUDPListener) handlePacket(packet []byte) {
	udpPackets.Inc()
	lines := strings.Split(string(packet), "\n")
	for _, line := range lines {
		linesReceived.Inc()
		l.eventHandler.queue(lineToEvents(line, l.logger))
	}
}
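
// StatsDTCPListener accepts newline-delimited statsd lines over TCP, spawning
// one handler goroutine per accepted connection.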
type StatsDTCPListener struct {
	conn         *net.TCPListener
	eventHandler eventHandler
	logger       log.Logger
}

func (l *StatsDTCPListener) SetEventHandler(eh eventHandler) {
	l.eventHandler = eh
}

func (l *StatsDTCPListener) Listen() {
	for {
		c, err := l.conn.AcceptTCP()
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			level.Error(l.logger).Log("msg", "AcceptTCP failed", "error", err)
			os.Exit(1)
		}
		go l.handleConn(c)
	}
}
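
// handleConn reads newline-delimited statsd lines from a single TCP
// connection until EOF or a read error.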
func (l *StatsDTCPListener) handleConn(c *net.TCPConn) {
	defer c.Close()

	tcpConnections.Inc()

	r := bufio.NewReader(c)
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			if err != io.EOF {
				tcpErrors.Inc()
				level.Debug(l.logger).Log("msg", "Read failed", "addr", c.RemoteAddr(), "error", err)
			}
			break
		}
		if isPrefix {
			tcpLineTooLong.Inc()
			level.Debug(l.logger).Log("msg", "Read failed: line too long", "addr", c.RemoteAddr())
			break
		}
		linesReceived.Inc()
		l.eventHandler.queue(lineToEvents(string(line), l.logger))
	}
}
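
// StatsDUnixgramListener accepts statsd datagrams on a Unix datagram socket.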
type StatsDUnixgramListener struct {
	conn         *net.UnixConn
	eventHandler eventHandler
	logger       log.Logger
}

func (l *StatsDUnixgramListener) SetEventHandler(eh eventHandler) {
	l.eventHandler = eh
}

func (l *StatsDUnixgramListener) Listen() {
	buf := make([]byte, 65535)
	for {
		n, _, err := l.conn.ReadFromUnix(buf)
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			level.Error(l.logger).Log("error", err)
			os.Exit(1)
		}
		l.handlePacket(buf[:n])
	}
}
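
// handlePacket splits a datagram into newline-separated statsd lines and
// queues the resulting events.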
func (l *StatsDUnixgramListener) handlePacket(packet []byte) {
	unixgramPackets.Inc()
	lines := strings.Split(string(packet), "\n")
	for _, line := range lines {
		linesReceived.Inc()
		l.eventHandler.queue(lineToEvents(line, l.logger))
	}
}