2016-03-30 01:55:14 +00:00
|
|
|
// Copyright 2013 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2017-08-01 10:21:00 +00:00
|
|
|
"fmt"
|
|
|
|
"net"
|
2016-03-30 01:55:14 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
2017-08-15 15:51:13 +00:00
|
|
|
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2018-11-27 12:41:04 +00:00
|
|
|
dto "github.com/prometheus/client_model/go"
|
2018-08-10 12:28:38 +00:00
|
|
|
|
2018-12-19 05:21:43 +00:00
|
|
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
2018-08-14 09:20:00 +00:00
|
|
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
2016-03-30 01:55:14 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// TestNegativeCounter validates when we send a negative
// number to a counter that we no longer panic the Exporter Listener.
func TestNegativeCounter(t *testing.T) {
	// The client_golang counter implementation panics on a decrease; if the
	// exporter fails to guard against negative counter events, that panic
	// propagates here and is caught by this deferred recover.
	defer func() {
		if e := recover(); e != nil {
			err := e.(error)
			if err.Error() == "counter cannot decrease in value" {
				t.Fatalf("Counter was negative and causes a panic.")
			} else {
				t.Fatalf("Unknown panic and error: %q", err.Error())
			}
		}
	}()

	events := make(chan Events)
	go func() {
		// Send a single counter event with a negative value, then close the
		// channel so ex.Listen below returns.
		c := Events{
			&CounterEvent{
				metricName: "foo",
				value:      -1,
			},
		}
		events <- c
		close(events)
	}()

	// Snapshot the telemetry counter before listening so the delta can be
	// checked afterwards.
	errorCounter := errorEventStats.WithLabelValues("illegal_negative_counter")
	prev := getTelemetryCounterValue(errorCounter)

	testMapper := mapper.MetricMapper{}
	testMapper.InitCache(0)

	ex := NewExporter(&testMapper)
	ex.Listen(events)

	// Exactly one illegal-negative-counter error should have been recorded.
	updated := getTelemetryCounterValue(errorCounter)
	if updated-prev != 1 {
		t.Fatal("Illegal negative counter error not counted")
	}
}
|
2016-07-15 14:05:47 +00:00
|
|
|
|
2019-03-28 21:51:12 +00:00
|
|
|
// TestInconsistentLabelSets validates that the exporter will register
// and record metrics with the same metric name but inconsistent label
// sets e.g foo{a="1"} and foo{b="1"}
func TestInconsistentLabelSets(t *testing.T) {
	firstLabelSet := make(map[string]string)
	secondLabelSet := make(map[string]string)
	// One name per metric type; "histogram_test" is produced by the mapping
	// below from the "histogram.test" timer events.
	metricNames := [4]string{"counter_test", "gauge_test", "histogram_test", "summary_test"}

	firstLabelSet["foo"] = "1"
	// The second set shares "foo" but adds "bar", making it inconsistent
	// with the first set for the same metric name.
	secondLabelSet["foo"] = "1"
	secondLabelSet["bar"] = "2"

	events := make(chan Events)
	go func() {
		// For each metric type, emit one event per label set under the same
		// metric name.
		c := Events{
			&CounterEvent{
				metricName: "counter_test",
				value:      1,
				labels:     firstLabelSet,
			},
			&CounterEvent{
				metricName: "counter_test",
				value:      1,
				labels:     secondLabelSet,
			},
			&GaugeEvent{
				metricName: "gauge_test",
				value:      1,
				labels:     firstLabelSet,
			},
			&GaugeEvent{
				metricName: "gauge_test",
				value:      1,
				labels:     secondLabelSet,
			},
			&TimerEvent{
				metricName: "histogram.test",
				value:      1,
				labels:     firstLabelSet,
			},
			&TimerEvent{
				metricName: "histogram.test",
				value:      1,
				labels:     secondLabelSet,
			},
			&TimerEvent{
				metricName: "summary_test",
				value:      1,
				labels:     firstLabelSet,
			},
			&TimerEvent{
				metricName: "summary_test",
				value:      1,
				labels:     secondLabelSet,
			},
		}
		events <- c
		close(events)
	}()

	// Map "histogram.test" timers onto a histogram named "histogram_test";
	// unmapped timers default to summaries ("summary_test").
	config := `
mappings:
- match: histogram.test
  timer_type: histogram
  name: "histogram_test"
`
	testMapper := &mapper.MetricMapper{}
	err := testMapper.InitFromYAMLString(config, 0)
	if err != nil {
		t.Fatalf("Config load error: %s %s", config, err)
	}

	ex := NewExporter(testMapper)
	ex.Listen(events)

	metrics, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatalf("Cannot gather from DefaultGatherer: %v", err)
	}

	// Both label sets must have produced a time series for every metric name.
	for _, metricName := range metricNames {
		firstMetric := getFloat64(metrics, metricName, firstLabelSet)
		secondMetric := getFloat64(metrics, metricName, secondLabelSet)

		if firstMetric == nil {
			t.Fatalf("Could not find time series with first label set for metric: %s", metricName)
		}
		if secondMetric == nil {
			t.Fatalf("Could not find time series with second label set for metric: %s", metricName)
		}
	}
}
|
|
|
|
|
2019-05-14 20:04:07 +00:00
|
|
|
// TestConflictingMetrics validates that the exporter will not register metrics
// of different types that have overlapping names.
func TestConflictingMetrics(t *testing.T) {
	// Each scenario sends its events in order; `expected` holds, for the
	// first len(expected) events, the value the gathered time series should
	// carry — later events that conflict with an already registered type are
	// expected to be dropped.
	scenarios := []struct {
		name     string
		expected []float64
		in       Events
	}{
		{
			name:     "counter vs gauge",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "cvg_test",
					value:      1,
				},
				&GaugeEvent{
					metricName: "cvg_test",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs gauge with different labels",
			expected: []float64{1, 2},
			in: Events{
				&CounterEvent{
					metricName: "cvgl_test",
					value:      1,
					labels:     map[string]string{"tag": "1"},
				},
				&CounterEvent{
					metricName: "cvgl_test",
					value:      2,
					labels:     map[string]string{"tag": "2"},
				},
				&GaugeEvent{
					metricName: "cvgl_test",
					value:      3,
					labels:     map[string]string{"tag": "1"},
				},
			},
		},
		{
			name:     "counter vs gauge with same labels",
			expected: []float64{3},
			in: Events{
				&CounterEvent{
					metricName: "cvgsl_test",
					value:      1,
					labels:     map[string]string{"tag": "1"},
				},
				&CounterEvent{
					metricName: "cvgsl_test",
					value:      2,
					labels:     map[string]string{"tag": "1"},
				},
				&GaugeEvent{
					metricName: "cvgsl_test",
					value:      3,
					labels:     map[string]string{"tag": "1"},
				},
			},
		},
		{
			name:     "gauge vs counter",
			expected: []float64{2},
			in: Events{
				&GaugeEvent{
					metricName: "gvc_test",
					value:      2,
				},
				&CounterEvent{
					metricName: "gvc_test",
					value:      1,
				},
			},
		},
		{
			// The mapping below turns "histogram.test1" into "histogram_test1",
			// colliding with the counter registered first.
			name:     "counter vs histogram",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "histogram_test1",
					value:      1,
				},
				&TimerEvent{
					metricName: "histogram.test1",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs histogram sum",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "histogram_test1_sum",
					value:      1,
				},
				&TimerEvent{
					metricName: "histogram.test1",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs histogram count",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "histogram_test2_count",
					value:      1,
				},
				&TimerEvent{
					metricName: "histogram.test2",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs histogram bucket",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "histogram_test3_bucket",
					value:      1,
				},
				&TimerEvent{
					metricName: "histogram.test3",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs summary quantile",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "cvsq_test",
					value:      1,
				},
				&TimerEvent{
					metricName: "cvsq_test",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs summary count",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "cvsc_count",
					value:      1,
				},
				&TimerEvent{
					metricName: "cvsc",
					value:      2,
				},
			},
		},
		{
			name:     "counter vs summary sum",
			expected: []float64{1},
			in: Events{
				&CounterEvent{
					metricName: "cvss_sum",
					value:      1,
				},
				&TimerEvent{
					metricName: "cvss",
					value:      2,
				},
			},
		},
	}

	// Route "histogram.*" timers to histograms named "histogram_<suffix>".
	config := `
mappings:
- match: histogram.*
  timer_type: histogram
  name: "histogram_${1}"
`
	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			testMapper := &mapper.MetricMapper{}
			err := testMapper.InitFromYAMLString(config, 0)
			if err != nil {
				t.Fatalf("Config load error: %s %s", config, err)
			}

			events := make(chan Events)
			go func() {
				events <- s.in
				close(events)
			}()
			ex := NewExporter(testMapper)
			ex.Listen(events)

			metrics, err := prometheus.DefaultGatherer.Gather()
			if err != nil {
				t.Fatalf("Cannot gather from DefaultGatherer: %v", err)
			}

			// The i-th expected value corresponds to the i-th input event's
			// (mapped) name and labels.
			for i, e := range s.expected {
				mn := s.in[i].MetricName()
				m := getFloat64(metrics, mn, s.in[i].Labels())

				if m == nil {
					t.Fatalf("Could not find time series with metric name '%v'", mn)
				}

				if *m != e {
					t.Fatalf("Expected to get %v, but got %v instead", e, *m)
				}
			}
		})
	}
}
|
|
|
|
|
2019-03-23 18:14:08 +00:00
|
|
|
// TestEmptyStringMetric validates when a metric name ends up
// being the empty string after applying the match replacements
// that we don't panic the Exporter Listener.
func TestEmptyStringMetric(t *testing.T) {
	events := make(chan Events)
	go func() {
		c := Events{
			&CounterEvent{
				metricName: "foo_bar",
				value:      1,
			},
		}
		events <- c
		close(events)
	}()

	// The regex has no capture group, so "${1}" expands to the empty string,
	// producing an empty metric name after mapping.
	config := `
mappings:
- match: .*_bar
  match_type: regex
  name: "${1}"
`
	testMapper := &mapper.MetricMapper{}
	err := testMapper.InitFromYAMLString(config, 0)
	if err != nil {
		t.Fatalf("Config load error: %s %s", config, err)
	}

	// Snapshot the telemetry counter so the delta can be verified below.
	errorCounter := errorEventStats.WithLabelValues("empty_metric_name")
	prev := getTelemetryCounterValue(errorCounter)

	ex := NewExporter(testMapper)
	ex.Listen(events)

	// Exactly one empty-metric-name error event should have been recorded.
	updated := getTelemetryCounterValue(errorCounter)
	if updated-prev != 1 {
		t.Fatal("Empty metric name error event not counted")
	}
}
|
|
|
|
|
2016-07-15 14:05:47 +00:00
|
|
|
// TestInvalidUtf8InDatadogTagValue validates robustness of exporter listener
// against datadog tags with invalid tag values.
// It sends the same tags first with a valid value, then with an invalid one.
// The exporter should not panic, but drop the invalid event
func TestInvalidUtf8InDatadogTagValue(t *testing.T) {
	defer func() {
		if e := recover(); e != nil {
			err := e.(error)
			t.Fatalf("Exporter listener should not panic on bad utf8: %q", err.Error())
		}
	}()

	events := make(chan Events)

	go func() {
		// Exercise both the UDP and (mocked) TCP packet paths with the same
		// payload: one valid tag value followed by one with invalid UTF-8
		// (\xc3\x28 is a malformed two-byte sequence).
		for _, l := range []statsDPacketHandler{&StatsDUDPListener{}, &mockStatsDTCPListener{}} {
			l.handlePacket([]byte("bar:200|c|#tag:value\nbar:200|c|#tag:\xc3\x28invalid"), events)
		}
		close(events)
	}()

	testMapper := mapper.MetricMapper{}
	testMapper.InitCache(0)

	ex := NewExporter(&testMapper)
	ex.Listen(events)
}
|
|
|
|
|
2019-05-14 00:14:42 +00:00
|
|
|
// In the case of someone starting the statsd exporter with no mapping file specified
// which is valid, we want to make sure that the default quantile metrics are generated
// as well as the sum/count metrics
func TestSummaryWithQuantilesEmptyMapping(t *testing.T) {
	// Start exporter with a synchronous channel
	events := make(chan Events)
	go func() {
		// Empty mapper: timer events fall through to the default timer type.
		testMapper := mapper.MetricMapper{}
		testMapper.InitCache(0)

		ex := NewExporter(&testMapper)
		ex.Listen(events)
	}()

	name := "default_foo"
	c := Events{
		&TimerEvent{
			metricName: name,
			value:      300,
		},
	}
	events <- c
	// The extra empty batch synchronizes with the listener goroutine before
	// the channel is closed (the previous event is handled once this send
	// is accepted).
	events <- Events{}
	close(events)

	metrics, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatal("Gather should not fail")
	}

	// Locate the gathered family for our metric name.
	var metricFamily *dto.MetricFamily
	for _, m := range metrics {
		if *m.Name == name {
			metricFamily = m
			break
		}
	}

	if metricFamily == nil {
		t.Fatal("Metric could not be found")
	}

	// A summary produced without any mapping must still expose quantiles.
	quantiles := metricFamily.Metric[0].Summary.Quantile
	if len(quantiles) == 0 {
		t.Fatal("Summary has no quantiles available")
	}
}
|
|
|
|
|
2017-08-14 22:20:45 +00:00
|
|
|
// TestHistogramUnits checks that timer observations recorded as histograms
// are scaled from milliseconds into seconds.
func TestHistogramUnits(t *testing.T) {
	// Start exporter with a synchronous channel
	events := make(chan Events)
	go func() {
		testMapper := mapper.MetricMapper{}
		testMapper.InitCache(0)
		ex := NewExporter(&testMapper)
		// Force histograms for timers so the scaling path is exercised.
		ex.mapper.Defaults.TimerType = mapper.TimerTypeHistogram
		ex.Listen(events)
	}()

	// Synchronously send a statsd event to wait for handleEvent execution.
	// Then close events channel to stop a listener.
	name := "foo"
	c := Events{
		&TimerEvent{
			metricName: name,
			value:      300,
		},
	}
	events <- c
	events <- Events{}
	close(events)

	// Check histogram value
	metrics, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatalf("Cannot gather from DefaultGatherer: %v", err)
	}
	value := getFloat64(metrics, name, prometheus.Labels{})
	if value == nil {
		t.Fatal("Histogram value should not be nil")
	}
	// 300 (ms) must have been converted to 0.300 (s).
	if *value == 300 {
		t.Fatalf("Histogram observations not scaled into Seconds")
	} else if *value != .300 {
		t.Fatalf("Received unexpected value for histogram observation %f != .300", *value)
	}
}
|
|
|
|
|
2017-08-01 10:21:00 +00:00
|
|
|
// statsDPacketHandler abstracts over the UDP and (mocked) TCP listeners so
// tests can feed the same raw statsd packet through either transport.
type statsDPacketHandler interface {
	handlePacket(packet []byte, e chan<- Events)
}
|
|
|
|
|
|
|
|
// mockStatsDTCPListener wraps StatsDTCPListener with a handlePacket method
// that delivers a packet over a real loopback TCP connection.
type mockStatsDTCPListener struct {
	StatsDTCPListener
}
|
|
|
|
|
|
|
|
func (ml *mockStatsDTCPListener) handlePacket(packet []byte, e chan<- Events) {
|
2017-11-10 14:04:28 +00:00
|
|
|
// Forcing IPv4 because the TravisCI build environment does not have IPv6
|
|
|
|
// addresses.
|
|
|
|
lc, err := net.ListenTCP("tcp4", nil)
|
2017-08-01 10:21:00 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(fmt.Sprintf("mockStatsDTCPListener: listen failed: %v", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
defer lc.Close()
|
2016-07-15 14:05:47 +00:00
|
|
|
|
|
|
|
go func() {
|
2017-08-01 10:21:00 +00:00
|
|
|
cc, err := net.DialTCP("tcp", nil, lc.Addr().(*net.TCPAddr))
|
|
|
|
if err != nil {
|
|
|
|
panic(fmt.Sprintf("mockStatsDTCPListener: dial failed: %v", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
defer cc.Close()
|
|
|
|
|
|
|
|
n, err := cc.Write(packet)
|
|
|
|
if err != nil || n != len(packet) {
|
|
|
|
panic(fmt.Sprintf("mockStatsDTCPListener: write failed: %v,%d", err, n))
|
|
|
|
}
|
2016-07-15 14:05:47 +00:00
|
|
|
}()
|
|
|
|
|
2017-08-01 10:21:00 +00:00
|
|
|
sc, err := lc.AcceptTCP()
|
|
|
|
if err != nil {
|
|
|
|
panic(fmt.Sprintf("mockStatsDTCPListener: accept failed: %v", err))
|
|
|
|
}
|
|
|
|
ml.handleConn(sc, e)
|
2016-07-15 14:05:47 +00:00
|
|
|
}
|
2017-11-10 20:34:10 +00:00
|
|
|
|
|
|
|
func TestEscapeMetricName(t *testing.T) {
|
|
|
|
scenarios := map[string]string{
|
|
|
|
"clean": "clean",
|
|
|
|
"0starts_with_digit": "_0starts_with_digit",
|
|
|
|
"with_underscore": "with_underscore",
|
|
|
|
"with.dot": "with_dot",
|
|
|
|
"with😱emoji": "with_emoji",
|
|
|
|
"with.*.multiple": "with___multiple",
|
|
|
|
"test.web-server.foo.bar": "test_web_server_foo_bar",
|
2019-03-23 18:47:25 +00:00
|
|
|
"": "",
|
2017-11-10 20:34:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for in, want := range scenarios {
|
|
|
|
if got := escapeMetricName(in); want != got {
|
|
|
|
t.Errorf("expected `%s` to be escaped to `%s`, got `%s`", in, want, got)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-12-13 13:53:40 +00:00
|
|
|
|
|
|
|
// TestTtlExpiration validates expiration of time series.
// foobar metric without mapping should expire with default ttl of 1s
// bazqux metric should expire with ttl of 2s
func TestTtlExpiration(t *testing.T) {
	// Mock a time.NewTicker
	tickerCh := make(chan time.Time)
	clock.ClockInstance = &clock.Clock{
		TickerCh: tickerCh,
	}

	config := `
defaults:
  ttl: 1s
mappings:
- match: bazqux.*
  name: bazqux
  ttl: 2s
`
	// Create mapper from config and start an Exporter with a synchronous channel
	testMapper := &mapper.MetricMapper{}
	err := testMapper.InitFromYAMLString(config, 0)
	if err != nil {
		t.Fatalf("Config load error: %s %s", config, err)
	}
	events := make(chan Events)
	defer close(events)
	go func() {
		ex := NewExporter(testMapper)
		ex.Listen(events)
	}()

	ev := Events{
		// event with default ttl = 1s
		&GaugeEvent{
			metricName: "foobar",
			value:      200,
		},
		// event with ttl = 2s from a mapping
		// (42000 ms is scaled to 42 s in the gathered summary below)
		&TimerEvent{
			metricName: "bazqux.main",
			value:      42000,
		},
	}

	var metrics []*dto.MetricFamily
	var foobarValue *float64
	var bazquxValue *float64

	// Step 1. Send events with statsd metrics.
	// Send empty Events to wait for events are handled.
	// saveLabelValues will use fake instant as a lastRegisteredAt time.
	clock.ClockInstance.Instant = time.Unix(0, 0)
	events <- ev
	events <- Events{}

	// Check values
	metrics, err = prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatal("Gather should not fail")
	}
	foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{})
	bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{})
	if foobarValue == nil || bazquxValue == nil {
		t.Fatalf("Gauge `foobar` and Summary `bazqux` should be gathered")
	}
	if *foobarValue != 200 {
		t.Fatalf("Gauge `foobar` observation %f is not expected. Should be 200", *foobarValue)
	}
	if *bazquxValue != 42 {
		t.Fatalf("Summary `bazqux` observation %f is not expected. Should be 42", *bazquxValue)
	}

	// Step 2. Increase Instant to emulate metrics expiration after 1s
	// Fire the mocked ticker so the expiration pass runs; the empty batch
	// synchronizes with the listener before gathering.
	clock.ClockInstance.Instant = time.Unix(1, 10)
	clock.ClockInstance.TickerCh <- time.Unix(0, 0)
	events <- Events{}

	// Check values: only foobar (ttl 1s) should be gone.
	metrics, err = prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatal("Gather should not fail")
	}
	foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{})
	bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{})
	if foobarValue != nil {
		t.Fatalf("Gauge `foobar` should be expired")
	}
	if bazquxValue == nil {
		t.Fatalf("Summary `bazqux` should be gathered")
	}
	if *bazquxValue != 42 {
		t.Fatalf("Summary `bazqux` observation %f is not expected. Should be 42", *bazquxValue)
	}

	// Step 3. Increase Instant to emulate metrics expiration after 2s
	clock.ClockInstance.Instant = time.Unix(2, 200)
	clock.ClockInstance.TickerCh <- time.Unix(0, 0)
	events <- Events{}

	// Check values: now both series must have expired.
	metrics, err = prometheus.DefaultGatherer.Gather()
	if err != nil {
		t.Fatal("Gather should not fail")
	}
	foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{})
	bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{})
	if bazquxValue != nil {
		t.Fatalf("Summary `bazqux` should be expired")
	}
	if foobarValue != nil {
		t.Fatalf("Gauge `foobar` should not be gathered after expiration")
	}
}
|
|
|
|
|
|
|
|
// getFloat64 search for metric by name in array of MetricFamily and then search a value by labels.
|
|
|
|
// Method returns a value or nil if metric is not found.
|
|
|
|
func getFloat64(metrics []*dto.MetricFamily, name string, labels prometheus.Labels) *float64 {
|
|
|
|
var metricFamily *dto.MetricFamily
|
|
|
|
for _, m := range metrics {
|
|
|
|
if *m.Name == name {
|
|
|
|
metricFamily = m
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if metricFamily == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var metric *dto.Metric
|
|
|
|
labelsHash := hashNameAndLabels(name, labels)
|
|
|
|
for _, m := range metricFamily.Metric {
|
|
|
|
h := hashNameAndLabels(name, labelPairsAsLabels(m.GetLabel()))
|
|
|
|
if h == labelsHash {
|
|
|
|
metric = m
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if metric == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var value float64
|
|
|
|
if metric.Gauge != nil {
|
|
|
|
value = metric.Gauge.GetValue()
|
|
|
|
return &value
|
|
|
|
}
|
|
|
|
if metric.Counter != nil {
|
|
|
|
value = metric.Counter.GetValue()
|
|
|
|
return &value
|
|
|
|
}
|
|
|
|
if metric.Histogram != nil {
|
|
|
|
value = metric.Histogram.GetSampleSum()
|
|
|
|
return &value
|
|
|
|
}
|
|
|
|
if metric.Summary != nil {
|
|
|
|
value = metric.Summary.GetSampleSum()
|
|
|
|
return &value
|
|
|
|
}
|
|
|
|
if metric.Untyped != nil {
|
|
|
|
value = metric.Untyped.GetValue()
|
|
|
|
return &value
|
|
|
|
}
|
|
|
|
panic(fmt.Errorf("collected a non-gauge/counter/histogram/summary/untyped metric: %s", metric))
|
|
|
|
}
|
|
|
|
|
|
|
|
func labelPairsAsLabels(pairs []*dto.LabelPair) (labels prometheus.Labels) {
|
|
|
|
labels = prometheus.Labels{}
|
|
|
|
for _, pair := range pairs {
|
|
|
|
if pair.Name == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
value := ""
|
|
|
|
if pair.Value != nil {
|
|
|
|
value = *pair.Value
|
|
|
|
}
|
|
|
|
labels[*pair.Name] = value
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2019-03-25 23:44:17 +00:00
|
|
|
|
|
|
|
func getTelemetryCounterValue(counter prometheus.Counter) float64 {
|
|
|
|
var metric dto.Metric
|
|
|
|
err := counter.Write(&metric)
|
|
|
|
if err != nil {
|
|
|
|
return 0.0
|
|
|
|
}
|
|
|
|
return metric.Counter.GetValue()
|
|
|
|
}
|
2019-04-09 15:56:58 +00:00
|
|
|
|
|
|
|
func BenchmarkEscapeMetricName(b *testing.B) {
|
|
|
|
scenarios := []string{
|
|
|
|
"clean",
|
|
|
|
"0starts_with_digit",
|
|
|
|
"with_underscore",
|
|
|
|
"with.dot",
|
|
|
|
"with😱emoji",
|
|
|
|
"with.*.multiple",
|
|
|
|
"test.web-server.foo.bar",
|
|
|
|
"",
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, s := range scenarios {
|
|
|
|
b.Run(s, func(b *testing.B) {
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
escapeMetricName(s)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2019-05-13 15:03:33 +00:00
|
|
|
|
|
|
|
func BenchmarkParseDogStatsDTagsToLabels(b *testing.B) {
|
|
|
|
scenarios := map[string]string{
|
|
|
|
"1 tag w/hash": "#test:tag",
|
|
|
|
"1 tag w/o hash": "test:tag",
|
|
|
|
"2 tags, mixed hashes": "tag1:test,#tag2:test",
|
|
|
|
"3 long tags": "tag1:reallylongtagthisisreallylong,tag2:anotherreallylongtag,tag3:thisisyetanotherextraordinarilylongtag",
|
|
|
|
"a-z tags": "a:0,b:1,c:2,d:3,e:4,f:5,g:6,h:7,i:8,j:9,k:0,l:1,m:2,n:3,o:4,p:5,q:6,r:7,s:8,t:9,u:0,v:1,w:2,x:3,y:4,z:5",
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tags := range scenarios {
|
|
|
|
b.Run(name, func(b *testing.B) {
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
parseDogStatsDTagsToLabels(tags)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|