forked from mirrors/statsd_exporter
:Issue 234: Split into reusable packages.
Signed-off-by: Frank Davidson <frank_davidson@manulife.com> Signed-off-by: Frank Davidson <ffdavidson@gmail.com>
This commit is contained in:
parent
cb516fa69a
commit
d55b42eabb
602 changed files with 296670 additions and 294114 deletions
408
bridge_test.go
408
bridge_test.go
|
@ -18,277 +18,279 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-kit/kit/log"
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/listener"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestHandlePacket(t *testing.T) {
|
func TestHandlePacket(t *testing.T) {
|
||||||
scenarios := []struct {
|
scenarios := []struct {
|
||||||
name string
|
name string
|
||||||
in string
|
in string
|
||||||
out Events
|
out event.Events
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "empty",
|
name: "empty",
|
||||||
}, {
|
}, {
|
||||||
name: "simple counter",
|
name: "simple counter",
|
||||||
in: "foo:2|c",
|
in: "foo:2|c",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 2,
|
CValue: 2,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "simple gauge",
|
name: "simple gauge",
|
||||||
in: "foo:3|g",
|
in: "foo:3|g",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "foo",
|
GMetricName: "foo",
|
||||||
value: 3,
|
GValue: 3,
|
||||||
labels: map[string]string{},
|
GLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "gauge with sampling",
|
name: "gauge with sampling",
|
||||||
in: "foo:3|g|@0.2",
|
in: "foo:3|g|@0.2",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "foo",
|
GMetricName: "foo",
|
||||||
value: 3,
|
GValue: 3,
|
||||||
labels: map[string]string{},
|
GLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "gauge decrement",
|
name: "gauge decrement",
|
||||||
in: "foo:-10|g",
|
in: "foo:-10|g",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "foo",
|
GMetricName: "foo",
|
||||||
value: -10,
|
GValue: -10,
|
||||||
relative: true,
|
GRelative: true,
|
||||||
labels: map[string]string{},
|
GLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "simple timer",
|
name: "simple timer",
|
||||||
in: "foo:200|ms",
|
in: "foo:200|ms",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 200,
|
TValue: 200,
|
||||||
labels: map[string]string{},
|
TLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "simple histogram",
|
name: "simple histogram",
|
||||||
in: "foo:200|h",
|
in: "foo:200|h",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 200,
|
TValue: 200,
|
||||||
labels: map[string]string{},
|
TLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "simple distribution",
|
name: "simple distribution",
|
||||||
in: "foo:200|d",
|
in: "foo:200|d",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 200,
|
TValue: 200,
|
||||||
labels: map[string]string{},
|
TLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "distribution with sampling",
|
name: "distribution with sampling",
|
||||||
in: "foo:0.01|d|@0.2|#tag1:bar,#tag2:baz",
|
in: "foo:0.01|d|@0.2|#tag1:bar,#tag2:baz",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "librato tag extension",
|
name: "librato tag extension",
|
||||||
in: "foo#tag1=bar,tag2=baz:100|c",
|
in: "foo#tag1=bar,tag2=baz:100|c",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
CLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "librato tag extension with tag keys unsupported by prometheus",
|
name: "librato tag extension with tag keys unsupported by prometheus",
|
||||||
in: "foo#09digits=0,tag.with.dots=1:100|c",
|
in: "foo#09digits=0,tag.with.dots=1:100|c",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"_09digits": "0", "tag_with_dots": "1"},
|
CLabels: map[string]string{"_09digits": "0", "tag_with_dots": "1"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "influxdb tag extension",
|
name: "influxdb tag extension",
|
||||||
in: "foo,tag1=bar,tag2=baz:100|c",
|
in: "foo,tag1=bar,tag2=baz:100|c",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
CLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "influxdb tag extension with tag keys unsupported by prometheus",
|
name: "influxdb tag extension with tag keys unsupported by prometheus",
|
||||||
in: "foo,09digits=0,tag.with.dots=1:100|c",
|
in: "foo,09digits=0,tag.with.dots=1:100|c",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"_09digits": "0", "tag_with_dots": "1"},
|
CLabels: map[string]string{"_09digits": "0", "tag_with_dots": "1"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension",
|
name: "datadog tag extension",
|
||||||
in: "foo:100|c|#tag1:bar,tag2:baz",
|
in: "foo:100|c|#tag1:bar,tag2:baz",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
CLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with # in all keys (as sent by datadog php client)",
|
name: "datadog tag extension with # in all keys (as sent by datadog php client)",
|
||||||
in: "foo:100|c|#tag1:bar,#tag2:baz",
|
in: "foo:100|c|#tag1:bar,#tag2:baz",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
CLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with tag keys unsupported by prometheus",
|
name: "datadog tag extension with tag keys unsupported by prometheus",
|
||||||
in: "foo:100|c|#09digits:0,tag.with.dots:1",
|
in: "foo:100|c|#09digits:0,tag.with.dots:1",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"_09digits": "0", "tag_with_dots": "1"},
|
CLabels: map[string]string{"_09digits": "0", "tag_with_dots": "1"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with valueless tags: ignored",
|
name: "datadog tag extension with valueless tags: ignored",
|
||||||
in: "foo:100|c|#tag_without_a_value",
|
in: "foo:100|c|#tag_without_a_value",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with valueless tags (edge case)",
|
name: "datadog tag extension with valueless tags (edge case)",
|
||||||
in: "foo:100|c|#tag_without_a_value,tag:value",
|
in: "foo:100|c|#tag_without_a_value,tag:value",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"tag": "value"},
|
CLabels: map[string]string{"tag": "value"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with empty tags (edge case)",
|
name: "datadog tag extension with empty tags (edge case)",
|
||||||
in: "foo:100|c|#tag:value,,",
|
in: "foo:100|c|#tag:value,,",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{"tag": "value"},
|
CLabels: map[string]string{"tag": "value"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with sampling",
|
name: "datadog tag extension with sampling",
|
||||||
in: "foo:100|c|@0.1|#tag1:bar,#tag2:baz",
|
in: "foo:100|c|@0.1|#tag1:bar,#tag2:baz",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 1000,
|
CValue: 1000,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
CLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "librato/dogstatsd mixed tag styles without sampling",
|
name: "librato/dogstatsd mixed tag styles without sampling",
|
||||||
in: "foo#tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz",
|
in: "foo#tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz",
|
||||||
out: Events{},
|
out: event.Events{},
|
||||||
}, {
|
}, {
|
||||||
name: "influxdb/dogstatsd mixed tag styles without sampling",
|
name: "influxdb/dogstatsd mixed tag styles without sampling",
|
||||||
in: "foo,tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz",
|
in: "foo,tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz",
|
||||||
out: Events{},
|
out: event.Events{},
|
||||||
}, {
|
}, {
|
||||||
name: "mixed tag styles with sampling",
|
name: "mixed tag styles with sampling",
|
||||||
in: "foo#tag1=foo,tag3=bing:100|c|@0.1|#tag1:bar,#tag2:baz",
|
in: "foo#tag1=foo,tag3=bing:100|c|@0.1|#tag1:bar,#tag2:baz",
|
||||||
out: Events{},
|
out: event.Events{},
|
||||||
}, {
|
}, {
|
||||||
name: "histogram with sampling",
|
name: "histogram with sampling",
|
||||||
in: "foo:0.01|h|@0.2|#tag1:bar,#tag2:baz",
|
in: "foo:0.01|h|@0.2|#tag1:bar,#tag2:baz",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 0.01,
|
TValue: 0.01,
|
||||||
labels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
TLabels: map[string]string{"tag1": "bar", "tag2": "baz"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "datadog tag extension with multiple colons",
|
name: "datadog tag extension with multiple colons",
|
||||||
in: "foo:100|c|@0.1|#tag1:foo:bar",
|
in: "foo:100|c|@0.1|#tag1:foo:bar",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 1000,
|
CValue: 1000,
|
||||||
labels: map[string]string{"tag1": "foo:bar"},
|
CLabels: map[string]string{"tag1": "foo:bar"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
@ -300,62 +302,62 @@ func TestHandlePacket(t *testing.T) {
|
||||||
}, {
|
}, {
|
||||||
name: "multiple metrics with invalid datadog utf8 tag values",
|
name: "multiple metrics with invalid datadog utf8 tag values",
|
||||||
in: "foo:200|c|#tag:value\nfoo:300|c|#tag:\xc3\x28invalid",
|
in: "foo:200|c|#tag:value\nfoo:300|c|#tag:\xc3\x28invalid",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 200,
|
CValue: 200,
|
||||||
labels: map[string]string{"tag": "value"},
|
CLabels: map[string]string{"tag": "value"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "combined multiline metrics",
|
name: "combined multiline metrics",
|
||||||
in: "foo:200|ms:300|ms:5|c|@0.1:6|g\nbar:1|c:5|ms",
|
in: "foo:200|ms:300|ms:5|c|@0.1:6|g\nbar:1|c:5|ms",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 200,
|
TValue: 200,
|
||||||
labels: map[string]string{},
|
TLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "foo",
|
TMetricName: "foo",
|
||||||
value: 300,
|
TValue: 300,
|
||||||
labels: map[string]string{},
|
TLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 50,
|
CValue: 50,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "foo",
|
GMetricName: "foo",
|
||||||
value: 6,
|
GValue: 6,
|
||||||
labels: map[string]string{},
|
GLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "bar",
|
CMetricName: "bar",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "bar",
|
TMetricName: "bar",
|
||||||
value: 5,
|
TValue: 5,
|
||||||
labels: map[string]string{},
|
TLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "timings with sampling factor",
|
name: "timings with sampling factor",
|
||||||
in: "foo.timing:0.5|ms|@0.1",
|
in: "foo.timing:0.5|ms|@0.1",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}},
|
&event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "bad line",
|
name: "bad line",
|
||||||
|
@ -369,21 +371,21 @@ func TestHandlePacket(t *testing.T) {
|
||||||
}, {
|
}, {
|
||||||
name: "illegal sampling factor",
|
name: "illegal sampling factor",
|
||||||
in: "foo:1|c|@bar",
|
in: "foo:1|c|@bar",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
name: "zero sampling factor",
|
name: "zero sampling factor",
|
||||||
in: "foo:2|c|@0",
|
in: "foo:2|c|@0",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 2,
|
CValue: 2,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
@ -405,25 +407,25 @@ func TestHandlePacket(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "some invalid utf8",
|
name: "some invalid utf8",
|
||||||
in: "valid_utf8:1|c\ninvalid\xc3\x28utf8:1|c",
|
in: "valid_utf8:1|c\ninvalid\xc3\x28utf8:1|c",
|
||||||
out: Events{
|
out: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "valid_utf8",
|
CMetricName: "valid_utf8",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: map[string]string{},
|
CLabels: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, l := range []statsDPacketHandler{&StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} {
|
for k, l := range []statsDPacketHandler{&listener.StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{listener.StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} {
|
||||||
events := make(chan Events, 32)
|
events := make(chan event.Events, 32)
|
||||||
l.SetEventHandler(&unbufferedEventHandler{c: events})
|
l.SetEventHandler(&event.UnbufferedEventHandler{C: events})
|
||||||
for i, scenario := range scenarios {
|
for i, scenario := range scenarios {
|
||||||
l.handlePacket([]byte(scenario.in))
|
l.HandlePacket([]byte(scenario.in), udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
|
|
||||||
le := len(events)
|
le := len(events)
|
||||||
// Flatten actual events.
|
// Flatten actual events.
|
||||||
actual := Events{}
|
actual := event.Events{}
|
||||||
for i := 0; i < le; i++ {
|
for i := 0; i < le; i++ {
|
||||||
actual = append(actual, <-events...)
|
actual = append(actual, <-events...)
|
||||||
}
|
}
|
||||||
|
|
|
@ -18,15 +18,16 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/prometheus/statsd_exporter/pkg/clock"
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestEventThresholdFlush(t *testing.T) {
|
func TestEventThresholdFlush(t *testing.T) {
|
||||||
c := make(chan Events, 100)
|
c := make(chan event.Events, 100)
|
||||||
// We're not going to flush during this test, so the duration doesn't matter.
|
// We're not going to flush during this test, so the duration doesn't matter.
|
||||||
eq := newEventQueue(c, 5, time.Second)
|
eq := event.NewEventQueue(c, 5, time.Second, eventsFlushed)
|
||||||
e := make(Events, 13)
|
e := make(event.Events, 13)
|
||||||
go func() {
|
go func() {
|
||||||
eq.queue(e)
|
eq.Queue(e, &eventsFlushed)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
batch := <-c
|
batch := <-c
|
||||||
|
@ -51,26 +52,26 @@ func TestEventIntervalFlush(t *testing.T) {
|
||||||
}
|
}
|
||||||
clock.ClockInstance.Instant = time.Unix(0, 0)
|
clock.ClockInstance.Instant = time.Unix(0, 0)
|
||||||
|
|
||||||
c := make(chan Events, 100)
|
c := make(chan event.Events, 100)
|
||||||
eq := newEventQueue(c, 1000, time.Second*1000)
|
eq := event.NewEventQueue(c, 1000, time.Second*1000, eventsFlushed)
|
||||||
e := make(Events, 10)
|
e := make(event.Events, 10)
|
||||||
eq.queue(e)
|
eq.Queue(e, &eventsFlushed)
|
||||||
|
|
||||||
if eq.len() != 10 {
|
if eq.Len() != 10 {
|
||||||
t.Fatal("Expected 10 events to be queued, but got", eq.len())
|
t.Fatal("Expected 10 events to be queued, but got", eq.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(eq.c) != 0 {
|
if len(eq.C) != 0 {
|
||||||
t.Fatal("Expected 0 events in the event channel, but got", len(eq.c))
|
t.Fatal("Expected 0 events in the event channel, but got", len(eq.C))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tick time forward to trigger a flush
|
// Tick time forward to trigger a flush
|
||||||
clock.ClockInstance.Instant = time.Unix(10000, 0)
|
clock.ClockInstance.Instant = time.Unix(10000, 0)
|
||||||
clock.ClockInstance.TickerCh <- time.Unix(10000, 0)
|
clock.ClockInstance.TickerCh <- time.Unix(10000, 0)
|
||||||
|
|
||||||
events := <-eq.c
|
events := <-eq.C
|
||||||
if eq.len() != 0 {
|
if eq.Len() != 0 {
|
||||||
t.Fatal("Expected 0 events to be queued, but got", eq.len())
|
t.Fatal("Expected 0 events to be queued, but got", eq.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(events) != 10 {
|
if len(events) != 10 {
|
||||||
|
|
|
@ -18,6 +18,9 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-kit/kit/log"
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/exporter"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/listener"
|
||||||
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -42,12 +45,12 @@ func benchmarkUDPListener(times int, b *testing.B) {
|
||||||
}
|
}
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
// there are more events than input lines, need bigger buffer
|
// there are more events than input lines, need bigger buffer
|
||||||
events := make(chan Events, len(bytesInput)*times*2)
|
events := make(chan event.Events, len(bytesInput)*times*2)
|
||||||
l := StatsDUDPListener{eventHandler: &unbufferedEventHandler{c: events}}
|
l := listener.StatsDUDPListener{EventHandler: &event.UnbufferedEventHandler{C: events}}
|
||||||
|
|
||||||
for i := 0; i < times; i++ {
|
for i := 0; i < times; i++ {
|
||||||
for _, line := range bytesInput {
|
for _, line := range bytesInput {
|
||||||
l.handlePacket([]byte(line))
|
l.HandlePacket([]byte(line), udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -64,52 +67,52 @@ func BenchmarkUDPListener50(b *testing.B) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkExporterListener(b *testing.B) {
|
func BenchmarkExporterListener(b *testing.B) {
|
||||||
events := Events{
|
events := event.Events{
|
||||||
&CounterEvent{ // simple counter
|
&event.CounterEvent{ // simple counter
|
||||||
metricName: "counter",
|
CMetricName: "counter",
|
||||||
value: 2,
|
CValue: 2,
|
||||||
},
|
},
|
||||||
&GaugeEvent{ // simple gauge
|
&event.GaugeEvent{ // simple gauge
|
||||||
metricName: "gauge",
|
GMetricName: "gauge",
|
||||||
value: 10,
|
GValue: 10,
|
||||||
},
|
},
|
||||||
&TimerEvent{ // simple timer
|
&event.TimerEvent{ // simple timer
|
||||||
metricName: "timer",
|
TMetricName: "timer",
|
||||||
value: 200,
|
TValue: 200,
|
||||||
},
|
},
|
||||||
&TimerEvent{ // simple histogram
|
&event.TimerEvent{ // simple histogram
|
||||||
metricName: "histogram.test",
|
TMetricName: "histogram.test",
|
||||||
value: 200,
|
TValue: 200,
|
||||||
},
|
},
|
||||||
&CounterEvent{ // simple_tags
|
&event.CounterEvent{ // simple_tags
|
||||||
metricName: "simple_tags",
|
CMetricName: "simple_tags",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{
|
CLabels: map[string]string{
|
||||||
"alpha": "bar",
|
"alpha": "bar",
|
||||||
"bravo": "baz",
|
"bravo": "baz",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&CounterEvent{ // slightly different tags
|
&event.CounterEvent{ // slightly different tags
|
||||||
metricName: "simple_tags",
|
CMetricName: "simple_tags",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{
|
CLabels: map[string]string{
|
||||||
"alpha": "bar",
|
"alpha": "bar",
|
||||||
"charlie": "baz",
|
"charlie": "baz",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&CounterEvent{ // and even more different tags
|
&event.CounterEvent{ // and even more different tags
|
||||||
metricName: "simple_tags",
|
CMetricName: "simple_tags",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{
|
CLabels: map[string]string{
|
||||||
"alpha": "bar",
|
"alpha": "bar",
|
||||||
"bravo": "baz",
|
"bravo": "baz",
|
||||||
"golf": "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong",
|
"golf": "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&CounterEvent{ // datadog tag extension with complex tags
|
&event.CounterEvent{ // datadog tag extension with complex tags
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: 100,
|
CValue: 100,
|
||||||
labels: map[string]string{
|
CLabels: map[string]string{
|
||||||
"action": "test",
|
"action": "test",
|
||||||
"application": "testapp",
|
"application": "testapp",
|
||||||
"application_component": "testcomp",
|
"application_component": "testcomp",
|
||||||
|
@ -139,9 +142,9 @@ mappings:
|
||||||
b.Fatalf("Config load error: %s %s", config, err)
|
b.Fatalf("Config load error: %s %s", config, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ex := NewExporter(testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(testMapper, log.NewNopLogger())
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
ec := make(chan Events, 1000)
|
ec := make(chan event.Events, 1000)
|
||||||
go func() {
|
go func() {
|
||||||
for i := 0; i < 1000; i++ {
|
for i := 0; i < 1000; i++ {
|
||||||
ec <- events
|
ec <- events
|
||||||
|
@ -149,6 +152,6 @@ mappings:
|
||||||
close(ec)
|
close(ec)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
ex.Listen(ec)
|
ex.Listen(ec, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
447
exporter_test.go
447
exporter_test.go
|
@ -24,7 +24,12 @@ import (
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
"github.com/prometheus/statsd_exporter/pkg/clock"
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/exporter"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/line"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/listener"
|
||||||
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/registry"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestNegativeCounter validates when we send a negative
|
// TestNegativeCounter validates when we send a negative
|
||||||
|
@ -41,12 +46,12 @@ func TestNegativeCounter(t *testing.T) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo",
|
CMetricName: "foo",
|
||||||
value: -1,
|
CValue: -1,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
|
@ -59,8 +64,8 @@ func TestNegativeCounter(t *testing.T) {
|
||||||
testMapper := mapper.MetricMapper{}
|
testMapper := mapper.MetricMapper{}
|
||||||
testMapper.InitCache(0)
|
testMapper.InitCache(0)
|
||||||
|
|
||||||
ex := NewExporter(&testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(&testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
|
||||||
updated := getTelemetryCounterValue(errorCounter)
|
updated := getTelemetryCounterValue(errorCounter)
|
||||||
if updated-prev != 1 {
|
if updated-prev != 1 {
|
||||||
|
@ -80,48 +85,48 @@ func TestInconsistentLabelSets(t *testing.T) {
|
||||||
secondLabelSet["foo"] = "1"
|
secondLabelSet["foo"] = "1"
|
||||||
secondLabelSet["bar"] = "2"
|
secondLabelSet["bar"] = "2"
|
||||||
|
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "counter_test",
|
CMetricName: "counter_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: firstLabelSet,
|
CLabels: firstLabelSet,
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "counter_test",
|
CMetricName: "counter_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: secondLabelSet,
|
CLabels: secondLabelSet,
|
||||||
},
|
},
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "gauge_test",
|
GMetricName: "gauge_test",
|
||||||
value: 1,
|
GValue: 1,
|
||||||
labels: firstLabelSet,
|
GLabels: firstLabelSet,
|
||||||
},
|
},
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "gauge_test",
|
GMetricName: "gauge_test",
|
||||||
value: 1,
|
GValue: 1,
|
||||||
labels: secondLabelSet,
|
GLabels: secondLabelSet,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "histogram.test",
|
TMetricName: "histogram.test",
|
||||||
value: 1,
|
TValue: 1,
|
||||||
labels: firstLabelSet,
|
TLabels: firstLabelSet,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "histogram.test",
|
TMetricName: "histogram.test",
|
||||||
value: 1,
|
TValue: 1,
|
||||||
labels: secondLabelSet,
|
TLabels: secondLabelSet,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "summary_test",
|
TMetricName: "summary_test",
|
||||||
value: 1,
|
TValue: 1,
|
||||||
labels: firstLabelSet,
|
TLabels: firstLabelSet,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "summary_test",
|
TMetricName: "summary_test",
|
||||||
value: 1,
|
TValue: 1,
|
||||||
labels: secondLabelSet,
|
TLabels: secondLabelSet,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
|
@ -140,8 +145,8 @@ mappings:
|
||||||
t.Fatalf("Config load error: %s %s", config, err)
|
t.Fatalf("Config load error: %s %s", config, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ex := NewExporter(testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
|
||||||
metrics, err := prometheus.DefaultGatherer.Gather()
|
metrics, err := prometheus.DefaultGatherer.Gather()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -166,18 +171,18 @@ mappings:
|
||||||
func TestLabelParsing(t *testing.T) {
|
func TestLabelParsing(t *testing.T) {
|
||||||
codes := [2]string{"200", "300"}
|
codes := [2]string{"200", "300"}
|
||||||
|
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "counter.test.200",
|
CMetricName: "counter.test.200",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: make(map[string]string),
|
CLabels: make(map[string]string),
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "counter.test.300",
|
CMetricName: "counter.test.300",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: make(map[string]string),
|
CLabels: make(map[string]string),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
|
@ -198,8 +203,8 @@ mappings:
|
||||||
t.Fatalf("Config load error: %s %s", config, err)
|
t.Fatalf("Config load error: %s %s", config, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ex := NewExporter(testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
|
||||||
metrics, err := prometheus.DefaultGatherer.Gather()
|
metrics, err := prometheus.DefaultGatherer.Gather()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -222,173 +227,173 @@ func TestConflictingMetrics(t *testing.T) {
|
||||||
scenarios := []struct {
|
scenarios := []struct {
|
||||||
name string
|
name string
|
||||||
expected []float64
|
expected []float64
|
||||||
in Events
|
in event.Events
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "counter vs gauge",
|
name: "counter vs gauge",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvg_test",
|
CMetricName: "cvg_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "cvg_test",
|
GMetricName: "cvg_test",
|
||||||
value: 2,
|
GValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs gauge with different labels",
|
name: "counter vs gauge with different labels",
|
||||||
expected: []float64{1, 2},
|
expected: []float64{1, 2},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvgl_test",
|
CMetricName: "cvgl_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: map[string]string{"tag": "1"},
|
CLabels: map[string]string{"tag": "1"},
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvgl_test",
|
CMetricName: "cvgl_test",
|
||||||
value: 2,
|
CValue: 2,
|
||||||
labels: map[string]string{"tag": "2"},
|
CLabels: map[string]string{"tag": "2"},
|
||||||
},
|
},
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "cvgl_test",
|
GMetricName: "cvgl_test",
|
||||||
value: 3,
|
GValue: 3,
|
||||||
labels: map[string]string{"tag": "1"},
|
GLabels: map[string]string{"tag": "1"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs gauge with same labels",
|
name: "counter vs gauge with same labels",
|
||||||
expected: []float64{3},
|
expected: []float64{3},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvgsl_test",
|
CMetricName: "cvgsl_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: map[string]string{"tag": "1"},
|
CLabels: map[string]string{"tag": "1"},
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvgsl_test",
|
CMetricName: "cvgsl_test",
|
||||||
value: 2,
|
CValue: 2,
|
||||||
labels: map[string]string{"tag": "1"},
|
CLabels: map[string]string{"tag": "1"},
|
||||||
},
|
},
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "cvgsl_test",
|
GMetricName: "cvgsl_test",
|
||||||
value: 3,
|
GValue: 3,
|
||||||
labels: map[string]string{"tag": "1"},
|
GLabels: map[string]string{"tag": "1"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "gauge vs counter",
|
name: "gauge vs counter",
|
||||||
expected: []float64{2},
|
expected: []float64{2},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "gvc_test",
|
GMetricName: "gvc_test",
|
||||||
value: 2,
|
GValue: 2,
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "gvc_test",
|
CMetricName: "gvc_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs histogram",
|
name: "counter vs histogram",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "histogram_test1",
|
CMetricName: "histogram_test1",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "histogram.test1",
|
TMetricName: "histogram.test1",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs histogram sum",
|
name: "counter vs histogram sum",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "histogram_test1_sum",
|
CMetricName: "histogram_test1_sum",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "histogram.test1",
|
TMetricName: "histogram.test1",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs histogram count",
|
name: "counter vs histogram count",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "histogram_test2_count",
|
CMetricName: "histogram_test2_count",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "histogram.test2",
|
TMetricName: "histogram.test2",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs histogram bucket",
|
name: "counter vs histogram bucket",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "histogram_test3_bucket",
|
CMetricName: "histogram_test3_bucket",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "histogram.test3",
|
TMetricName: "histogram.test3",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs summary quantile",
|
name: "counter vs summary quantile",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvsq_test",
|
CMetricName: "cvsq_test",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "cvsq_test",
|
TMetricName: "cvsq_test",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs summary count",
|
name: "counter vs summary count",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvsc_count",
|
CMetricName: "cvsc_count",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "cvsc",
|
TMetricName: "cvsc",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "counter vs summary sum",
|
name: "counter vs summary sum",
|
||||||
expected: []float64{1},
|
expected: []float64{1},
|
||||||
in: Events{
|
in: event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "cvss_sum",
|
CMetricName: "cvss_sum",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "cvss",
|
TMetricName: "cvss",
|
||||||
value: 2,
|
TValue: 2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -408,13 +413,13 @@ mappings:
|
||||||
t.Fatalf("Config load error: %s %s", config, err)
|
t.Fatalf("Config load error: %s %s", config, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
events <- s.in
|
events <- s.in
|
||||||
close(events)
|
close(events)
|
||||||
}()
|
}()
|
||||||
ex := NewExporter(testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
|
||||||
metrics, err := prometheus.DefaultGatherer.Gather()
|
metrics, err := prometheus.DefaultGatherer.Gather()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -441,12 +446,12 @@ mappings:
|
||||||
// being the empty string after applying the match replacements
|
// being the empty string after applying the match replacements
|
||||||
// tha we don't panic the Exporter Listener.
|
// tha we don't panic the Exporter Listener.
|
||||||
func TestEmptyStringMetric(t *testing.T) {
|
func TestEmptyStringMetric(t *testing.T) {
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: "foo_bar",
|
CMetricName: "foo_bar",
|
||||||
value: 1,
|
CValue: 1,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
|
@ -468,8 +473,8 @@ mappings:
|
||||||
errorCounter := errorEventStats.WithLabelValues("empty_metric_name")
|
errorCounter := errorEventStats.WithLabelValues("empty_metric_name")
|
||||||
prev := getTelemetryCounterValue(errorCounter)
|
prev := getTelemetryCounterValue(errorCounter)
|
||||||
|
|
||||||
ex := NewExporter(testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
|
||||||
updated := getTelemetryCounterValue(errorCounter)
|
updated := getTelemetryCounterValue(errorCounter)
|
||||||
if updated-prev != 1 {
|
if updated-prev != 1 {
|
||||||
|
@ -489,13 +494,13 @@ func TestInvalidUtf8InDatadogTagValue(t *testing.T) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
ueh := &unbufferedEventHandler{c: events}
|
ueh := &event.UnbufferedEventHandler{C: events}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
for _, l := range []statsDPacketHandler{&StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} {
|
for _, l := range []statsDPacketHandler{&listener.StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{listener.StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} {
|
||||||
l.SetEventHandler(ueh)
|
l.SetEventHandler(ueh)
|
||||||
l.handlePacket([]byte("bar:200|c|#tag:value\nbar:200|c|#tag:\xc3\x28invalid"))
|
l.HandlePacket([]byte("bar:200|c|#tag:value\nbar:200|c|#tag:\xc3\x28invalid"), udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
}
|
}
|
||||||
close(events)
|
close(events)
|
||||||
}()
|
}()
|
||||||
|
@ -503,8 +508,8 @@ func TestInvalidUtf8InDatadogTagValue(t *testing.T) {
|
||||||
testMapper := mapper.MetricMapper{}
|
testMapper := mapper.MetricMapper{}
|
||||||
testMapper.InitCache(0)
|
testMapper.InitCache(0)
|
||||||
|
|
||||||
ex := NewExporter(&testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(&testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// In the case of someone starting the statsd exporter with no mapping file specified
|
// In the case of someone starting the statsd exporter with no mapping file specified
|
||||||
|
@ -512,24 +517,24 @@ func TestInvalidUtf8InDatadogTagValue(t *testing.T) {
|
||||||
// as well as the sum/count metrics
|
// as well as the sum/count metrics
|
||||||
func TestSummaryWithQuantilesEmptyMapping(t *testing.T) {
|
func TestSummaryWithQuantilesEmptyMapping(t *testing.T) {
|
||||||
// Start exporter with a synchronous channel
|
// Start exporter with a synchronous channel
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
testMapper := mapper.MetricMapper{}
|
testMapper := mapper.MetricMapper{}
|
||||||
testMapper.InitCache(0)
|
testMapper.InitCache(0)
|
||||||
|
|
||||||
ex := NewExporter(&testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(&testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
name := "default_foo"
|
name := "default_foo"
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: name,
|
TMetricName: name,
|
||||||
value: 300,
|
TValue: 300,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
events <- Events{}
|
events <- event.Events{}
|
||||||
close(events)
|
close(events)
|
||||||
|
|
||||||
metrics, err := prometheus.DefaultGatherer.Gather()
|
metrics, err := prometheus.DefaultGatherer.Gather()
|
||||||
|
@ -557,26 +562,26 @@ func TestSummaryWithQuantilesEmptyMapping(t *testing.T) {
|
||||||
|
|
||||||
func TestHistogramUnits(t *testing.T) {
|
func TestHistogramUnits(t *testing.T) {
|
||||||
// Start exporter with a synchronous channel
|
// Start exporter with a synchronous channel
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
testMapper := mapper.MetricMapper{}
|
testMapper := mapper.MetricMapper{}
|
||||||
testMapper.InitCache(0)
|
testMapper.InitCache(0)
|
||||||
ex := NewExporter(&testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(&testMapper, log.NewNopLogger())
|
||||||
ex.mapper.Defaults.TimerType = mapper.TimerTypeHistogram
|
ex.Mapper.Defaults.TimerType = mapper.TimerTypeHistogram
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Synchronously send a statsd event to wait for handleEvent execution.
|
// Synchronously send a statsd event to wait for handleEvent execution.
|
||||||
// Then close events channel to stop a listener.
|
// Then close events channel to stop a listener.
|
||||||
name := "foo"
|
name := "foo"
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: name,
|
TMetricName: name,
|
||||||
value: 300,
|
TValue: 300,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
events <- Events{}
|
events <- event.Events{}
|
||||||
close(events)
|
close(events)
|
||||||
|
|
||||||
// Check histogram value
|
// Check histogram value
|
||||||
|
@ -596,12 +601,12 @@ func TestHistogramUnits(t *testing.T) {
|
||||||
}
|
}
|
||||||
func TestCounterIncrement(t *testing.T) {
|
func TestCounterIncrement(t *testing.T) {
|
||||||
// Start exporter with a synchronous channel
|
// Start exporter with a synchronous channel
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
go func() {
|
go func() {
|
||||||
testMapper := mapper.MetricMapper{}
|
testMapper := mapper.MetricMapper{}
|
||||||
testMapper.InitCache(0)
|
testMapper.InitCache(0)
|
||||||
ex := NewExporter(&testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(&testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Synchronously send a statsd event to wait for handleEvent execution.
|
// Synchronously send a statsd event to wait for handleEvent execution.
|
||||||
|
@ -610,21 +615,21 @@ func TestCounterIncrement(t *testing.T) {
|
||||||
labels := map[string]string{
|
labels := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
}
|
}
|
||||||
c := Events{
|
c := event.Events{
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: name,
|
CMetricName: name,
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: labels,
|
CLabels: labels,
|
||||||
},
|
},
|
||||||
&CounterEvent{
|
&event.CounterEvent{
|
||||||
metricName: name,
|
CMetricName: name,
|
||||||
value: 1,
|
CValue: 1,
|
||||||
labels: labels,
|
CLabels: labels,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
events <- c
|
events <- c
|
||||||
// Push empty event so that we block until the first event is consumed.
|
// Push empty event so that we block until the first event is consumed.
|
||||||
events <- Events{}
|
events <- event.Events{}
|
||||||
close(events)
|
close(events)
|
||||||
|
|
||||||
// Check histogram value
|
// Check histogram value
|
||||||
|
@ -642,16 +647,16 @@ func TestCounterIncrement(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type statsDPacketHandler interface {
|
type statsDPacketHandler interface {
|
||||||
handlePacket(packet []byte)
|
HandlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter)
|
||||||
SetEventHandler(eh eventHandler)
|
SetEventHandler(eh event.EventHandler)
|
||||||
}
|
}
|
||||||
|
|
||||||
type mockStatsDTCPListener struct {
|
type mockStatsDTCPListener struct {
|
||||||
StatsDTCPListener
|
listener.StatsDTCPListener
|
||||||
log.Logger
|
log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ml *mockStatsDTCPListener) handlePacket(packet []byte) {
|
func (ml *mockStatsDTCPListener) HandlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
// Forcing IPv4 because the TravisCI build environment does not have IPv6
|
// Forcing IPv4 because the TravisCI build environment does not have IPv6
|
||||||
// addresses.
|
// addresses.
|
||||||
lc, err := net.ListenTCP("tcp4", nil)
|
lc, err := net.ListenTCP("tcp4", nil)
|
||||||
|
@ -679,7 +684,7 @@ func (ml *mockStatsDTCPListener) handlePacket(packet []byte) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Sprintf("mockStatsDTCPListener: accept failed: %v", err))
|
panic(fmt.Sprintf("mockStatsDTCPListener: accept failed: %v", err))
|
||||||
}
|
}
|
||||||
ml.handleConn(sc)
|
ml.HandleConn(sc, linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestTtlExpiration validates expiration of time series.
|
// TestTtlExpiration validates expiration of time series.
|
||||||
|
@ -706,23 +711,23 @@ mappings:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Config load error: %s %s", config, err)
|
t.Fatalf("Config load error: %s %s", config, err)
|
||||||
}
|
}
|
||||||
events := make(chan Events)
|
events := make(chan event.Events)
|
||||||
defer close(events)
|
defer close(events)
|
||||||
go func() {
|
go func() {
|
||||||
ex := NewExporter(testMapper, log.NewNopLogger())
|
ex := exporter.NewExporter(testMapper, log.NewNopLogger())
|
||||||
ex.Listen(events)
|
ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
ev := Events{
|
ev := event.Events{
|
||||||
// event with default ttl = 1s
|
// event with default ttl = 1s
|
||||||
&GaugeEvent{
|
&event.GaugeEvent{
|
||||||
metricName: "foobar",
|
GMetricName: "foobar",
|
||||||
value: 200,
|
GValue: 200,
|
||||||
},
|
},
|
||||||
// event with ttl = 2s from a mapping
|
// event with ttl = 2s from a mapping
|
||||||
&TimerEvent{
|
&event.TimerEvent{
|
||||||
metricName: "bazqux.main",
|
TMetricName: "bazqux.main",
|
||||||
value: 42000,
|
TValue: 42000,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -735,7 +740,7 @@ mappings:
|
||||||
// saveLabelValues will use fake instant as a lastRegisteredAt time.
|
// saveLabelValues will use fake instant as a lastRegisteredAt time.
|
||||||
clock.ClockInstance.Instant = time.Unix(0, 0)
|
clock.ClockInstance.Instant = time.Unix(0, 0)
|
||||||
events <- ev
|
events <- ev
|
||||||
events <- Events{}
|
events <- event.Events{}
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
metrics, err = prometheus.DefaultGatherer.Gather()
|
metrics, err = prometheus.DefaultGatherer.Gather()
|
||||||
|
@ -757,7 +762,7 @@ mappings:
|
||||||
// Step 2. Increase Instant to emulate metrics expiration after 1s
|
// Step 2. Increase Instant to emulate metrics expiration after 1s
|
||||||
clock.ClockInstance.Instant = time.Unix(1, 10)
|
clock.ClockInstance.Instant = time.Unix(1, 10)
|
||||||
clock.ClockInstance.TickerCh <- time.Unix(0, 0)
|
clock.ClockInstance.TickerCh <- time.Unix(0, 0)
|
||||||
events <- Events{}
|
events <- event.Events{}
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
metrics, err = prometheus.DefaultGatherer.Gather()
|
metrics, err = prometheus.DefaultGatherer.Gather()
|
||||||
|
@ -779,7 +784,7 @@ mappings:
|
||||||
// Step 3. Increase Instant to emulate metrics expiration after 2s
|
// Step 3. Increase Instant to emulate metrics expiration after 2s
|
||||||
clock.ClockInstance.Instant = time.Unix(2, 200)
|
clock.ClockInstance.Instant = time.Unix(2, 200)
|
||||||
clock.ClockInstance.TickerCh <- time.Unix(0, 0)
|
clock.ClockInstance.TickerCh <- time.Unix(0, 0)
|
||||||
events <- Events{}
|
events <- event.Events{}
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
metrics, err = prometheus.DefaultGatherer.Gather()
|
metrics, err = prometheus.DefaultGatherer.Gather()
|
||||||
|
@ -797,32 +802,32 @@ mappings:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHashLabelNames(t *testing.T) {
|
func TestHashLabelNames(t *testing.T) {
|
||||||
r := newRegistry(nil)
|
r := registry.NewRegistry(nil)
|
||||||
// Validate value hash changes and name has doesn't when just the value changes.
|
// Validate value hash changes and name has doesn't when just the value changes.
|
||||||
hash1, _ := r.hashLabels(map[string]string{
|
hash1, _ := r.HashLabels(map[string]string{
|
||||||
"label": "value1",
|
"label": "value1",
|
||||||
})
|
})
|
||||||
hash2, _ := r.hashLabels(map[string]string{
|
hash2, _ := r.HashLabels(map[string]string{
|
||||||
"label": "value2",
|
"label": "value2",
|
||||||
})
|
})
|
||||||
if hash1.names != hash2.names {
|
if hash1.Names != hash2.Names {
|
||||||
t.Fatal("Hash of label names should match, but doesn't")
|
t.Fatal("Hash of label names should match, but doesn't")
|
||||||
}
|
}
|
||||||
if hash1.values == hash2.values {
|
if hash1.Values == hash2.Values {
|
||||||
t.Fatal("Hash of label names shouldn't match, but do")
|
t.Fatal("Hash of label names shouldn't match, but do")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate value and name hashes change when the name changes.
|
// Validate value and name hashes change when the name changes.
|
||||||
hash1, _ = r.hashLabels(map[string]string{
|
hash1, _ = r.HashLabels(map[string]string{
|
||||||
"label1": "value",
|
"label1": "value",
|
||||||
})
|
})
|
||||||
hash2, _ = r.hashLabels(map[string]string{
|
hash2, _ = r.HashLabels(map[string]string{
|
||||||
"label2": "value",
|
"label2": "value",
|
||||||
})
|
})
|
||||||
if hash1.names == hash2.names {
|
if hash1.Names == hash2.Names {
|
||||||
t.Fatal("Hash of label names shouldn't match, but do")
|
t.Fatal("Hash of label names shouldn't match, but do")
|
||||||
}
|
}
|
||||||
if hash1.values == hash2.values {
|
if hash1.Values == hash2.Values {
|
||||||
t.Fatal("Hash of label names shouldn't match, but do")
|
t.Fatal("Hash of label names shouldn't match, but do")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -916,7 +921,7 @@ func BenchmarkParseDogStatsDTags(b *testing.B) {
|
||||||
b.Run(name, func(b *testing.B) {
|
b.Run(name, func(b *testing.B) {
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
labels := map[string]string{}
|
labels := map[string]string{}
|
||||||
parseDogStatsDTags(tags, labels, log.NewNopLogger())
|
line.ParseDogStatsDTags(tags, labels, tagErrors, log.NewNopLogger())
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -953,11 +958,11 @@ func BenchmarkHashNameAndLabels(b *testing.B) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
r := newRegistry(nil)
|
r := registry.NewRegistry(nil)
|
||||||
for _, s := range scenarios {
|
for _, s := range scenarios {
|
||||||
b.Run(s.name, func(b *testing.B) {
|
b.Run(s.name, func(b *testing.B) {
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
r.hashLabels(s.labels)
|
r.HashLabels(s.labels)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
1
go.sum
1
go.sum
|
@ -56,6 +56,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
|
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
|
||||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
|
github.com/prometheus/client_golang v1.5.0 h1:Ctq0iGpCmr3jeP77kbF2UxgvRwzWWz+4Bh9/vJTyg1A=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||||
|
|
230
main.go
230
main.go
|
@ -15,7 +15,6 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
_ "net/http/pprof"
|
_ "net/http/pprof"
|
||||||
|
@ -33,11 +32,170 @@ import (
|
||||||
"github.com/prometheus/common/version"
|
"github.com/prometheus/common/version"
|
||||||
"gopkg.in/alecthomas/kingpin.v2"
|
"gopkg.in/alecthomas/kingpin.v2"
|
||||||
|
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/exporter"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/listener"
|
||||||
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultHelp = "Metric autogenerated by statsd_exporter."
|
||||||
|
regErrF = "Failed to update metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
eventStats = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_events_total",
|
||||||
|
Help: "The total number of StatsD events seen.",
|
||||||
|
},
|
||||||
|
[]string{"type"},
|
||||||
|
)
|
||||||
|
eventsFlushed = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_event_queue_flushed_total",
|
||||||
|
Help: "Number of times events were flushed to exporter",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
eventsUnmapped = prometheus.NewCounter(prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_events_unmapped_total",
|
||||||
|
Help: "The total number of StatsD events no mapping was found for.",
|
||||||
|
})
|
||||||
|
udpPackets = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_udp_packets_total",
|
||||||
|
Help: "The total number of StatsD packets received over UDP.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
tcpConnections = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_tcp_connections_total",
|
||||||
|
Help: "The total number of TCP connections handled.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
tcpErrors = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_tcp_connection_errors_total",
|
||||||
|
Help: "The number of errors encountered reading from TCP.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
tcpLineTooLong = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_tcp_too_long_lines_total",
|
||||||
|
Help: "The number of lines discarded due to being too long.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
unixgramPackets = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_unixgram_packets_total",
|
||||||
|
Help: "The total number of StatsD packets received over Unixgram.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
linesReceived = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_lines_total",
|
||||||
|
Help: "The total number of StatsD lines received.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
samplesReceived = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_samples_total",
|
||||||
|
Help: "The total number of StatsD samples received.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
sampleErrors = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_sample_errors_total",
|
||||||
|
Help: "The total number of errors parsing StatsD samples.",
|
||||||
|
},
|
||||||
|
[]string{"reason"},
|
||||||
|
)
|
||||||
|
tagsReceived = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_tags_total",
|
||||||
|
Help: "The total number of DogStatsD tags processed.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
tagErrors = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_tag_errors_total",
|
||||||
|
Help: "The number of errors parsing DogStatsD tags.",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
configLoads = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_config_reloads_total",
|
||||||
|
Help: "The number of configuration reloads.",
|
||||||
|
},
|
||||||
|
[]string{"outcome"},
|
||||||
|
)
|
||||||
|
mappingsCount = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||||
|
Name: "statsd_exporter_loaded_mappings",
|
||||||
|
Help: "The current number of configured metric mappings.",
|
||||||
|
})
|
||||||
|
conflictingEventStats = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_events_conflict_total",
|
||||||
|
Help: "The total number of StatsD events with conflicting names.",
|
||||||
|
},
|
||||||
|
[]string{"type"},
|
||||||
|
)
|
||||||
|
errorEventStats = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_events_error_total",
|
||||||
|
Help: "The total number of StatsD events discarded due to errors.",
|
||||||
|
},
|
||||||
|
[]string{"reason"},
|
||||||
|
)
|
||||||
|
eventsActions = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "statsd_exporter_events_actions_total",
|
||||||
|
Help: "The total number of StatsD events by action.",
|
||||||
|
},
|
||||||
|
[]string{"action"},
|
||||||
|
)
|
||||||
|
metricsCount = prometheus.NewGaugeVec(
|
||||||
|
prometheus.GaugeOpts{
|
||||||
|
Name: "statsd_exporter_metrics_total",
|
||||||
|
Help: "The total number of metrics.",
|
||||||
|
},
|
||||||
|
[]string{"type"},
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
prometheus.MustRegister(version.NewCollector("statsd_exporter"))
|
prometheus.MustRegister(version.NewCollector("statsd_exporter"))
|
||||||
|
prometheus.MustRegister(eventStats)
|
||||||
|
prometheus.MustRegister(eventsFlushed)
|
||||||
|
prometheus.MustRegister(eventsUnmapped)
|
||||||
|
prometheus.MustRegister(udpPackets)
|
||||||
|
prometheus.MustRegister(tcpConnections)
|
||||||
|
prometheus.MustRegister(tcpErrors)
|
||||||
|
prometheus.MustRegister(tcpLineTooLong)
|
||||||
|
prometheus.MustRegister(unixgramPackets)
|
||||||
|
prometheus.MustRegister(linesReceived)
|
||||||
|
prometheus.MustRegister(samplesReceived)
|
||||||
|
prometheus.MustRegister(sampleErrors)
|
||||||
|
prometheus.MustRegister(tagsReceived)
|
||||||
|
prometheus.MustRegister(tagErrors)
|
||||||
|
prometheus.MustRegister(configLoads)
|
||||||
|
prometheus.MustRegister(mappingsCount)
|
||||||
|
prometheus.MustRegister(conflictingEventStats)
|
||||||
|
prometheus.MustRegister(errorEventStats)
|
||||||
|
prometheus.MustRegister(eventsActions)
|
||||||
|
prometheus.MustRegister(metricsCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
|
||||||
|
// This allows incoming metrics to have inconsistent label sets
|
||||||
|
type uncheckedCollector struct {
|
||||||
|
c prometheus.Collector
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}
|
||||||
|
func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
|
||||||
|
u.c.Collect(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func serveHTTP(listenAddress, metricsEndpoint string, logger log.Logger) {
|
func serveHTTP(listenAddress, metricsEndpoint string, logger log.Logger) {
|
||||||
|
@ -55,52 +213,6 @@ func serveHTTP(listenAddress, metricsEndpoint string, logger log.Logger) {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func ipPortFromString(addr string) (*net.IPAddr, int, error) {
|
|
||||||
host, portStr, err := net.SplitHostPort(addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, fmt.Errorf("bad StatsD listening address: %s", addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if host == "" {
|
|
||||||
host = "0.0.0.0"
|
|
||||||
}
|
|
||||||
ip, err := net.ResolveIPAddr("ip", host)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, fmt.Errorf("Unable to resolve %s: %s", host, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
port, err := strconv.Atoi(portStr)
|
|
||||||
if err != nil || port < 0 || port > 65535 {
|
|
||||||
return nil, 0, fmt.Errorf("Bad port %s: %s", portStr, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ip, port, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func udpAddrFromString(addr string) (*net.UDPAddr, error) {
|
|
||||||
ip, port, err := ipPortFromString(addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &net.UDPAddr{
|
|
||||||
IP: ip.IP,
|
|
||||||
Port: port,
|
|
||||||
Zone: ip.Zone,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func tcpAddrFromString(addr string) (*net.TCPAddr, error) {
|
|
||||||
ip, port, err := ipPortFromString(addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &net.TCPAddr{
|
|
||||||
IP: ip.IP,
|
|
||||||
Port: port,
|
|
||||||
Zone: ip.Zone,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func configReloader(fileName string, mapper *mapper.MetricMapper, cacheSize int, logger log.Logger, option mapper.CacheOption) {
|
func configReloader(fileName string, mapper *mapper.MetricMapper, cacheSize int, logger log.Logger, option mapper.CacheOption) {
|
||||||
|
|
||||||
signals := make(chan os.Signal, 1)
|
signals := make(chan os.Signal, 1)
|
||||||
|
@ -177,12 +289,12 @@ func main() {
|
||||||
|
|
||||||
go serveHTTP(*listenAddress, *metricsEndpoint, logger)
|
go serveHTTP(*listenAddress, *metricsEndpoint, logger)
|
||||||
|
|
||||||
events := make(chan Events, *eventQueueSize)
|
events := make(chan event.Events, *eventQueueSize)
|
||||||
defer close(events)
|
defer close(events)
|
||||||
eventQueue := newEventQueue(events, *eventFlushThreshold, *eventFlushInterval)
|
eventQueue := event.NewEventQueue(events, *eventFlushThreshold, *eventFlushInterval, eventsFlushed)
|
||||||
|
|
||||||
if *statsdListenUDP != "" {
|
if *statsdListenUDP != "" {
|
||||||
udpListenAddr, err := udpAddrFromString(*statsdListenUDP)
|
udpListenAddr, err := util.UDPAddrFromString(*statsdListenUDP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(logger).Log("msg", "invalid UDP listen address", "address", *statsdListenUDP, "error", err)
|
level.Error(logger).Log("msg", "invalid UDP listen address", "address", *statsdListenUDP, "error", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
|
@ -201,12 +313,12 @@ func main() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ul := &StatsDUDPListener{conn: uconn, eventHandler: eventQueue, logger: logger}
|
ul := &listener.StatsDUDPListener{Conn: uconn, EventHandler: eventQueue, Logger: logger}
|
||||||
go ul.Listen()
|
go ul.Listen(udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
}
|
}
|
||||||
|
|
||||||
if *statsdListenTCP != "" {
|
if *statsdListenTCP != "" {
|
||||||
tcpListenAddr, err := tcpAddrFromString(*statsdListenTCP)
|
tcpListenAddr, err := util.TCPAddrFromString(*statsdListenTCP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(logger).Log("msg", "invalid TCP listen address", "address", *statsdListenUDP, "error", err)
|
level.Error(logger).Log("msg", "invalid TCP listen address", "address", *statsdListenUDP, "error", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
|
@ -218,8 +330,8 @@ func main() {
|
||||||
}
|
}
|
||||||
defer tconn.Close()
|
defer tconn.Close()
|
||||||
|
|
||||||
tl := &StatsDTCPListener{conn: tconn, eventHandler: eventQueue, logger: logger}
|
tl := &listener.StatsDTCPListener{Conn: tconn, EventHandler: eventQueue, Logger: logger}
|
||||||
go tl.Listen()
|
go tl.Listen(linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, *sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
}
|
}
|
||||||
|
|
||||||
if *statsdListenUnixgram != "" {
|
if *statsdListenUnixgram != "" {
|
||||||
|
@ -247,8 +359,8 @@ func main() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ul := &StatsDUnixgramListener{conn: uxgconn, eventHandler: eventQueue, logger: logger}
|
ul := &listener.StatsDUnixgramListener{Conn: uxgconn, EventHandler: eventQueue, Logger: logger}
|
||||||
go ul.Listen()
|
go ul.Listen(unixgramPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
|
|
||||||
// if it's an abstract unix domain socket, it won't exist on fs
|
// if it's an abstract unix domain socket, it won't exist on fs
|
||||||
// so we can't chmod it either
|
// so we can't chmod it either
|
||||||
|
@ -291,12 +403,12 @@ func main() {
|
||||||
|
|
||||||
go configReloader(*mappingConfig, mapper, *cacheSize, logger, cacheOption)
|
go configReloader(*mappingConfig, mapper, *cacheSize, logger, cacheOption)
|
||||||
|
|
||||||
exporter := NewExporter(mapper, logger)
|
exporter := exporter.NewExporter(mapper, logger)
|
||||||
|
|
||||||
signals := make(chan os.Signal, 1)
|
signals := make(chan os.Signal, 1)
|
||||||
signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
|
signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
go exporter.Listen(events)
|
go exporter.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
|
||||||
<-signals
|
<-signals
|
||||||
}
|
}
|
||||||
|
|
133
pkg/event.go~
Normal file
133
pkg/event.go~
Normal file
|
@ -0,0 +1,133 @@
|
||||||
|
// Copyright 2013 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Event interface {
|
||||||
|
MetricName() string
|
||||||
|
Value() float64
|
||||||
|
Labels() map[string]string
|
||||||
|
MetricType() mapper.MetricType
|
||||||
|
}
|
||||||
|
|
||||||
|
type CounterEvent struct {
|
||||||
|
metricName string
|
||||||
|
value float64
|
||||||
|
labels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CounterEvent) MetricName() string { return c.metricName }
|
||||||
|
func (c *CounterEvent) Value() float64 { return c.value }
|
||||||
|
func (c *CounterEvent) Labels() map[string]string { return c.labels }
|
||||||
|
func (c *CounterEvent) MetricType() mapper.MetricType { return mapper.MetricTypeCounter }
|
||||||
|
|
||||||
|
type GaugeEvent struct {
|
||||||
|
metricName string
|
||||||
|
value float64
|
||||||
|
relative bool
|
||||||
|
labels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GaugeEvent) MetricName() string { return g.metricName }
|
||||||
|
func (g *GaugeEvent) Value() float64 { return g.value }
|
||||||
|
func (c *GaugeEvent) Labels() map[string]string { return c.labels }
|
||||||
|
func (c *GaugeEvent) MetricType() mapper.MetricType { return mapper.MetricTypeGauge }
|
||||||
|
|
||||||
|
type TimerEvent struct {
|
||||||
|
metricName string
|
||||||
|
value float64
|
||||||
|
labels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TimerEvent) MetricName() string { return t.metricName }
|
||||||
|
func (t *TimerEvent) Value() float64 { return t.value }
|
||||||
|
func (c *TimerEvent) Labels() map[string]string { return c.labels }
|
||||||
|
func (c *TimerEvent) MetricType() mapper.MetricType { return mapper.MetricTypeTimer }
|
||||||
|
|
||||||
|
type Events []Event
|
||||||
|
|
||||||
|
type eventQueue struct {
|
||||||
|
c chan Events
|
||||||
|
q Events
|
||||||
|
m sync.Mutex
|
||||||
|
flushThreshold int
|
||||||
|
flushTicker *time.Ticker
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventHandler interface {
|
||||||
|
queue(event Events)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEventQueue(c chan Events, flushThreshold int, flushInterval time.Duration) *eventQueue {
|
||||||
|
ticker := clock.NewTicker(flushInterval)
|
||||||
|
eq := &eventQueue{
|
||||||
|
c: c,
|
||||||
|
flushThreshold: flushThreshold,
|
||||||
|
flushTicker: ticker,
|
||||||
|
q: make([]Event, 0, flushThreshold),
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
<-ticker.C
|
||||||
|
eq.flush()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return eq
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *eventQueue) queue(events Events) {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
|
||||||
|
for _, e := range events {
|
||||||
|
eq.q = append(eq.q, e)
|
||||||
|
if len(eq.q) >= eq.flushThreshold {
|
||||||
|
eq.flushUnlocked()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *eventQueue) flush() {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
eq.flushUnlocked()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *eventQueue) flushUnlocked() {
|
||||||
|
eq.c <- eq.q
|
||||||
|
eq.q = make([]Event, 0, cap(eq.q))
|
||||||
|
eventsFlushed.Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *eventQueue) len() int {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
|
||||||
|
return len(eq.q)
|
||||||
|
}
|
||||||
|
|
||||||
|
type unbufferedEventHandler struct {
|
||||||
|
c chan Events
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ueh *unbufferedEventHandler) queue(events Events) {
|
||||||
|
ueh.c <- events
|
||||||
|
}
|
134
pkg/event/event.go
Normal file
134
pkg/event/event.go
Normal file
|
@ -0,0 +1,134 @@
|
||||||
|
// Copyright 2013 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package event
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Event interface {
|
||||||
|
MetricName() string
|
||||||
|
Value() float64
|
||||||
|
Labels() map[string]string
|
||||||
|
MetricType() mapper.MetricType
|
||||||
|
}
|
||||||
|
|
||||||
|
type CounterEvent struct {
|
||||||
|
CMetricName string
|
||||||
|
CValue float64
|
||||||
|
CLabels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CounterEvent) MetricName() string { return c.CMetricName }
|
||||||
|
func (c *CounterEvent) Value() float64 { return c.CValue }
|
||||||
|
func (c *CounterEvent) Labels() map[string]string { return c.CLabels }
|
||||||
|
func (c *CounterEvent) MetricType() mapper.MetricType { return mapper.MetricTypeCounter }
|
||||||
|
|
||||||
|
type GaugeEvent struct {
|
||||||
|
GMetricName string
|
||||||
|
GValue float64
|
||||||
|
GRelative bool
|
||||||
|
GLabels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GaugeEvent) MetricName() string { return g.GMetricName }
|
||||||
|
func (g *GaugeEvent) Value() float64 { return g.GValue }
|
||||||
|
func (c *GaugeEvent) Labels() map[string]string { return c.GLabels }
|
||||||
|
func (c *GaugeEvent) MetricType() mapper.MetricType { return mapper.MetricTypeGauge }
|
||||||
|
|
||||||
|
type TimerEvent struct {
|
||||||
|
TMetricName string
|
||||||
|
TValue float64
|
||||||
|
TLabels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TimerEvent) MetricName() string { return t.TMetricName }
|
||||||
|
func (t *TimerEvent) Value() float64 { return t.TValue }
|
||||||
|
func (c *TimerEvent) Labels() map[string]string { return c.TLabels }
|
||||||
|
func (c *TimerEvent) MetricType() mapper.MetricType { return mapper.MetricTypeTimer }
|
||||||
|
|
||||||
|
type Events []Event
|
||||||
|
|
||||||
|
type EventQueue struct {
|
||||||
|
C chan Events
|
||||||
|
q Events
|
||||||
|
m sync.Mutex
|
||||||
|
flushThreshold int
|
||||||
|
flushTicker *time.Ticker
|
||||||
|
}
|
||||||
|
|
||||||
|
type EventHandler interface {
|
||||||
|
Queue(event Events, eventsFlushed *prometheus.Counter)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEventQueue(c chan Events, flushThreshold int, flushInterval time.Duration, eventsFlushed prometheus.Counter) *EventQueue {
|
||||||
|
ticker := clock.NewTicker(flushInterval)
|
||||||
|
eq := &EventQueue{
|
||||||
|
C: c,
|
||||||
|
flushThreshold: flushThreshold,
|
||||||
|
flushTicker: ticker,
|
||||||
|
q: make([]Event, 0, flushThreshold),
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
<-ticker.C
|
||||||
|
eq.Flush(eventsFlushed)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return eq
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) Queue(events Events, eventsFlushed *prometheus.Counter) {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
|
||||||
|
for _, e := range events {
|
||||||
|
eq.q = append(eq.q, e)
|
||||||
|
if len(eq.q) >= eq.flushThreshold {
|
||||||
|
eq.FlushUnlocked(*eventsFlushed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) Flush(eventsFlushed prometheus.Counter) {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
eq.FlushUnlocked(eventsFlushed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) FlushUnlocked(eventsFlushed prometheus.Counter) {
|
||||||
|
eq.C <- eq.q
|
||||||
|
eq.q = make([]Event, 0, cap(eq.q))
|
||||||
|
eventsFlushed.Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) Len() int {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
|
||||||
|
return len(eq.q)
|
||||||
|
}
|
||||||
|
|
||||||
|
type UnbufferedEventHandler struct {
|
||||||
|
C chan Events
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ueh *UnbufferedEventHandler) Queue(events Events, eventsFlushed *prometheus.Counter) {
|
||||||
|
ueh.C <- events
|
||||||
|
}
|
134
pkg/event/event.go~
Normal file
134
pkg/event/event.go~
Normal file
|
@ -0,0 +1,134 @@
|
||||||
|
// Copyright 2013 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package event
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Event interface {
|
||||||
|
MetricName() string
|
||||||
|
Value() float64
|
||||||
|
Labels() map[string]string
|
||||||
|
MetricType() mapper.MetricType
|
||||||
|
}
|
||||||
|
|
||||||
|
type CounterEvent struct {
|
||||||
|
CMetricName string
|
||||||
|
CValue float64
|
||||||
|
CLabels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CounterEvent) MetricName() string { return c.CMetricName }
|
||||||
|
func (c *CounterEvent) Value() float64 { return c.CValue }
|
||||||
|
func (c *CounterEvent) Labels() map[string]string { return c.CLabels }
|
||||||
|
func (c *CounterEvent) MetricType() mapper.MetricType { return mapper.MetricTypeCounter }
|
||||||
|
|
||||||
|
type GaugeEvent struct {
|
||||||
|
GMetricName string
|
||||||
|
GValue float64
|
||||||
|
GRelative bool
|
||||||
|
GLabels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GaugeEvent) MetricName() string { return g.GMetricName }
|
||||||
|
func (g *GaugeEvent) Value() float64 { return g.GValue }
|
||||||
|
func (c *GaugeEvent) Labels() map[string]string { return c.GLabels }
|
||||||
|
func (c *GaugeEvent) MetricType() mapper.MetricType { return mapper.MetricTypeGauge }
|
||||||
|
|
||||||
|
type TimerEvent struct {
|
||||||
|
TMetricName string
|
||||||
|
TValue float64
|
||||||
|
TLabels map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TimerEvent) MetricName() string { return t.TMetricName }
|
||||||
|
func (t *TimerEvent) Value() float64 { return t.TValue }
|
||||||
|
func (c *TimerEvent) Labels() map[string]string { return c.TLabels }
|
||||||
|
func (c *TimerEvent) MetricType() mapper.MetricType { return mapper.MetricTypeTimer }
|
||||||
|
|
||||||
|
type Events []Event
|
||||||
|
|
||||||
|
type EventQueue struct {
|
||||||
|
C chan Events
|
||||||
|
q Events
|
||||||
|
m sync.Mutex
|
||||||
|
flushThreshold int
|
||||||
|
flushTicker *time.Ticker
|
||||||
|
}
|
||||||
|
|
||||||
|
type EventHandler interface {
|
||||||
|
Queue(event Events, eventsFlushed *prometheus.Counter)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEventQueue(c chan Events, flushThreshold int, flushInterval time.Duration, eventsFlushed prometheus.Counter) *EventQueue {
|
||||||
|
ticker := clock.NewTicker(flushInterval)
|
||||||
|
eq := &EventQueue{
|
||||||
|
C: c,
|
||||||
|
flushThreshold: flushThreshold,
|
||||||
|
flushTicker: ticker,
|
||||||
|
q: make([]Event, 0, flushThreshold),
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
<-ticker.C
|
||||||
|
eq.Flush(eventsFlushed)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return eq
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) Queue(events Events, eventsFlushed *prometheus.Counter) {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
|
||||||
|
for _, e := range events {
|
||||||
|
eq.q = append(eq.q, e)
|
||||||
|
if len(eq.q) >= eq.flushThreshold {
|
||||||
|
eq.FlushUnlocked(*eventsFlushed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) Flush(eventsFlushed prometheus.Counter) {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
eq.FlushUnlocked(eventsFlushed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) FlushUnlocked(eventsFlushed prometheus.Counter) {
|
||||||
|
eq.C <- eq.q
|
||||||
|
eq.q = make([]Event, 0, cap(eq.q))
|
||||||
|
eventsFlushed.Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *EventQueue) Len() int {
|
||||||
|
eq.m.Lock()
|
||||||
|
defer eq.m.Unlock()
|
||||||
|
|
||||||
|
return len(eq.q)
|
||||||
|
}
|
||||||
|
|
||||||
|
type UnbufferedEventHandler struct {
|
||||||
|
C chan Events
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ueh *UnbufferedEventHandler) Queue(events Events) {
|
||||||
|
ueh.C <- events
|
||||||
|
}
|
173
pkg/exporter/exporter.go
Normal file
173
pkg/exporter/exporter.go
Normal file
|
@ -0,0 +1,173 @@
|
||||||
|
package exporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/go-kit/kit/log/level"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/registry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultHelp = "Metric autogenerated by statsd_exporter."
|
||||||
|
regErrF = "Failed to update metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Exporter struct {
|
||||||
|
Mapper *mapper.MetricMapper
|
||||||
|
Registry *registry.Registry
|
||||||
|
Logger log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listen handles all events sent to the given channel sequentially. It
|
||||||
|
// terminates when the channel is closed.
|
||||||
|
func (b *Exporter) Listen(e <-chan event.Events, eventsActions *prometheus.CounterVec, eventsUnmapped prometheus.Counter,
|
||||||
|
errorEventStats *prometheus.CounterVec, eventStats *prometheus.CounterVec, conflictingEventStats *prometheus.CounterVec, metricsCount *prometheus.GaugeVec) {
|
||||||
|
|
||||||
|
removeStaleMetricsTicker := clock.NewTicker(time.Second)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-removeStaleMetricsTicker.C:
|
||||||
|
b.Registry.RemoveStaleMetrics()
|
||||||
|
case events, ok := <-e:
|
||||||
|
if !ok {
|
||||||
|
level.Debug(b.Logger).Log("msg", "Channel is closed. Break out of Exporter.Listener.")
|
||||||
|
removeStaleMetricsTicker.Stop()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, event := range events {
|
||||||
|
b.handleEvent(event, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleEvent processes a single Event according to the configured mapping.
|
||||||
|
func (b *Exporter) handleEvent(thisEvent event.Event, eventsActions *prometheus.CounterVec, eventsUnmapped prometheus.Counter,
|
||||||
|
errorEventStats *prometheus.CounterVec, eventStats *prometheus.CounterVec, conflictingEventStats *prometheus.CounterVec, metricsCount *prometheus.GaugeVec) {
|
||||||
|
|
||||||
|
mapping, labels, present := b.Mapper.GetMapping(thisEvent.MetricName(), thisEvent.MetricType())
|
||||||
|
if mapping == nil {
|
||||||
|
mapping = &mapper.MetricMapping{}
|
||||||
|
if b.Mapper.Defaults.Ttl != 0 {
|
||||||
|
mapping.Ttl = b.Mapper.Defaults.Ttl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if mapping.Action == mapper.ActionTypeDrop {
|
||||||
|
eventsActions.WithLabelValues("drop").Inc()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
metricName := ""
|
||||||
|
|
||||||
|
help := defaultHelp
|
||||||
|
if mapping.HelpText != "" {
|
||||||
|
help = mapping.HelpText
|
||||||
|
}
|
||||||
|
|
||||||
|
prometheusLabels := thisEvent.Labels()
|
||||||
|
if present {
|
||||||
|
if mapping.Name == "" {
|
||||||
|
level.Debug(b.Logger).Log("msg", "The mapping generates an empty metric name", "metric_name", thisEvent.MetricName(), "match", mapping.Match)
|
||||||
|
errorEventStats.WithLabelValues("empty_metric_name").Inc()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
metricName = mapper.EscapeMetricName(mapping.Name)
|
||||||
|
for label, value := range labels {
|
||||||
|
prometheusLabels[label] = value
|
||||||
|
}
|
||||||
|
eventsActions.WithLabelValues(string(mapping.Action)).Inc()
|
||||||
|
} else {
|
||||||
|
eventsUnmapped.Inc()
|
||||||
|
metricName = mapper.EscapeMetricName(thisEvent.MetricName())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ev := thisEvent.(type) {
|
||||||
|
case *event.CounterEvent:
|
||||||
|
// We don't accept negative values for counters. Incrementing the counter with a negative number
|
||||||
|
// will cause the exporter to panic. Instead we will warn and continue to the next event.
|
||||||
|
if thisEvent.Value() < 0.0 {
|
||||||
|
level.Debug(b.Logger).Log("msg", "counter must be non-negative value", "metric", metricName, "event_value", thisEvent.Value())
|
||||||
|
errorEventStats.WithLabelValues("illegal_negative_counter").Inc()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
counter, err := b.Registry.GetCounter(metricName, prometheusLabels, help, mapping, metricsCount)
|
||||||
|
if err == nil {
|
||||||
|
counter.Add(thisEvent.Value())
|
||||||
|
eventStats.WithLabelValues("counter").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("counter").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
case *event.GaugeEvent:
|
||||||
|
gauge, err := b.Registry.GetGauge(metricName, prometheusLabels, help, mapping, metricsCount)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
if ev.GRelative {
|
||||||
|
gauge.Add(thisEvent.Value())
|
||||||
|
} else {
|
||||||
|
gauge.Set(thisEvent.Value())
|
||||||
|
}
|
||||||
|
eventStats.WithLabelValues("gauge").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("gauge").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
case *event.TimerEvent:
|
||||||
|
t := mapper.TimerTypeDefault
|
||||||
|
if mapping != nil {
|
||||||
|
t = mapping.TimerType
|
||||||
|
}
|
||||||
|
if t == mapper.TimerTypeDefault {
|
||||||
|
t = b.Mapper.Defaults.TimerType
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case mapper.TimerTypeHistogram:
|
||||||
|
histogram, err := b.Registry.GetHistogram(metricName, prometheusLabels, help, mapping, metricsCount)
|
||||||
|
if err == nil {
|
||||||
|
histogram.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond
|
||||||
|
eventStats.WithLabelValues("timer").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("timer").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
case mapper.TimerTypeDefault, mapper.TimerTypeSummary:
|
||||||
|
summary, err := b.Registry.GetSummary(metricName, prometheusLabels, help, mapping, metricsCount)
|
||||||
|
if err == nil {
|
||||||
|
summary.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond
|
||||||
|
eventStats.WithLabelValues("timer").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("timer").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
level.Error(b.Logger).Log("msg", "unknown timer type", "type", t)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
level.Debug(b.Logger).Log("msg", "Unsupported event type")
|
||||||
|
eventStats.WithLabelValues("illegal").Inc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewExporter(mapper *mapper.MetricMapper, logger log.Logger) *Exporter {
|
||||||
|
return &Exporter{
|
||||||
|
Mapper: mapper,
|
||||||
|
Registry: registry.NewRegistry(mapper),
|
||||||
|
Logger: logger,
|
||||||
|
}
|
||||||
|
}
|
172
pkg/exporter/exporter.go~
Normal file
172
pkg/exporter/exporter.go~
Normal file
|
@ -0,0 +1,172 @@
|
||||||
|
package exporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/go-kit/kit/log/level"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/registry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultHelp = "Metric autogenerated by statsd_exporter."
|
||||||
|
regErrF = "Failed to update metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Exporter struct {
|
||||||
|
Mapper *mapper.MetricMapper
|
||||||
|
Registry *registry.Registry
|
||||||
|
Logger log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listen handles all events sent to the given channel sequentially. It
|
||||||
|
// terminates when the channel is closed.
|
||||||
|
func (b *Exporter) Listen(e <-chan event.Events, thisEvent event.Event, eventsActions prometheus.GaugeVec, eventsUnmapped prometheus.Gauge,
|
||||||
|
errorEventStats prometheus.GaugeVec, eventStats prometheus.GaugeVec, conflictingEventStats prometheus.GaugeVec, metricsCount prometheus.GaugeVec, l func(string, log.Logger)) {
|
||||||
|
removeStaleMetricsTicker := clock.NewTicker(time.Second)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-removeStaleMetricsTicker.C:
|
||||||
|
b.Registry.RemoveStaleMetrics()
|
||||||
|
case events, ok := <-e:
|
||||||
|
if !ok {
|
||||||
|
level.Debug(b.Logger).Log("msg", "Channel is closed. Break out of Exporter.Listener.")
|
||||||
|
removeStaleMetricsTicker.Stop()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, event := range events {
|
||||||
|
b.handleEvent(event, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount, l)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleEvent processes a single Event according to the configured mapping.
|
||||||
|
func (b *Exporter) handleEvent(thisEvent event.Event, eventsActions prometheus.GaugeVec, eventsUnmapped prometheus.Gauge,
|
||||||
|
errorEventStats prometheus.GaugeVec, eventStats prometheus.GaugeVec, conflictingEventStats prometheus.GaugeVec, metricsCount prometheus.GaugeVec, l func(string, log.Logger)) {
|
||||||
|
|
||||||
|
mapping, labels, present := b.Mapper.GetMapping(thisEvent.MetricName(), thisEvent.MetricType())
|
||||||
|
if mapping == nil {
|
||||||
|
mapping = &mapper.MetricMapping{}
|
||||||
|
if b.Mapper.Defaults.Ttl != 0 {
|
||||||
|
mapping.Ttl = b.Mapper.Defaults.Ttl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if mapping.Action == mapper.ActionTypeDrop {
|
||||||
|
eventsActions.WithLabelValues("drop").Inc()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
metricName := ""
|
||||||
|
|
||||||
|
help := defaultHelp
|
||||||
|
if mapping.HelpText != "" {
|
||||||
|
help = mapping.HelpText
|
||||||
|
}
|
||||||
|
|
||||||
|
prometheusLabels := thisEvent.Labels()
|
||||||
|
if present {
|
||||||
|
if mapping.Name == "" {
|
||||||
|
level.Debug(b.Logger).Log("msg", "The mapping generates an empty metric name", "metric_name", thisEvent.MetricName(), "match", mapping.Match)
|
||||||
|
errorEventStats.WithLabelValues("empty_metric_name").Inc()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
metricName = mapper.EscapeMetricName(mapping.Name)
|
||||||
|
for label, value := range labels {
|
||||||
|
prometheusLabels[label] = value
|
||||||
|
}
|
||||||
|
eventsActions.WithLabelValues(string(mapping.Action)).Inc()
|
||||||
|
} else {
|
||||||
|
eventsUnmapped.Inc()
|
||||||
|
metricName = mapper.EscapeMetricName(thisEvent.MetricName())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ev := thisEvent.(type) {
|
||||||
|
case *event.CounterEvent:
|
||||||
|
// We don't accept negative values for counters. Incrementing the counter with a negative number
|
||||||
|
// will cause the exporter to panic. Instead we will warn and continue to the next event.
|
||||||
|
if thisEvent.Value() < 0.0 {
|
||||||
|
level.Debug(b.Logger).Log("msg", "counter must be non-negative value", "metric", metricName, "event_value", thisEvent.Value())
|
||||||
|
errorEventStats.WithLabelValues("illegal_negative_counter").Inc()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
counter, err := b.Registry.GetCounter(metricName, prometheusLabels, help, mapping, &metricsCount)
|
||||||
|
if err == nil {
|
||||||
|
counter.Add(thisEvent.Value())
|
||||||
|
eventStats.WithLabelValues("counter").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("counter").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
case *event.GaugeEvent:
|
||||||
|
gauge, err := b.Registry.GetGauge(metricName, prometheusLabels, help, mapping, &metricsCount)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
if ev.GRelative {
|
||||||
|
gauge.Add(thisEvent.Value())
|
||||||
|
} else {
|
||||||
|
gauge.Set(thisEvent.Value())
|
||||||
|
}
|
||||||
|
eventStats.WithLabelValues("gauge").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("gauge").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
case *event.TimerEvent:
|
||||||
|
t := mapper.TimerTypeDefault
|
||||||
|
if mapping != nil {
|
||||||
|
t = mapping.TimerType
|
||||||
|
}
|
||||||
|
if t == mapper.TimerTypeDefault {
|
||||||
|
t = b.Mapper.Defaults.TimerType
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case mapper.TimerTypeHistogram:
|
||||||
|
histogram, err := b.Registry.GetHistogram(metricName, prometheusLabels, help, mapping, &metricsCount)
|
||||||
|
if err == nil {
|
||||||
|
histogram.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond
|
||||||
|
eventStats.WithLabelValues("timer").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("timer").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
case mapper.TimerTypeDefault, mapper.TimerTypeSummary:
|
||||||
|
summary, err := b.Registry.GetSummary(metricName, prometheusLabels, help, mapping, &metricsCount)
|
||||||
|
if err == nil {
|
||||||
|
summary.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond
|
||||||
|
eventStats.WithLabelValues("timer").Inc()
|
||||||
|
} else {
|
||||||
|
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err)
|
||||||
|
conflictingEventStats.WithLabelValues("timer").Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
level.Error(b.Logger).Log("msg", "unknown timer type", "type", t)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
level.Debug(b.Logger).Log("msg", "Unsupported event type")
|
||||||
|
eventStats.WithLabelValues("illegal").Inc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewExporter(mapper *mapper.MetricMapper, logger log.Logger) *Exporter {
|
||||||
|
return &Exporter{
|
||||||
|
Mapper: mapper,
|
||||||
|
Registry: registry.NewRegistry(mapper),
|
||||||
|
Logger: logger,
|
||||||
|
}
|
||||||
|
}
|
241
pkg/line/line.go
Normal file
241
pkg/line/line.go
Normal file
|
@ -0,0 +1,241 @@
|
||||||
|
package line
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/go-kit/kit/log/level"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
)
|
||||||
|
|
||||||
|
func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (event.Event, error) {
|
||||||
|
switch statType {
|
||||||
|
case "c":
|
||||||
|
return &event.CounterEvent{
|
||||||
|
CMetricName: metric,
|
||||||
|
CValue: float64(value),
|
||||||
|
CLabels: labels,
|
||||||
|
}, nil
|
||||||
|
case "g":
|
||||||
|
return &event.GaugeEvent{
|
||||||
|
GMetricName: metric,
|
||||||
|
GValue: float64(value),
|
||||||
|
GRelative: relative,
|
||||||
|
GLabels: labels,
|
||||||
|
}, nil
|
||||||
|
case "ms", "h", "d":
|
||||||
|
return &event.TimerEvent{
|
||||||
|
TMetricName: metric,
|
||||||
|
TValue: float64(value),
|
||||||
|
TLabels: labels,
|
||||||
|
}, nil
|
||||||
|
case "s":
|
||||||
|
return nil, fmt.Errorf("no support for StatsD sets")
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("bad stat type %s", statType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTag(component, tag string, separator rune, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) {
|
||||||
|
// Entirely empty tag is an error
|
||||||
|
if len(tag) == 0 {
|
||||||
|
tagErrors.Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Empty name tag", "component", component)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, c := range tag {
|
||||||
|
if c == separator {
|
||||||
|
k := tag[:i]
|
||||||
|
v := tag[i+1:]
|
||||||
|
|
||||||
|
if len(k) == 0 || len(v) == 0 {
|
||||||
|
// Empty key or value is an error
|
||||||
|
tagErrors.Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Malformed name tag", "k", k, "v", v, "component", component)
|
||||||
|
} else {
|
||||||
|
labels[mapper.EscapeMetricName(k)] = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Missing separator (no value) is an error
|
||||||
|
tagErrors.Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Malformed name tag", "tag", tag, "component", component)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseNameTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) {
|
||||||
|
lastTagEndIndex := 0
|
||||||
|
for i, c := range component {
|
||||||
|
if c == ',' {
|
||||||
|
tag := component[lastTagEndIndex:i]
|
||||||
|
lastTagEndIndex = i + 1
|
||||||
|
parseTag(component, tag, '=', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're not off the end of the string, add the last tag
|
||||||
|
if lastTagEndIndex < len(component) {
|
||||||
|
tag := component[lastTagEndIndex:]
|
||||||
|
parseTag(component, tag, '=', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func trimLeftHash(s string) string {
|
||||||
|
if s != "" && s[0] == '#' {
|
||||||
|
return s[1:]
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseDogStatsDTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) {
|
||||||
|
lastTagEndIndex := 0
|
||||||
|
for i, c := range component {
|
||||||
|
if c == ',' {
|
||||||
|
tag := component[lastTagEndIndex:i]
|
||||||
|
lastTagEndIndex = i + 1
|
||||||
|
parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're not off the end of the string, add the last tag
|
||||||
|
if lastTagEndIndex < len(component) {
|
||||||
|
tag := component[lastTagEndIndex:]
|
||||||
|
parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseNameAndTags(name string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) string {
|
||||||
|
for i, c := range name {
|
||||||
|
// `#` delimits start of tags by Librato
|
||||||
|
// https://www.librato.com/docs/kb/collect/collection_agents/stastd/#stat-level-tags
|
||||||
|
// `,` delimits start of tags by InfluxDB
|
||||||
|
// https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd
|
||||||
|
if c == '#' || c == ',' {
|
||||||
|
parseNameTags(name[i+1:], labels, tagErrors, logger)
|
||||||
|
return name[:i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
func LineToEvents(line string, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter, logger log.Logger) event.Events {
|
||||||
|
events := event.Events{}
|
||||||
|
if line == "" {
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
elements := strings.SplitN(line, ":", 2)
|
||||||
|
if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
|
||||||
|
sampleErrors.WithLabelValues("malformed_line").Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Bad line from StatsD", "line", line)
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
labels := map[string]string{}
|
||||||
|
metric := parseNameAndTags(elements[0], labels, tagErrors, logger)
|
||||||
|
|
||||||
|
var samples []string
|
||||||
|
if strings.Contains(elements[1], "|#") {
|
||||||
|
// using DogStatsD tags
|
||||||
|
|
||||||
|
// don't allow mixed tagging styles
|
||||||
|
if len(labels) > 0 {
|
||||||
|
sampleErrors.WithLabelValues("mixed_tagging_styles").Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Bad line (multiple tagging styles) from StatsD", "line", line)
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable multi-metrics
|
||||||
|
samples = elements[1:]
|
||||||
|
} else {
|
||||||
|
samples = strings.Split(elements[1], ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
samples:
|
||||||
|
for _, sample := range samples {
|
||||||
|
samplesReceived.Inc()
|
||||||
|
components := strings.Split(sample, "|")
|
||||||
|
samplingFactor := 1.0
|
||||||
|
if len(components) < 2 || len(components) > 4 {
|
||||||
|
sampleErrors.WithLabelValues("malformed_component").Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Bad component", "line", line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
valueStr, statType := components[0], components[1]
|
||||||
|
|
||||||
|
var relative = false
|
||||||
|
if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 {
|
||||||
|
relative = true
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := strconv.ParseFloat(valueStr, 64)
|
||||||
|
if err != nil {
|
||||||
|
level.Debug(logger).Log("msg", "Bad value", "value", valueStr, "line", line)
|
||||||
|
sampleErrors.WithLabelValues("malformed_value").Inc()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
multiplyEvents := 1
|
||||||
|
if len(components) >= 3 {
|
||||||
|
for _, component := range components[2:] {
|
||||||
|
if len(component) == 0 {
|
||||||
|
level.Debug(logger).Log("msg", "Empty component", "line", line)
|
||||||
|
sampleErrors.WithLabelValues("malformed_component").Inc()
|
||||||
|
continue samples
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, component := range components[2:] {
|
||||||
|
switch component[0] {
|
||||||
|
case '@':
|
||||||
|
|
||||||
|
samplingFactor, err = strconv.ParseFloat(component[1:], 64)
|
||||||
|
if err != nil {
|
||||||
|
level.Debug(logger).Log("msg", "Invalid sampling factor", "component", component[1:], "line", line)
|
||||||
|
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
|
||||||
|
}
|
||||||
|
if samplingFactor == 0 {
|
||||||
|
samplingFactor = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if statType == "g" {
|
||||||
|
continue
|
||||||
|
} else if statType == "c" {
|
||||||
|
value /= samplingFactor
|
||||||
|
} else if statType == "ms" || statType == "h" || statType == "d" {
|
||||||
|
multiplyEvents = int(1 / samplingFactor)
|
||||||
|
}
|
||||||
|
case '#':
|
||||||
|
ParseDogStatsDTags(component[1:], labels, tagErrors, logger)
|
||||||
|
default:
|
||||||
|
level.Debug(logger).Log("msg", "Invalid sampling factor or tag section", "component", components[2], "line", line)
|
||||||
|
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(labels) > 0 {
|
||||||
|
tagsReceived.Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < multiplyEvents; i++ {
|
||||||
|
event, err := buildEvent(statType, metric, value, relative, labels)
|
||||||
|
if err != nil {
|
||||||
|
level.Debug(logger).Log("msg", "Error building event", "line", line, "error", err)
|
||||||
|
sampleErrors.WithLabelValues("illegal_event").Inc()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
events = append(events, event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return events
|
||||||
|
}
|
241
pkg/line/line.go~
Normal file
241
pkg/line/line.go~
Normal file
|
@ -0,0 +1,241 @@
|
||||||
|
package line
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/go-kit/kit/log/level"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
)
|
||||||
|
|
||||||
|
func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (event.Event, error) {
|
||||||
|
switch statType {
|
||||||
|
case "c":
|
||||||
|
return &event.CounterEvent{
|
||||||
|
CMetricName: metric,
|
||||||
|
CValue: float64(value),
|
||||||
|
CLabels: labels,
|
||||||
|
}, nil
|
||||||
|
case "g":
|
||||||
|
return &event.GaugeEvent{
|
||||||
|
GMetricName: metric,
|
||||||
|
GValue: float64(value),
|
||||||
|
GRelative: relative,
|
||||||
|
GLabels: labels,
|
||||||
|
}, nil
|
||||||
|
case "ms", "h", "d":
|
||||||
|
return &event.TimerEvent{
|
||||||
|
TMetricName: metric,
|
||||||
|
TValue: float64(value),
|
||||||
|
TLabels: labels,
|
||||||
|
}, nil
|
||||||
|
case "s":
|
||||||
|
return nil, fmt.Errorf("no support for StatsD sets")
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("bad stat type %s", statType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTag(component, tag string, separator rune, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) {
|
||||||
|
// Entirely empty tag is an error
|
||||||
|
if len(tag) == 0 {
|
||||||
|
tagErrors.Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Empty name tag", "component", component)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, c := range tag {
|
||||||
|
if c == separator {
|
||||||
|
k := tag[:i]
|
||||||
|
v := tag[i+1:]
|
||||||
|
|
||||||
|
if len(k) == 0 || len(v) == 0 {
|
||||||
|
// Empty key or value is an error
|
||||||
|
tagErrors.Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Malformed name tag", "k", k, "v", v, "component", component)
|
||||||
|
} else {
|
||||||
|
labels[mapper.EscapeMetricName(k)] = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Missing separator (no value) is an error
|
||||||
|
tagErrors.Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Malformed name tag", "tag", tag, "component", component)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseNameTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) {
|
||||||
|
lastTagEndIndex := 0
|
||||||
|
for i, c := range component {
|
||||||
|
if c == ',' {
|
||||||
|
tag := component[lastTagEndIndex:i]
|
||||||
|
lastTagEndIndex = i + 1
|
||||||
|
parseTag(component, tag, '=', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're not off the end of the string, add the last tag
|
||||||
|
if lastTagEndIndex < len(component) {
|
||||||
|
tag := component[lastTagEndIndex:]
|
||||||
|
parseTag(component, tag, '=', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func trimLeftHash(s string) string {
|
||||||
|
if s != "" && s[0] == '#' {
|
||||||
|
return s[1:]
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDogStatsDTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) {
|
||||||
|
lastTagEndIndex := 0
|
||||||
|
for i, c := range component {
|
||||||
|
if c == ',' {
|
||||||
|
tag := component[lastTagEndIndex:i]
|
||||||
|
lastTagEndIndex = i + 1
|
||||||
|
parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're not off the end of the string, add the last tag
|
||||||
|
if lastTagEndIndex < len(component) {
|
||||||
|
tag := component[lastTagEndIndex:]
|
||||||
|
parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseNameAndTags(name string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) string {
|
||||||
|
for i, c := range name {
|
||||||
|
// `#` delimits start of tags by Librato
|
||||||
|
// https://www.librato.com/docs/kb/collect/collection_agents/stastd/#stat-level-tags
|
||||||
|
// `,` delimits start of tags by InfluxDB
|
||||||
|
// https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd
|
||||||
|
if c == '#' || c == ',' {
|
||||||
|
parseNameTags(name[i+1:], labels, tagErrors, logger)
|
||||||
|
return name[:i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
func LineToEvents(line string, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter, logger log.Logger) event.Events {
|
||||||
|
events := event.Events{}
|
||||||
|
if line == "" {
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
elements := strings.SplitN(line, ":", 2)
|
||||||
|
if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
|
||||||
|
sampleErrors.WithLabelValues("malformed_line").Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Bad line from StatsD", "line", line)
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
labels := map[string]string{}
|
||||||
|
metric := parseNameAndTags(elements[0], labels, tagErrors, logger)
|
||||||
|
|
||||||
|
var samples []string
|
||||||
|
if strings.Contains(elements[1], "|#") {
|
||||||
|
// using DogStatsD tags
|
||||||
|
|
||||||
|
// don't allow mixed tagging styles
|
||||||
|
if len(labels) > 0 {
|
||||||
|
sampleErrors.WithLabelValues("mixed_tagging_styles").Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Bad line (multiple tagging styles) from StatsD", "line", line)
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable multi-metrics
|
||||||
|
samples = elements[1:]
|
||||||
|
} else {
|
||||||
|
samples = strings.Split(elements[1], ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
samples:
|
||||||
|
for _, sample := range samples {
|
||||||
|
samplesReceived.Inc()
|
||||||
|
components := strings.Split(sample, "|")
|
||||||
|
samplingFactor := 1.0
|
||||||
|
if len(components) < 2 || len(components) > 4 {
|
||||||
|
sampleErrors.WithLabelValues("malformed_component").Inc()
|
||||||
|
level.Debug(logger).Log("msg", "Bad component", "line", line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
valueStr, statType := components[0], components[1]
|
||||||
|
|
||||||
|
var relative = false
|
||||||
|
if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 {
|
||||||
|
relative = true
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := strconv.ParseFloat(valueStr, 64)
|
||||||
|
if err != nil {
|
||||||
|
level.Debug(logger).Log("msg", "Bad value", "value", valueStr, "line", line)
|
||||||
|
sampleErrors.WithLabelValues("malformed_value").Inc()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
multiplyEvents := 1
|
||||||
|
if len(components) >= 3 {
|
||||||
|
for _, component := range components[2:] {
|
||||||
|
if len(component) == 0 {
|
||||||
|
level.Debug(logger).Log("msg", "Empty component", "line", line)
|
||||||
|
sampleErrors.WithLabelValues("malformed_component").Inc()
|
||||||
|
continue samples
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, component := range components[2:] {
|
||||||
|
switch component[0] {
|
||||||
|
case '@':
|
||||||
|
|
||||||
|
samplingFactor, err = strconv.ParseFloat(component[1:], 64)
|
||||||
|
if err != nil {
|
||||||
|
level.Debug(logger).Log("msg", "Invalid sampling factor", "component", component[1:], "line", line)
|
||||||
|
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
|
||||||
|
}
|
||||||
|
if samplingFactor == 0 {
|
||||||
|
samplingFactor = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if statType == "g" {
|
||||||
|
continue
|
||||||
|
} else if statType == "c" {
|
||||||
|
value /= samplingFactor
|
||||||
|
} else if statType == "ms" || statType == "h" || statType == "d" {
|
||||||
|
multiplyEvents = int(1 / samplingFactor)
|
||||||
|
}
|
||||||
|
case '#':
|
||||||
|
parseDogStatsDTags(component[1:], labels, tagErrors, logger)
|
||||||
|
default:
|
||||||
|
level.Debug(logger).Log("msg", "Invalid sampling factor or tag section", "component", components[2], "line", line)
|
||||||
|
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(labels) > 0 {
|
||||||
|
tagsReceived.Inc()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < multiplyEvents; i++ {
|
||||||
|
event, err := buildEvent(statType, metric, value, relative, labels)
|
||||||
|
if err != nil {
|
||||||
|
level.Debug(logger).Log("msg", "Error building event", "line", line, "error", err)
|
||||||
|
sampleErrors.WithLabelValues("illegal_event").Inc()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
events = append(events, event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return events
|
||||||
|
}
|
138
pkg/listener/listener.go
Normal file
138
pkg/listener/listener.go
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
package listener
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/go-kit/kit/log/level"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
pkgLine "github.com/prometheus/statsd_exporter/pkg/line"
|
||||||
|
)
|
||||||
|
|
||||||
|
type StatsDUDPListener struct {
|
||||||
|
Conn *net.UDPConn
|
||||||
|
EventHandler event.EventHandler
|
||||||
|
Logger log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUDPListener) SetEventHandler(eh event.EventHandler) {
|
||||||
|
l.EventHandler = eh
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUDPListener) Listen(udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
|
buf := make([]byte, 65535)
|
||||||
|
for {
|
||||||
|
n, _, err := l.Conn.ReadFromUDP(buf)
|
||||||
|
if err != nil {
|
||||||
|
// https://github.com/golang/go/issues/4373
|
||||||
|
// ignore net: errClosing error as it will occur during shutdown
|
||||||
|
if strings.HasSuffix(err.Error(), "use of closed network connection") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
level.Error(l.Logger).Log("error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.HandlePacket(buf[0:n], udpPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUDPListener) HandlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
|
udpPackets.Inc()
|
||||||
|
lines := strings.Split(string(packet), "\n")
|
||||||
|
for _, line := range lines {
|
||||||
|
linesReceived.Inc()
|
||||||
|
l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatsDTCPListener accepts TCP connections carrying newline-delimited
// statsd lines and forwards parsed events to EventHandler.
type StatsDTCPListener struct {
	Conn         *net.TCPListener
	EventHandler event.EventHandler
	Logger       log.Logger
}

// SetEventHandler wires the handler that will receive parsed statsd events.
func (l *StatsDTCPListener) SetEventHandler(eh event.EventHandler) {
	l.EventHandler = eh
}

// Listen accepts TCP connections until the listener is closed, spawning one
// goroutine per connection. A non-shutdown accept error terminates the
// process via os.Exit(1).
func (l *StatsDTCPListener) Listen(linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
	for {
		c, err := l.Conn.AcceptTCP()
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			level.Error(l.Logger).Log("msg", "AcceptTCP failed", "error", err)
			os.Exit(1)
		}
		go l.HandleConn(c, linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, sampleErrors, samplesReceived, tagErrors, tagsReceived)
	}
}

// HandleConn reads statsd lines from a single TCP connection until EOF, a
// read error, or an over-long line (bufio's buffer limit), queueing the
// parsed events. The connection is always closed on return.
func (l *StatsDTCPListener) HandleConn(c *net.TCPConn, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
	defer c.Close()

	tcpConnections.Inc()

	r := bufio.NewReader(c)
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			// EOF is the normal end of a connection; anything else counts
			// as a TCP error.
			if err != io.EOF {
				tcpErrors.Inc()
				level.Debug(l.Logger).Log("msg", "Read failed", "addr", c.RemoteAddr(), "error", err)
			}
			break
		}
		// isPrefix means the line exceeded the reader's buffer; we drop the
		// whole connection rather than process a truncated line.
		if isPrefix {
			tcpLineTooLong.Inc()
			level.Debug(l.Logger).Log("msg", "Read failed: line too long", "addr", c.RemoteAddr())
			break
		}
		linesReceived.Inc()
		l.EventHandler.Queue(pkgLine.LineToEvents(string(line), sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed)
	}
}
|
||||||
|
|
||||||
|
type StatsDUnixgramListener struct {
|
||||||
|
Conn *net.UnixConn
|
||||||
|
EventHandler event.EventHandler
|
||||||
|
Logger log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUnixgramListener) SetEventHandler(eh event.EventHandler) {
|
||||||
|
l.EventHandler = eh
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUnixgramListener) Listen(unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
|
buf := make([]byte, 65535)
|
||||||
|
for {
|
||||||
|
n, _, err := l.Conn.ReadFromUnix(buf)
|
||||||
|
if err != nil {
|
||||||
|
// https://github.com/golang/go/issues/4373
|
||||||
|
// ignore net: errClosing error as it will occur during shutdown
|
||||||
|
if strings.HasSuffix(err.Error(), "use of closed network connection") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
level.Error(l.Logger).Log(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
l.HandlePacket(buf[:n], unixgramPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUnixgramListener) HandlePacket(packet []byte, unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
|
unixgramPackets.Inc()
|
||||||
|
lines := strings.Split(string(packet), "\n")
|
||||||
|
for _, line := range lines {
|
||||||
|
linesReceived.Inc()
|
||||||
|
l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed)
|
||||||
|
}
|
||||||
|
}
|
138
pkg/listener/listener.go~
Normal file
138
pkg/listener/listener.go~
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
package listener
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/go-kit/kit/log"
|
||||||
|
"github.com/go-kit/kit/log/level"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/event"
|
||||||
|
pkgLine "github.com/prometheus/statsd_exporter/pkg/line"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StatsDUDPListener reads statsd datagrams from a UDP socket and forwards
// parsed events to EventHandler.
// NOTE(review): this lives in an editor backup file (listener.go~) shipped
// alongside listener.go; it appears to be stale scaffolding, not live code.
type StatsDUDPListener struct {
	Conn         *net.UDPConn
	EventHandler event.EventHandler
	Logger       log.Logger
}

// SetEventHandler wires the handler that will receive parsed statsd events.
func (l *StatsDUDPListener) SetEventHandler(eh event.EventHandler) {
	l.EventHandler = eh
}

// Listen reads UDP datagrams in a loop and hands each one to handlePacket.
// It returns when the connection is closed; any other read error is logged
// and also terminates the loop.
func (l *StatsDUDPListener) Listen(udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
	// 65535 bytes is the maximum theoretical UDP payload size.
	buf := make([]byte, 65535)
	for {
		n, _, err := l.Conn.ReadFromUDP(buf)
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			level.Error(l.Logger).Log("error", err)
			return
		}
		l.handlePacket(buf[0:n], udpPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived)
	}
}

// handlePacket splits one datagram into newline-separated statsd lines,
// converts each line to events, and queues them on the EventHandler.
func (l *StatsDUDPListener) handlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
	udpPackets.Inc()
	lines := strings.Split(string(packet), "\n")
	for _, line := range lines {
		linesReceived.Inc()
		l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed)
	}
}
|
||||||
|
|
||||||
|
// StatsDTCPListener accepts TCP connections carrying newline-delimited
// statsd lines and forwards parsed events to EventHandler.
// NOTE(review): this lives in an editor backup file (listener.go~); it
// appears to be stale scaffolding, not live code.
type StatsDTCPListener struct {
	Conn         *net.TCPListener
	EventHandler event.EventHandler
	Logger       log.Logger
}

// SetEventHandler wires the handler that will receive parsed statsd events.
func (l *StatsDTCPListener) SetEventHandler(eh event.EventHandler) {
	l.EventHandler = eh
}

// Listen accepts TCP connections until the listener is closed, spawning one
// goroutine per connection. A non-shutdown accept error terminates the
// process via os.Exit(1).
func (l *StatsDTCPListener) Listen(linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
	for {
		c, err := l.Conn.AcceptTCP()
		if err != nil {
			// https://github.com/golang/go/issues/4373
			// ignore net: errClosing error as it will occur during shutdown
			if strings.HasSuffix(err.Error(), "use of closed network connection") {
				return
			}
			level.Error(l.Logger).Log("msg", "AcceptTCP failed", "error", err)
			os.Exit(1)
		}
		go l.handleConn(c, linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, sampleErrors, samplesReceived, tagErrors, tagsReceived)
	}
}

// handleConn reads statsd lines from a single TCP connection until EOF, a
// read error, or an over-long line, queueing the parsed events. The
// connection is always closed on return.
func (l *StatsDTCPListener) handleConn(c *net.TCPConn, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
	defer c.Close()

	tcpConnections.Inc()

	r := bufio.NewReader(c)
	for {
		line, isPrefix, err := r.ReadLine()
		if err != nil {
			// EOF is the normal end of a connection; anything else counts
			// as a TCP error.
			if err != io.EOF {
				tcpErrors.Inc()
				level.Debug(l.Logger).Log("msg", "Read failed", "addr", c.RemoteAddr(), "error", err)
			}
			break
		}
		// isPrefix means the line exceeded the reader's buffer; drop the
		// whole connection rather than process a truncated line.
		if isPrefix {
			tcpLineTooLong.Inc()
			level.Debug(l.Logger).Log("msg", "Read failed: line too long", "addr", c.RemoteAddr())
			break
		}
		linesReceived.Inc()
		l.EventHandler.Queue(pkgLine.LineToEvents(string(line), sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed)
	}
}
|
||||||
|
|
||||||
|
type StatsDUnixgramListener struct {
|
||||||
|
Conn *net.UnixConn
|
||||||
|
EventHandler event.EventHandler
|
||||||
|
Logger log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUnixgramListener) SetEventHandler(eh event.EventHandler) {
|
||||||
|
l.EventHandler = eh
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUnixgramListener) Listen(unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
|
buf := make([]byte, 65535)
|
||||||
|
for {
|
||||||
|
n, _, err := l.Conn.ReadFromUnix(buf)
|
||||||
|
if err != nil {
|
||||||
|
// https://github.com/golang/go/issues/4373
|
||||||
|
// ignore net: errClosing error as it will occur during shutdown
|
||||||
|
if strings.HasSuffix(err.Error(), "use of closed network connection") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
level.Error(l.Logger).Log(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
l.handlePacket(buf[:n], unixgramPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *StatsDUnixgramListener) handlePacket(packet []byte, unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) {
|
||||||
|
unixgramPackets.Inc()
|
||||||
|
lines := strings.Split(string(packet), "\n")
|
||||||
|
for _, line := range lines {
|
||||||
|
linesReceived.Inc()
|
||||||
|
l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed)
|
||||||
|
}
|
||||||
|
}
|
54
pkg/metrics/metrics.go
Normal file
54
pkg/metrics/metrics.go
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricType identifies which kind of Prometheus metric a tracked name maps
// to (counter, gauge, summary, or histogram).
type MetricType int

const (
	CounterMetricType MetricType = iota
	GaugeMetricType
	SummaryMetricType
	HistogramMetricType
)

// NameHash is a hash computed over a metric's label names.
type NameHash uint64

// ValueHash is a hash computed over a metric's label names plus label values.
type ValueHash uint64

// LabelHash bundles the two hashes used to index a metric's vectors and
// individual time series.
type LabelHash struct {
	// This is a hash over the label names
	Names NameHash
	// This is a hash over the label names + label values
	Values ValueHash
}

// MetricHolder is a stand-in for any concrete Prometheus metric
// (counter, gauge, observer, ...).
type MetricHolder interface{}

// VectorHolder abstracts a Prometheus metric vector; Delete removes the
// child time series with the given labels.
type VectorHolder interface {
	Delete(label prometheus.Labels) bool
}

// Vector pairs a metric vector with a count of the live time series that
// reference it.
type Vector struct {
	Holder   VectorHolder
	RefCount uint64
}

// Metric is the bookkeeping record for one metric name: its type, its
// vectors keyed by label-name hash, and its time series keyed by
// label-name+value hash.
type Metric struct {
	MetricType MetricType
	// Vectors key is the hash of the label names
	Vectors map[NameHash]*Vector
	// Metrics key is a hash of the label names + label values
	Metrics map[ValueHash]*RegisteredMetric
}

// RegisteredMetric records one live time series: when it was last written,
// its labels, its expiry TTL, the underlying metric, and the key of the
// vector it belongs to.
type RegisteredMetric struct {
	LastRegisteredAt time.Time
	Labels           prometheus.Labels
	TTL              time.Duration
	Metric           MetricHolder
	VecKey           NameHash
}
|
42
pkg/metrics/metrics.go~
Normal file
42
pkg/metrics/metrics.go~
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE(review): the declarations below come from an editor backup file
// (metrics.go~) that was committed by accident. They are the pre-export
// (lowercase) versions of the types in metrics.go, and the visible portion
// references `registeredMetric`, which is not declared here — this file is
// incomplete and should likely be removed from the repository.

// metricType identifies which kind of Prometheus metric a name maps to.
type metricType int

const (
	CounterMetricType metricType = iota
	GaugeMetricType
	SummaryMetricType
	HistogramMetricType
)

// nameHash is a hash over a metric's label names.
type nameHash uint64

// valueHash is a hash over label names plus label values.
type valueHash uint64

// labelHash bundles the two hashes used to index vectors and time series.
type labelHash struct {
	// This is a hash over the label names
	names nameHash
	// This is a hash over the label names + label values
	values valueHash
}

// metricHolder is a stand-in for any concrete Prometheus metric.
type metricHolder interface{}

// vectorHolder abstracts a Prometheus metric vector.
type vectorHolder interface {
	Delete(label prometheus.Labels) bool
}

// vector pairs a metric vector with a live-series reference count.
type vector struct {
	holder   vectorHolder
	refCount uint64
}

// metric is the bookkeeping record for one metric name.
type metric struct {
	metricType metricType
	// Vectors key is the hash of the label names
	vectors map[nameHash]*vector
	// Metrics key is a hash of the label names + label values
	metrics map[valueHash]*registeredMetric
}
|
370
pkg/registry/registry.go
Normal file
370
pkg/registry/registry.go
Normal file
|
@ -0,0 +1,370 @@
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"hash/fnv"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/clock"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/mapper"
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/metrics"
|
||||||
|
)
|
||||||
|
|
||||||
|
// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
// This allows incoming metrics to have inconsistent label sets
type uncheckedCollector struct {
	c prometheus.Collector
}

// Describe intentionally reports nothing, which makes the wrapped collector
// "unchecked" as far as the Prometheus registry is concerned.
func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}

// Collect delegates to the wrapped collector.
func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
	u.c.Collect(c)
}

// Registry tracks every statsd-derived metric by name, together with the
// mapper used to translate statsd lines, and scratch state for label
// hashing.
type Registry struct {
	Metrics map[string]metrics.Metric
	Mapper  *mapper.MetricMapper
	// The below value and label variables are allocated in the registry struct
	// so that we don't have to allocate them every time have to compute a label
	// hash.
	ValueBuf, NameBuf bytes.Buffer
	Hasher            hash.Hash64
}
|
||||||
|
|
||||||
|
func NewRegistry(mapper *mapper.MetricMapper) *Registry {
|
||||||
|
return &Registry{
|
||||||
|
Metrics: make(map[string]metrics.Metric),
|
||||||
|
Mapper: mapper,
|
||||||
|
Hasher: fnv.New64a(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Registry) MetricConflicts(metricName string, metricType metrics.MetricType) bool {
|
||||||
|
vector, hasMetrics := r.Metrics[metricName]
|
||||||
|
if !hasMetrics {
|
||||||
|
// No metrics.Metric with this name exists
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if vector.MetricType == metricType {
|
||||||
|
// We've found a copy of this metrics.Metric with this type, but different
|
||||||
|
// labels, so it's safe to create a new one.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// The metrics.Metric exists, but it's of a different type than we're trying to
|
||||||
|
// create.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreCounter records a counter time series under metricName with the given
// label hash, TTL, and owning vector.
func (r *Registry) StoreCounter(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.CounterVec, c prometheus.Counter, ttl time.Duration) {
	r.Store(metricName, hash, labels, vec, c, metrics.CounterMetricType, ttl)
}

// StoreGauge records a gauge time series.
// NOTE(review): g is declared as prometheus.Counter even though GetGauge
// passes a prometheus.Gauge (which structurally satisfies Counter) — the
// parameter type looks like an oversight; confirm and consider changing it
// to prometheus.Gauge.
func (r *Registry) StoreGauge(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.GaugeVec, g prometheus.Counter, ttl time.Duration) {
	r.Store(metricName, hash, labels, vec, g, metrics.GaugeMetricType, ttl)
}

// StoreHistogram records a histogram time series.
func (r *Registry) StoreHistogram(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.HistogramVec, o prometheus.Observer, ttl time.Duration) {
	r.Store(metricName, hash, labels, vec, o, metrics.HistogramMetricType, ttl)
}

// StoreSummary records a summary time series.
func (r *Registry) StoreSummary(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.SummaryVec, o prometheus.Observer, ttl time.Duration) {
	r.Store(metricName, hash, labels, vec, o, metrics.SummaryMetricType, ttl)
}
|
||||||
|
|
||||||
|
// Store upserts one time series: it creates the per-name Metric record and
// the per-label-name Vector on first sight, registers the concrete metric
// under its name+value hash, and refreshes the TTL/last-seen timestamp on
// every call.
func (r *Registry) Store(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vh metrics.VectorHolder, mh metrics.MetricHolder, metricType metrics.MetricType, ttl time.Duration) {
	metric, hasMetrics := r.Metrics[metricName]
	if !hasMetrics {
		// First time we see this name: initialize the bookkeeping record.
		// (metric is a struct copy, but its map fields are shared once it
		// is written back below.)
		metric.MetricType = metricType
		metric.Vectors = make(map[metrics.NameHash]*metrics.Vector)
		metric.Metrics = make(map[metrics.ValueHash]*metrics.RegisteredMetric)

		r.Metrics[metricName] = metric
	}

	// Ensure a Vector exists for this set of label names.
	v, ok := metric.Vectors[hash.Names]
	if !ok {
		v = &metrics.Vector{Holder: vh}
		metric.Vectors[hash.Names] = v
	}

	// Ensure a RegisteredMetric exists for this exact label name+value
	// combination; each new series bumps the vector's reference count.
	rm, ok := metric.Metrics[hash.Values]
	if !ok {
		rm = &metrics.RegisteredMetric{
			Labels: labels,
			TTL:    ttl,
			Metric: mh,
			VecKey: hash.Names,
		}
		metric.Metrics[hash.Values] = rm
		v.RefCount++
	}
	now := clock.Now()
	rm.LastRegisteredAt = now
	// Update ttl from mapping
	rm.TTL = ttl
}
|
||||||
|
|
||||||
|
// Get looks up a previously stored series. It returns (vector, metric) when
// the exact label values are known, (vector, nil) when only the label-name
// vector exists, and (nil, nil) when the name is unknown or stored under a
// different metric type. A full hit also refreshes the series' last-seen
// timestamp (TTL bookkeeping side effect).
func (r *Registry) Get(metricName string, hash metrics.LabelHash, metricType metrics.MetricType) (metrics.VectorHolder, metrics.MetricHolder) {
	metric, hasMetric := r.Metrics[metricName]

	if !hasMetric {
		return nil, nil
	}
	if metric.MetricType != metricType {
		// Same name but different type: treat as not found.
		return nil, nil
	}

	rm, ok := metric.Metrics[hash.Values]
	if ok {
		now := clock.Now()
		rm.LastRegisteredAt = now
		return metric.Vectors[hash.Names].Holder, rm.Metric
	}

	vector, ok := metric.Vectors[hash.Names]
	if ok {
		return vector.Holder, nil
	}

	return nil, nil
}
|
||||||
|
|
||||||
|
// GetCounter returns the counter for metricName/labels, creating and
// registering a new CounterVec with the global Prometheus registry on first
// use. metricsCount tracks how many distinct vectors of each type exist.
// Returns an error when the name is already registered as a different type
// or when Prometheus registration/lookup fails.
func (r *Registry) GetCounter(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Counter, error) {
	hash, labelNames := r.HashLabels(labels)
	vh, mh := r.Get(metricName, hash, metrics.CounterMetricType)
	if mh != nil {
		// Exact series already exists.
		return mh.(prometheus.Counter), nil
	}

	if r.MetricConflicts(metricName, metrics.CounterMetricType) {
		// NOTE(review): message casing differs from the other Get* helpers.
		return nil, fmt.Errorf("Metric with name %s is already registered", metricName)
	}

	var counterVec *prometheus.CounterVec
	if vh == nil {
		// No vector yet for this label-name set: create and register one.
		metricsCount.WithLabelValues("counter").Inc()
		counterVec = prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: metricName,
			Help: help,
		}, labelNames)

		// uncheckedCollector allows inconsistent label sets per name.
		if err := prometheus.Register(uncheckedCollector{counterVec}); err != nil {
			return nil, err
		}
	} else {
		counterVec = vh.(*prometheus.CounterVec)
	}

	var counter prometheus.Counter
	var err error
	if counter, err = counterVec.GetMetricWith(labels); err != nil {
		return nil, err
	}
	// Track the series so TTL expiry can later delete it.
	r.StoreCounter(metricName, hash, labels, counterVec, counter, mapping.Ttl)

	return counter, nil
}
|
||||||
|
|
||||||
|
// GetGauge returns the gauge for metricName/labels, creating and registering
// a new GaugeVec with the global Prometheus registry on first use. Returns
// an error when the name is already registered as a different type or when
// Prometheus registration/lookup fails.
func (r *Registry) GetGauge(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Gauge, error) {
	hash, labelNames := r.HashLabels(labels)
	vh, mh := r.Get(metricName, hash, metrics.GaugeMetricType)
	if mh != nil {
		// Exact series already exists.
		return mh.(prometheus.Gauge), nil
	}

	if r.MetricConflicts(metricName, metrics.GaugeMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}

	var gaugeVec *prometheus.GaugeVec
	if vh == nil {
		// No vector yet for this label-name set: create and register one.
		metricsCount.WithLabelValues("gauge").Inc()
		gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name: metricName,
			Help: help,
		}, labelNames)

		// uncheckedCollector allows inconsistent label sets per name.
		if err := prometheus.Register(uncheckedCollector{gaugeVec}); err != nil {
			return nil, err
		}
	} else {
		gaugeVec = vh.(*prometheus.GaugeVec)
	}

	var gauge prometheus.Gauge
	var err error
	if gauge, err = gaugeVec.GetMetricWith(labels); err != nil {
		return nil, err
	}
	// Track the series so TTL expiry can later delete it.
	r.StoreGauge(metricName, hash, labels, gaugeVec, gauge, mapping.Ttl)

	return gauge, nil
}
|
||||||
|
|
||||||
|
// GetHistogram returns the histogram observer for metricName/labels,
// creating and registering a new HistogramVec on first use. Buckets come
// from the mapping when provided, else from the mapper defaults. Conflicts
// are also checked for the derived _sum/_count/_bucket series names.
func (r *Registry) GetHistogram(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Observer, error) {
	hash, labelNames := r.HashLabels(labels)
	vh, mh := r.Get(metricName, hash, metrics.HistogramMetricType)
	if mh != nil {
		// Exact series already exists.
		return mh.(prometheus.Observer), nil
	}

	if r.MetricConflicts(metricName, metrics.HistogramMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}
	if r.MetricConflicts(metricName+"_sum", metrics.HistogramMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}
	if r.MetricConflicts(metricName+"_count", metrics.HistogramMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}
	if r.MetricConflicts(metricName+"_bucket", metrics.HistogramMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}

	var histogramVec *prometheus.HistogramVec
	if vh == nil {
		// No vector yet for this label-name set: create and register one.
		metricsCount.WithLabelValues("histogram").Inc()
		buckets := r.Mapper.Defaults.Buckets
		if mapping.HistogramOptions != nil && len(mapping.HistogramOptions.Buckets) > 0 {
			buckets = mapping.HistogramOptions.Buckets
		}
		histogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Name:    metricName,
			Help:    help,
			Buckets: buckets,
		}, labelNames)

		// uncheckedCollector allows inconsistent label sets per name.
		if err := prometheus.Register(uncheckedCollector{histogramVec}); err != nil {
			return nil, err
		}
	} else {
		histogramVec = vh.(*prometheus.HistogramVec)
	}

	var observer prometheus.Observer
	var err error
	if observer, err = histogramVec.GetMetricWith(labels); err != nil {
		return nil, err
	}
	// Track the series so TTL expiry can later delete it.
	r.StoreHistogram(metricName, hash, labels, histogramVec, observer, mapping.Ttl)

	return observer, nil
}
|
||||||
|
|
||||||
|
// GetSummary returns the summary observer for metricName/labels, creating
// and registering a new SummaryVec on first use. Quantile objectives come
// from the mapping when provided, else from mapper defaults, else from a
// hard-coded fallback. Conflicts are also checked for _sum/_count names.
func (r *Registry) GetSummary(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Observer, error) {
	hash, labelNames := r.HashLabels(labels)
	vh, mh := r.Get(metricName, hash, metrics.SummaryMetricType)
	if mh != nil {
		// Exact series already exists.
		return mh.(prometheus.Observer), nil
	}

	if r.MetricConflicts(metricName, metrics.SummaryMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}
	if r.MetricConflicts(metricName+"_sum", metrics.SummaryMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}
	if r.MetricConflicts(metricName+"_count", metrics.SummaryMetricType) {
		return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName)
	}

	var summaryVec *prometheus.SummaryVec
	if vh == nil {
		// No vector yet for this label-name set: create and register one.
		metricsCount.WithLabelValues("summary").Inc()
		quantiles := r.Mapper.Defaults.Quantiles
		if mapping != nil && mapping.SummaryOptions != nil && len(mapping.SummaryOptions.Quantiles) > 0 {
			quantiles = mapping.SummaryOptions.Quantiles
		}
		summaryOptions := mapper.SummaryOptions{}
		if mapping != nil && mapping.SummaryOptions != nil {
			summaryOptions = *mapping.SummaryOptions
		}
		objectives := make(map[float64]float64)
		for _, q := range quantiles {
			objectives[q.Quantile] = q.Error
		}
		// In the case of no mapping file, explicitly define the default quantiles
		if len(objectives) == 0 {
			objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
		}
		summaryVec = prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Name:       metricName,
			Help:       help,
			Objectives: objectives,
			MaxAge:     summaryOptions.MaxAge,
			AgeBuckets: summaryOptions.AgeBuckets,
			BufCap:     summaryOptions.BufCap,
		}, labelNames)

		// uncheckedCollector allows inconsistent label sets per name.
		if err := prometheus.Register(uncheckedCollector{summaryVec}); err != nil {
			return nil, err
		}
	} else {
		summaryVec = vh.(*prometheus.SummaryVec)
	}

	var observer prometheus.Observer
	var err error
	if observer, err = summaryVec.GetMetricWith(labels); err != nil {
		return nil, err
	}
	// Track the series so TTL expiry can later delete it.
	// NOTE(review): mapping is nil-checked above but dereferenced here —
	// confirm callers never pass a nil mapping.
	r.StoreSummary(metricName, hash, labels, summaryVec, observer, mapping.Ttl)

	return observer, nil
}
|
||||||
|
|
||||||
|
func (r *Registry) RemoveStaleMetrics() {
|
||||||
|
now := clock.Now()
|
||||||
|
// delete timeseries with expired ttl
|
||||||
|
for _, metric := range r.Metrics {
|
||||||
|
for hash, rm := range metric.Metrics {
|
||||||
|
if rm.TTL == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if rm.LastRegisteredAt.Add(rm.TTL).Before(now) {
|
||||||
|
metric.Vectors[rm.VecKey].Holder.Delete(rm.Labels)
|
||||||
|
metric.Vectors[rm.VecKey].RefCount--
|
||||||
|
delete(metric.Metrics, hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculates a hash of both the label names and the label names and values.
|
||||||
|
func (r *Registry) HashLabels(labels prometheus.Labels) (metrics.LabelHash, []string) {
|
||||||
|
r.Hasher.Reset()
|
||||||
|
r.NameBuf.Reset()
|
||||||
|
r.ValueBuf.Reset()
|
||||||
|
labelNames := make([]string, 0, len(labels))
|
||||||
|
|
||||||
|
for labelName := range labels {
|
||||||
|
labelNames = append(labelNames, labelName)
|
||||||
|
}
|
||||||
|
sort.Strings(labelNames)
|
||||||
|
|
||||||
|
r.ValueBuf.WriteByte(model.SeparatorByte)
|
||||||
|
for _, labelName := range labelNames {
|
||||||
|
r.ValueBuf.WriteString(labels[labelName])
|
||||||
|
r.ValueBuf.WriteByte(model.SeparatorByte)
|
||||||
|
|
||||||
|
r.NameBuf.WriteString(labelName)
|
||||||
|
r.NameBuf.WriteByte(model.SeparatorByte)
|
||||||
|
}
|
||||||
|
|
||||||
|
lh := metrics.LabelHash{}
|
||||||
|
r.Hasher.Write(r.NameBuf.Bytes())
|
||||||
|
lh.Names = metrics.NameHash(r.Hasher.Sum64())
|
||||||
|
|
||||||
|
// Now add the values to the names we've already hashed.
|
||||||
|
r.Hasher.Write(r.ValueBuf.Bytes())
|
||||||
|
lh.Values = metrics.ValueHash(r.Hasher.Sum64())
|
||||||
|
|
||||||
|
return lh, labelNames
|
||||||
|
}
|
356
pkg/registry/registry.go~
Normal file
356
pkg/registry/registry.go~
Normal file
|
@ -0,0 +1,356 @@
|
||||||
|
package registry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/prometheus/statsd_exporter/pkg/metrics"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE(review): everything below comes from an editor backup file
// (registry.go~) that was committed by accident. It references lowercase
// types (registeredMetric, metricHolder, nameHash, labelHash, vectorHolder,
// metricType, metric, vector) and packages (time, prometheus, bytes, hash,
// mapper, clock, fnv) that the visible import block does not provide, and
// NewRegistry returns an undeclared *registry. It cannot compile as-is and
// should likely be removed from the repository.

// RegisteredMetric records one live time series (pre-export field names).
type RegisteredMetric struct {
	lastRegisteredAt time.Time
	labels           prometheus.Labels
	ttl              time.Duration
	metric           metricHolder
	vecKey           nameHash
}

// Registry tracks statsd-derived metrics (pre-export field names).
type Registry struct {
	metrics map[string]metric
	mapper  *mapper.MetricMapper
	// The below value and label variables are allocated in the registry struct
	// so that we don't have to allocate them every time have to compute a label
	// hash.
	valueBuf, nameBuf bytes.Buffer
	hasher            hash.Hash64
}

// NewRegistry constructs a registry with an FNV-1a hasher.
// NOTE(review): returns *registry, which is not declared anywhere visible.
func NewRegistry(mapper *mapper.MetricMapper) *registry {
	return &registry{
		metrics: make(map[string]metric),
		mapper:  mapper,
		hasher:  fnv.New64a(),
	}
}

// MetricConflicts reports whether metricName is already tracked under a
// different metric type.
func (r *registry) MetricConflicts(metricName string, metricType metricType) bool {
	vector, hasMetric := r.metrics[metricName]
	if !hasMetric {
		// No metric with this name exists
		return false
	}

	if vector.metricType == metricType {
		// We've found a copy of this metric with this type, but different
		// labels, so it's safe to create a new one.
		return false
	}

	// The metric exists, but it's of a different type than we're trying to
	// create.
	return true
}

// StoreCounter records a counter time series.
func (r *registry) StoreCounter(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.CounterVec, c prometheus.Counter, ttl time.Duration) {
	r.store(metricName, hash, labels, vec, c, CounterMetricType, ttl)
}

// StoreGauge records a gauge time series.
// NOTE(review): g is declared prometheus.Counter; see the live registry.go.
func (r *registry) StoreGauge(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.GaugeVec, g prometheus.Counter, ttl time.Duration) {
	r.store(metricName, hash, labels, vec, g, GaugeMetricType, ttl)
}

// StoreHistogram records a histogram time series.
func (r *registry) StoreHistogram(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.HistogramVec, o prometheus.Observer, ttl time.Duration) {
	r.store(metricName, hash, labels, vec, o, HistogramMetricType, ttl)
}

// StoreSummary records a summary time series.
func (r *registry) StoreSummary(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.SummaryVec, o prometheus.Observer, ttl time.Duration) {
	r.store(metricName, hash, labels, vec, o, SummaryMetricType, ttl)
}

// Store upserts one time series, refreshing last-seen and TTL on every call.
// NOTE(review): the wrappers above call r.store (lowercase), but this method
// is named Store — another sign the backup file is stale/inconsistent.
func (r *registry) Store(metricName string, hash labelHash, labels prometheus.Labels, vh vectorHolder, mh metricHolder, metricType metricType, ttl time.Duration) {
	metric, hasMetric := r.metrics[metricName]
	if !hasMetric {
		metric.metricType = metricType
		metric.vectors = make(map[nameHash]*vector)
		metric.metrics = make(map[valueHash]*registeredMetric)

		r.metrics[metricName] = metric
	}

	v, ok := metric.vectors[hash.names]
	if !ok {
		v = &vector{holder: vh}
		metric.vectors[hash.names] = v
	}

	rm, ok := metric.metrics[hash.values]
	if !ok {
		rm = &registeredMetric{
			labels: labels,
			ttl:    ttl,
			metric: mh,
			vecKey: hash.names,
		}
		metric.metrics[hash.values] = rm
		v.refCount++
	}
	now := clock.Now()
	rm.lastRegisteredAt = now
	// Update ttl from mapping
	rm.ttl = ttl
}

// Get looks up a previously stored series; a full hit refreshes the series'
// last-seen timestamp.
func (r *registry) Get(metricName string, hash labelHash, metricType metricType) (vectorHolder, metricHolder) {
	metric, hasMetric := r.metrics[metricName]

	if !hasMetric {
		return nil, nil
	}
	if metric.metricType != metricType {
		return nil, nil
	}

	rm, ok := metric.metrics[hash.values]
	if ok {
		now := clock.Now()
		rm.lastRegisteredAt = now
		return metric.vectors[hash.names].holder, rm.metric
	}

	vector, ok := metric.vectors[hash.names]
	if ok {
		return vector.holder, nil
	}

	return nil, nil
}
|
||||||
|
|
||||||
|
func (r *registry) GetCounter(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Counter, error) {
|
||||||
|
hash, labelNames := r.hashLabels(labels)
|
||||||
|
vh, mh := r.get(metricName, hash, CounterMetricType)
|
||||||
|
if mh != nil {
|
||||||
|
return mh.(prometheus.Counter), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.metricConflicts(metricName, CounterMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var counterVec *prometheus.CounterVec
|
||||||
|
if vh == nil {
|
||||||
|
metricsCount.WithLabelValues("counter").Inc()
|
||||||
|
counterVec = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||||
|
Name: metricName,
|
||||||
|
Help: help,
|
||||||
|
}, labelNames)
|
||||||
|
|
||||||
|
if err := prometheus.Register(uncheckedCollector{counterVec}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
counterVec = vh.(*prometheus.CounterVec)
|
||||||
|
}
|
||||||
|
|
||||||
|
var counter prometheus.Counter
|
||||||
|
var err error
|
||||||
|
if counter, err = counterVec.GetMetricWith(labels); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.storeCounter(metricName, hash, labels, counterVec, counter, mapping.Ttl)
|
||||||
|
|
||||||
|
return counter, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *registry) GetGauge(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Gauge, error) {
|
||||||
|
hash, labelNames := r.hashLabels(labels)
|
||||||
|
vh, mh := r.get(metricName, hash, GaugeMetricType)
|
||||||
|
if mh != nil {
|
||||||
|
return mh.(prometheus.Gauge), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.metricConflicts(metricName, GaugeMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var gaugeVec *prometheus.GaugeVec
|
||||||
|
if vh == nil {
|
||||||
|
metricsCount.WithLabelValues("gauge").Inc()
|
||||||
|
gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||||
|
Name: metricName,
|
||||||
|
Help: help,
|
||||||
|
}, labelNames)
|
||||||
|
|
||||||
|
if err := prometheus.Register(uncheckedCollector{gaugeVec}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
gaugeVec = vh.(*prometheus.GaugeVec)
|
||||||
|
}
|
||||||
|
|
||||||
|
var gauge prometheus.Gauge
|
||||||
|
var err error
|
||||||
|
if gauge, err = gaugeVec.GetMetricWith(labels); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.storeGauge(metricName, hash, labels, gaugeVec, gauge, mapping.Ttl)
|
||||||
|
|
||||||
|
return gauge, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *registry) GetHistogram(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Observer, error) {
|
||||||
|
hash, labelNames := r.hashLabels(labels)
|
||||||
|
vh, mh := r.get(metricName, hash, HistogramMetricType)
|
||||||
|
if mh != nil {
|
||||||
|
return mh.(prometheus.Observer), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.metricConflicts(metricName, HistogramMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
if r.metricConflicts(metricName+"_sum", HistogramMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
if r.metricConflicts(metricName+"_count", HistogramMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
if r.metricConflicts(metricName+"_bucket", HistogramMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var histogramVec *prometheus.HistogramVec
|
||||||
|
if vh == nil {
|
||||||
|
metricsCount.WithLabelValues("histogram").Inc()
|
||||||
|
buckets := r.mapper.Defaults.Buckets
|
||||||
|
if mapping.HistogramOptions != nil && len(mapping.HistogramOptions.Buckets) > 0 {
|
||||||
|
buckets = mapping.HistogramOptions.Buckets
|
||||||
|
}
|
||||||
|
histogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||||
|
Name: metricName,
|
||||||
|
Help: help,
|
||||||
|
Buckets: buckets,
|
||||||
|
}, labelNames)
|
||||||
|
|
||||||
|
if err := prometheus.Register(uncheckedCollector{histogramVec}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
histogramVec = vh.(*prometheus.HistogramVec)
|
||||||
|
}
|
||||||
|
|
||||||
|
var observer prometheus.Observer
|
||||||
|
var err error
|
||||||
|
if observer, err = histogramVec.GetMetricWith(labels); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.storeHistogram(metricName, hash, labels, histogramVec, observer, mapping.Ttl)
|
||||||
|
|
||||||
|
return observer, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *registry) GetSummary(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Observer, error) {
|
||||||
|
hash, labelNames := r.hashLabels(labels)
|
||||||
|
vh, mh := r.get(metricName, hash, SummaryMetricType)
|
||||||
|
if mh != nil {
|
||||||
|
return mh.(prometheus.Observer), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.metricConflicts(metricName, SummaryMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
if r.metricConflicts(metricName+"_sum", SummaryMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
if r.metricConflicts(metricName+"_count", SummaryMetricType) {
|
||||||
|
return nil, fmt.Errorf("metric with name %s is already registered", metricName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var summaryVec *prometheus.SummaryVec
|
||||||
|
if vh == nil {
|
||||||
|
metricsCount.WithLabelValues("summary").Inc()
|
||||||
|
quantiles := r.mapper.Defaults.Quantiles
|
||||||
|
if mapping != nil && mapping.SummaryOptions != nil && len(mapping.SummaryOptions.Quantiles) > 0 {
|
||||||
|
quantiles = mapping.SummaryOptions.Quantiles
|
||||||
|
}
|
||||||
|
summaryOptions := mapper.SummaryOptions{}
|
||||||
|
if mapping != nil && mapping.SummaryOptions != nil {
|
||||||
|
summaryOptions = *mapping.SummaryOptions
|
||||||
|
}
|
||||||
|
objectives := make(map[float64]float64)
|
||||||
|
for _, q := range quantiles {
|
||||||
|
objectives[q.Quantile] = q.Error
|
||||||
|
}
|
||||||
|
// In the case of no mapping file, explicitly define the default quantiles
|
||||||
|
if len(objectives) == 0 {
|
||||||
|
objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||||
|
}
|
||||||
|
summaryVec = prometheus.NewSummaryVec(prometheus.SummaryOpts{
|
||||||
|
Name: metricName,
|
||||||
|
Help: help,
|
||||||
|
Objectives: objectives,
|
||||||
|
MaxAge: summaryOptions.MaxAge,
|
||||||
|
AgeBuckets: summaryOptions.AgeBuckets,
|
||||||
|
BufCap: summaryOptions.BufCap,
|
||||||
|
}, labelNames)
|
||||||
|
|
||||||
|
if err := prometheus.Register(uncheckedCollector{summaryVec}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
summaryVec = vh.(*prometheus.SummaryVec)
|
||||||
|
}
|
||||||
|
|
||||||
|
var observer prometheus.Observer
|
||||||
|
var err error
|
||||||
|
if observer, err = summaryVec.GetMetricWith(labels); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.storeSummary(metricName, hash, labels, summaryVec, observer, mapping.Ttl)
|
||||||
|
|
||||||
|
return observer, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *registry) RemoveStaleMetrics() {
|
||||||
|
now := clock.Now()
|
||||||
|
// delete timeseries with expired ttl
|
||||||
|
for _, metric := range r.metrics {
|
||||||
|
for hash, rm := range metric.metrics {
|
||||||
|
if rm.ttl == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if rm.lastRegisteredAt.Add(rm.ttl).Before(now) {
|
||||||
|
metric.vectors[rm.vecKey].holder.Delete(rm.labels)
|
||||||
|
metric.vectors[rm.vecKey].refCount--
|
||||||
|
delete(metric.metrics, hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashLabels calculates a hash of both the label names and the label
// names and values.
//
// It returns a labelHash whose names field depends only on the sorted
// label names, and whose values field depends on names and values
// together, plus the sorted slice of label names. The values hash is
// produced by appending the value buffer to the hasher state that
// already contains the name buffer, so the exact write order below is
// load-bearing.
//
// NOTE(review): this reuses the registry's hasher and scratch buffers,
// so it does not look safe for concurrent use — confirm callers
// serialize access.
func (r *registry) HashLabels(labels prometheus.Labels) (labelHash, []string) {
	r.hasher.Reset()
	r.nameBuf.Reset()
	r.valueBuf.Reset()
	labelNames := make([]string, 0, len(labels))

	for labelName := range labels {
		labelNames = append(labelNames, labelName)
	}
	// Sort so the result is independent of map iteration order.
	sort.Strings(labelNames)

	r.valueBuf.WriteByte(model.SeparatorByte)
	for _, labelName := range labelNames {
		r.valueBuf.WriteString(labels[labelName])
		r.valueBuf.WriteByte(model.SeparatorByte)

		r.nameBuf.WriteString(labelName)
		r.nameBuf.WriteByte(model.SeparatorByte)
	}

	lh := labelHash{}
	r.hasher.Write(r.nameBuf.Bytes())
	lh.names = nameHash(r.hasher.Sum64())

	// Now add the values to the names we've already hashed.
	r.hasher.Write(r.valueBuf.Bytes())
	lh.values = valueHash(r.hasher.Sum64())

	return lh, labelNames
}
|
53
pkg/util/util.go
Normal file
53
pkg/util/util.go
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IPPortFromString splits an address of the form "host:port" into a
// resolved IP address and a port number. An empty host means "all
// interfaces" (0.0.0.0). It returns an error for unsplittable
// addresses, unresolvable hosts, and non-numeric or out-of-range ports.
func IPPortFromString(addr string) (*net.IPAddr, int, error) {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, 0, fmt.Errorf("bad StatsD listening address: %s", addr)
	}

	if host == "" {
		host = "0.0.0.0"
	}
	ip, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		return nil, 0, fmt.Errorf("unable to resolve %s: %s", host, err)
	}

	port, err := strconv.Atoi(portStr)
	if err != nil || port < 0 || port > 65535 {
		// err may be nil here (a syntactically valid but out-of-range
		// port); the original formatted it with %s anyway, producing
		// "%!s(<nil>)" in the message. Don't format err at all.
		return nil, 0, fmt.Errorf("bad port %q on address %s", portStr, addr)
	}

	return ip, port, nil
}
|
||||||
|
|
||||||
|
func UDPAddrFromString(addr string) (*net.UDPAddr, error) {
|
||||||
|
ip, port, err := IPPortFromString(addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &net.UDPAddr{
|
||||||
|
IP: ip.IP,
|
||||||
|
Port: port,
|
||||||
|
Zone: ip.Zone,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TCPAddrFromString(addr string) (*net.TCPAddr, error) {
|
||||||
|
ip, port, err := IPPortFromString(addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &net.TCPAddr{
|
||||||
|
IP: ip.IP,
|
||||||
|
Port: port,
|
||||||
|
Zone: ip.Zone,
|
||||||
|
}, nil
|
||||||
|
}
|
53
pkg/util/util.go~
Normal file
53
pkg/util/util.go~
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IPPortFromString splits an address of the form "host:port" into a
// resolved IP address and a port number. An empty host means "all
// interfaces" (0.0.0.0). It returns an error for unsplittable
// addresses, unresolvable hosts, and non-numeric or out-of-range ports.
//
// NOTE(review): this whole file appears to be an editor backup
// (util.go~) of pkg/util/util.go committed by accident — it should be
// removed from the repository.
func IPPortFromString(addr string) (*net.IPAddr, int, error) {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, 0, fmt.Errorf("bad StatsD listening address: %s", addr)
	}

	if host == "" {
		host = "0.0.0.0"
	}
	ip, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		return nil, 0, fmt.Errorf("unable to resolve %s: %s", host, err)
	}

	port, err := strconv.Atoi(portStr)
	if err != nil || port < 0 || port > 65535 {
		// err may be nil here (a syntactically valid but out-of-range
		// port); the original formatted it with %s anyway, producing
		// "%!s(<nil>)" in the message. Don't format err at all.
		return nil, 0, fmt.Errorf("bad port %q on address %s", portStr, addr)
	}

	return ip, port, nil
}
|
||||||
|
|
||||||
|
func UDPAddrFromString(addr string) (*net.UDPAddr, error) {
|
||||||
|
ip, port, err := ipPortFromString(addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &net.UDPAddr{
|
||||||
|
IP: ip.IP,
|
||||||
|
Port: port,
|
||||||
|
Zone: ip.Zone,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TCPAddrFromString(addr string) (*net.TCPAddr, error) {
|
||||||
|
ip, port, err := ipPortFromString(addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &net.TCPAddr{
|
||||||
|
IP: ip.IP,
|
||||||
|
Port: port,
|
||||||
|
Zone: ip.Zone,
|
||||||
|
}, nil
|
||||||
|
}
|
BIN
statsd_exporter.exe
Normal file
BIN
statsd_exporter.exe
Normal file
Binary file not shown.
Loading…
Reference in a new issue