diff --git a/CHANGELOG.md b/CHANGELOG.md index ba6b224..73cbf95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,286 +1,286 @@ -## 0.15.0 / 2020-03-05 - -* [ENHANCEMENT] Allow setting granularity for summary metrics ([#290](https://github.com/prometheus/statsd_exporter/pull/290)) -* [ENHANCEMENT] Support a random-replacement cache invalidation strategy ([#281](https://github.com/prometheus/statsd_exporter/pull/281) - -To facilitate the expanded settings for summaries, the configuration format changes from - -```yaml -mappings: -- match: … - timer_type: summary - quantiles: - - quantile: 0.99 - error: 0.001 - - quantile: 0.95 - error: 0.01 - … -``` - -to - -```yaml -mappings: -- match: … - timer_type: summary - summary_options: - quantiles: - - quantile: 0.99 - error: 0.001 - - quantile: 0.95 - error: 0.01 - … - max_summary_age: 30s - summary_age_buckets: 3 - stream_buffer_size: 1000 - … -``` - -For consistency, the format for histogram buckets also changes from - -```yaml -mappings: -- match: … - timer_type: histogram - buckets: [ 0.01, 0.025, 0.05, 0.1 ] -``` - -to - -```yaml -mappings: -- match: … - timer_type: histogram - histogram_options: - buckets: [ 0.01, 0.025, 0.05, 0.1 ] -``` - -Transitionally, the old format will still work but is *deprecated*. The new -settings are optional. - -For users of the [mapper](https://pkg.go.dev/github.com/prometheus/statsd_exporter/pkg/mapper?tab=doc) -as a library, this is a breaking change. To adjust your code, replace -`mapping.Buckets` with `mapping.HistogramOptions.Buckets` and -`mapping.Quantiles` with `mapping.SummaryOptions.Quantiles`. - -## 0.14.1 / 2020-01-13 - -* [BUGFIX] Mapper cache poisoning when name is variable ([#286](https://github.com/prometheus/statsd_exporter/pull/286)) -* [BUGFIX] nil pointer dereference in UDP listener ([#287](https://github.com/prometheus/statsd_exporter/pull/287)) - -Thank you to everyone who reported these, and @bakins for the mapper cache fix! 
- -## 0.14.0 / 2020-01-10 - -* [CHANGE] Switch logging to go-kit ([#283](https://github.com/prometheus/statsd_exporter/pull/283)) -* [CHANGE] Rename existing metric for mapping cache size ([#284](https://github.com/prometheus/statsd_exporter/pull/284)) -* [ENHANCEMENT] Add metrics for mapping cache hits ([#280](https://github.com/prometheus/statsd_exporter/pull/280)) - -Logs are more structured now. The `fatal` log level no longer exists; use `--log.level=error` instead. The valid log formats are `logfmt` and `json`. - -The metric `statsd_exporter_cache_length` is now called `statsd_metric_mapper_cache_length`. - -## 0.13.0 / 2019-12-06 - -* [ENHANCEMENT] Support sampling factors for all statsd metric types ([#264](https://github.com/prometheus/statsd_exporter/issues/250)) -* [ENHANCEMENT] Support Librato and InfluxDB labeling formats ([#267](https://github.com/prometheus/statsd_exporter/pull/267)) - -## 0.12.2 / 2019-07-25 - -* [BUGFIX] Fix Unix socket handler ([#252](https://github.com/prometheus/statsd_exporter/pull/252)) -* [BUGFIX] Fix panic under high load ([#253](https://github.com/prometheus/statsd_exporter/pull/253)) - -Thank you to everyone who reported and helped debug these issues! 
- -## 0.12.1 / 2019-07-08 - -* [BUGFIX] Renew TTL when a metric receives updates ([#246](https://github.com/prometheus/statsd_exporter/pull/246)) -* [CHANGE] Reload on SIGHUP instead of watching the file ([#243](https://github.com/prometheus/statsd_exporter/pull/243)) - -## 0.11.2 / 2019-06-14 - -* [BUGFIX] Fix TCP handler ([#235](https://github.com/prometheus/statsd_exporter/pull/235)) - -## 0.11.1 / 2019-06-14 - -* [ENHANCEMENT] Batch event processing for improved ingestion performance ([#227](https://github.com/prometheus/statsd_exporter/pull/227)) -* [ENHANCEMENT] Switch Prometheus client to promhttp, freeing the standard HTTP metrics ([#233](https://github.com/prometheus/statsd_exporter/pull/233)) - -With #233, the exporter no longer exports metrics about its own HTTP status. These were not helpful since you could not get them when scraping fails. This allows mapping to metric names like `http_requests_total` that are useful as application metrics. - -## 0.10.6 / 2019-06-07 - -* [BUGFIX] Fix mapping collision for metrics with different types, but the same name ([#229](https://github.com/prometheus/statsd_exporter/pull/229)) - -## 0.10.5 / 2019-05-27 - -* [BUGFIX] Fix "Error: inconsistent label cardinality: expected 0 label values but got N in prometheus.Labels" ([#224](https://github.com/prometheus/statsd_exporter/pull/224)) - -## 0.10.4 / 2019-05-20 - -* [BUGFIX] Revert #218 due to a race condition ([#221](https://github.com/prometheus/statsd_exporter/pull/221)) - -## 0.10.3 / 2019-05-17 - -* [ENHANCEMENT] Reduce allocations when escaping metric names ([#217](https://github.com/prometheus/statsd_exporter/pull/217)) -* [ENHANCEMENT] Reduce allocations when handling packets ([#218](https://github.com/prometheus/statsd_exporter/pull/218)) -* [ENHANCEMENT] Optimize label sorting ([#219](https://github.com/prometheus/statsd_exporter/pull/219)) - -This release is entirely powered by @claytono. Kudos! 
- -## 0.10.2 / 2019-05-17 - -* [CHANGE] Do not run as root in the Docker container by default ([#202](https://github.com/prometheus/statsd_exporter/pull/202)) -* [FEATURE] Add metric for count of events by action ([#193](https://github.com/prometheus/statsd_exporter/pull/193)) -* [FEATURE] Add metric for count of distinct metric names ([#200](https://github.com/prometheus/statsd_exporter/pull/200)) -* [FEATURE] Add UNIX socket listener support ([#199](https://github.com/prometheus/statsd_exporter/pull/199)) -* [FEATURE] Accept Datadog [distributions](https://docs.datadoghq.com/graphing/metrics/distributions/) ([#211](https://github.com/prometheus/statsd_exporter/pull/211)) -* [ENHANCEMENT] Add a health check to the Docker container ([#182](https://github.com/prometheus/statsd_exporter/pull/182)) -* [ENHANCEMENT] Allow inconsistent label sets ([#194](https://github.com/prometheus/statsd_exporter/pull/194)) -* [ENHANCEMENT] Speed up sanitization of metric names ([#197](https://github.com/prometheus/statsd_exporter/pull/197)) -* [ENHANCEMENT] Enable pprof endpoints ([#205](https://github.com/prometheus/statsd_exporter/pull/205)) -* [ENHANCEMENT] DogStatsD tag parsing is faster ([#210](https://github.com/prometheus/statsd_exporter/pull/210)) -* [ENHANCEMENT] Cache mapped metrics ([#198](https://github.com/prometheus/statsd_exporter/pull/198)) -* [BUGFIX] Fix panic if a mapping resulted in an empty name ([#192](https://github.com/prometheus/statsd_exporter/pull/192)) -* [BUGFIX] Ensure that there are always default quantiles if using summaries ([#212](https://github.com/prometheus/statsd_exporter/pull/212)) -* [BUGFIX] Prevent ingesting conflicting metric types that would make scraping fail ([#213](https://github.com/prometheus/statsd_exporter/pull/213)) - -With #192, the count of events rejected because of negative counter increments has moved into the `statsd_exporter_events_error_total` metric, instead of being lumped in with the different kinds of successful events. 
- -## 0.9.0 / 2019-03-11 - -* [ENHANCEMENT] Update the Prometheus client library to 0.9.2 ([#171](https://github.com/prometheus/statsd_exporter/pull/171)) -* [FEATURE] Metrics can now be expired with a per-mapping TTL ([#164](https://github.com/prometheus/statsd_exporter/pull/164)) -* [CHANGE] Timers that mapped to a summary are scaled to seconds, just like histograms ([#178](https://github.com/prometheus/statsd_exporter/pull/178)) - -If you are using summaries, all your quantiles and `_total` will change by a factor of 1000. -Adjust your queries and dashboards, or consider switching to histograms altogether. - -## 0.8.1 / 2018-12-05 - -* [BUGFIX] Expose the counter for unmapped matches ([#161](https://github.com/prometheus/statsd_exporter/pull/161)) -* [BUGFIX] Unsuccessful backtracking does not clobber captures ([#169](https://github.com/prometheus/statsd_exporter/pull/169), fixes [#168](https://github.com/prometheus/statsd_exporter/issues/168)) - -## 0.8.0 / 2018-10-12 - -* [ENHANCEMENT] Speed up glob matching ([#157](https://github.com/prometheus/statsd_exporter/pull/157)) - -This release replaces the implementation of the glob matching mechanism, -speeding it up significantly. In certain sub-optimal configurations, a warning -is logged. - -This major enhancement was contributed by [Wangchong Zhou](https://github.com/fffonion). - -## 0.7.0 / 2018-08-22 - -This is a breaking release, but the migration is easy: command line flags now -require two dashes (`--help` instead of `-help`). The previous flag library -already accepts this form, so if necessary you can migrate the flags first -before upgrading. - -The deprecated `--statsd.listen-address` flag has been removed, use -`--statsd.listen-udp` instead. 
- -* [CHANGE] Switch to Kingpin for flags, fixes setting log level ([#141](https://github.com/prometheus/statsd_exporter/pull/141)) -* [ENHANCEMENT] Allow matching on specific metric types ([#136](https://github.com/prometheus/statsd_exporter/pulls/136)) -* [ENHANCEMENT] Summary quantiles can be configured ([#135](https://github.com/prometheus/statsd_exporter/pulls/135)) -* [BUGFIX] Fix panic if an invalid regular expression is supplied ([#126](https://github.com/prometheus/statsd_exporter/pulls/126)) - -## 0.6.0 / 2018-01-17 - -* [ENHANCEMENT] Add a drop action ([#115](https://github.com/prometheus/statsd_exporter/pulls/115)) -* [ENHANCEMENT] Allow templating metric names ([#117](https://github.com/prometheus/statsd_exporter/pulls/117)) - -## 0.5.0 / 2017-11-16 - -NOTE: This release breaks backward compatibility. `statsd_exporter` now uses -a YAML configuration file. You must convert your mappings configuration to -the new format when you upgrade. For example, the configuration - -``` -test.dispatcher.*.*.* -name="dispatcher_events_total" -processor="$1" -action="$2" -outcome="$3" -job="test_dispatcher" - -*.signup.*.* -name="signup_events_total" -provider="$2" -outcome="$3" -job="${1}_server" -``` - -now has the format - -```yaml -mappings: -- match: test.dispatcher.*.*.* - help: "The total number of events handled by the dispatcher." - name: "dispatcher_events_total" - labels: - processor: "$1" - action: "$2" - outcome: "$3" - job: "test_dispatcher" -- match: *.signup.*.* - name: "signup_events_total" - help: "The total number of signup events." - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" -``` - -The help field is optional. - -There is a [tool](https://github.com/bakins/statsd-exporter-convert) available to help with this conversion. 
- -* [CHANGE] Replace the overloaded "packets" metric ([#106](https://github.com/prometheus/statsd_exporter/pulls/106)) -* [CHANGE] Removed `-statsd.add-suffix` option flag [#99](https://github.com/prometheus/statsd_exporter/pulls/99). Users should remove - this flag when upgrading. Metrics will no longer automatically include the - suffixes `_timer` or `counter`. You may need to adjust any graphs that used - metrics with these suffixes. -* [CHANGE] Reduce log levels [#92](https://github.com/prometheus/statsd_exporter/pulls/92). Many log events have been changed from error - to debug log level. -* [CHANGE] Use YAML for configuration file [#66](https://github.com/prometheus/statsd_exporter/pulls/66). See note above about file format - conversion. -* [ENHANCEMENT] Allow help text to be customized [#87](https://github.com/prometheus/statsd_exporter/pulls/87) -* [ENHANCEMENT] Add support for regex mappers [#85](https://github.com/prometheus/statsd_exporter/pulls/85) -* [ENHANCEMENT] Add TCP listener support [#71](https://github.com/prometheus/statsd_exporter/pulls/71) -* [ENHANCEMENT] Allow histograms for timer metrics [#66](https://github.com/prometheus/statsd_exporter/pulls/66) -* [ENHANCEMENT] Added support for sampling factor on timing events [#28](https://github.com/prometheus/statsd_exporter/pulls/28) -* [BUGFIX] Conflicting label sets no longer crash the exporter and will be - ignored. Restart to clear the remembered label set. 
[#72](https://github.com/prometheus/statsd_exporter/pulls/72) - -## 0.4.0 / 2017-05-12 - -* [ENHANCEMENT] Improve mapping configuration parser [#61](https://github.com/prometheus/statsd_exporter/pulls/61) -* [ENHANCEMENT] Add increment/decrement support to Gauges [#65](https://github.com/prometheus/statsd_exporter/pulls/65) -* [BUGFIX] Tolerate more forms of broken lines from StatsD [#48](https://github.com/prometheus/statsd_exporter/pulls/48) -* [BUGFIX] Skip metrics with invalid utf8 [#50](https://github.com/prometheus/statsd_exporter/pulls/50) -* [BUGFIX] ListenAndServe now fails on exit [#58](https://github.com/prometheus/statsd_exporter/pulls/58) - -## 0.3.0 / 2016-05-05 - -* [CHANGE] Drop `_count` suffix for `loaded_mappings` metric ([#41](https://github.com/prometheus/statsd_exporter/pulls/41)) -* [ENHANCEMENT] Use common's log and version packages, and add -version flag ([#44](https://github.com/prometheus/statsd_exporter/pulls/44)) -* [ENHANCEMENT] Add flag to disable metric type suffixes ([#37](https://github.com/prometheus/statsd_exporter/pulls/37)) -* [BUGFIX] Increase receivable UDP datagram size to 65535 bytes ([#36](https://github.com/prometheus/statsd_exporter/pulls/36)) -* [BUGFIX] Warn, not panic when negative number counter is submitted ([#33](https://github.com/prometheus/statsd_exporter/pulls/33)) - -## 0.2.0 / 2016-03-19 - -NOTE: This release renames `statsd_bridge` to `statsd_exporter` - -* [CHANGE] New Dockerfile using alpine-golang-make-onbuild base image ([#17](https://github.com/prometheus/statsd_exporter/pulls/17)) -* [ENHANCEMENT] Allow configuration of UDP read buffer ([#22](https://github.com/prometheus/statsd_exporter/pulls/22)) -* [BUGFIX] allow metrics with dashes when mapping ([#24](https://github.com/prometheus/statsd_exporter/pulls/24)) -* [ENHANCEMENT] add root endpoint with redirect ([#25](https://github.com/prometheus/statsd_exporter/pulls/25)) -* [CHANGE] rename bridge to exporter 
([#26](https://github.com/prometheus/statsd_exporter/pulls/26)) - - -## 0.1.0 / 2015-04-17 - -* Initial release +## 0.15.0 / 2020-03-05 + +* [ENHANCEMENT] Allow setting granularity for summary metrics ([#290](https://github.com/prometheus/statsd_exporter/pull/290)) +* [ENHANCEMENT] Support a random-replacement cache invalidation strategy ([#281](https://github.com/prometheus/statsd_exporter/pull/281) + +To facilitate the expanded settings for summaries, the configuration format changes from + +```yaml +mappings: +- match: … + timer_type: summary + quantiles: + - quantile: 0.99 + error: 0.001 + - quantile: 0.95 + error: 0.01 + … +``` + +to + +```yaml +mappings: +- match: … + timer_type: summary + summary_options: + quantiles: + - quantile: 0.99 + error: 0.001 + - quantile: 0.95 + error: 0.01 + … + max_summary_age: 30s + summary_age_buckets: 3 + stream_buffer_size: 1000 + … +``` + +For consistency, the format for histogram buckets also changes from + +```yaml +mappings: +- match: … + timer_type: histogram + buckets: [ 0.01, 0.025, 0.05, 0.1 ] +``` + +to + +```yaml +mappings: +- match: … + timer_type: histogram + histogram_options: + buckets: [ 0.01, 0.025, 0.05, 0.1 ] +``` + +Transitionally, the old format will still work but is *deprecated*. The new +settings are optional. + +For users of the [mapper](https://pkg.go.dev/github.com/prometheus/statsd_exporter/pkg/mapper?tab=doc) +as a library, this is a breaking change. To adjust your code, replace +`mapping.Buckets` with `mapping.HistogramOptions.Buckets` and +`mapping.Quantiles` with `mapping.SummaryOptions.Quantiles`. + +## 0.14.1 / 2020-01-13 + +* [BUGFIX] Mapper cache poisoning when name is variable ([#286](https://github.com/prometheus/statsd_exporter/pull/286)) +* [BUGFIX] nil pointer dereference in UDP listener ([#287](https://github.com/prometheus/statsd_exporter/pull/287)) + +Thank you to everyone who reported these, and @bakins for the mapper cache fix! 
+ +## 0.14.0 / 2020-01-10 + +* [CHANGE] Switch logging to go-kit ([#283](https://github.com/prometheus/statsd_exporter/pull/283)) +* [CHANGE] Rename existing metric for mapping cache size ([#284](https://github.com/prometheus/statsd_exporter/pull/284)) +* [ENHANCEMENT] Add metrics for mapping cache hits ([#280](https://github.com/prometheus/statsd_exporter/pull/280)) + +Logs are more structured now. The `fatal` log level no longer exists; use `--log.level=error` instead. The valid log formats are `logfmt` and `json`. + +The metric `statsd_exporter_cache_length` is now called `statsd_metric_mapper_cache_length`. + +## 0.13.0 / 2019-12-06 + +* [ENHANCEMENT] Support sampling factors for all statsd metric types ([#264](https://github.com/prometheus/statsd_exporter/issues/250)) +* [ENHANCEMENT] Support Librato and InfluxDB labeling formats ([#267](https://github.com/prometheus/statsd_exporter/pull/267)) + +## 0.12.2 / 2019-07-25 + +* [BUGFIX] Fix Unix socket handler ([#252](https://github.com/prometheus/statsd_exporter/pull/252)) +* [BUGFIX] Fix panic under high load ([#253](https://github.com/prometheus/statsd_exporter/pull/253)) + +Thank you to everyone who reported and helped debug these issues! 
+ +## 0.12.1 / 2019-07-08 + +* [BUGFIX] Renew TTL when a metric receives updates ([#246](https://github.com/prometheus/statsd_exporter/pull/246)) +* [CHANGE] Reload on SIGHUP instead of watching the file ([#243](https://github.com/prometheus/statsd_exporter/pull/243)) + +## 0.11.2 / 2019-06-14 + +* [BUGFIX] Fix TCP handler ([#235](https://github.com/prometheus/statsd_exporter/pull/235)) + +## 0.11.1 / 2019-06-14 + +* [ENHANCEMENT] Batch event processing for improved ingestion performance ([#227](https://github.com/prometheus/statsd_exporter/pull/227)) +* [ENHANCEMENT] Switch Prometheus client to promhttp, freeing the standard HTTP metrics ([#233](https://github.com/prometheus/statsd_exporter/pull/233)) + +With #233, the exporter no longer exports metrics about its own HTTP status. These were not helpful since you could not get them when scraping fails. This allows mapping to metric names like `http_requests_total` that are useful as application metrics. + +## 0.10.6 / 2019-06-07 + +* [BUGFIX] Fix mapping collision for metrics with different types, but the same name ([#229](https://github.com/prometheus/statsd_exporter/pull/229)) + +## 0.10.5 / 2019-05-27 + +* [BUGFIX] Fix "Error: inconsistent label cardinality: expected 0 label values but got N in prometheus.Labels" ([#224](https://github.com/prometheus/statsd_exporter/pull/224)) + +## 0.10.4 / 2019-05-20 + +* [BUGFIX] Revert #218 due to a race condition ([#221](https://github.com/prometheus/statsd_exporter/pull/221)) + +## 0.10.3 / 2019-05-17 + +* [ENHANCEMENT] Reduce allocations when escaping metric names ([#217](https://github.com/prometheus/statsd_exporter/pull/217)) +* [ENHANCEMENT] Reduce allocations when handling packets ([#218](https://github.com/prometheus/statsd_exporter/pull/218)) +* [ENHANCEMENT] Optimize label sorting ([#219](https://github.com/prometheus/statsd_exporter/pull/219)) + +This release is entirely powered by @claytono. Kudos! 
+ +## 0.10.2 / 2019-05-17 + +* [CHANGE] Do not run as root in the Docker container by default ([#202](https://github.com/prometheus/statsd_exporter/pull/202)) +* [FEATURE] Add metric for count of events by action ([#193](https://github.com/prometheus/statsd_exporter/pull/193)) +* [FEATURE] Add metric for count of distinct metric names ([#200](https://github.com/prometheus/statsd_exporter/pull/200)) +* [FEATURE] Add UNIX socket listener support ([#199](https://github.com/prometheus/statsd_exporter/pull/199)) +* [FEATURE] Accept Datadog [distributions](https://docs.datadoghq.com/graphing/metrics/distributions/) ([#211](https://github.com/prometheus/statsd_exporter/pull/211)) +* [ENHANCEMENT] Add a health check to the Docker container ([#182](https://github.com/prometheus/statsd_exporter/pull/182)) +* [ENHANCEMENT] Allow inconsistent label sets ([#194](https://github.com/prometheus/statsd_exporter/pull/194)) +* [ENHANCEMENT] Speed up sanitization of metric names ([#197](https://github.com/prometheus/statsd_exporter/pull/197)) +* [ENHANCEMENT] Enable pprof endpoints ([#205](https://github.com/prometheus/statsd_exporter/pull/205)) +* [ENHANCEMENT] DogStatsD tag parsing is faster ([#210](https://github.com/prometheus/statsd_exporter/pull/210)) +* [ENHANCEMENT] Cache mapped metrics ([#198](https://github.com/prometheus/statsd_exporter/pull/198)) +* [BUGFIX] Fix panic if a mapping resulted in an empty name ([#192](https://github.com/prometheus/statsd_exporter/pull/192)) +* [BUGFIX] Ensure that there are always default quantiles if using summaries ([#212](https://github.com/prometheus/statsd_exporter/pull/212)) +* [BUGFIX] Prevent ingesting conflicting metric types that would make scraping fail ([#213](https://github.com/prometheus/statsd_exporter/pull/213)) + +With #192, the count of events rejected because of negative counter increments has moved into the `statsd_exporter_events_error_total` metric, instead of being lumped in with the different kinds of successful events. 
+ +## 0.9.0 / 2019-03-11 + +* [ENHANCEMENT] Update the Prometheus client library to 0.9.2 ([#171](https://github.com/prometheus/statsd_exporter/pull/171)) +* [FEATURE] Metrics can now be expired with a per-mapping TTL ([#164](https://github.com/prometheus/statsd_exporter/pull/164)) +* [CHANGE] Timers that mapped to a summary are scaled to seconds, just like histograms ([#178](https://github.com/prometheus/statsd_exporter/pull/178)) + +If you are using summaries, all your quantiles and `_total` will change by a factor of 1000. +Adjust your queries and dashboards, or consider switching to histograms altogether. + +## 0.8.1 / 2018-12-05 + +* [BUGFIX] Expose the counter for unmapped matches ([#161](https://github.com/prometheus/statsd_exporter/pull/161)) +* [BUGFIX] Unsuccessful backtracking does not clobber captures ([#169](https://github.com/prometheus/statsd_exporter/pull/169), fixes [#168](https://github.com/prometheus/statsd_exporter/issues/168)) + +## 0.8.0 / 2018-10-12 + +* [ENHANCEMENT] Speed up glob matching ([#157](https://github.com/prometheus/statsd_exporter/pull/157)) + +This release replaces the implementation of the glob matching mechanism, +speeding it up significantly. In certain sub-optimal configurations, a warning +is logged. + +This major enhancement was contributed by [Wangchong Zhou](https://github.com/fffonion). + +## 0.7.0 / 2018-08-22 + +This is a breaking release, but the migration is easy: command line flags now +require two dashes (`--help` instead of `-help`). The previous flag library +already accepts this form, so if necessary you can migrate the flags first +before upgrading. + +The deprecated `--statsd.listen-address` flag has been removed, use +`--statsd.listen-udp` instead. 
+ +* [CHANGE] Switch to Kingpin for flags, fixes setting log level ([#141](https://github.com/prometheus/statsd_exporter/pull/141)) +* [ENHANCEMENT] Allow matching on specific metric types ([#136](https://github.com/prometheus/statsd_exporter/pulls/136)) +* [ENHANCEMENT] Summary quantiles can be configured ([#135](https://github.com/prometheus/statsd_exporter/pulls/135)) +* [BUGFIX] Fix panic if an invalid regular expression is supplied ([#126](https://github.com/prometheus/statsd_exporter/pulls/126)) + +## 0.6.0 / 2018-01-17 + +* [ENHANCEMENT] Add a drop action ([#115](https://github.com/prometheus/statsd_exporter/pulls/115)) +* [ENHANCEMENT] Allow templating metric names ([#117](https://github.com/prometheus/statsd_exporter/pulls/117)) + +## 0.5.0 / 2017-11-16 + +NOTE: This release breaks backward compatibility. `statsd_exporter` now uses +a YAML configuration file. You must convert your mappings configuration to +the new format when you upgrade. For example, the configuration + +``` +test.dispatcher.*.*.* +name="dispatcher_events_total" +processor="$1" +action="$2" +outcome="$3" +job="test_dispatcher" + +*.signup.*.* +name="signup_events_total" +provider="$2" +outcome="$3" +job="${1}_server" +``` + +now has the format + +```yaml +mappings: +- match: test.dispatcher.*.*.* + help: "The total number of events handled by the dispatcher." + name: "dispatcher_events_total" + labels: + processor: "$1" + action: "$2" + outcome: "$3" + job: "test_dispatcher" +- match: *.signup.*.* + name: "signup_events_total" + help: "The total number of signup events." + labels: + provider: "$2" + outcome: "$3" + job: "${1}_server" +``` + +The help field is optional. + +There is a [tool](https://github.com/bakins/statsd-exporter-convert) available to help with this conversion. 
+ +* [CHANGE] Replace the overloaded "packets" metric ([#106](https://github.com/prometheus/statsd_exporter/pulls/106)) +* [CHANGE] Removed `-statsd.add-suffix` option flag [#99](https://github.com/prometheus/statsd_exporter/pulls/99). Users should remove + this flag when upgrading. Metrics will no longer automatically include the + suffixes `_timer` or `counter`. You may need to adjust any graphs that used + metrics with these suffixes. +* [CHANGE] Reduce log levels [#92](https://github.com/prometheus/statsd_exporter/pulls/92). Many log events have been changed from error + to debug log level. +* [CHANGE] Use YAML for configuration file [#66](https://github.com/prometheus/statsd_exporter/pulls/66). See note above about file format + conversion. +* [ENHANCEMENT] Allow help text to be customized [#87](https://github.com/prometheus/statsd_exporter/pulls/87) +* [ENHANCEMENT] Add support for regex mappers [#85](https://github.com/prometheus/statsd_exporter/pulls/85) +* [ENHANCEMENT] Add TCP listener support [#71](https://github.com/prometheus/statsd_exporter/pulls/71) +* [ENHANCEMENT] Allow histograms for timer metrics [#66](https://github.com/prometheus/statsd_exporter/pulls/66) +* [ENHANCEMENT] Added support for sampling factor on timing events [#28](https://github.com/prometheus/statsd_exporter/pulls/28) +* [BUGFIX] Conflicting label sets no longer crash the exporter and will be + ignored. Restart to clear the remembered label set. 
[#72](https://github.com/prometheus/statsd_exporter/pulls/72) + +## 0.4.0 / 2017-05-12 + +* [ENHANCEMENT] Improve mapping configuration parser [#61](https://github.com/prometheus/statsd_exporter/pulls/61) +* [ENHANCEMENT] Add increment/decrement support to Gauges [#65](https://github.com/prometheus/statsd_exporter/pulls/65) +* [BUGFIX] Tolerate more forms of broken lines from StatsD [#48](https://github.com/prometheus/statsd_exporter/pulls/48) +* [BUGFIX] Skip metrics with invalid utf8 [#50](https://github.com/prometheus/statsd_exporter/pulls/50) +* [BUGFIX] ListenAndServe now fails on exit [#58](https://github.com/prometheus/statsd_exporter/pulls/58) + +## 0.3.0 / 2016-05-05 + +* [CHANGE] Drop `_count` suffix for `loaded_mappings` metric ([#41](https://github.com/prometheus/statsd_exporter/pulls/41)) +* [ENHANCEMENT] Use common's log and version packages, and add -version flag ([#44](https://github.com/prometheus/statsd_exporter/pulls/44)) +* [ENHANCEMENT] Add flag to disable metric type suffixes ([#37](https://github.com/prometheus/statsd_exporter/pulls/37)) +* [BUGFIX] Increase receivable UDP datagram size to 65535 bytes ([#36](https://github.com/prometheus/statsd_exporter/pulls/36)) +* [BUGFIX] Warn, not panic when negative number counter is submitted ([#33](https://github.com/prometheus/statsd_exporter/pulls/33)) + +## 0.2.0 / 2016-03-19 + +NOTE: This release renames `statsd_bridge` to `statsd_exporter` + +* [CHANGE] New Dockerfile using alpine-golang-make-onbuild base image ([#17](https://github.com/prometheus/statsd_exporter/pulls/17)) +* [ENHANCEMENT] Allow configuration of UDP read buffer ([#22](https://github.com/prometheus/statsd_exporter/pulls/22)) +* [BUGFIX] allow metrics with dashes when mapping ([#24](https://github.com/prometheus/statsd_exporter/pulls/24)) +* [ENHANCEMENT] add root endpoint with redirect ([#25](https://github.com/prometheus/statsd_exporter/pulls/25)) +* [CHANGE] rename bridge to exporter 
([#26](https://github.com/prometheus/statsd_exporter/pulls/26)) + + +## 0.1.0 / 2015-04-17 + +* Initial release diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 40503ed..b9aa654 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,18 +1,18 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. 
+ +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/Dockerfile b/Dockerfile index f4a1910..9a7c52c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,13 @@ -ARG ARCH="amd64" -ARG OS="linux" -FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest -LABEL maintainer="The Prometheus Authors " - -ARG ARCH="amd64" -ARG OS="linux" -COPY .build/${OS}-${ARCH}/statsd_exporter /bin/statsd_exporter - -USER nobody -EXPOSE 9102 9125 9125/udp -HEALTHCHECK CMD wget --spider -S "http://localhost:9102/metrics" -T 60 2>&1 || exit 1 -ENTRYPOINT [ "/bin/statsd_exporter" ] +ARG ARCH="amd64" +ARG OS="linux" +FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest +LABEL maintainer="The Prometheus Authors " + +ARG ARCH="amd64" +ARG OS="linux" +COPY .build/${OS}-${ARCH}/statsd_exporter /bin/statsd_exporter + +USER nobody +EXPOSE 9102 9125 9125/udp +HEALTHCHECK CMD wget --spider -S "http://localhost:9102/metrics" -T 60 2>&1 || exit 1 +ENTRYPOINT [ "/bin/statsd_exporter" ] diff --git a/LICENSE b/LICENSE index 261eeb9..29f81d8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,201 +1,201 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 51ab4d8..4d2dc99 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1 +1 @@ -* Matthias Rampke +* Matthias Rampke diff --git a/Makefile b/Makefile index 62f00e2..4d3552f 100644 --- a/Makefile +++ b/Makefile @@ -1,28 +1,28 @@ -# Copyright 2013 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Needs to be defined before including Makefile.common to auto-generate targets -DOCKER_ARCHS ?= amd64 armv7 arm64 - -include Makefile.common - -STATICCHECK_IGNORE = - -DOCKER_IMAGE_NAME ?= statsd-exporter - -.PHONY: bench -bench: - @echo ">> running all benchmarks" - $(GO) test -bench . -race $(pkgs) - -all: bench +# Copyright 2013 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Needs to be defined before including Makefile.common to auto-generate targets +DOCKER_ARCHS ?= amd64 armv7 arm64 + +include Makefile.common + +STATICCHECK_IGNORE = + +DOCKER_IMAGE_NAME ?= statsd-exporter + +.PHONY: bench +bench: + @echo ">> running all benchmarks" + $(GO) test -bench . -race $(pkgs) + +all: bench diff --git a/Makefile.common b/Makefile.common index b978dfc..ad022a6 100644 --- a/Makefile.common +++ b/Makefile.common @@ -1,289 +1,289 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. 
-# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... 
- -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.5.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.18.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. 
-%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif - -.PHONY: common-test-short -common-test-short: $(GOTEST_DIR) - @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: $(GOTEST_DIR) - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ - -.PHONY: common-format -common-format: - @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. 
- GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE - @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) - @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +GOVENDOR := +GO111MODULE := +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif +endif +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... 
+ +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.5.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.18.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. 
+%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... +endif + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +ifdef GO111MODULE +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. 
+ GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +else + $(GOLANGCI_LINT) run $(pkgs) +endif +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) + @git diff --exit-code -- go.sum go.mod +else + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/NOTICE b/NOTICE index 33179a9..092b9ab 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ -StatsD-to-Prometheus exporter -Copyright 2013-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). 
+StatsD-to-Prometheus exporter +Copyright 2013-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/README.md b/README.md index bdadbcd..ffca02c 100644 --- a/README.md +++ b/README.md @@ -1,474 +1,474 @@ -# statsd exporter [![Build Status](https://travis-ci.org/prometheus/statsd_exporter.svg)][travis] - -[![CircleCI](https://circleci.com/gh/prometheus/statsd_exporter/tree/master.svg?style=shield)][circleci] -[![Docker Repository on Quay](https://quay.io/repository/prometheus/statsd-exporter/status)][quay] -[![Docker Pulls](https://img.shields.io/docker/pulls/prom/statsd-exporter.svg)][hub] - -`statsd_exporter` receives StatsD-style metrics and exports them as Prometheus metrics. - -## Overview - -### With StatsD - -To pipe metrics from an existing StatsD environment into Prometheus, configure -StatsD's repeater backend to repeat all received metrics to a `statsd_exporter` -process. This exporter translates StatsD metrics to Prometheus metrics via -configured mapping rules. - - +----------+ +-------------------+ +--------------+ - | StatsD |---(UDP/TCP repeater)--->| statsd_exporter |<---(scrape /metrics)---| Prometheus | - +----------+ +-------------------+ +--------------+ - -### Without StatsD - -Since the StatsD exporter uses the same line protocol as StatsD itself, you can -also configure your applications to send StatsD metrics directly to the exporter. -In that case, you don't need to run a StatsD server anymore. - -We recommend this only as an intermediate solution and recommend switching to -[native Prometheus instrumentation](http://prometheus.io/docs/instrumenting/clientlibs/) -in the long term. - -### Tagging Extensions - -The exporter supports Librato, InfluxDB, and DogStatsD-style tags, -which will be converted into Prometheus labels. 
- -For Librato-style tags, they must be appended to the metric name with a -delimiting `#`, as so: - -``` -metric.name#tagName=val,tag2Name=val2:0|c -``` - -See the [statsd-librato-backend README](https://github.com/librato/statsd-librato-backend#tags) -for a more complete description. - -For InfluxDB-style tags, they must be appended to the metric name with a -delimiting comma, as so: - -``` -metric.name,tagName=val,tag2Name=val2:0|c -``` - -See [this InfluxDB blog post](https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd) -for a larger overview. - - -For DogStatsD-style tags, they're appended as a `|#` delimited section at the -end of the metric, as so: - -``` -metric.name:0|c|#tagName:val,tag2Name:val2 -``` - -See [Tags](https://docs.datadoghq.com/developers/dogstatsd/data_types/#tagging) -in the DogStatsD documentation for the concept description and -[Datagram Format](https://docs.datadoghq.com/developers/dogstatsd/datagram_shell/). -If you encounter problems, note that this tagging style is incompatible with -the original `statsd` implementation. - -Be aware: If you mix tag styles (e.g., Librato/InfluxDB with DogStatsD), the -exporter will consider this an error and the sample will be discarded. Also, -tags without values (`#some_tag`) are not supported and will be ignored. - -## Building and Running - -NOTE: Version 0.7.0 switched to the [kingpin](https://github.com/alecthomas/kingpin) flags library. With this change, flag behaviour is POSIX-ish: - -* long flags start with two dashes (`--version`) -* multiple short flags can be combined (but there currently is only one) -* flag processing stops at the first `--` - - ``` - $ go build - $ ./statsd_exporter --help - usage: statsd_exporter [] - - Flags: - -h, --help Show context-sensitive help (also try --help-long and --help-man). 
- --web.listen-address=":9102" - The address on which to expose the web interface and generated Prometheus metrics. - --web.telemetry-path="/metrics" - Path under which to expose metrics. - --statsd.listen-udp=":9125" - The UDP address on which to receive statsd metric lines. "" disables it. - --statsd.listen-tcp=":9125" - The TCP address on which to receive statsd metric lines. "" disables it. - --statsd.listen-unixgram="" - The Unixgram socket path to receive statsd metric lines in datagram. "" disables it. - --statsd.unixsocket-mode="755" - The permission mode of the unix socket. - --statsd.mapping-config=STATSD.MAPPING-CONFIG - Metric mapping configuration file name. - --statsd.read-buffer=STATSD.READ-BUFFER - Size (in bytes) of the operating system's transmit read buffer associated with the UDP or Unixgram connection. Please make sure the kernel parameters net.core.rmem_max is set to - a value greater than the value specified. - --statsd.cache-size=1000 Maximum size of your metric mapping cache. Relies on least recently used replacement policy if max size is reached. - --statsd.event-queue-size=10000 - Size of internal queue for processing events - --statsd.event-flush-threshold=1000 - Number of events to hold in queue before flushing - --statsd.event-flush-interval=200ms - Number of events to hold in queue before flushing - --debug.dump-fsm="" The path to dump internal FSM generated for glob matching as Dot file. - --log.level="info" Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal] - --log.format="logger:stderr" - Set the log target and format. Example: "logger:syslog?appname=bob& local=7" or "logger:stdout?json=true" - --version Show application version. - - ``` - -## Tests - - $ go test - -## Metric Mapping and Configuration - -The `statsd_exporter` can be configured to translate specific dot-separated StatsD -metrics into labeled Prometheus metrics via a simple mapping language. 
The config -file is reloaded on SIGHUP. - -A mapping definition starts with a line matching the StatsD metric in question, -with `*`s acting as wildcards for each dot-separated metric component. The -lines following the matching expression must contain one `label="value"` pair -each, and at least define the metric name (label name `name`). The Prometheus -metric is then constructed from these labels. `$n`-style references in the -label value are replaced by the n-th wildcard match in the matching line, -starting at 1. Multiple matching definitions are separated by one or more empty -lines. The first mapping rule that matches a StatsD metric wins. - -Metrics that don't match any mapping in the configuration file are translated -into Prometheus metrics without any labels and with any non-alphanumeric -characters, including periods, translated into underscores. - -In general, the different metric types are translated as follows: - - StatsD gauge -> Prometheus gauge - - StatsD counter -> Prometheus counter - - StatsD timer -> Prometheus summary <-- indicates timer quantiles - -> Prometheus counter (suffix `_total`) <-- indicates total time spent - -> Prometheus counter (suffix `_count`) <-- indicates total number of timer events - -An example mapping configuration: - -```yaml -mappings: -- match: "test.dispatcher.*.*.*" - name: "dispatcher_events_total" - labels: - processor: "$1" - action: "$2" - outcome: "$3" - job: "test_dispatcher" -- match: "*.signup.*.*" - name: "signup_events_total" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" -``` - -This would transform these example StatsD metrics into Prometheus metrics as -follows: - - test.dispatcher.FooProcessor.send.success - => dispatcher_events_total{processor="FooProcessor", action="send", outcome="success", job="test_dispatcher"} - - foo_product.signup.facebook.failure - => signup_events_total{provider="facebook", outcome="failure", job="foo_product_server"} - - test.web-server.foo.bar - => 
test_web_server_foo_bar{} - -Each mapping in the configuration file must define a `name` for the metric. The -metric's name can contain `$n`-style references to be replaced by the n-th -wildcard match in the matching line. That allows for dynamic rewrites, such as: - -```yaml -mappings: -- match: "test.*.*.counter" - name: "${2}_total" - labels: - provider: "$1" -``` - -The metric name can also contain references to regex matches. The mapping above -could be written as: - -```yaml -mappings: -- match: "test\\.(\\w+)\\.(\\w+)\\.counter" - match_type: regex - name: "${2}_total" - labels: - provider: "$1" -``` - -Be aware about yaml escape rules as a mapping like the following one will not work. -```yaml -mappings: -- match: "test\.(\w+)\.(\w+)\.counter" - match_type: regex - name: "${2}_total" - labels: - provider: "$1" -``` - -Please note that metrics with the same name must also have the same set of -label names. - -If the default metric help text is insufficient for your needs you may use the YAML -configuration to specify a custom help text for each mapping: - -```yaml -mappings: -- match: "http.request.*" - help: "Total number of http requests" - name: "http_requests_total" - labels: - code: "$1" -``` - -### StatsD timers - -By default, statsd timers are represented as a Prometheus summary with -quantiles. You may optionally configure the [quantiles and acceptable -error](https://prometheus.io/docs/practices/histograms/#quantiles), as -well as adjusting how the summary metric is aggregated: - -```yaml -mappings: -- match: "test.timing.*.*.*" - timer_type: summary - name: "my_timer" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" - summary_options: - quantiles: - - quantile: 0.99 - error: 0.001 - - quantile: 0.95 - error: 0.01 - - quantile: 0.9 - error: 0.05 - - quantile: 0.5 - error: 0.005 - max_summary_age: 30s - summary_age_buckets: 3 - stream_buffer_size: 1000 -``` - -The default quantiles are 0.99, 0.9, and 0.5. 
- -The default summary age is 10 minutes, the default number of buckets -is 5 and the default buffer size is 500. See also the -[`golang_client` docs](https://godoc.org/github.com/prometheus/client_golang/prometheus#SummaryOpts). -The `max_summary_age` corresponds to `SummaryOptions.MaxAge`, `summary_age_buckets` -to `SummaryOptions.AgeBuckets` and `stream_buffer_size` to `SummaryOptions.BufCap`. - -In the configuration, one may also set the timer type to "histogram". The -default is "summary" as in the plain text configuration format. For example, -to set the timer type for a single metric: - -```yaml -mappings: -- match: "test.timing.*.*.*" - timer_type: histogram - histogram_options: - buckets: [ 0.01, 0.025, 0.05, 0.1 ] - name: "my_timer" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" -``` - -Note that timers will be accepted with the `ms`, `h`, and `d` statsd types. The first two are timers and histograms and the `d` type is for DataDog's "distribution" type. The distribution type is treated identically to timers and histograms. - -It should be noted that whereas timers in statsd expects the unit of timing data to be in milliseconds, -prometheus expects the unit to be seconds. Hence, the exporter converts all timers to seconds -before exporting them. - -### DogStatsD Client Behavior - -#### `timed()` decorator - -If you are using the DogStatsD client's [timed](https://datadogpy.readthedocs.io/en/latest/#datadog.threadstats.base.ThreadStats.timed) decorator, -it emits the metric in seconds, set [use_ms](https://datadogpy.readthedocs.io/en/latest/index.html?highlight=use_ms) to `True` to fix this. - -### Regular expression matching - -Another capability when using YAML configuration is the ability to define matches -using raw regular expressions as opposed to the default globbing style of match. -This may allow for pulling structured data from otherwise poorly named statsd -metrics AND allow for more precise targetting of match rules. 
When no `match_type` -paramter is specified the default value of `glob` will be assumed: - -```yaml -mappings: -- match: "(.*)\.(.*)--(.*)\.status\.(.*)\.count" - match_type: regex - name: "request_total" - labels: - hostname: "$1" - exec: "$2" - protocol: "$3" - code: "$4" -``` - -Note, that one may also set the histogram buckets. If not set, then the default -[Prometheus client values](https://godoc.org/github.com/prometheus/client_golang/prometheus#pkg-variables) are used: `[.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10]`. `+Inf` is added -automatically. - -`timer_type` is only used when the statsd metric type is a timer. `buckets` is -only used when the statsd metric type is a timerand the `timer_type` is set to -"histogram." - -### Global defaults - -One may also set defaults for the timer type, buckets or quantiles, and match_type. These will be used -by all mappings that do not define these. - -An option that can only be configured in `defaults` is `glob_disable_ordering`, which is `false` if omitted. By setting this to `true`, `glob` match type will not honor the occurance of rules in the mapping rules file and always treat `*` as lower priority than a general string. - -```yaml -defaults: - timer_type: histogram - buckets: [.005, .01, .025, .05, .1, .25, .5, 1, 2.5 ] - match_type: glob - glob_disable_ordering: false - ttl: 0 # metrics do not expire -mappings: -# This will be a histogram using the buckets set in `defaults`. -- match: "test.timing.*.*.*" - name: "my_timer" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" -# This will be a summary timer. -- match: "other.timing.*.*.*" - timer_type: summary - name: "other_timer" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server_other" -``` - -### Choosing between glob or regex match type - -Despite from the missing flexibility of using regular expression in mapping and -formatting labels, `glob` matching is optimized to have better performance than -`regex` in certain use cases. 
In short, glob will have best performance if the -rules amount is not so less and captures (using of `*`) is not to much in a -single rule. Whether disabling ordering in glob or not won't have a noticable -effect on performance in general use cases. In edge cases like the below however, -disabling ordering will be beneficial: - - a.*.*.*.* - a.b.*.*.* - a.b.c.*.* - a.b.c.d.* - -The reason is that the list assignment of captures (using of `*`) is the most -expensive operation in glob. Honoring ordering will result in up to 10 list -assignments, while without ordering it will need only 4 at most. - -For details, see [pkg/mapper/fsm/README.md](pkg/mapper/fsm/README.md). -Running `go test -bench .` in **pkg/mapper** directory will produce -a detailed comparison between the two match type. - -### `drop` action - -You may also drop metrics by specifying a "drop" action on a match. For -example: - -```yaml -mappings: -# This metric would match as normal. -- match: "test.timing.*.*.*" - name: "my_timer" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" -# Any metric not matched will be dropped because "." matches all metrics. -- match: "." - match_type: regex - action: drop - name: "dropped" -``` - -You can drop any metric using the normal match syntax. -The default action is "map" which does the normal metrics mapping. - -### Explicit metric type mapping - -StatsD allows emitting of different metric types under the same metric name, -but the Prometheus client library can't merge those. For this use-case the -mapping definition allows you to specify which metric type to match: - -``` -mappings: -- match: "test.foo.*" - name: "test_foo" - match_metric_type: counter - labels: - provider: "$1" -``` - -Possible values for `match_metric_type` are `gauge`, `counter` and `timer`. - -### Mapping cache size and cache replacement policy - -There is a cache used to improve the performance of the metric mapping, that can greatly improvement performance. 
-The cache has a default maximum of 1000 unique statsd metric names -> prometheus metrics mappings that it can store. -This maximum can be adjust using the `statsd.cache-size` flag. - -If the maximum is reached, entries are by default rotated using the [least recently used replacement policy](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)). This strategy is optimal when memory is constrained as only the most recent entries are retained. - -Alternatively, you can choose a [random-replacement cache strategy](https://en.wikipedia.org/wiki/Cache_replacement_policies#Random_replacement_(RR)). This is less optimal if the cache is smaller than the cacheable set, but requires less locking. Use this for very high throughput, but make sure to allow for a cache that holds all metrics. - -The optimal cache size is determined by the cardinality of the _incoming_ metrics. - -### Time series expiration - -The `ttl` parameter can be used to define the expiration time for stale metrics. -The value is a time duration with valid time units: "ns", "us" (or "µs"), -"ms", "s", "m", "h". For example, `ttl: 1m20s`. `0` value is used to indicate -metrics that do not expire. - - TTL configuration is stored for each mapped metric name/labels combination - whenever new samples are received. This means that you cannot immediately - expire a metric only by changing the mapping configuration. At least one - sample must be received for updated mappings to take effect. - - ### Event flushing configuration - - Internally `statsd_exporter` runs a goroutine for each network listener (UDP, TCP & Unix Socket). These each receive and parse metrics received into an event. For performance purposes, these events are queued internally and flushed to the main exporter goroutine periodically in batches. The size of this queue and the flush criteria can be tuned with the `--statsd.event-queue-size`, `--statsd.event-flush-threshold` and `--statsd.event-flush-interval`. 
However, the defaults should perform well even for very high traffic environments. - -## Using Docker - -You can deploy this exporter using the [prom/statsd-exporter](https://registry.hub.docker.com/r/prom/statsd-exporter) Docker image. - -For example: - -```bash -docker pull prom/statsd-exporter - -docker run -d -p 9102:9102 -p 9125:9125 -p 9125:9125/udp \ - -v $PWD/statsd_mapping.yml:/tmp/statsd_mapping.yml \ - prom/statsd-exporter --statsd.mapping-config=/tmp/statsd_mapping.yml -``` - - -[travis]: https://travis-ci.org/prometheus/statsd_exporter -[circleci]: https://circleci.com/gh/prometheus/statsd_exporter -[quay]: https://quay.io/repository/prometheus/statsd-exporter -[hub]: https://hub.docker.com/r/prom/statsd-exporter/ +# statsd exporter [![Build Status](https://travis-ci.org/prometheus/statsd_exporter.svg)][travis] + +[![CircleCI](https://circleci.com/gh/prometheus/statsd_exporter/tree/master.svg?style=shield)][circleci] +[![Docker Repository on Quay](https://quay.io/repository/prometheus/statsd-exporter/status)][quay] +[![Docker Pulls](https://img.shields.io/docker/pulls/prom/statsd-exporter.svg)][hub] + +`statsd_exporter` receives StatsD-style metrics and exports them as Prometheus metrics. + +## Overview + +### With StatsD + +To pipe metrics from an existing StatsD environment into Prometheus, configure +StatsD's repeater backend to repeat all received metrics to a `statsd_exporter` +process. This exporter translates StatsD metrics to Prometheus metrics via +configured mapping rules. + + +----------+ +-------------------+ +--------------+ + | StatsD |---(UDP/TCP repeater)--->| statsd_exporter |<---(scrape /metrics)---| Prometheus | + +----------+ +-------------------+ +--------------+ + +### Without StatsD + +Since the StatsD exporter uses the same line protocol as StatsD itself, you can +also configure your applications to send StatsD metrics directly to the exporter. +In that case, you don't need to run a StatsD server anymore. 
+ +We recommend this only as an intermediate solution and recommend switching to +[native Prometheus instrumentation](http://prometheus.io/docs/instrumenting/clientlibs/) +in the long term. + +### Tagging Extensions + +The exporter supports Librato, InfluxDB, and DogStatsD-style tags, +which will be converted into Prometheus labels. + +For Librato-style tags, they must be appended to the metric name with a +delimiting `#`, as so: + +``` +metric.name#tagName=val,tag2Name=val2:0|c +``` + +See the [statsd-librato-backend README](https://github.com/librato/statsd-librato-backend#tags) +for a more complete description. + +For InfluxDB-style tags, they must be appended to the metric name with a +delimiting comma, as so: + +``` +metric.name,tagName=val,tag2Name=val2:0|c +``` + +See [this InfluxDB blog post](https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd) +for a larger overview. + + +For DogStatsD-style tags, they're appended as a `|#` delimited section at the +end of the metric, as so: + +``` +metric.name:0|c|#tagName:val,tag2Name:val2 +``` + +See [Tags](https://docs.datadoghq.com/developers/dogstatsd/data_types/#tagging) +in the DogStatsD documentation for the concept description and +[Datagram Format](https://docs.datadoghq.com/developers/dogstatsd/datagram_shell/). +If you encounter problems, note that this tagging style is incompatible with +the original `statsd` implementation. + +Be aware: If you mix tag styles (e.g., Librato/InfluxDB with DogStatsD), the +exporter will consider this an error and the sample will be discarded. Also, +tags without values (`#some_tag`) are not supported and will be ignored. + +## Building and Running + +NOTE: Version 0.7.0 switched to the [kingpin](https://github.com/alecthomas/kingpin) flags library. 
With this change, flag behaviour is POSIX-ish: + +* long flags start with two dashes (`--version`) +* multiple short flags can be combined (but there currently is only one) +* flag processing stops at the first `--` + + ``` + $ go build + $ ./statsd_exporter --help + usage: statsd_exporter [] + + Flags: + -h, --help Show context-sensitive help (also try --help-long and --help-man). + --web.listen-address=":9102" + The address on which to expose the web interface and generated Prometheus metrics. + --web.telemetry-path="/metrics" + Path under which to expose metrics. + --statsd.listen-udp=":9125" + The UDP address on which to receive statsd metric lines. "" disables it. + --statsd.listen-tcp=":9125" + The TCP address on which to receive statsd metric lines. "" disables it. + --statsd.listen-unixgram="" + The Unixgram socket path to receive statsd metric lines in datagram. "" disables it. + --statsd.unixsocket-mode="755" + The permission mode of the unix socket. + --statsd.mapping-config=STATSD.MAPPING-CONFIG + Metric mapping configuration file name. + --statsd.read-buffer=STATSD.READ-BUFFER + Size (in bytes) of the operating system's transmit read buffer associated with the UDP or Unixgram connection. Please make sure the kernel parameters net.core.rmem_max is set to + a value greater than the value specified. + --statsd.cache-size=1000 Maximum size of your metric mapping cache. Relies on least recently used replacement policy if max size is reached. + --statsd.event-queue-size=10000 + Size of internal queue for processing events + --statsd.event-flush-threshold=1000 + Number of events to hold in queue before flushing + --statsd.event-flush-interval=200ms + Number of events to hold in queue before flushing + --debug.dump-fsm="" The path to dump internal FSM generated for glob matching as Dot file. + --log.level="info" Only log messages with the given severity or above. 
Valid levels: [debug, info, warn, error, fatal] + --log.format="logger:stderr" + Set the log target and format. Example: "logger:syslog?appname=bob& local=7" or "logger:stdout?json=true" + --version Show application version. + + ``` + +## Tests + + $ go test + +## Metric Mapping and Configuration + +The `statsd_exporter` can be configured to translate specific dot-separated StatsD +metrics into labeled Prometheus metrics via a simple mapping language. The config +file is reloaded on SIGHUP. + +A mapping definition starts with a line matching the StatsD metric in question, +with `*`s acting as wildcards for each dot-separated metric component. The +lines following the matching expression must contain one `label="value"` pair +each, and at least define the metric name (label name `name`). The Prometheus +metric is then constructed from these labels. `$n`-style references in the +label value are replaced by the n-th wildcard match in the matching line, +starting at 1. Multiple matching definitions are separated by one or more empty +lines. The first mapping rule that matches a StatsD metric wins. + +Metrics that don't match any mapping in the configuration file are translated +into Prometheus metrics without any labels and with any non-alphanumeric +characters, including periods, translated into underscores. 
+ +In general, the different metric types are translated as follows: + + StatsD gauge -> Prometheus gauge + + StatsD counter -> Prometheus counter + + StatsD timer -> Prometheus summary <-- indicates timer quantiles + -> Prometheus counter (suffix `_total`) <-- indicates total time spent + -> Prometheus counter (suffix `_count`) <-- indicates total number of timer events + +An example mapping configuration: + +```yaml +mappings: +- match: "test.dispatcher.*.*.*" + name: "dispatcher_events_total" + labels: + processor: "$1" + action: "$2" + outcome: "$3" + job: "test_dispatcher" +- match: "*.signup.*.*" + name: "signup_events_total" + labels: + provider: "$2" + outcome: "$3" + job: "${1}_server" +``` + +This would transform these example StatsD metrics into Prometheus metrics as +follows: + + test.dispatcher.FooProcessor.send.success + => dispatcher_events_total{processor="FooProcessor", action="send", outcome="success", job="test_dispatcher"} + + foo_product.signup.facebook.failure + => signup_events_total{provider="facebook", outcome="failure", job="foo_product_server"} + + test.web-server.foo.bar + => test_web_server_foo_bar{} + +Each mapping in the configuration file must define a `name` for the metric. The +metric's name can contain `$n`-style references to be replaced by the n-th +wildcard match in the matching line. That allows for dynamic rewrites, such as: + +```yaml +mappings: +- match: "test.*.*.counter" + name: "${2}_total" + labels: + provider: "$1" +``` + +The metric name can also contain references to regex matches. The mapping above +could be written as: + +```yaml +mappings: +- match: "test\\.(\\w+)\\.(\\w+)\\.counter" + match_type: regex + name: "${2}_total" + labels: + provider: "$1" +``` + +Be aware about yaml escape rules as a mapping like the following one will not work. 
+```yaml +mappings: +- match: "test\.(\w+)\.(\w+)\.counter" + match_type: regex + name: "${2}_total" + labels: + provider: "$1" +``` + +Please note that metrics with the same name must also have the same set of +label names. + +If the default metric help text is insufficient for your needs you may use the YAML +configuration to specify a custom help text for each mapping: + +```yaml +mappings: +- match: "http.request.*" + help: "Total number of http requests" + name: "http_requests_total" + labels: + code: "$1" +``` + +### StatsD timers + +By default, statsd timers are represented as a Prometheus summary with +quantiles. You may optionally configure the [quantiles and acceptable +error](https://prometheus.io/docs/practices/histograms/#quantiles), as +well as adjusting how the summary metric is aggregated: + +```yaml +mappings: +- match: "test.timing.*.*.*" + timer_type: summary + name: "my_timer" + labels: + provider: "$2" + outcome: "$3" + job: "${1}_server" + summary_options: + quantiles: + - quantile: 0.99 + error: 0.001 + - quantile: 0.95 + error: 0.01 + - quantile: 0.9 + error: 0.05 + - quantile: 0.5 + error: 0.005 + max_summary_age: 30s + summary_age_buckets: 3 + stream_buffer_size: 1000 +``` + +The default quantiles are 0.99, 0.9, and 0.5. + +The default summary age is 10 minutes, the default number of buckets +is 5 and the default buffer size is 500. See also the +[`golang_client` docs](https://godoc.org/github.com/prometheus/client_golang/prometheus#SummaryOpts). +The `max_summary_age` corresponds to `SummaryOptions.MaxAge`, `summary_age_buckets` +to `SummaryOptions.AgeBuckets` and `stream_buffer_size` to `SummaryOptions.BufCap`. + +In the configuration, one may also set the timer type to "histogram". The +default is "summary" as in the plain text configuration format. 
For example,
+to set the timer type for a single metric:
+
+```yaml
+mappings:
+- match: "test.timing.*.*.*"
+  timer_type: histogram
+  histogram_options:
+    buckets: [ 0.01, 0.025, 0.05, 0.1 ]
+  name: "my_timer"
+  labels:
+    provider: "$2"
+    outcome: "$3"
+    job: "${1}_server"
+```
+
+Note that timers will be accepted with the `ms`, `h`, and `d` statsd types. The first two are timers and histograms and the `d` type is for DataDog's "distribution" type. The distribution type is treated identically to timers and histograms.
+
+It should be noted that whereas timers in statsd expect the unit of timing data to be in milliseconds,
+Prometheus expects the unit to be seconds. Hence, the exporter converts all timers to seconds
+before exporting them.
+
+### DogStatsD Client Behavior
+
+#### `timed()` decorator
+
+If you are using the DogStatsD client's [timed](https://datadogpy.readthedocs.io/en/latest/#datadog.threadstats.base.ThreadStats.timed) decorator,
+it emits the metric in seconds; set [use_ms](https://datadogpy.readthedocs.io/en/latest/index.html?highlight=use_ms) to `True` to fix this.
+
+### Regular expression matching
+
+Another capability when using YAML configuration is the ability to define matches
+using raw regular expressions as opposed to the default globbing style of match.
+This may allow for pulling structured data from otherwise poorly named statsd
+metrics AND allow for more precise targeting of match rules. When no `match_type`
+parameter is specified, the default value of `glob` will be assumed:
+
+```yaml
+mappings:
+- match: "(.*)\.(.*)--(.*)\.status\.(.*)\.count"
+  match_type: regex
+  name: "request_total"
+  labels:
+    hostname: "$1"
+    exec: "$2"
+    protocol: "$3"
+    code: "$4"
+```
+
+Note that one may also set the histogram buckets. If not set, then the default
+[Prometheus client values](https://godoc.org/github.com/prometheus/client_golang/prometheus#pkg-variables) are used: `[.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10]`. 
`+Inf` is added
+automatically.
+
+`timer_type` is only used when the statsd metric type is a timer. `buckets` is
+only used when the statsd metric type is a timer and the `timer_type` is set to
+"histogram."
+
+### Global defaults
+
+One may also set defaults for the timer type, buckets or quantiles, and match_type. These will be used
+by all mappings that do not define these.
+
+An option that can only be configured in `defaults` is `glob_disable_ordering`, which is `false` if omitted. By setting this to `true`, `glob` match type will not honor the occurrence of rules in the mapping rules file and always treat `*` as lower priority than a general string.
+
+```yaml
+defaults:
+  timer_type: histogram
+  buckets: [.005, .01, .025, .05, .1, .25, .5, 1, 2.5 ]
+  match_type: glob
+  glob_disable_ordering: false
+  ttl: 0 # metrics do not expire
+mappings:
+# This will be a histogram using the buckets set in `defaults`.
+- match: "test.timing.*.*.*"
+  name: "my_timer"
+  labels:
+    provider: "$2"
+    outcome: "$3"
+    job: "${1}_server"
+# This will be a summary timer.
+- match: "other.timing.*.*.*"
+  timer_type: summary
+  name: "other_timer"
+  labels:
+    provider: "$2"
+    outcome: "$3"
+    job: "${1}_server_other"
+```
+
+### Choosing between glob or regex match type
+
+Despite lacking the flexibility of regular expressions for matching and
+formatting labels, `glob` matching is optimized to have better performance than
+`regex` in certain use cases. In short, glob will have the best performance if the
+number of rules is not too small and the captures (uses of `*`) are not too many in a
+single rule. Whether or not ordering is disabled in glob won't have a noticeable
+effect on performance in general use cases. In edge cases like the one below, however,
+disabling ordering will be beneficial:
+
+    a.*.*.*.*
+    a.b.*.*.*
+    a.b.c.*.*
+    a.b.c.d.*
+
+The reason is that the list assignment of captures (uses of `*`) is the most
+expensive operation in glob. 
Honoring ordering will result in up to 10 list
+assignments, while without ordering it will need only 4 at most.
+
+For details, see [pkg/mapper/fsm/README.md](pkg/mapper/fsm/README.md).
+Running `go test -bench .` in the **pkg/mapper** directory will produce
+a detailed comparison between the two match types.
+
+### `drop` action
+
+You may also drop metrics by specifying a "drop" action on a match. For
+example:
+
+```yaml
+mappings:
+# This metric would match as normal.
+- match: "test.timing.*.*.*"
+  name: "my_timer"
+  labels:
+    provider: "$2"
+    outcome: "$3"
+    job: "${1}_server"
+# Any metric not matched will be dropped because "." matches all metrics.
+- match: "."
+  match_type: regex
+  action: drop
+  name: "dropped"
+```
+
+You can drop any metric using the normal match syntax.
+The default action is "map" which does the normal metrics mapping.
+
+### Explicit metric type mapping
+
+StatsD allows emitting different metric types under the same metric name,
+but the Prometheus client library can't merge those. For this use case the
+mapping definition allows you to specify which metric type to match:
+
+```
+mappings:
+- match: "test.foo.*"
+  name: "test_foo"
+  match_metric_type: counter
+  labels:
+    provider: "$1"
+```
+
+Possible values for `match_metric_type` are `gauge`, `counter` and `timer`.
+
+### Mapping cache size and cache replacement policy
+
+There is a cache used to improve the performance of the metric mapping, which can greatly improve performance.
+The cache has a default maximum of 1000 unique statsd metric names -> prometheus metrics mappings that it can store.
+This maximum can be adjusted using the `statsd.cache-size` flag.
+
+If the maximum is reached, entries are by default rotated using the [least recently used replacement policy](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)). This strategy is optimal when memory is constrained as only the most recent entries are retained. 

+
+Alternatively, you can choose a [random-replacement cache strategy](https://en.wikipedia.org/wiki/Cache_replacement_policies#Random_replacement_(RR)). This is less optimal if the cache is smaller than the cacheable set, but requires less locking. Use this for very high throughput, but make sure to allow for a cache that holds all metrics.
+
+The optimal cache size is determined by the cardinality of the _incoming_ metrics.
+
+### Time series expiration
+
+The `ttl` parameter can be used to define the expiration time for stale metrics.
+The value is a time duration with valid time units: "ns", "us" (or "µs"),
+"ms", "s", "m", "h". For example, `ttl: 1m20s`. A `0` value is used to indicate
+metrics that do not expire.
+
+ TTL configuration is stored for each mapped metric name/labels combination
+ whenever new samples are received. This means that you cannot immediately
+ expire a metric only by changing the mapping configuration. At least one
+ sample must be received for updated mappings to take effect.
+
+### Event flushing configuration
+
+Internally `statsd_exporter` runs a goroutine for each network listener (UDP, TCP & Unix Socket). These each receive and parse metric lines into events. For performance purposes, these events are queued internally and flushed to the main exporter goroutine periodically in batches. The size of this queue and the flush criteria can be tuned with the `--statsd.event-queue-size`, `--statsd.event-flush-threshold` and `--statsd.event-flush-interval` flags. However, the defaults should perform well even for very high traffic environments.
+
+## Using Docker
+
+You can deploy this exporter using the [prom/statsd-exporter](https://registry.hub.docker.com/r/prom/statsd-exporter) Docker image. 
+ +For example: + +```bash +docker pull prom/statsd-exporter + +docker run -d -p 9102:9102 -p 9125:9125 -p 9125:9125/udp \ + -v $PWD/statsd_mapping.yml:/tmp/statsd_mapping.yml \ + prom/statsd-exporter --statsd.mapping-config=/tmp/statsd_mapping.yml +``` + + +[travis]: https://travis-ci.org/prometheus/statsd_exporter +[circleci]: https://circleci.com/gh/prometheus/statsd_exporter +[quay]: https://quay.io/repository/prometheus/statsd-exporter +[hub]: https://hub.docker.com/r/prom/statsd-exporter/ diff --git a/VERSION b/VERSION index a551051..31d7b1a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.15.0 +0.15.0 diff --git a/bridge_test.go b/bridge_test.go index c8862df..221adfb 100644 --- a/bridge_test.go +++ b/bridge_test.go @@ -1,442 +1,444 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "reflect" - "testing" - - "github.com/go-kit/kit/log" -) - -func TestHandlePacket(t *testing.T) { - scenarios := []struct { - name string - in string - out Events - }{ - { - name: "empty", - }, { - name: "simple counter", - in: "foo:2|c", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 2, - labels: map[string]string{}, - }, - }, - }, { - name: "simple gauge", - in: "foo:3|g", - out: Events{ - &GaugeEvent{ - metricName: "foo", - value: 3, - labels: map[string]string{}, - }, - }, - }, { - name: "gauge with sampling", - in: "foo:3|g|@0.2", - out: Events{ - &GaugeEvent{ - metricName: "foo", - value: 3, - labels: map[string]string{}, - }, - }, - }, { - name: "gauge decrement", - in: "foo:-10|g", - out: Events{ - &GaugeEvent{ - metricName: "foo", - value: -10, - relative: true, - labels: map[string]string{}, - }, - }, - }, { - name: "simple timer", - in: "foo:200|ms", - out: Events{ - &TimerEvent{ - metricName: "foo", - value: 200, - labels: map[string]string{}, - }, - }, - }, { - name: "simple histogram", - in: "foo:200|h", - out: Events{ - &TimerEvent{ - metricName: "foo", - value: 200, - labels: map[string]string{}, - }, - }, - }, { - name: "simple distribution", - in: "foo:200|d", - out: Events{ - &TimerEvent{ - metricName: "foo", - value: 200, - labels: map[string]string{}, - }, - }, - }, { - name: "distribution with sampling", - in: "foo:0.01|d|@0.2|#tag1:bar,#tag2:baz", - out: Events{ - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": 
"bar", "tag2": "baz"}, - }, - }, - }, { - name: "librato tag extension", - in: "foo#tag1=bar,tag2=baz:100|c", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - }, - }, { - name: "librato tag extension with tag keys unsupported by prometheus", - in: "foo#09digits=0,tag.with.dots=1:100|c", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"_09digits": "0", "tag_with_dots": "1"}, - }, - }, - }, { - name: "influxdb tag extension", - in: "foo,tag1=bar,tag2=baz:100|c", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - }, - }, { - name: "influxdb tag extension with tag keys unsupported by prometheus", - in: "foo,09digits=0,tag.with.dots=1:100|c", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"_09digits": "0", "tag_with_dots": "1"}, - }, - }, - }, { - name: "datadog tag extension", - in: "foo:100|c|#tag1:bar,tag2:baz", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - }, - }, { - name: "datadog tag extension with # in all keys (as sent by datadog php client)", - in: "foo:100|c|#tag1:bar,#tag2:baz", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - }, - }, { - name: "datadog tag extension with tag keys unsupported by prometheus", - in: "foo:100|c|#09digits:0,tag.with.dots:1", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"_09digits": "0", "tag_with_dots": "1"}, - }, - }, - }, { - name: "datadog tag extension with valueless tags: ignored", - in: "foo:100|c|#tag_without_a_value", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{}, - }, - }, - }, { - name: "datadog 
tag extension with valueless tags (edge case)", - in: "foo:100|c|#tag_without_a_value,tag:value", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"tag": "value"}, - }, - }, - }, { - name: "datadog tag extension with empty tags (edge case)", - in: "foo:100|c|#tag:value,,", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 100, - labels: map[string]string{"tag": "value"}, - }, - }, - }, { - name: "datadog tag extension with sampling", - in: "foo:100|c|@0.1|#tag1:bar,#tag2:baz", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 1000, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - }, - }, { - name: "librato/dogstatsd mixed tag styles without sampling", - in: "foo#tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz", - out: Events{}, - }, { - name: "influxdb/dogstatsd mixed tag styles without sampling", - in: "foo,tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz", - out: Events{}, - }, { - name: "mixed tag styles with sampling", - in: "foo#tag1=foo,tag3=bing:100|c|@0.1|#tag1:bar,#tag2:baz", - out: Events{}, - }, { - name: "histogram with sampling", - in: "foo:0.01|h|@0.2|#tag1:bar,#tag2:baz", - out: Events{ - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - &TimerEvent{ - metricName: "foo", - value: 0.01, - labels: map[string]string{"tag1": "bar", "tag2": "baz"}, - }, - }, - }, { - name: "datadog tag extension with multiple colons", - in: "foo:100|c|@0.1|#tag1:foo:bar", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 1000, - labels: map[string]string{"tag1": "foo:bar"}, - }, 
- }, - }, { - name: "datadog tag extension with invalid utf8 tag values", - in: "foo:100|c|@0.1|#tag:\xc3\x28invalid", - }, { - name: "datadog tag extension with both valid and invalid utf8 tag values", - in: "foo:100|c|@0.1|#tag1:valid,tag2:\xc3\x28invalid", - }, { - name: "multiple metrics with invalid datadog utf8 tag values", - in: "foo:200|c|#tag:value\nfoo:300|c|#tag:\xc3\x28invalid", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 200, - labels: map[string]string{"tag": "value"}, - }, - }, - }, { - name: "combined multiline metrics", - in: "foo:200|ms:300|ms:5|c|@0.1:6|g\nbar:1|c:5|ms", - out: Events{ - &TimerEvent{ - metricName: "foo", - value: 200, - labels: map[string]string{}, - }, - &TimerEvent{ - metricName: "foo", - value: 300, - labels: map[string]string{}, - }, - &CounterEvent{ - metricName: "foo", - value: 50, - labels: map[string]string{}, - }, - &GaugeEvent{ - metricName: "foo", - value: 6, - labels: map[string]string{}, - }, - &CounterEvent{ - metricName: "bar", - value: 1, - labels: map[string]string{}, - }, - &TimerEvent{ - metricName: "bar", - value: 5, - labels: map[string]string{}, - }, - }, - }, { - name: "timings with sampling factor", - in: "foo.timing:0.5|ms|@0.1", - out: Events{ - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - &TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - 
&TimerEvent{metricName: "foo.timing", value: 0.5, labels: map[string]string{}}, - }, - }, { - name: "bad line", - in: "foo", - }, { - name: "bad component", - in: "foo:1", - }, { - name: "bad value", - in: "foo:1o|c", - }, { - name: "illegal sampling factor", - in: "foo:1|c|@bar", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 1, - labels: map[string]string{}, - }, - }, - }, { - name: "zero sampling factor", - in: "foo:2|c|@0", - out: Events{ - &CounterEvent{ - metricName: "foo", - value: 2, - labels: map[string]string{}, - }, - }, - }, { - name: "illegal stat type", - in: "foo:2|t", - }, - { - name: "empty metric name", - in: ":100|ms", - }, - { - name: "empty component", - in: "foo:1|c|", - }, - { - name: "invalid utf8", - in: "invalid\xc3\x28utf8:1|c", - }, - { - name: "some invalid utf8", - in: "valid_utf8:1|c\ninvalid\xc3\x28utf8:1|c", - out: Events{ - &CounterEvent{ - metricName: "valid_utf8", - value: 1, - labels: map[string]string{}, - }, - }, - }, - } - - for k, l := range []statsDPacketHandler{&StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} { - events := make(chan Events, 32) - l.SetEventHandler(&unbufferedEventHandler{c: events}) - for i, scenario := range scenarios { - l.handlePacket([]byte(scenario.in)) - - le := len(events) - // Flatten actual events. - actual := Events{} - for i := 0; i < le; i++ { - actual = append(actual, <-events...) - } - - if len(actual) != len(scenario.out) { - t.Fatalf("%d.%d. Expected %d events, got %d in scenario '%s'", k, i, len(scenario.out), len(actual), scenario.name) - } - - for j, expected := range scenario.out { - if !reflect.DeepEqual(&expected, &actual[j]) { - t.Fatalf("%d.%d.%d. 
Expected %#v, got %#v in scenario '%s'", k, i, j, expected, actual[j], scenario.name) - } - } - } - } -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "reflect" + "testing" + + "github.com/go-kit/kit/log" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/listener" +) + +func TestHandlePacket(t *testing.T) { + scenarios := []struct { + name string + in string + out event.Events + }{ + { + name: "empty", + }, { + name: "simple counter", + in: "foo:2|c", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 2, + CLabels: map[string]string{}, + }, + }, + }, { + name: "simple gauge", + in: "foo:3|g", + out: event.Events{ + &event.GaugeEvent{ + GMetricName: "foo", + GValue: 3, + GLabels: map[string]string{}, + }, + }, + }, { + name: "gauge with sampling", + in: "foo:3|g|@0.2", + out: event.Events{ + &event.GaugeEvent{ + GMetricName: "foo", + GValue: 3, + GLabels: map[string]string{}, + }, + }, + }, { + name: "gauge decrement", + in: "foo:-10|g", + out: event.Events{ + &event.GaugeEvent{ + GMetricName: "foo", + GValue: -10, + GRelative: true, + GLabels: map[string]string{}, + }, + }, + }, { + name: "simple timer", + in: "foo:200|ms", + out: event.Events{ + &event.TimerEvent{ + TMetricName: "foo", + TValue: 200, + TLabels: map[string]string{}, + }, + }, + }, { + name: "simple histogram", + in: "foo:200|h", + out: event.Events{ + 
&event.TimerEvent{ + TMetricName: "foo", + TValue: 200, + TLabels: map[string]string{}, + }, + }, + }, { + name: "simple distribution", + in: "foo:200|d", + out: event.Events{ + &event.TimerEvent{ + TMetricName: "foo", + TValue: 200, + TLabels: map[string]string{}, + }, + }, + }, { + name: "distribution with sampling", + in: "foo:0.01|d|@0.2|#tag1:bar,#tag2:baz", + out: event.Events{ + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "librato tag extension", + in: "foo#tag1=bar,tag2=baz:100|c", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "librato tag extension with tag keys unsupported by prometheus", + in: "foo#09digits=0,tag.with.dots=1:100|c", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"_09digits": "0", "tag_with_dots": "1"}, + }, + }, + }, { + name: "influxdb tag extension", + in: "foo,tag1=bar,tag2=baz:100|c", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "influxdb tag extension with tag keys unsupported by prometheus", + in: "foo,09digits=0,tag.with.dots=1:100|c", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: 
map[string]string{"_09digits": "0", "tag_with_dots": "1"}, + }, + }, + }, { + name: "datadog tag extension", + in: "foo:100|c|#tag1:bar,tag2:baz", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "datadog tag extension with # in all keys (as sent by datadog php client)", + in: "foo:100|c|#tag1:bar,#tag2:baz", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "datadog tag extension with tag keys unsupported by prometheus", + in: "foo:100|c|#09digits:0,tag.with.dots:1", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"_09digits": "0", "tag_with_dots": "1"}, + }, + }, + }, { + name: "datadog tag extension with valueless tags: ignored", + in: "foo:100|c|#tag_without_a_value", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{}, + }, + }, + }, { + name: "datadog tag extension with valueless tags (edge case)", + in: "foo:100|c|#tag_without_a_value,tag:value", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"tag": "value"}, + }, + }, + }, { + name: "datadog tag extension with empty tags (edge case)", + in: "foo:100|c|#tag:value,,", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{"tag": "value"}, + }, + }, + }, { + name: "datadog tag extension with sampling", + in: "foo:100|c|@0.1|#tag1:bar,#tag2:baz", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 1000, + CLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "librato/dogstatsd mixed tag styles without sampling", + in: "foo#tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz", + out: event.Events{}, + 
}, { + name: "influxdb/dogstatsd mixed tag styles without sampling", + in: "foo,tag1=foo,tag3=bing:100|c|#tag1:bar,#tag2:baz", + out: event.Events{}, + }, { + name: "mixed tag styles with sampling", + in: "foo#tag1=foo,tag3=bing:100|c|@0.1|#tag1:bar,#tag2:baz", + out: event.Events{}, + }, { + name: "histogram with sampling", + in: "foo:0.01|h|@0.2|#tag1:bar,#tag2:baz", + out: event.Events{ + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + &event.TimerEvent{ + TMetricName: "foo", + TValue: 0.01, + TLabels: map[string]string{"tag1": "bar", "tag2": "baz"}, + }, + }, + }, { + name: "datadog tag extension with multiple colons", + in: "foo:100|c|@0.1|#tag1:foo:bar", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 1000, + CLabels: map[string]string{"tag1": "foo:bar"}, + }, + }, + }, { + name: "datadog tag extension with invalid utf8 tag values", + in: "foo:100|c|@0.1|#tag:\xc3\x28invalid", + }, { + name: "datadog tag extension with both valid and invalid utf8 tag values", + in: "foo:100|c|@0.1|#tag1:valid,tag2:\xc3\x28invalid", + }, { + name: "multiple metrics with invalid datadog utf8 tag values", + in: "foo:200|c|#tag:value\nfoo:300|c|#tag:\xc3\x28invalid", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 200, + CLabels: map[string]string{"tag": "value"}, + }, + }, + }, { + name: "combined multiline metrics", + in: "foo:200|ms:300|ms:5|c|@0.1:6|g\nbar:1|c:5|ms", + out: event.Events{ + &event.TimerEvent{ + TMetricName: "foo", + TValue: 200, + TLabels: map[string]string{}, + }, 
+ &event.TimerEvent{ + TMetricName: "foo", + TValue: 300, + TLabels: map[string]string{}, + }, + &event.CounterEvent{ + CMetricName: "foo", + CValue: 50, + CLabels: map[string]string{}, + }, + &event.GaugeEvent{ + GMetricName: "foo", + GValue: 6, + GLabels: map[string]string{}, + }, + &event.CounterEvent{ + CMetricName: "bar", + CValue: 1, + CLabels: map[string]string{}, + }, + &event.TimerEvent{ + TMetricName: "bar", + TValue: 5, + TLabels: map[string]string{}, + }, + }, + }, { + name: "timings with sampling factor", + in: "foo.timing:0.5|ms|@0.1", + out: event.Events{ + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + &event.TimerEvent{TMetricName: "foo.timing", TValue: 0.5, TLabels: map[string]string{}}, + }, + }, { + name: "bad line", + in: "foo", + }, { + name: "bad component", + in: "foo:1", + }, { + name: "bad value", + in: "foo:1o|c", + }, { + name: "illegal sampling factor", + in: "foo:1|c|@bar", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 1, + CLabels: map[string]string{}, + }, + }, + }, { + name: "zero sampling factor", + in: "foo:2|c|@0", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: 2, + CLabels: map[string]string{}, + }, + }, + }, { + 
name: "illegal stat type", + in: "foo:2|t", + }, + { + name: "empty metric name", + in: ":100|ms", + }, + { + name: "empty component", + in: "foo:1|c|", + }, + { + name: "invalid utf8", + in: "invalid\xc3\x28utf8:1|c", + }, + { + name: "some invalid utf8", + in: "valid_utf8:1|c\ninvalid\xc3\x28utf8:1|c", + out: event.Events{ + &event.CounterEvent{ + CMetricName: "valid_utf8", + CValue: 1, + CLabels: map[string]string{}, + }, + }, + }, + } + + for k, l := range []statsDPacketHandler{&listener.StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{listener.StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} { + events := make(chan event.Events, 32) + l.SetEventHandler(&event.UnbufferedEventHandler{C: events}) + for i, scenario := range scenarios { + l.HandlePacket([]byte(scenario.in), udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived) + + le := len(events) + // Flatten actual events. + actual := event.Events{} + for i := 0; i < le; i++ { + actual = append(actual, <-events...) + } + + if len(actual) != len(scenario.out) { + t.Fatalf("%d.%d. Expected %d events, got %d in scenario '%s'", k, i, len(scenario.out), len(actual), scenario.name) + } + + for j, expected := range scenario.out { + if !reflect.DeepEqual(&expected, &actual[j]) { + t.Fatalf("%d.%d.%d. Expected %#v, got %#v in scenario '%s'", k, i, j, expected, actual[j], scenario.name) + } + } + } + } +} diff --git a/event_test.go b/event_test.go index 97a2722..e207b61 100644 --- a/event_test.go +++ b/event_test.go @@ -1,80 +1,81 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "testing" - "time" - - "github.com/prometheus/statsd_exporter/pkg/clock" -) - -func TestEventThresholdFlush(t *testing.T) { - c := make(chan Events, 100) - // We're not going to flush during this test, so the duration doesn't matter. - eq := newEventQueue(c, 5, time.Second) - e := make(Events, 13) - go func() { - eq.queue(e) - }() - - batch := <-c - if len(batch) != 5 { - t.Fatalf("Expected event batch to be 5 elements, but got %v", len(batch)) - } - batch = <-c - if len(batch) != 5 { - t.Fatalf("Expected event batch to be 5 elements, but got %v", len(batch)) - } - batch = <-c - if len(batch) != 3 { - t.Fatalf("Expected event batch to be 3 elements, but got %v", len(batch)) - } -} - -func TestEventIntervalFlush(t *testing.T) { - // Mock a time.NewTicker - tickerCh := make(chan time.Time) - clock.ClockInstance = &clock.Clock{ - TickerCh: tickerCh, - } - clock.ClockInstance.Instant = time.Unix(0, 0) - - c := make(chan Events, 100) - eq := newEventQueue(c, 1000, time.Second*1000) - e := make(Events, 10) - eq.queue(e) - - if eq.len() != 10 { - t.Fatal("Expected 10 events to be queued, but got", eq.len()) - } - - if len(eq.c) != 0 { - t.Fatal("Expected 0 events in the event channel, but got", len(eq.c)) - } - - // Tick time forward to trigger a flush - clock.ClockInstance.Instant = time.Unix(10000, 0) - clock.ClockInstance.TickerCh <- time.Unix(10000, 0) - - events := <-eq.c - if eq.len() != 0 { - t.Fatal("Expected 0 events to be queued, but got", eq.len()) - } - - if len(events) != 10 { - t.Fatal("Expected 10 events in the 
event channel, but got", len(events)) - } - -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "testing" + "time" + + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/event" +) + +func TestEventThresholdFlush(t *testing.T) { + c := make(chan event.Events, 100) + // We're not going to flush during this test, so the duration doesn't matter. + eq := event.NewEventQueue(c, 5, time.Second, eventsFlushed) + e := make(event.Events, 13) + go func() { + eq.Queue(e, &eventsFlushed) + }() + + batch := <-c + if len(batch) != 5 { + t.Fatalf("Expected event batch to be 5 elements, but got %v", len(batch)) + } + batch = <-c + if len(batch) != 5 { + t.Fatalf("Expected event batch to be 5 elements, but got %v", len(batch)) + } + batch = <-c + if len(batch) != 3 { + t.Fatalf("Expected event batch to be 3 elements, but got %v", len(batch)) + } +} + +func TestEventIntervalFlush(t *testing.T) { + // Mock a time.NewTicker + tickerCh := make(chan time.Time) + clock.ClockInstance = &clock.Clock{ + TickerCh: tickerCh, + } + clock.ClockInstance.Instant = time.Unix(0, 0) + + c := make(chan event.Events, 100) + eq := event.NewEventQueue(c, 1000, time.Second*1000, eventsFlushed) + e := make(event.Events, 10) + eq.Queue(e, &eventsFlushed) + + if eq.Len() != 10 { + t.Fatal("Expected 10 events to be queued, but got", eq.Len()) + } + + if len(eq.C) != 0 { + t.Fatal("Expected 0 
events in the event channel, but got", len(eq.C)) + } + + // Tick time forward to trigger a flush + clock.ClockInstance.Instant = time.Unix(10000, 0) + clock.ClockInstance.TickerCh <- time.Unix(10000, 0) + + events := <-eq.C + if eq.Len() != 0 { + t.Fatal("Expected 0 events to be queued, but got", eq.Len()) + } + + if len(events) != 10 { + t.Fatal("Expected 10 events in the event channel, but got", len(events)) + } + +} diff --git a/exporter_benchmark_test.go b/exporter_benchmark_test.go index 1c86479..170539c 100644 --- a/exporter_benchmark_test.go +++ b/exporter_benchmark_test.go @@ -1,154 +1,157 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "testing" - - "github.com/go-kit/kit/log" - "github.com/prometheus/statsd_exporter/pkg/mapper" -) - -func benchmarkUDPListener(times int, b *testing.B) { - input := []string{ - "foo1:2|c", - "foo2:3|g", - "foo3:200|ms", - "foo4:100|c|#tag1:bar,tag2:baz", - "foo5:100|c|#tag1:bar,#tag2:baz", - "foo6:100|c|#09digits:0,tag.with.dots:1", - "foo10:100|c|@0.1|#tag1:bar,#tag2:baz", - "foo11:100|c|@0.1|#tag1:foo:bar", - "foo15:200|ms:300|ms:5|c|@0.1:6|g\nfoo15a:1|c:5|ms", - "some_very_useful_metrics_with_quite_a_log_name:13|c", - } - bytesInput := make([]string, len(input)*times) - for run := 0; run < times; run++ { - for i := 0; i < len(input); i++ { - bytesInput[run*len(input)+i] = fmt.Sprintf("run%d%s", run, input[i]) - } - } - for n := 0; n < b.N; n++ { - // there are more events than input lines, need bigger buffer - events := make(chan Events, len(bytesInput)*times*2) - l := StatsDUDPListener{eventHandler: &unbufferedEventHandler{c: events}} - - for i := 0; i < times; i++ { - for _, line := range bytesInput { - l.handlePacket([]byte(line)) - } - } - } -} - -func BenchmarkUDPListener1(b *testing.B) { - benchmarkUDPListener(1, b) -} -func BenchmarkUDPListener5(b *testing.B) { - benchmarkUDPListener(5, b) -} -func BenchmarkUDPListener50(b *testing.B) { - benchmarkUDPListener(50, b) -} - -func BenchmarkExporterListener(b *testing.B) { - events := Events{ - &CounterEvent{ // simple counter - metricName: "counter", - value: 2, - }, - &GaugeEvent{ // simple gauge - metricName: "gauge", - value: 10, - }, - &TimerEvent{ // simple timer - metricName: "timer", - value: 200, - }, - &TimerEvent{ // simple histogram - metricName: "histogram.test", - value: 200, - }, - &CounterEvent{ // simple_tags - metricName: "simple_tags", - value: 100, - labels: map[string]string{ - "alpha": "bar", - "bravo": "baz", - }, - }, - &CounterEvent{ // slightly different tags - metricName: "simple_tags", - value: 100, - labels: map[string]string{ - "alpha": "bar", 
- "charlie": "baz", - }, - }, - &CounterEvent{ // and even more different tags - metricName: "simple_tags", - value: 100, - labels: map[string]string{ - "alpha": "bar", - "bravo": "baz", - "golf": "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", - }, - }, - &CounterEvent{ // datadog tag extension with complex tags - metricName: "foo", - value: 100, - labels: map[string]string{ - "action": "test", - "application": "testapp", - "application_component": "testcomp", - "application_role": "test_role", - "category": "category", - "controller": "controller", - "deployed_to": "production", - "kube_deployment": "deploy", - "kube_namespace": "kube-production", - "method": "get", - "version": "5.2.8374", - "status": "200", - "status_range": "2xx", - }, - }, - } - config := ` -mappings: -- match: histogram.test - timer_type: histogram - name: "histogram_test" -` - - testMapper := &mapper.MetricMapper{} - err := testMapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - ex := NewExporter(testMapper, log.NewNopLogger()) - for i := 0; i < b.N; i++ { - ec := make(chan Events, 1000) - go func() { - for i := 0; i < 1000; i++ { - ec <- events - } - close(ec) - }() - - ex.Listen(ec) - } -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "testing" + + "github.com/go-kit/kit/log" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/exporter" + "github.com/prometheus/statsd_exporter/pkg/listener" + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +func benchmarkUDPListener(times int, b *testing.B) { + input := []string{ + "foo1:2|c", + "foo2:3|g", + "foo3:200|ms", + "foo4:100|c|#tag1:bar,tag2:baz", + "foo5:100|c|#tag1:bar,#tag2:baz", + "foo6:100|c|#09digits:0,tag.with.dots:1", + "foo10:100|c|@0.1|#tag1:bar,#tag2:baz", + "foo11:100|c|@0.1|#tag1:foo:bar", + "foo15:200|ms:300|ms:5|c|@0.1:6|g\nfoo15a:1|c:5|ms", + "some_very_useful_metrics_with_quite_a_log_name:13|c", + } + bytesInput := make([]string, len(input)*times) + for run := 0; run < times; run++ { + for i := 0; i < len(input); i++ { + bytesInput[run*len(input)+i] = fmt.Sprintf("run%d%s", run, input[i]) + } + } + for n := 0; n < b.N; n++ { + // there are more events than input lines, need bigger buffer + events := make(chan event.Events, len(bytesInput)*times*2) + l := listener.StatsDUDPListener{EventHandler: &event.UnbufferedEventHandler{C: events}} + + for i := 0; i < times; i++ { + for _, line := range bytesInput { + l.HandlePacket([]byte(line), udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived) + } + } + } +} + +func BenchmarkUDPListener1(b *testing.B) { + benchmarkUDPListener(1, b) +} +func BenchmarkUDPListener5(b *testing.B) { + benchmarkUDPListener(5, b) +} +func BenchmarkUDPListener50(b *testing.B) { + benchmarkUDPListener(50, b) +} + +func BenchmarkExporterListener(b *testing.B) { + events := event.Events{ + &event.CounterEvent{ // simple counter + CMetricName: "counter", + CValue: 2, + }, + &event.GaugeEvent{ // simple gauge + GMetricName: "gauge", + GValue: 10, + }, + &event.TimerEvent{ // simple timer + TMetricName: "timer", + TValue: 200, + }, + &event.TimerEvent{ // simple histogram + TMetricName: 
"histogram.test", + TValue: 200, + }, + &event.CounterEvent{ // simple_tags + CMetricName: "simple_tags", + CValue: 100, + CLabels: map[string]string{ + "alpha": "bar", + "bravo": "baz", + }, + }, + &event.CounterEvent{ // slightly different tags + CMetricName: "simple_tags", + CValue: 100, + CLabels: map[string]string{ + "alpha": "bar", + "charlie": "baz", + }, + }, + &event.CounterEvent{ // and even more different tags + CMetricName: "simple_tags", + CValue: 100, + CLabels: map[string]string{ + "alpha": "bar", + "bravo": "baz", + "golf": "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", + }, + }, + &event.CounterEvent{ // datadog tag extension with complex tags + CMetricName: "foo", + CValue: 100, + CLabels: map[string]string{ + "action": "test", + "application": "testapp", + "application_component": "testcomp", + "application_role": "test_role", + "category": "category", + "controller": "controller", + "deployed_to": "production", + "kube_deployment": "deploy", + "kube_namespace": "kube-production", + "method": "get", + "version": "5.2.8374", + "status": "200", + "status_range": "2xx", + }, + }, + } + config := ` +mappings: +- match: histogram.test + timer_type: histogram + name: "histogram_test" +` + + testMapper := &mapper.MetricMapper{} + err := testMapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + ex := exporter.NewExporter(testMapper, log.NewNopLogger()) + for i := 0; i < b.N; i++ { + ec := make(chan event.Events, 1000) + go func() { + for i := 0; i < 1000; i++ { + ec <- events + } + close(ec) + }() + + ex.Listen(ec, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + } +} diff --git a/exporter_test.go b/exporter_test.go index c64a5a2..4b9fa61 100644 --- a/exporter_test.go +++ b/exporter_test.go @@ -1,964 +1,969 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// 
you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "net" - "testing" - "time" - - "github.com/go-kit/kit/log" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/statsd_exporter/pkg/clock" - "github.com/prometheus/statsd_exporter/pkg/mapper" -) - -// TestNegativeCounter validates when we send a negative -// number to a counter that we no longer panic the Exporter Listener. -func TestNegativeCounter(t *testing.T) { - defer func() { - if e := recover(); e != nil { - err := e.(error) - if err.Error() == "counter cannot decrease in value" { - t.Fatalf("Counter was negative and causes a panic.") - } else { - t.Fatalf("Unknown panic and error: %q", err.Error()) - } - } - }() - - events := make(chan Events) - go func() { - c := Events{ - &CounterEvent{ - metricName: "foo", - value: -1, - }, - } - events <- c - close(events) - }() - - errorCounter := errorEventStats.WithLabelValues("illegal_negative_counter") - prev := getTelemetryCounterValue(errorCounter) - - testMapper := mapper.MetricMapper{} - testMapper.InitCache(0) - - ex := NewExporter(&testMapper, log.NewNopLogger()) - ex.Listen(events) - - updated := getTelemetryCounterValue(errorCounter) - if updated-prev != 1 { - t.Fatal("Illegal negative counter error not counted") - } -} - -// TestInconsistentLabelSets validates that the exporter will register -// and record metrics with the same metric name but inconsistent label -// sets e.g foo{a="1"} and foo{b="1"} -func 
TestInconsistentLabelSets(t *testing.T) { - firstLabelSet := make(map[string]string) - secondLabelSet := make(map[string]string) - metricNames := [4]string{"counter_test", "gauge_test", "histogram_test", "summary_test"} - - firstLabelSet["foo"] = "1" - secondLabelSet["foo"] = "1" - secondLabelSet["bar"] = "2" - - events := make(chan Events) - go func() { - c := Events{ - &CounterEvent{ - metricName: "counter_test", - value: 1, - labels: firstLabelSet, - }, - &CounterEvent{ - metricName: "counter_test", - value: 1, - labels: secondLabelSet, - }, - &GaugeEvent{ - metricName: "gauge_test", - value: 1, - labels: firstLabelSet, - }, - &GaugeEvent{ - metricName: "gauge_test", - value: 1, - labels: secondLabelSet, - }, - &TimerEvent{ - metricName: "histogram.test", - value: 1, - labels: firstLabelSet, - }, - &TimerEvent{ - metricName: "histogram.test", - value: 1, - labels: secondLabelSet, - }, - &TimerEvent{ - metricName: "summary_test", - value: 1, - labels: firstLabelSet, - }, - &TimerEvent{ - metricName: "summary_test", - value: 1, - labels: secondLabelSet, - }, - } - events <- c - close(events) - }() - - config := ` -mappings: -- match: histogram.test - timer_type: histogram - name: "histogram_test" -` - testMapper := &mapper.MetricMapper{} - err := testMapper.InitFromYAMLString(config, 0) - if err != nil { - t.Fatalf("Config load error: %s %s", config, err) - } - - ex := NewExporter(testMapper, log.NewNopLogger()) - ex.Listen(events) - - metrics, err := prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatalf("Cannot gather from DefaultGatherer: %v", err) - } - - for _, metricName := range metricNames { - firstMetric := getFloat64(metrics, metricName, firstLabelSet) - secondMetric := getFloat64(metrics, metricName, secondLabelSet) - - if firstMetric == nil { - t.Fatalf("Could not find time series with first label set for metric: %s", metricName) - } - if secondMetric == nil { - t.Fatalf("Could not find time series with second label set for metric: %s", 
metricName) - } - } -} - -// TestLabelParsing verifies that labels getting parsed out of metric -// names are being properly created. -func TestLabelParsing(t *testing.T) { - codes := [2]string{"200", "300"} - - events := make(chan Events) - go func() { - c := Events{ - &CounterEvent{ - metricName: "counter.test.200", - value: 1, - labels: make(map[string]string), - }, - &CounterEvent{ - metricName: "counter.test.300", - value: 1, - labels: make(map[string]string), - }, - } - events <- c - close(events) - }() - - config := ` -mappings: -- match: counter.test.* - name: "counter_test" - labels: - code: $1 -` - - testMapper := &mapper.MetricMapper{} - err := testMapper.InitFromYAMLString(config, 0) - if err != nil { - t.Fatalf("Config load error: %s %s", config, err) - } - - ex := NewExporter(testMapper, log.NewNopLogger()) - ex.Listen(events) - - metrics, err := prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatalf("Cannot gather from DefaultGatherer: %v", err) - } - - labels := make(map[string]string) - - for _, code := range codes { - labels["code"] = code - if getFloat64(metrics, "counter_test", labels) == nil { - t.Fatalf("Could not find metrics for counter_test code %s", code) - } - } -} - -// TestConflictingMetrics validates that the exporter will not register metrics -// of different types that have overlapping names. 
-func TestConflictingMetrics(t *testing.T) { - scenarios := []struct { - name string - expected []float64 - in Events - }{ - { - name: "counter vs gauge", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "cvg_test", - value: 1, - }, - &GaugeEvent{ - metricName: "cvg_test", - value: 2, - }, - }, - }, - { - name: "counter vs gauge with different labels", - expected: []float64{1, 2}, - in: Events{ - &CounterEvent{ - metricName: "cvgl_test", - value: 1, - labels: map[string]string{"tag": "1"}, - }, - &CounterEvent{ - metricName: "cvgl_test", - value: 2, - labels: map[string]string{"tag": "2"}, - }, - &GaugeEvent{ - metricName: "cvgl_test", - value: 3, - labels: map[string]string{"tag": "1"}, - }, - }, - }, - { - name: "counter vs gauge with same labels", - expected: []float64{3}, - in: Events{ - &CounterEvent{ - metricName: "cvgsl_test", - value: 1, - labels: map[string]string{"tag": "1"}, - }, - &CounterEvent{ - metricName: "cvgsl_test", - value: 2, - labels: map[string]string{"tag": "1"}, - }, - &GaugeEvent{ - metricName: "cvgsl_test", - value: 3, - labels: map[string]string{"tag": "1"}, - }, - }, - }, - { - name: "gauge vs counter", - expected: []float64{2}, - in: Events{ - &GaugeEvent{ - metricName: "gvc_test", - value: 2, - }, - &CounterEvent{ - metricName: "gvc_test", - value: 1, - }, - }, - }, - { - name: "counter vs histogram", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "histogram_test1", - value: 1, - }, - &TimerEvent{ - metricName: "histogram.test1", - value: 2, - }, - }, - }, - { - name: "counter vs histogram sum", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "histogram_test1_sum", - value: 1, - }, - &TimerEvent{ - metricName: "histogram.test1", - value: 2, - }, - }, - }, - { - name: "counter vs histogram count", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "histogram_test2_count", - value: 1, - }, - &TimerEvent{ - metricName: "histogram.test2", - value: 2, 
- }, - }, - }, - { - name: "counter vs histogram bucket", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "histogram_test3_bucket", - value: 1, - }, - &TimerEvent{ - metricName: "histogram.test3", - value: 2, - }, - }, - }, - { - name: "counter vs summary quantile", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "cvsq_test", - value: 1, - }, - &TimerEvent{ - metricName: "cvsq_test", - value: 2, - }, - }, - }, - { - name: "counter vs summary count", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "cvsc_count", - value: 1, - }, - &TimerEvent{ - metricName: "cvsc", - value: 2, - }, - }, - }, - { - name: "counter vs summary sum", - expected: []float64{1}, - in: Events{ - &CounterEvent{ - metricName: "cvss_sum", - value: 1, - }, - &TimerEvent{ - metricName: "cvss", - value: 2, - }, - }, - }, - } - - config := ` -mappings: -- match: histogram.* - timer_type: histogram - name: "histogram_${1}" -` - for _, s := range scenarios { - t.Run(s.name, func(t *testing.T) { - testMapper := &mapper.MetricMapper{} - err := testMapper.InitFromYAMLString(config, 0) - if err != nil { - t.Fatalf("Config load error: %s %s", config, err) - } - - events := make(chan Events) - go func() { - events <- s.in - close(events) - }() - ex := NewExporter(testMapper, log.NewNopLogger()) - ex.Listen(events) - - metrics, err := prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatalf("Cannot gather from DefaultGatherer: %v", err) - } - - for i, e := range s.expected { - mn := s.in[i].MetricName() - m := getFloat64(metrics, mn, s.in[i].Labels()) - - if m == nil { - t.Fatalf("Could not find time series with metric name '%v'", mn) - } - - if *m != e { - t.Fatalf("Expected to get %v, but got %v instead", e, *m) - } - } - }) - } -} - -// TestEmptyStringMetric validates when a metric name ends up -// being the empty string after applying the match replacements -// tha we don't panic the Exporter Listener. 
-func TestEmptyStringMetric(t *testing.T) { - events := make(chan Events) - go func() { - c := Events{ - &CounterEvent{ - metricName: "foo_bar", - value: 1, - }, - } - events <- c - close(events) - }() - - config := ` -mappings: -- match: .*_bar - match_type: regex - name: "${1}" -` - testMapper := &mapper.MetricMapper{} - err := testMapper.InitFromYAMLString(config, 0) - if err != nil { - t.Fatalf("Config load error: %s %s", config, err) - } - - errorCounter := errorEventStats.WithLabelValues("empty_metric_name") - prev := getTelemetryCounterValue(errorCounter) - - ex := NewExporter(testMapper, log.NewNopLogger()) - ex.Listen(events) - - updated := getTelemetryCounterValue(errorCounter) - if updated-prev != 1 { - t.Fatal("Empty metric name error event not counted") - } -} - -// TestInvalidUtf8InDatadogTagValue validates robustness of exporter listener -// against datadog tags with invalid tag values. -// It sends the same tags first with a valid value, then with an invalid one. -// The exporter should not panic, but drop the invalid event -func TestInvalidUtf8InDatadogTagValue(t *testing.T) { - defer func() { - if e := recover(); e != nil { - err := e.(error) - t.Fatalf("Exporter listener should not panic on bad utf8: %q", err.Error()) - } - }() - - events := make(chan Events) - ueh := &unbufferedEventHandler{c: events} - - go func() { - for _, l := range []statsDPacketHandler{&StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} { - l.SetEventHandler(ueh) - l.handlePacket([]byte("bar:200|c|#tag:value\nbar:200|c|#tag:\xc3\x28invalid")) - } - close(events) - }() - - testMapper := mapper.MetricMapper{} - testMapper.InitCache(0) - - ex := NewExporter(&testMapper, log.NewNopLogger()) - ex.Listen(events) -} - -// In the case of someone starting the statsd exporter with no mapping file specified -// which is valid, we want to make sure that the default quantile metrics are generated 
-// as well as the sum/count metrics -func TestSummaryWithQuantilesEmptyMapping(t *testing.T) { - // Start exporter with a synchronous channel - events := make(chan Events) - go func() { - testMapper := mapper.MetricMapper{} - testMapper.InitCache(0) - - ex := NewExporter(&testMapper, log.NewNopLogger()) - ex.Listen(events) - }() - - name := "default_foo" - c := Events{ - &TimerEvent{ - metricName: name, - value: 300, - }, - } - events <- c - events <- Events{} - close(events) - - metrics, err := prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatal("Gather should not fail: ", err) - } - - var metricFamily *dto.MetricFamily - for _, m := range metrics { - if *m.Name == name { - metricFamily = m - break - } - } - - if metricFamily == nil { - t.Fatal("Metric could not be found") - } - - quantiles := metricFamily.Metric[0].Summary.Quantile - if len(quantiles) == 0 { - t.Fatal("Summary has no quantiles available") - } -} - -func TestHistogramUnits(t *testing.T) { - // Start exporter with a synchronous channel - events := make(chan Events) - go func() { - testMapper := mapper.MetricMapper{} - testMapper.InitCache(0) - ex := NewExporter(&testMapper, log.NewNopLogger()) - ex.mapper.Defaults.TimerType = mapper.TimerTypeHistogram - ex.Listen(events) - }() - - // Synchronously send a statsd event to wait for handleEvent execution. - // Then close events channel to stop a listener. 
- name := "foo" - c := Events{ - &TimerEvent{ - metricName: name, - value: 300, - }, - } - events <- c - events <- Events{} - close(events) - - // Check histogram value - metrics, err := prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatalf("Cannot gather from DefaultGatherer: %v", err) - } - value := getFloat64(metrics, name, prometheus.Labels{}) - if value == nil { - t.Fatal("Histogram value should not be nil") - } - if *value == 300 { - t.Fatalf("Histogram observations not scaled into Seconds") - } else if *value != .300 { - t.Fatalf("Received unexpected value for histogram observation %f != .300", *value) - } -} -func TestCounterIncrement(t *testing.T) { - // Start exporter with a synchronous channel - events := make(chan Events) - go func() { - testMapper := mapper.MetricMapper{} - testMapper.InitCache(0) - ex := NewExporter(&testMapper, log.NewNopLogger()) - ex.Listen(events) - }() - - // Synchronously send a statsd event to wait for handleEvent execution. - // Then close events channel to stop a listener. - name := "foo_counter" - labels := map[string]string{ - "foo": "bar", - } - c := Events{ - &CounterEvent{ - metricName: name, - value: 1, - labels: labels, - }, - &CounterEvent{ - metricName: name, - value: 1, - labels: labels, - }, - } - events <- c - // Push empty event so that we block until the first event is consumed. 
- events <- Events{} - close(events) - - // Check histogram value - metrics, err := prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatalf("Cannot gather from DefaultGatherer: %v", err) - } - value := getFloat64(metrics, name, labels) - if value == nil { - t.Fatal("Counter value should not be nil") - } - if *value != 2 { - t.Fatalf("Counter wasn't incremented properly") - } -} - -type statsDPacketHandler interface { - handlePacket(packet []byte) - SetEventHandler(eh eventHandler) -} - -type mockStatsDTCPListener struct { - StatsDTCPListener - log.Logger -} - -func (ml *mockStatsDTCPListener) handlePacket(packet []byte) { - // Forcing IPv4 because the TravisCI build environment does not have IPv6 - // addresses. - lc, err := net.ListenTCP("tcp4", nil) - if err != nil { - panic(fmt.Sprintf("mockStatsDTCPListener: listen failed: %v", err)) - } - - defer lc.Close() - - go func() { - cc, err := net.DialTCP("tcp", nil, lc.Addr().(*net.TCPAddr)) - if err != nil { - panic(fmt.Sprintf("mockStatsDTCPListener: dial failed: %v", err)) - } - - defer cc.Close() - - n, err := cc.Write(packet) - if err != nil || n != len(packet) { - panic(fmt.Sprintf("mockStatsDTCPListener: write failed: %v,%d", err, n)) - } - }() - - sc, err := lc.AcceptTCP() - if err != nil { - panic(fmt.Sprintf("mockStatsDTCPListener: accept failed: %v", err)) - } - ml.handleConn(sc) -} - -// TestTtlExpiration validates expiration of time series. 
-// foobar metric without mapping should expire with default ttl of 1s -// bazqux metric should expire with ttl of 2s -func TestTtlExpiration(t *testing.T) { - // Mock a time.NewTicker - tickerCh := make(chan time.Time) - clock.ClockInstance = &clock.Clock{ - TickerCh: tickerCh, - } - - config := ` -defaults: - ttl: 1s -mappings: -- match: bazqux.* - name: bazqux - ttl: 2s -` - // Create mapper from config and start an Exporter with a synchronous channel - testMapper := &mapper.MetricMapper{} - err := testMapper.InitFromYAMLString(config, 0) - if err != nil { - t.Fatalf("Config load error: %s %s", config, err) - } - events := make(chan Events) - defer close(events) - go func() { - ex := NewExporter(testMapper, log.NewNopLogger()) - ex.Listen(events) - }() - - ev := Events{ - // event with default ttl = 1s - &GaugeEvent{ - metricName: "foobar", - value: 200, - }, - // event with ttl = 2s from a mapping - &TimerEvent{ - metricName: "bazqux.main", - value: 42000, - }, - } - - var metrics []*dto.MetricFamily - var foobarValue *float64 - var bazquxValue *float64 - - // Step 1. Send events with statsd metrics. - // Send empty Events to wait for events are handled. - // saveLabelValues will use fake instant as a lastRegisteredAt time. - clock.ClockInstance.Instant = time.Unix(0, 0) - events <- ev - events <- Events{} - - // Check values - metrics, err = prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatal("Gather should not fail") - } - foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{}) - bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{}) - if foobarValue == nil || bazquxValue == nil { - t.Fatalf("Gauge `foobar` and Summary `bazqux` should be gathered") - } - if *foobarValue != 200 { - t.Fatalf("Gauge `foobar` observation %f is not expected. Should be 200", *foobarValue) - } - if *bazquxValue != 42 { - t.Fatalf("Summary `bazqux` observation %f is not expected. Should be 42", *bazquxValue) - } - - // Step 2. 
Increase Instant to emulate metrics expiration after 1s - clock.ClockInstance.Instant = time.Unix(1, 10) - clock.ClockInstance.TickerCh <- time.Unix(0, 0) - events <- Events{} - - // Check values - metrics, err = prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatal("Gather should not fail") - } - foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{}) - bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{}) - if foobarValue != nil { - t.Fatalf("Gauge `foobar` should be expired") - } - if bazquxValue == nil { - t.Fatalf("Summary `bazqux` should be gathered") - } - if *bazquxValue != 42 { - t.Fatalf("Summary `bazqux` observation %f is not expected. Should be 42", *bazquxValue) - } - - // Step 3. Increase Instant to emulate metrics expiration after 2s - clock.ClockInstance.Instant = time.Unix(2, 200) - clock.ClockInstance.TickerCh <- time.Unix(0, 0) - events <- Events{} - - // Check values - metrics, err = prometheus.DefaultGatherer.Gather() - if err != nil { - t.Fatal("Gather should not fail") - } - foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{}) - bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{}) - if bazquxValue != nil { - t.Fatalf("Summary `bazqux` should be expired") - } - if foobarValue != nil { - t.Fatalf("Gauge `foobar` should not be gathered after expiration") - } -} - -func TestHashLabelNames(t *testing.T) { - r := newRegistry(nil) - // Validate value hash changes and name has doesn't when just the value changes. - hash1, _ := r.hashLabels(map[string]string{ - "label": "value1", - }) - hash2, _ := r.hashLabels(map[string]string{ - "label": "value2", - }) - if hash1.names != hash2.names { - t.Fatal("Hash of label names should match, but doesn't") - } - if hash1.values == hash2.values { - t.Fatal("Hash of label names shouldn't match, but do") - } - - // Validate value and name hashes change when the name changes. 
- hash1, _ = r.hashLabels(map[string]string{ - "label1": "value", - }) - hash2, _ = r.hashLabels(map[string]string{ - "label2": "value", - }) - if hash1.names == hash2.names { - t.Fatal("Hash of label names shouldn't match, but do") - } - if hash1.values == hash2.values { - t.Fatal("Hash of label names shouldn't match, but do") - } -} - -// getFloat64 search for metric by name in array of MetricFamily and then search a value by labels. -// Method returns a value or nil if metric is not found. -func getFloat64(metrics []*dto.MetricFamily, name string, labels prometheus.Labels) *float64 { - var metricFamily *dto.MetricFamily - for _, m := range metrics { - if *m.Name == name { - metricFamily = m - break - } - } - if metricFamily == nil { - return nil - } - - var metric *dto.Metric - labelStr := fmt.Sprintf("%v", labels) - for _, m := range metricFamily.Metric { - l := labelPairsAsLabels(m.GetLabel()) - ls := fmt.Sprintf("%v", l) - if labelStr == ls { - metric = m - break - } - } - if metric == nil { - return nil - } - - var value float64 - if metric.Gauge != nil { - value = metric.Gauge.GetValue() - return &value - } - if metric.Counter != nil { - value = metric.Counter.GetValue() - return &value - } - if metric.Histogram != nil { - value = metric.Histogram.GetSampleSum() - return &value - } - if metric.Summary != nil { - value = metric.Summary.GetSampleSum() - return &value - } - if metric.Untyped != nil { - value = metric.Untyped.GetValue() - return &value - } - panic(fmt.Errorf("collected a non-gauge/counter/histogram/summary/untyped metric: %s", metric)) -} - -func labelPairsAsLabels(pairs []*dto.LabelPair) (labels prometheus.Labels) { - labels = prometheus.Labels{} - for _, pair := range pairs { - if pair.Name == nil { - continue - } - value := "" - if pair.Value != nil { - value = *pair.Value - } - labels[*pair.Name] = value - } - return -} - -func getTelemetryCounterValue(counter prometheus.Counter) float64 { - var metric dto.Metric - err := 
counter.Write(&metric) - if err != nil { - return 0.0 - } - return metric.Counter.GetValue() -} - -func BenchmarkParseDogStatsDTags(b *testing.B) { - scenarios := map[string]string{ - "1 tag w/hash": "#test:tag", - "1 tag w/o hash": "test:tag", - "2 tags, mixed hashes": "tag1:test,#tag2:test", - "3 long tags": "tag1:reallylongtagthisisreallylong,tag2:anotherreallylongtag,tag3:thisisyetanotherextraordinarilylongtag", - "a-z tags": "a:0,b:1,c:2,d:3,e:4,f:5,g:6,h:7,i:8,j:9,k:0,l:1,m:2,n:3,o:4,p:5,q:6,r:7,s:8,t:9,u:0,v:1,w:2,x:3,y:4,z:5", - } - - for name, tags := range scenarios { - b.Run(name, func(b *testing.B) { - for n := 0; n < b.N; n++ { - labels := map[string]string{} - parseDogStatsDTags(tags, labels, log.NewNopLogger()) - } - }) - } -} - -func BenchmarkHashNameAndLabels(b *testing.B) { - scenarios := []struct { - name string - metric string - labels map[string]string - }{ - { - name: "no labels", - labels: map[string]string{}, - }, { - name: "one label", - labels: map[string]string{ - "label": "value", - }, - }, { - name: "many labels", - labels: map[string]string{ - "label0": "value", - "label1": "value", - "label2": "value", - "label3": "value", - "label4": "value", - "label5": "value", - "label6": "value", - "label7": "value", - "label8": "value", - "label9": "value", - }, - }, - } - - r := newRegistry(nil) - for _, s := range scenarios { - b.Run(s.name, func(b *testing.B) { - for n := 0; n < b.N; n++ { - r.hashLabels(s.labels) - } - }) - } -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/exporter" + "github.com/prometheus/statsd_exporter/pkg/line" + "github.com/prometheus/statsd_exporter/pkg/listener" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/prometheus/statsd_exporter/pkg/registry" +) + +// TestNegativeCounter validates when we send a negative +// number to a counter that we no longer panic the Exporter Listener. +func TestNegativeCounter(t *testing.T) { + defer func() { + if e := recover(); e != nil { + err := e.(error) + if err.Error() == "counter cannot decrease in value" { + t.Fatalf("Counter was negative and causes a panic.") + } else { + t.Fatalf("Unknown panic and error: %q", err.Error()) + } + } + }() + + events := make(chan event.Events) + go func() { + c := event.Events{ + &event.CounterEvent{ + CMetricName: "foo", + CValue: -1, + }, + } + events <- c + close(events) + }() + + errorCounter := errorEventStats.WithLabelValues("illegal_negative_counter") + prev := getTelemetryCounterValue(errorCounter) + + testMapper := mapper.MetricMapper{} + testMapper.InitCache(0) + + ex := exporter.NewExporter(&testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + + updated := getTelemetryCounterValue(errorCounter) + if updated-prev != 1 { + t.Fatal("Illegal negative counter error not counted") + } +} + +// TestInconsistentLabelSets validates that the exporter will register +// and record metrics with the same metric name but inconsistent label +// sets e.g foo{a="1"} and foo{b="1"} +func 
TestInconsistentLabelSets(t *testing.T) { + firstLabelSet := make(map[string]string) + secondLabelSet := make(map[string]string) + metricNames := [4]string{"counter_test", "gauge_test", "histogram_test", "summary_test"} + + firstLabelSet["foo"] = "1" + secondLabelSet["foo"] = "1" + secondLabelSet["bar"] = "2" + + events := make(chan event.Events) + go func() { + c := event.Events{ + &event.CounterEvent{ + CMetricName: "counter_test", + CValue: 1, + CLabels: firstLabelSet, + }, + &event.CounterEvent{ + CMetricName: "counter_test", + CValue: 1, + CLabels: secondLabelSet, + }, + &event.GaugeEvent{ + GMetricName: "gauge_test", + GValue: 1, + GLabels: firstLabelSet, + }, + &event.GaugeEvent{ + GMetricName: "gauge_test", + GValue: 1, + GLabels: secondLabelSet, + }, + &event.TimerEvent{ + TMetricName: "histogram.test", + TValue: 1, + TLabels: firstLabelSet, + }, + &event.TimerEvent{ + TMetricName: "histogram.test", + TValue: 1, + TLabels: secondLabelSet, + }, + &event.TimerEvent{ + TMetricName: "summary_test", + TValue: 1, + TLabels: firstLabelSet, + }, + &event.TimerEvent{ + TMetricName: "summary_test", + TValue: 1, + TLabels: secondLabelSet, + }, + } + events <- c + close(events) + }() + + config := ` +mappings: +- match: histogram.test + timer_type: histogram + name: "histogram_test" +` + testMapper := &mapper.MetricMapper{} + err := testMapper.InitFromYAMLString(config, 0) + if err != nil { + t.Fatalf("Config load error: %s %s", config, err) + } + + ex := exporter.NewExporter(testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Cannot gather from DefaultGatherer: %v", err) + } + + for _, metricName := range metricNames { + firstMetric := getFloat64(metrics, metricName, firstLabelSet) + secondMetric := getFloat64(metrics, metricName, secondLabelSet) + + if firstMetric == nil { + 
t.Fatalf("Could not find time series with first label set for metric: %s", metricName) + } + if secondMetric == nil { + t.Fatalf("Could not find time series with second label set for metric: %s", metricName) + } + } +} + +// TestLabelParsing verifies that labels getting parsed out of metric +// names are being properly created. +func TestLabelParsing(t *testing.T) { + codes := [2]string{"200", "300"} + + events := make(chan event.Events) + go func() { + c := event.Events{ + &event.CounterEvent{ + CMetricName: "counter.test.200", + CValue: 1, + CLabels: make(map[string]string), + }, + &event.CounterEvent{ + CMetricName: "counter.test.300", + CValue: 1, + CLabels: make(map[string]string), + }, + } + events <- c + close(events) + }() + + config := ` +mappings: +- match: counter.test.* + name: "counter_test" + labels: + code: $1 +` + + testMapper := &mapper.MetricMapper{} + err := testMapper.InitFromYAMLString(config, 0) + if err != nil { + t.Fatalf("Config load error: %s %s", config, err) + } + + ex := exporter.NewExporter(testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Cannot gather from DefaultGatherer: %v", err) + } + + labels := make(map[string]string) + + for _, code := range codes { + labels["code"] = code + if getFloat64(metrics, "counter_test", labels) == nil { + t.Fatalf("Could not find metrics for counter_test code %s", code) + } + } +} + +// TestConflictingMetrics validates that the exporter will not register metrics +// of different types that have overlapping names. 
+func TestConflictingMetrics(t *testing.T) { + scenarios := []struct { + name string + expected []float64 + in event.Events + }{ + { + name: "counter vs gauge", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "cvg_test", + CValue: 1, + }, + &event.GaugeEvent{ + GMetricName: "cvg_test", + GValue: 2, + }, + }, + }, + { + name: "counter vs gauge with different labels", + expected: []float64{1, 2}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "cvgl_test", + CValue: 1, + CLabels: map[string]string{"tag": "1"}, + }, + &event.CounterEvent{ + CMetricName: "cvgl_test", + CValue: 2, + CLabels: map[string]string{"tag": "2"}, + }, + &event.GaugeEvent{ + GMetricName: "cvgl_test", + GValue: 3, + GLabels: map[string]string{"tag": "1"}, + }, + }, + }, + { + name: "counter vs gauge with same labels", + expected: []float64{3}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "cvgsl_test", + CValue: 1, + CLabels: map[string]string{"tag": "1"}, + }, + &event.CounterEvent{ + CMetricName: "cvgsl_test", + CValue: 2, + CLabels: map[string]string{"tag": "1"}, + }, + &event.GaugeEvent{ + GMetricName: "cvgsl_test", + GValue: 3, + GLabels: map[string]string{"tag": "1"}, + }, + }, + }, + { + name: "gauge vs counter", + expected: []float64{2}, + in: event.Events{ + &event.GaugeEvent{ + GMetricName: "gvc_test", + GValue: 2, + }, + &event.CounterEvent{ + CMetricName: "gvc_test", + CValue: 1, + }, + }, + }, + { + name: "counter vs histogram", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "histogram_test1", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "histogram.test1", + TValue: 2, + }, + }, + }, + { + name: "counter vs histogram sum", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "histogram_test1_sum", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "histogram.test1", + TValue: 2, + }, + }, + }, + { + name: "counter vs histogram count", + expected: 
[]float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "histogram_test2_count", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "histogram.test2", + TValue: 2, + }, + }, + }, + { + name: "counter vs histogram bucket", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "histogram_test3_bucket", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "histogram.test3", + TValue: 2, + }, + }, + }, + { + name: "counter vs summary quantile", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "cvsq_test", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "cvsq_test", + TValue: 2, + }, + }, + }, + { + name: "counter vs summary count", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "cvsc_count", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "cvsc", + TValue: 2, + }, + }, + }, + { + name: "counter vs summary sum", + expected: []float64{1}, + in: event.Events{ + &event.CounterEvent{ + CMetricName: "cvss_sum", + CValue: 1, + }, + &event.TimerEvent{ + TMetricName: "cvss", + TValue: 2, + }, + }, + }, + } + + config := ` +mappings: +- match: histogram.* + timer_type: histogram + name: "histogram_${1}" +` + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + testMapper := &mapper.MetricMapper{} + err := testMapper.InitFromYAMLString(config, 0) + if err != nil { + t.Fatalf("Config load error: %s %s", config, err) + } + + events := make(chan event.Events) + go func() { + events <- s.in + close(events) + }() + ex := exporter.NewExporter(testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Cannot gather from DefaultGatherer: %v", err) + } + + for i, e := range s.expected { + mn := s.in[i].MetricName() + m := getFloat64(metrics, mn, s.in[i].Labels()) + + 
if m == nil { +				t.Fatalf("Could not find time series with metric name '%v'", mn) +			} + +			if *m != e { +				t.Fatalf("Expected to get %v, but got %v instead", e, *m) +			} +			} +		}) +	} +} + +// TestEmptyStringMetric validates when a metric name ends up +// being the empty string after applying the match replacements +// that we don't panic the Exporter Listener. +func TestEmptyStringMetric(t *testing.T) { +	events := make(chan event.Events) +	go func() { +		c := event.Events{ +			&event.CounterEvent{ +				CMetricName: "foo_bar", +				CValue:      1, +			}, +		} +		events <- c +		close(events) +	}() + +	config := ` +mappings: +- match: .*_bar +  match_type: regex +  name: "${1}" +` +	testMapper := &mapper.MetricMapper{} +	err := testMapper.InitFromYAMLString(config, 0) +	if err != nil { +		t.Fatalf("Config load error: %s %s", config, err) +	} + +	errorCounter := errorEventStats.WithLabelValues("empty_metric_name") +	prev := getTelemetryCounterValue(errorCounter) + +	ex := exporter.NewExporter(testMapper, log.NewNopLogger()) +	ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + +	updated := getTelemetryCounterValue(errorCounter) +	if updated-prev != 1 { +		t.Fatal("Empty metric name error event not counted") +	} +} + +// TestInvalidUtf8InDatadogTagValue validates robustness of exporter listener +// against datadog tags with invalid tag values. +// It sends the same tags first with a valid value, then with an invalid one. 
+// The exporter should not panic, but drop the invalid event +func TestInvalidUtf8InDatadogTagValue(t *testing.T) { + defer func() { + if e := recover(); e != nil { + err := e.(error) + t.Fatalf("Exporter listener should not panic on bad utf8: %q", err.Error()) + } + }() + + events := make(chan event.Events) + ueh := &event.UnbufferedEventHandler{C: events} + + go func() { + for _, l := range []statsDPacketHandler{&listener.StatsDUDPListener{nil, nil, log.NewNopLogger()}, &mockStatsDTCPListener{listener.StatsDTCPListener{nil, nil, log.NewNopLogger()}, log.NewNopLogger()}} { + l.SetEventHandler(ueh) + l.HandlePacket([]byte("bar:200|c|#tag:value\nbar:200|c|#tag:\xc3\x28invalid"), udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived) + } + close(events) + }() + + testMapper := mapper.MetricMapper{} + testMapper.InitCache(0) + + ex := exporter.NewExporter(&testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) +} + +// In the case of someone starting the statsd exporter with no mapping file specified +// which is valid, we want to make sure that the default quantile metrics are generated +// as well as the sum/count metrics +func TestSummaryWithQuantilesEmptyMapping(t *testing.T) { + // Start exporter with a synchronous channel + events := make(chan event.Events) + go func() { + testMapper := mapper.MetricMapper{} + testMapper.InitCache(0) + + ex := exporter.NewExporter(&testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + }() + + name := "default_foo" + c := event.Events{ + &event.TimerEvent{ + TMetricName: name, + TValue: 300, + }, + } + events <- c + events <- event.Events{} + close(events) + + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatal("Gather should not fail: ", err) + } + + var metricFamily 
*dto.MetricFamily + for _, m := range metrics { + if *m.Name == name { + metricFamily = m + break + } + } + + if metricFamily == nil { + t.Fatal("Metric could not be found") + } + + quantiles := metricFamily.Metric[0].Summary.Quantile + if len(quantiles) == 0 { + t.Fatal("Summary has no quantiles available") + } +} + +func TestHistogramUnits(t *testing.T) { + // Start exporter with a synchronous channel + events := make(chan event.Events) + go func() { + testMapper := mapper.MetricMapper{} + testMapper.InitCache(0) + ex := exporter.NewExporter(&testMapper, log.NewNopLogger()) + ex.Mapper.Defaults.TimerType = mapper.TimerTypeHistogram + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + }() + + // Synchronously send a statsd event to wait for handleEvent execution. + // Then close events channel to stop a listener. + name := "foo" + c := event.Events{ + &event.TimerEvent{ + TMetricName: name, + TValue: 300, + }, + } + events <- c + events <- event.Events{} + close(events) + + // Check histogram value + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Cannot gather from DefaultGatherer: %v", err) + } + value := getFloat64(metrics, name, prometheus.Labels{}) + if value == nil { + t.Fatal("Histogram value should not be nil") + } + if *value == 300 { + t.Fatalf("Histogram observations not scaled into Seconds") + } else if *value != .300 { + t.Fatalf("Received unexpected value for histogram observation %f != .300", *value) + } +} +func TestCounterIncrement(t *testing.T) { + // Start exporter with a synchronous channel + events := make(chan event.Events) + go func() { + testMapper := mapper.MetricMapper{} + testMapper.InitCache(0) + ex := exporter.NewExporter(&testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + }() + + // Synchronously send a statsd event to wait for 
handleEvent execution. + // Then close events channel to stop a listener. + name := "foo_counter" + labels := map[string]string{ + "foo": "bar", + } + c := event.Events{ + &event.CounterEvent{ + CMetricName: name, + CValue: 1, + CLabels: labels, + }, + &event.CounterEvent{ + CMetricName: name, + CValue: 1, + CLabels: labels, + }, + } + events <- c + // Push empty event so that we block until the first event is consumed. + events <- event.Events{} + close(events) + + // Check histogram value + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Cannot gather from DefaultGatherer: %v", err) + } + value := getFloat64(metrics, name, labels) + if value == nil { + t.Fatal("Counter value should not be nil") + } + if *value != 2 { + t.Fatalf("Counter wasn't incremented properly") + } +} + +type statsDPacketHandler interface { + HandlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) + SetEventHandler(eh event.EventHandler) +} + +type mockStatsDTCPListener struct { + listener.StatsDTCPListener + log.Logger +} + +func (ml *mockStatsDTCPListener) HandlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + // Forcing IPv4 because the TravisCI build environment does not have IPv6 + // addresses. 
+ lc, err := net.ListenTCP("tcp4", nil) + if err != nil { + panic(fmt.Sprintf("mockStatsDTCPListener: listen failed: %v", err)) + } + + defer lc.Close() + + go func() { + cc, err := net.DialTCP("tcp", nil, lc.Addr().(*net.TCPAddr)) + if err != nil { + panic(fmt.Sprintf("mockStatsDTCPListener: dial failed: %v", err)) + } + + defer cc.Close() + + n, err := cc.Write(packet) + if err != nil || n != len(packet) { + panic(fmt.Sprintf("mockStatsDTCPListener: write failed: %v,%d", err, n)) + } + }() + + sc, err := lc.AcceptTCP() + if err != nil { + panic(fmt.Sprintf("mockStatsDTCPListener: accept failed: %v", err)) + } + ml.HandleConn(sc, linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, sampleErrors, samplesReceived, tagErrors, tagsReceived) +} + +// TestTtlExpiration validates expiration of time series. +// foobar metric without mapping should expire with default ttl of 1s +// bazqux metric should expire with ttl of 2s +func TestTtlExpiration(t *testing.T) { + // Mock a time.NewTicker + tickerCh := make(chan time.Time) + clock.ClockInstance = &clock.Clock{ + TickerCh: tickerCh, + } + + config := ` +defaults: + ttl: 1s +mappings: +- match: bazqux.* + name: bazqux + ttl: 2s +` + // Create mapper from config and start an Exporter with a synchronous channel + testMapper := &mapper.MetricMapper{} + err := testMapper.InitFromYAMLString(config, 0) + if err != nil { + t.Fatalf("Config load error: %s %s", config, err) + } + events := make(chan event.Events) + defer close(events) + go func() { + ex := exporter.NewExporter(testMapper, log.NewNopLogger()) + ex.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + }() + + ev := event.Events{ + // event with default ttl = 1s + &event.GaugeEvent{ + GMetricName: "foobar", + GValue: 200, + }, + // event with ttl = 2s from a mapping + &event.TimerEvent{ + TMetricName: "bazqux.main", + TValue: 42000, + }, + } + + var metrics []*dto.MetricFamily + var 
foobarValue *float64 + var bazquxValue *float64 + + // Step 1. Send events with statsd metrics. + // Send empty Events to wait for events are handled. + // saveLabelValues will use fake instant as a lastRegisteredAt time. + clock.ClockInstance.Instant = time.Unix(0, 0) + events <- ev + events <- event.Events{} + + // Check values + metrics, err = prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatal("Gather should not fail") + } + foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{}) + bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{}) + if foobarValue == nil || bazquxValue == nil { + t.Fatalf("Gauge `foobar` and Summary `bazqux` should be gathered") + } + if *foobarValue != 200 { + t.Fatalf("Gauge `foobar` observation %f is not expected. Should be 200", *foobarValue) + } + if *bazquxValue != 42 { + t.Fatalf("Summary `bazqux` observation %f is not expected. Should be 42", *bazquxValue) + } + + // Step 2. Increase Instant to emulate metrics expiration after 1s + clock.ClockInstance.Instant = time.Unix(1, 10) + clock.ClockInstance.TickerCh <- time.Unix(0, 0) + events <- event.Events{} + + // Check values + metrics, err = prometheus.DefaultGatherer.Gather() + if err != nil { + t.Fatal("Gather should not fail") + } + foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{}) + bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{}) + if foobarValue != nil { + t.Fatalf("Gauge `foobar` should be expired") + } + if bazquxValue == nil { + t.Fatalf("Summary `bazqux` should be gathered") + } + if *bazquxValue != 42 { + t.Fatalf("Summary `bazqux` observation %f is not expected. Should be 42", *bazquxValue) + } + + // Step 3. 
Increase Instant to emulate metrics expiration after 2s +	clock.ClockInstance.Instant = time.Unix(2, 200) +	clock.ClockInstance.TickerCh <- time.Unix(0, 0) +	events <- event.Events{} + +	// Check values +	metrics, err = prometheus.DefaultGatherer.Gather() +	if err != nil { +		t.Fatal("Gather should not fail") +	} +	foobarValue = getFloat64(metrics, "foobar", prometheus.Labels{}) +	bazquxValue = getFloat64(metrics, "bazqux", prometheus.Labels{}) +	if bazquxValue != nil { +		t.Fatalf("Summary `bazqux` should be expired") +	} +	if foobarValue != nil { +		t.Fatalf("Gauge `foobar` should not be gathered after expiration") +	} +} + +func TestHashLabelNames(t *testing.T) { +	r := registry.NewRegistry(nil) +	// Validate value hash changes and name hash doesn't when just the value changes. +	hash1, _ := r.HashLabels(map[string]string{ +		"label": "value1", +	}) +	hash2, _ := r.HashLabels(map[string]string{ +		"label": "value2", +	}) +	if hash1.Names != hash2.Names { +		t.Fatal("Hash of label names should match, but doesn't") +	} +	if hash1.Values == hash2.Values { +		t.Fatal("Hash of label names shouldn't match, but do") +	} + +	// Validate value and name hashes change when the name changes. +	hash1, _ = r.HashLabels(map[string]string{ +		"label1": "value", +	}) +	hash2, _ = r.HashLabels(map[string]string{ +		"label2": "value", +	}) +	if hash1.Names == hash2.Names { +		t.Fatal("Hash of label names shouldn't match, but do") +	} +	if hash1.Values == hash2.Values { +		t.Fatal("Hash of label names shouldn't match, but do") +	} +} + +// getFloat64 searches for a metric by name in an array of MetricFamily and then searches for a value by labels. +// Method returns a value or nil if metric is not found. 
+func getFloat64(metrics []*dto.MetricFamily, name string, labels prometheus.Labels) *float64 { + var metricFamily *dto.MetricFamily + for _, m := range metrics { + if *m.Name == name { + metricFamily = m + break + } + } + if metricFamily == nil { + return nil + } + + var metric *dto.Metric + labelStr := fmt.Sprintf("%v", labels) + for _, m := range metricFamily.Metric { + l := labelPairsAsLabels(m.GetLabel()) + ls := fmt.Sprintf("%v", l) + if labelStr == ls { + metric = m + break + } + } + if metric == nil { + return nil + } + + var value float64 + if metric.Gauge != nil { + value = metric.Gauge.GetValue() + return &value + } + if metric.Counter != nil { + value = metric.Counter.GetValue() + return &value + } + if metric.Histogram != nil { + value = metric.Histogram.GetSampleSum() + return &value + } + if metric.Summary != nil { + value = metric.Summary.GetSampleSum() + return &value + } + if metric.Untyped != nil { + value = metric.Untyped.GetValue() + return &value + } + panic(fmt.Errorf("collected a non-gauge/counter/histogram/summary/untyped metric: %s", metric)) +} + +func labelPairsAsLabels(pairs []*dto.LabelPair) (labels prometheus.Labels) { + labels = prometheus.Labels{} + for _, pair := range pairs { + if pair.Name == nil { + continue + } + value := "" + if pair.Value != nil { + value = *pair.Value + } + labels[*pair.Name] = value + } + return +} + +func getTelemetryCounterValue(counter prometheus.Counter) float64 { + var metric dto.Metric + err := counter.Write(&metric) + if err != nil { + return 0.0 + } + return metric.Counter.GetValue() +} + +func BenchmarkParseDogStatsDTags(b *testing.B) { + scenarios := map[string]string{ + "1 tag w/hash": "#test:tag", + "1 tag w/o hash": "test:tag", + "2 tags, mixed hashes": "tag1:test,#tag2:test", + "3 long tags": "tag1:reallylongtagthisisreallylong,tag2:anotherreallylongtag,tag3:thisisyetanotherextraordinarilylongtag", + "a-z tags": 
"a:0,b:1,c:2,d:3,e:4,f:5,g:6,h:7,i:8,j:9,k:0,l:1,m:2,n:3,o:4,p:5,q:6,r:7,s:8,t:9,u:0,v:1,w:2,x:3,y:4,z:5", + } + + for name, tags := range scenarios { + b.Run(name, func(b *testing.B) { + for n := 0; n < b.N; n++ { + labels := map[string]string{} + line.ParseDogStatsDTags(tags, labels, tagErrors, log.NewNopLogger()) + } + }) + } +} + +func BenchmarkHashNameAndLabels(b *testing.B) { + scenarios := []struct { + name string + metric string + labels map[string]string + }{ + { + name: "no labels", + labels: map[string]string{}, + }, { + name: "one label", + labels: map[string]string{ + "label": "value", + }, + }, { + name: "many labels", + labels: map[string]string{ + "label0": "value", + "label1": "value", + "label2": "value", + "label3": "value", + "label4": "value", + "label5": "value", + "label6": "value", + "label7": "value", + "label8": "value", + "label9": "value", + }, + }, + } + + r := registry.NewRegistry(nil) + for _, s := range scenarios { + b.Run(s.name, func(b *testing.B) { + for n := 0; n < b.N; n++ { + r.HashLabels(s.labels) + } + }) + } +} diff --git a/go.mod b/go.mod index 6873c9f..1cd2402 100644 --- a/go.mod +++ b/go.mod @@ -1,15 +1,15 @@ -module github.com/prometheus/statsd_exporter - -require ( - github.com/go-kit/kit v0.9.0 - github.com/hashicorp/golang-lru v0.5.1 - github.com/kr/pretty v0.1.0 // indirect - github.com/prometheus/client_golang v1.0.0 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 - github.com/prometheus/common v0.7.0 - gopkg.in/alecthomas/kingpin.v2 v2.2.6 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/yaml.v2 v2.2.2 -) - -go 1.13 +module github.com/prometheus/statsd_exporter + +require ( + github.com/go-kit/kit v0.9.0 + github.com/hashicorp/golang-lru v0.5.1 + github.com/kr/pretty v0.1.0 // indirect + github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/common v0.7.0 + 
gopkg.in/alecthomas/kingpin.v2 v2.2.6 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v2 v2.2.2 +) + +go 1.13 diff --git a/go.sum b/go.sum index 5799ea8..bd2f8b0 100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.0 h1:Ctq0iGpCmr3jeP77kbF2UxgvRwzWWz+4Bh9/vJTyg1A= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= diff --git a/main.go b/main.go index b36d3c4..67c8d82 100644 --- a/main.go +++ b/main.go @@ -1,302 +1,414 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "bufio" - "fmt" - "net" - "net/http" - _ "net/http/pprof" - "os" - "os/signal" - "strconv" - "syscall" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/promlog" - "github.com/prometheus/common/promlog/flag" - "github.com/prometheus/common/version" - "gopkg.in/alecthomas/kingpin.v2" - - "github.com/prometheus/statsd_exporter/pkg/mapper" -) - -func init() { - prometheus.MustRegister(version.NewCollector("statsd_exporter")) -} - -func serveHTTP(listenAddress, metricsEndpoint string, logger log.Logger) { - http.Handle(metricsEndpoint, promhttp.Handler()) - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(` - StatsD Exporter - -

StatsD Exporter

-

Metrics

- - `)) - }) - level.Error(logger).Log("msg", http.ListenAndServe(listenAddress, nil)) - os.Exit(1) -} - -func ipPortFromString(addr string) (*net.IPAddr, int, error) { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, 0, fmt.Errorf("bad StatsD listening address: %s", addr) - } - - if host == "" { - host = "0.0.0.0" - } - ip, err := net.ResolveIPAddr("ip", host) - if err != nil { - return nil, 0, fmt.Errorf("Unable to resolve %s: %s", host, err) - } - - port, err := strconv.Atoi(portStr) - if err != nil || port < 0 || port > 65535 { - return nil, 0, fmt.Errorf("Bad port %s: %s", portStr, err) - } - - return ip, port, nil -} - -func udpAddrFromString(addr string) (*net.UDPAddr, error) { - ip, port, err := ipPortFromString(addr) - if err != nil { - return nil, err - } - return &net.UDPAddr{ - IP: ip.IP, - Port: port, - Zone: ip.Zone, - }, nil -} - -func tcpAddrFromString(addr string) (*net.TCPAddr, error) { - ip, port, err := ipPortFromString(addr) - if err != nil { - return nil, err - } - return &net.TCPAddr{ - IP: ip.IP, - Port: port, - Zone: ip.Zone, - }, nil -} - -func configReloader(fileName string, mapper *mapper.MetricMapper, cacheSize int, logger log.Logger, option mapper.CacheOption) { - - signals := make(chan os.Signal, 1) - signal.Notify(signals, syscall.SIGHUP) - - for s := range signals { - if fileName == "" { - level.Warn(logger).Log("msg", "Received signal but no mapping config to reload", "signal", s) - continue - } - level.Info(logger).Log("msg", "Received signal, attempting reload", "signal", s) - err := mapper.InitFromFile(fileName, cacheSize, option) - if err != nil { - level.Info(logger).Log("msg", "Error reloading config", "error", err) - configLoads.WithLabelValues("failure").Inc() - } else { - level.Info(logger).Log("msg", "Config reloaded successfully") - configLoads.WithLabelValues("success").Inc() - } - } -} - -func dumpFSM(mapper *mapper.MetricMapper, dumpFilename string, logger log.Logger) error { - f, err := 
os.Create(dumpFilename) - if err != nil { - return err - } - level.Info(logger).Log("msg", "Start dumping FSM", "file_name", dumpFilename) - w := bufio.NewWriter(f) - mapper.FSM.DumpFSM(w) - w.Flush() - f.Close() - level.Info(logger).Log("msg", "Finish dumping FSM") - return nil -} - -func main() { - var ( - listenAddress = kingpin.Flag("web.listen-address", "The address on which to expose the web interface and generated Prometheus metrics.").Default(":9102").String() - metricsEndpoint = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String() - statsdListenUDP = kingpin.Flag("statsd.listen-udp", "The UDP address on which to receive statsd metric lines. \"\" disables it.").Default(":9125").String() - statsdListenTCP = kingpin.Flag("statsd.listen-tcp", "The TCP address on which to receive statsd metric lines. \"\" disables it.").Default(":9125").String() - statsdListenUnixgram = kingpin.Flag("statsd.listen-unixgram", "The Unixgram socket path to receive statsd metric lines in datagram. \"\" disables it.").Default("").String() - // not using Int here because flag diplays default in decimal, 0755 will show as 493 - statsdUnixSocketMode = kingpin.Flag("statsd.unixsocket-mode", "The permission mode of the unix socket.").Default("755").String() - mappingConfig = kingpin.Flag("statsd.mapping-config", "Metric mapping configuration file name.").String() - readBuffer = kingpin.Flag("statsd.read-buffer", "Size (in bytes) of the operating system's transmit read buffer associated with the UDP or Unixgram connection. Please make sure the kernel parameters net.core.rmem_max is set to a value greater than the value specified.").Int() - cacheSize = kingpin.Flag("statsd.cache-size", "Maximum size of your metric mapping cache. Relies on least recently used replacement policy if max size is reached.").Default("1000").Int() - cacheType = kingpin.Flag("statsd.cache-type", "Metric mapping cache type. 
Valid options are \"lru\" and \"random\"").Default("lru").Enum("lru", "random") - eventQueueSize = kingpin.Flag("statsd.event-queue-size", "Size of internal queue for processing events").Default("10000").Int() - eventFlushThreshold = kingpin.Flag("statsd.event-flush-threshold", "Number of events to hold in queue before flushing").Default("1000").Int() - eventFlushInterval = kingpin.Flag("statsd.event-flush-interval", "Number of events to hold in queue before flushing").Default("200ms").Duration() - dumpFSMPath = kingpin.Flag("debug.dump-fsm", "The path to dump internal FSM generated for glob matching as Dot file.").Default("").String() - ) - - promlogConfig := &promlog.Config{} - flag.AddFlags(kingpin.CommandLine, promlogConfig) - kingpin.Version(version.Print("statsd_exporter")) - kingpin.HelpFlag.Short('h') - kingpin.Parse() - logger := promlog.New(promlogConfig) - - cacheOption := mapper.WithCacheType(*cacheType) - - if *statsdListenUDP == "" && *statsdListenTCP == "" && *statsdListenUnixgram == "" { - level.Error(logger).Log("At least one of UDP/TCP/Unixgram listeners must be specified.") - os.Exit(1) - } - - level.Info(logger).Log("msg", "Starting StatsD -> Prometheus Exporter", "version", version.Info()) - level.Info(logger).Log("msg", "Build context", "context", version.BuildContext()) - level.Info(logger).Log("msg", "Accepting StatsD Traffic", "udp", *statsdListenUDP, "tcp", *statsdListenTCP, "unixgram", *statsdListenUnixgram) - level.Info(logger).Log("msg", "Accepting Prometheus Requests", "addr", *listenAddress) - - go serveHTTP(*listenAddress, *metricsEndpoint, logger) - - events := make(chan Events, *eventQueueSize) - defer close(events) - eventQueue := newEventQueue(events, *eventFlushThreshold, *eventFlushInterval) - - if *statsdListenUDP != "" { - udpListenAddr, err := udpAddrFromString(*statsdListenUDP) - if err != nil { - level.Error(logger).Log("msg", "invalid UDP listen address", "address", *statsdListenUDP, "error", err) - os.Exit(1) - } - 
uconn, err := net.ListenUDP("udp", udpListenAddr) - if err != nil { - level.Error(logger).Log("msg", "failed to start UDP listener", "error", err) - os.Exit(1) - } - - if *readBuffer != 0 { - err = uconn.SetReadBuffer(*readBuffer) - if err != nil { - level.Error(logger).Log("msg", "error setting UDP read buffer", "error", err) - os.Exit(1) - } - } - - ul := &StatsDUDPListener{conn: uconn, eventHandler: eventQueue, logger: logger} - go ul.Listen() - } - - if *statsdListenTCP != "" { - tcpListenAddr, err := tcpAddrFromString(*statsdListenTCP) - if err != nil { - level.Error(logger).Log("msg", "invalid TCP listen address", "address", *statsdListenUDP, "error", err) - os.Exit(1) - } - tconn, err := net.ListenTCP("tcp", tcpListenAddr) - if err != nil { - level.Error(logger).Log("msg", err) - os.Exit(1) - } - defer tconn.Close() - - tl := &StatsDTCPListener{conn: tconn, eventHandler: eventQueue, logger: logger} - go tl.Listen() - } - - if *statsdListenUnixgram != "" { - var err error - if _, err = os.Stat(*statsdListenUnixgram); !os.IsNotExist(err) { - level.Error(logger).Log("msg", "Unixgram socket already exists", "socket_name", *statsdListenUnixgram) - os.Exit(1) - } - uxgconn, err := net.ListenUnixgram("unixgram", &net.UnixAddr{ - Net: "unixgram", - Name: *statsdListenUnixgram, - }) - if err != nil { - level.Error(logger).Log("msg", "failed to listen on Unixgram socket", "error", err) - os.Exit(1) - } - - defer uxgconn.Close() - - if *readBuffer != 0 { - err = uxgconn.SetReadBuffer(*readBuffer) - if err != nil { - level.Error(logger).Log("msg", "error setting Unixgram read buffer", "error", err) - os.Exit(1) - } - } - - ul := &StatsDUnixgramListener{conn: uxgconn, eventHandler: eventQueue, logger: logger} - go ul.Listen() - - // if it's an abstract unix domain socket, it won't exist on fs - // so we can't chmod it either - if _, err := os.Stat(*statsdListenUnixgram); !os.IsNotExist(err) { - defer os.Remove(*statsdListenUnixgram) - - // convert the string to octet - 
perm, err := strconv.ParseInt("0"+string(*statsdUnixSocketMode), 8, 32) - if err != nil { - level.Warn(logger).Log("Bad permission %s: %v, ignoring\n", *statsdUnixSocketMode, err) - } else { - err = os.Chmod(*statsdListenUnixgram, os.FileMode(perm)) - if err != nil { - level.Warn(logger).Log("Failed to change unixgram socket permission: %v", err) - } - } - } - - } - - mapper := &mapper.MetricMapper{MappingsCount: mappingsCount} - if *mappingConfig != "" { - err := mapper.InitFromFile(*mappingConfig, *cacheSize, cacheOption) - if err != nil { - level.Error(logger).Log("msg", "error loading config", "error", err) - os.Exit(1) - } - if *dumpFSMPath != "" { - err := dumpFSM(mapper, *dumpFSMPath, logger) - if err != nil { - level.Error(logger).Log("msg", "error dumping FSM", "error", err) - // Failure to dump the FSM is an error (the user asked for it and it - // didn't happen) but not fatal (the exporter is fully functional - // afterwards). - } - } - } else { - mapper.InitCache(*cacheSize, cacheOption) - } - - go configReloader(*mappingConfig, mapper, *cacheSize, logger, cacheOption) - - exporter := NewExporter(mapper, logger) - - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt, syscall.SIGTERM) - - go exporter.Listen(events) - - <-signals -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "bufio" + "net" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "strconv" + "syscall" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/promlog" + "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/version" + "gopkg.in/alecthomas/kingpin.v2" + + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/exporter" + "github.com/prometheus/statsd_exporter/pkg/listener" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/prometheus/statsd_exporter/pkg/util" +) + +const ( + defaultHelp = "Metric autogenerated by statsd_exporter." + regErrF = "Failed to update metric" +) + +var ( + eventStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_total", + Help: "The total number of StatsD events seen.", + }, + []string{"type"}, + ) + eventsFlushed = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_event_queue_flushed_total", + Help: "Number of times events were flushed to exporter", + }, + ) + eventsUnmapped = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "statsd_exporter_events_unmapped_total", + Help: "The total number of StatsD events no mapping was found for.", + }) + udpPackets = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_udp_packets_total", + Help: "The total number of StatsD packets received over UDP.", + }, + ) + tcpConnections = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_tcp_connections_total", + Help: "The total number of TCP connections handled.", + }, + ) + tcpErrors = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_tcp_connection_errors_total", + Help: "The number of errors encountered reading from TCP.", + }, + ) + tcpLineTooLong = 
prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_tcp_too_long_lines_total", + Help: "The number of lines discarded due to being too long.", + }, + ) + unixgramPackets = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_unixgram_packets_total", + Help: "The total number of StatsD packets received over Unixgram.", + }, + ) + linesReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_lines_total", + Help: "The total number of StatsD lines received.", + }, + ) + samplesReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_samples_total", + Help: "The total number of StatsD samples received.", + }, + ) + sampleErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_sample_errors_total", + Help: "The total number of errors parsing StatsD samples.", + }, + []string{"reason"}, + ) + tagsReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_tags_total", + Help: "The total number of DogStatsD tags processed.", + }, + ) + tagErrors = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_tag_errors_total", + Help: "The number of errors parsing DogStatsD tags.", + }, + ) + configLoads = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_config_reloads_total", + Help: "The number of configuration reloads.", + }, + []string{"outcome"}, + ) + mappingsCount = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "statsd_exporter_loaded_mappings", + Help: "The current number of configured metric mappings.", + }) + conflictingEventStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_conflict_total", + Help: "The total number of StatsD events with conflicting names.", + }, + []string{"type"}, + ) + errorEventStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_error_total", + Help: "The total 
number of StatsD events discarded due to errors.", + }, + []string{"reason"}, + ) + eventsActions = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_actions_total", + Help: "The total number of StatsD events by action.", + }, + []string{"action"}, + ) + metricsCount = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "statsd_exporter_metrics_total", + Help: "The total number of metrics.", + }, + []string{"type"}, + ) +) + +func init() { + prometheus.MustRegister(version.NewCollector("statsd_exporter")) + prometheus.MustRegister(eventStats) + prometheus.MustRegister(eventsFlushed) + prometheus.MustRegister(eventsUnmapped) + prometheus.MustRegister(udpPackets) + prometheus.MustRegister(tcpConnections) + prometheus.MustRegister(tcpErrors) + prometheus.MustRegister(tcpLineTooLong) + prometheus.MustRegister(unixgramPackets) + prometheus.MustRegister(linesReceived) + prometheus.MustRegister(samplesReceived) + prometheus.MustRegister(sampleErrors) + prometheus.MustRegister(tagsReceived) + prometheus.MustRegister(tagErrors) + prometheus.MustRegister(configLoads) + prometheus.MustRegister(mappingsCount) + prometheus.MustRegister(conflictingEventStats) + prometheus.MustRegister(errorEventStats) + prometheus.MustRegister(eventsActions) + prometheus.MustRegister(metricsCount) +} + +// uncheckedCollector wraps a Collector but its Describe method yields no Desc. +// This allows incoming metrics to have inconsistent label sets +type uncheckedCollector struct { + c prometheus.Collector +} + +func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {} +func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) { + u.c.Collect(c) +} + +func serveHTTP(listenAddress, metricsEndpoint string, logger log.Logger) { + http.Handle(metricsEndpoint, promhttp.Handler()) + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(` + StatsD Exporter + +

StatsD Exporter

+

Metrics

+ + `)) + }) + level.Error(logger).Log("msg", http.ListenAndServe(listenAddress, nil)) + os.Exit(1) +} + +func configReloader(fileName string, mapper *mapper.MetricMapper, cacheSize int, logger log.Logger, option mapper.CacheOption) { + + signals := make(chan os.Signal, 1) + signal.Notify(signals, syscall.SIGHUP) + + for s := range signals { + if fileName == "" { + level.Warn(logger).Log("msg", "Received signal but no mapping config to reload", "signal", s) + continue + } + level.Info(logger).Log("msg", "Received signal, attempting reload", "signal", s) + err := mapper.InitFromFile(fileName, cacheSize, option) + if err != nil { + level.Info(logger).Log("msg", "Error reloading config", "error", err) + configLoads.WithLabelValues("failure").Inc() + } else { + level.Info(logger).Log("msg", "Config reloaded successfully") + configLoads.WithLabelValues("success").Inc() + } + } +} + +func dumpFSM(mapper *mapper.MetricMapper, dumpFilename string, logger log.Logger) error { + f, err := os.Create(dumpFilename) + if err != nil { + return err + } + level.Info(logger).Log("msg", "Start dumping FSM", "file_name", dumpFilename) + w := bufio.NewWriter(f) + mapper.FSM.DumpFSM(w) + w.Flush() + f.Close() + level.Info(logger).Log("msg", "Finish dumping FSM") + return nil +} + +func main() { + var ( + listenAddress = kingpin.Flag("web.listen-address", "The address on which to expose the web interface and generated Prometheus metrics.").Default(":9102").String() + metricsEndpoint = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String() + statsdListenUDP = kingpin.Flag("statsd.listen-udp", "The UDP address on which to receive statsd metric lines. \"\" disables it.").Default(":9125").String() + statsdListenTCP = kingpin.Flag("statsd.listen-tcp", "The TCP address on which to receive statsd metric lines. 
\"\" disables it.").Default(":9125").String() + statsdListenUnixgram = kingpin.Flag("statsd.listen-unixgram", "The Unixgram socket path to receive statsd metric lines in datagram. \"\" disables it.").Default("").String() + // not using Int here because flag diplays default in decimal, 0755 will show as 493 + statsdUnixSocketMode = kingpin.Flag("statsd.unixsocket-mode", "The permission mode of the unix socket.").Default("755").String() + mappingConfig = kingpin.Flag("statsd.mapping-config", "Metric mapping configuration file name.").String() + readBuffer = kingpin.Flag("statsd.read-buffer", "Size (in bytes) of the operating system's transmit read buffer associated with the UDP or Unixgram connection. Please make sure the kernel parameters net.core.rmem_max is set to a value greater than the value specified.").Int() + cacheSize = kingpin.Flag("statsd.cache-size", "Maximum size of your metric mapping cache. Relies on least recently used replacement policy if max size is reached.").Default("1000").Int() + cacheType = kingpin.Flag("statsd.cache-type", "Metric mapping cache type. 
Valid options are \"lru\" and \"random\"").Default("lru").Enum("lru", "random") + eventQueueSize = kingpin.Flag("statsd.event-queue-size", "Size of internal queue for processing events").Default("10000").Int() + eventFlushThreshold = kingpin.Flag("statsd.event-flush-threshold", "Number of events to hold in queue before flushing").Default("1000").Int() + eventFlushInterval = kingpin.Flag("statsd.event-flush-interval", "Number of events to hold in queue before flushing").Default("200ms").Duration() + dumpFSMPath = kingpin.Flag("debug.dump-fsm", "The path to dump internal FSM generated for glob matching as Dot file.").Default("").String() + ) + + promlogConfig := &promlog.Config{} + flag.AddFlags(kingpin.CommandLine, promlogConfig) + kingpin.Version(version.Print("statsd_exporter")) + kingpin.HelpFlag.Short('h') + kingpin.Parse() + logger := promlog.New(promlogConfig) + + cacheOption := mapper.WithCacheType(*cacheType) + + if *statsdListenUDP == "" && *statsdListenTCP == "" && *statsdListenUnixgram == "" { + level.Error(logger).Log("At least one of UDP/TCP/Unixgram listeners must be specified.") + os.Exit(1) + } + + level.Info(logger).Log("msg", "Starting StatsD -> Prometheus Exporter", "version", version.Info()) + level.Info(logger).Log("msg", "Build context", "context", version.BuildContext()) + level.Info(logger).Log("msg", "Accepting StatsD Traffic", "udp", *statsdListenUDP, "tcp", *statsdListenTCP, "unixgram", *statsdListenUnixgram) + level.Info(logger).Log("msg", "Accepting Prometheus Requests", "addr", *listenAddress) + + go serveHTTP(*listenAddress, *metricsEndpoint, logger) + + events := make(chan event.Events, *eventQueueSize) + defer close(events) + eventQueue := event.NewEventQueue(events, *eventFlushThreshold, *eventFlushInterval, eventsFlushed) + + if *statsdListenUDP != "" { + udpListenAddr, err := util.UDPAddrFromString(*statsdListenUDP) + if err != nil { + level.Error(logger).Log("msg", "invalid UDP listen address", "address", *statsdListenUDP, 
"error", err) + os.Exit(1) + } + uconn, err := net.ListenUDP("udp", udpListenAddr) + if err != nil { + level.Error(logger).Log("msg", "failed to start UDP listener", "error", err) + os.Exit(1) + } + + if *readBuffer != 0 { + err = uconn.SetReadBuffer(*readBuffer) + if err != nil { + level.Error(logger).Log("msg", "error setting UDP read buffer", "error", err) + os.Exit(1) + } + } + + ul := &listener.StatsDUDPListener{Conn: uconn, EventHandler: eventQueue, Logger: logger} + go ul.Listen(udpPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived) + } + + if *statsdListenTCP != "" { + tcpListenAddr, err := util.TCPAddrFromString(*statsdListenTCP) + if err != nil { + level.Error(logger).Log("msg", "invalid TCP listen address", "address", *statsdListenUDP, "error", err) + os.Exit(1) + } + tconn, err := net.ListenTCP("tcp", tcpListenAddr) + if err != nil { + level.Error(logger).Log("msg", err) + os.Exit(1) + } + defer tconn.Close() + + tl := &listener.StatsDTCPListener{Conn: tconn, EventHandler: eventQueue, Logger: logger} + go tl.Listen(linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, *sampleErrors, samplesReceived, tagErrors, tagsReceived) + } + + if *statsdListenUnixgram != "" { + var err error + if _, err = os.Stat(*statsdListenUnixgram); !os.IsNotExist(err) { + level.Error(logger).Log("msg", "Unixgram socket already exists", "socket_name", *statsdListenUnixgram) + os.Exit(1) + } + uxgconn, err := net.ListenUnixgram("unixgram", &net.UnixAddr{ + Net: "unixgram", + Name: *statsdListenUnixgram, + }) + if err != nil { + level.Error(logger).Log("msg", "failed to listen on Unixgram socket", "error", err) + os.Exit(1) + } + + defer uxgconn.Close() + + if *readBuffer != 0 { + err = uxgconn.SetReadBuffer(*readBuffer) + if err != nil { + level.Error(logger).Log("msg", "error setting Unixgram read buffer", "error", err) + os.Exit(1) + } + } + + ul := &listener.StatsDUnixgramListener{Conn: uxgconn, EventHandler: 
eventQueue, Logger: logger} + go ul.Listen(unixgramPackets, linesReceived, eventsFlushed, *sampleErrors, samplesReceived, tagErrors, tagsReceived) + + // if it's an abstract unix domain socket, it won't exist on fs + // so we can't chmod it either + if _, err := os.Stat(*statsdListenUnixgram); !os.IsNotExist(err) { + defer os.Remove(*statsdListenUnixgram) + + // convert the string to octet + perm, err := strconv.ParseInt("0"+string(*statsdUnixSocketMode), 8, 32) + if err != nil { + level.Warn(logger).Log("Bad permission %s: %v, ignoring\n", *statsdUnixSocketMode, err) + } else { + err = os.Chmod(*statsdListenUnixgram, os.FileMode(perm)) + if err != nil { + level.Warn(logger).Log("Failed to change unixgram socket permission: %v", err) + } + } + } + + } + + mapper := &mapper.MetricMapper{MappingsCount: mappingsCount} + if *mappingConfig != "" { + err := mapper.InitFromFile(*mappingConfig, *cacheSize, cacheOption) + if err != nil { + level.Error(logger).Log("msg", "error loading config", "error", err) + os.Exit(1) + } + if *dumpFSMPath != "" { + err := dumpFSM(mapper, *dumpFSMPath, logger) + if err != nil { + level.Error(logger).Log("msg", "error dumping FSM", "error", err) + // Failure to dump the FSM is an error (the user asked for it and it + // didn't happen) but not fatal (the exporter is fully functional + // afterwards). 
+ } + } + } else { + mapper.InitCache(*cacheSize, cacheOption) + } + + go configReloader(*mappingConfig, mapper, *cacheSize, logger, cacheOption) + + exporter := exporter.NewExporter(mapper, logger) + + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt, syscall.SIGTERM) + + go exporter.Listen(events, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + + <-signals +} diff --git a/pkg/clock/clock.go b/pkg/clock/clock.go index 27e631b..408d4ff 100644 --- a/pkg/clock/clock.go +++ b/pkg/clock/clock.go @@ -1,41 +1,41 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clock - -import ( - "time" -) - -var ClockInstance *Clock - -type Clock struct { - Instant time.Time - TickerCh chan time.Time -} - -func Now() time.Time { - if ClockInstance == nil { - return time.Now() - } - return ClockInstance.Instant -} - -func NewTicker(d time.Duration) *time.Ticker { - if ClockInstance == nil || ClockInstance.TickerCh == nil { - return time.NewTicker(d) - } - return &time.Ticker{ - C: ClockInstance.TickerCh, - } -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clock + +import ( + "time" +) + +var ClockInstance *Clock + +type Clock struct { + Instant time.Time + TickerCh chan time.Time +} + +func Now() time.Time { + if ClockInstance == nil { + return time.Now() + } + return ClockInstance.Instant +} + +func NewTicker(d time.Duration) *time.Ticker { + if ClockInstance == nil || ClockInstance.TickerCh == nil { + return time.NewTicker(d) + } + return &time.Ticker{ + C: ClockInstance.TickerCh, + } +} diff --git a/pkg/event.go~ b/pkg/event.go~ new file mode 100644 index 0000000..3bc8a8e --- /dev/null +++ b/pkg/event.go~ @@ -0,0 +1,133 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "sync" + "time" + + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +type Event interface { + MetricName() string + Value() float64 + Labels() map[string]string + MetricType() mapper.MetricType +} + +type CounterEvent struct { + metricName string + value float64 + labels map[string]string +} + +func (c *CounterEvent) MetricName() string { return c.metricName } +func (c *CounterEvent) Value() float64 { return c.value } +func (c *CounterEvent) Labels() map[string]string { return c.labels } +func (c *CounterEvent) MetricType() mapper.MetricType { return mapper.MetricTypeCounter } + +type GaugeEvent struct { + metricName string + value float64 + relative bool + labels map[string]string +} + +func (g *GaugeEvent) MetricName() string { return g.metricName } +func (g *GaugeEvent) Value() float64 { return g.value } +func (c *GaugeEvent) Labels() map[string]string { return c.labels } +func (c *GaugeEvent) MetricType() mapper.MetricType { return mapper.MetricTypeGauge } + +type TimerEvent struct { + metricName string + value float64 + labels map[string]string +} + +func (t *TimerEvent) MetricName() string { return t.metricName } +func (t *TimerEvent) Value() float64 { return t.value } +func (c *TimerEvent) Labels() map[string]string { return c.labels } +func (c *TimerEvent) MetricType() mapper.MetricType { return mapper.MetricTypeTimer } + +type Events []Event + +type eventQueue struct { + c chan Events + q Events + m sync.Mutex + flushThreshold int + flushTicker *time.Ticker +} + +type eventHandler interface { + queue(event Events) +} + +func newEventQueue(c chan Events, flushThreshold int, flushInterval time.Duration) *eventQueue { + ticker := clock.NewTicker(flushInterval) + eq := &eventQueue{ + c: c, + flushThreshold: flushThreshold, + flushTicker: ticker, + q: make([]Event, 0, flushThreshold), + } + go func() { + for { + <-ticker.C + eq.flush() + } + }() + return eq +} + +func (eq 
*eventQueue) queue(events Events) { + eq.m.Lock() + defer eq.m.Unlock() + + for _, e := range events { + eq.q = append(eq.q, e) + if len(eq.q) >= eq.flushThreshold { + eq.flushUnlocked() + } + } +} + +func (eq *eventQueue) flush() { + eq.m.Lock() + defer eq.m.Unlock() + eq.flushUnlocked() +} + +func (eq *eventQueue) flushUnlocked() { + eq.c <- eq.q + eq.q = make([]Event, 0, cap(eq.q)) + eventsFlushed.Inc() +} + +func (eq *eventQueue) len() int { + eq.m.Lock() + defer eq.m.Unlock() + + return len(eq.q) +} + +type unbufferedEventHandler struct { + c chan Events +} + +func (ueh *unbufferedEventHandler) queue(events Events) { + ueh.c <- events +} diff --git a/pkg/event/event.go b/pkg/event/event.go new file mode 100644 index 0000000..8175e3a --- /dev/null +++ b/pkg/event/event.go @@ -0,0 +1,134 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package event + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +type Event interface { + MetricName() string + Value() float64 + Labels() map[string]string + MetricType() mapper.MetricType +} + +type CounterEvent struct { + CMetricName string + CValue float64 + CLabels map[string]string +} + +func (c *CounterEvent) MetricName() string { return c.CMetricName } +func (c *CounterEvent) Value() float64 { return c.CValue } +func (c *CounterEvent) Labels() map[string]string { return c.CLabels } +func (c *CounterEvent) MetricType() mapper.MetricType { return mapper.MetricTypeCounter } + +type GaugeEvent struct { + GMetricName string + GValue float64 + GRelative bool + GLabels map[string]string +} + +func (g *GaugeEvent) MetricName() string { return g.GMetricName } +func (g *GaugeEvent) Value() float64 { return g.GValue } +func (c *GaugeEvent) Labels() map[string]string { return c.GLabels } +func (c *GaugeEvent) MetricType() mapper.MetricType { return mapper.MetricTypeGauge } + +type TimerEvent struct { + TMetricName string + TValue float64 + TLabels map[string]string +} + +func (t *TimerEvent) MetricName() string { return t.TMetricName } +func (t *TimerEvent) Value() float64 { return t.TValue } +func (c *TimerEvent) Labels() map[string]string { return c.TLabels } +func (c *TimerEvent) MetricType() mapper.MetricType { return mapper.MetricTypeTimer } + +type Events []Event + +type EventQueue struct { + C chan Events + q Events + m sync.Mutex + flushThreshold int + flushTicker *time.Ticker +} + +type EventHandler interface { + Queue(event Events, eventsFlushed *prometheus.Counter) +} + +func NewEventQueue(c chan Events, flushThreshold int, flushInterval time.Duration, eventsFlushed prometheus.Counter) *EventQueue { + ticker := clock.NewTicker(flushInterval) + eq := &EventQueue{ + C: c, + flushThreshold: flushThreshold, + 
flushTicker: ticker, + q: make([]Event, 0, flushThreshold), + } + go func() { + for { + <-ticker.C + eq.Flush(eventsFlushed) + } + }() + return eq +} + +func (eq *EventQueue) Queue(events Events, eventsFlushed *prometheus.Counter) { + eq.m.Lock() + defer eq.m.Unlock() + + for _, e := range events { + eq.q = append(eq.q, e) + if len(eq.q) >= eq.flushThreshold { + eq.FlushUnlocked(*eventsFlushed) + } + } +} + +func (eq *EventQueue) Flush(eventsFlushed prometheus.Counter) { + eq.m.Lock() + defer eq.m.Unlock() + eq.FlushUnlocked(eventsFlushed) +} + +func (eq *EventQueue) FlushUnlocked(eventsFlushed prometheus.Counter) { + eq.C <- eq.q + eq.q = make([]Event, 0, cap(eq.q)) + eventsFlushed.Inc() +} + +func (eq *EventQueue) Len() int { + eq.m.Lock() + defer eq.m.Unlock() + + return len(eq.q) +} + +type UnbufferedEventHandler struct { + C chan Events +} + +func (ueh *UnbufferedEventHandler) Queue(events Events, eventsFlushed *prometheus.Counter) { + ueh.C <- events +} diff --git a/pkg/event/event.go~ b/pkg/event/event.go~ new file mode 100644 index 0000000..e17b223 --- /dev/null +++ b/pkg/event/event.go~ @@ -0,0 +1,134 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package event + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +type Event interface { + MetricName() string + Value() float64 + Labels() map[string]string + MetricType() mapper.MetricType +} + +type CounterEvent struct { + CMetricName string + CValue float64 + CLabels map[string]string +} + +func (c *CounterEvent) MetricName() string { return c.CMetricName } +func (c *CounterEvent) Value() float64 { return c.CValue } +func (c *CounterEvent) Labels() map[string]string { return c.CLabels } +func (c *CounterEvent) MetricType() mapper.MetricType { return mapper.MetricTypeCounter } + +type GaugeEvent struct { + GMetricName string + GValue float64 + GRelative bool + GLabels map[string]string +} + +func (g *GaugeEvent) MetricName() string { return g.GMetricName } +func (g *GaugeEvent) Value() float64 { return g.GValue } +func (c *GaugeEvent) Labels() map[string]string { return c.GLabels } +func (c *GaugeEvent) MetricType() mapper.MetricType { return mapper.MetricTypeGauge } + +type TimerEvent struct { + TMetricName string + TValue float64 + TLabels map[string]string +} + +func (t *TimerEvent) MetricName() string { return t.TMetricName } +func (t *TimerEvent) Value() float64 { return t.TValue } +func (c *TimerEvent) Labels() map[string]string { return c.TLabels } +func (c *TimerEvent) MetricType() mapper.MetricType { return mapper.MetricTypeTimer } + +type Events []Event + +type EventQueue struct { + C chan Events + q Events + m sync.Mutex + flushThreshold int + flushTicker *time.Ticker +} + +type EventHandler interface { + Queue(event Events, eventsFlushed *prometheus.Counter) +} + +func NewEventQueue(c chan Events, flushThreshold int, flushInterval time.Duration, eventsFlushed prometheus.Counter) *EventQueue { + ticker := clock.NewTicker(flushInterval) + eq := &EventQueue{ + C: c, + flushThreshold: flushThreshold, + 
flushTicker: ticker, + q: make([]Event, 0, flushThreshold), + } + go func() { + for { + <-ticker.C + eq.Flush(eventsFlushed) + } + }() + return eq +} + +func (eq *EventQueue) Queue(events Events, eventsFlushed *prometheus.Counter) { + eq.m.Lock() + defer eq.m.Unlock() + + for _, e := range events { + eq.q = append(eq.q, e) + if len(eq.q) >= eq.flushThreshold { + eq.FlushUnlocked(*eventsFlushed) + } + } +} + +func (eq *EventQueue) Flush(eventsFlushed prometheus.Counter) { + eq.m.Lock() + defer eq.m.Unlock() + eq.FlushUnlocked(eventsFlushed) +} + +func (eq *EventQueue) FlushUnlocked(eventsFlushed prometheus.Counter) { + eq.C <- eq.q + eq.q = make([]Event, 0, cap(eq.q)) + eventsFlushed.Inc() +} + +func (eq *EventQueue) Len() int { + eq.m.Lock() + defer eq.m.Unlock() + + return len(eq.q) +} + +type UnbufferedEventHandler struct { + C chan Events +} + +func (ueh *UnbufferedEventHandler) Queue(events Events) { + ueh.C <- events +} diff --git a/pkg/exporter/exporter.go b/pkg/exporter/exporter.go new file mode 100644 index 0000000..c7cde63 --- /dev/null +++ b/pkg/exporter/exporter.go @@ -0,0 +1,173 @@ +package exporter + +import ( + "os" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/prometheus/statsd_exporter/pkg/registry" +) + +const ( + defaultHelp = "Metric autogenerated by statsd_exporter." + regErrF = "Failed to update metric" +) + +type Exporter struct { + Mapper *mapper.MetricMapper + Registry *registry.Registry + Logger log.Logger +} + +// Listen handles all events sent to the given channel sequentially. It +// terminates when the channel is closed. 
+func (b *Exporter) Listen(e <-chan event.Events, eventsActions *prometheus.CounterVec, eventsUnmapped prometheus.Counter, + errorEventStats *prometheus.CounterVec, eventStats *prometheus.CounterVec, conflictingEventStats *prometheus.CounterVec, metricsCount *prometheus.GaugeVec) { + + removeStaleMetricsTicker := clock.NewTicker(time.Second) + + for { + select { + case <-removeStaleMetricsTicker.C: + b.Registry.RemoveStaleMetrics() + case events, ok := <-e: + if !ok { + level.Debug(b.Logger).Log("msg", "Channel is closed. Break out of Exporter.Listener.") + removeStaleMetricsTicker.Stop() + return + } + for _, event := range events { + b.handleEvent(event, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount) + } + } + } +} + +// handleEvent processes a single Event according to the configured mapping. +func (b *Exporter) handleEvent(thisEvent event.Event, eventsActions *prometheus.CounterVec, eventsUnmapped prometheus.Counter, + errorEventStats *prometheus.CounterVec, eventStats *prometheus.CounterVec, conflictingEventStats *prometheus.CounterVec, metricsCount *prometheus.GaugeVec) { + + mapping, labels, present := b.Mapper.GetMapping(thisEvent.MetricName(), thisEvent.MetricType()) + if mapping == nil { + mapping = &mapper.MetricMapping{} + if b.Mapper.Defaults.Ttl != 0 { + mapping.Ttl = b.Mapper.Defaults.Ttl + } + } + + if mapping.Action == mapper.ActionTypeDrop { + eventsActions.WithLabelValues("drop").Inc() + return + } + + metricName := "" + + help := defaultHelp + if mapping.HelpText != "" { + help = mapping.HelpText + } + + prometheusLabels := thisEvent.Labels() + if present { + if mapping.Name == "" { + level.Debug(b.Logger).Log("msg", "The mapping generates an empty metric name", "metric_name", thisEvent.MetricName(), "match", mapping.Match) + errorEventStats.WithLabelValues("empty_metric_name").Inc() + return + } + metricName = mapper.EscapeMetricName(mapping.Name) + for label, value := range labels { + 
prometheusLabels[label] = value + } + eventsActions.WithLabelValues(string(mapping.Action)).Inc() + } else { + eventsUnmapped.Inc() + metricName = mapper.EscapeMetricName(thisEvent.MetricName()) + } + + switch ev := thisEvent.(type) { + case *event.CounterEvent: + // We don't accept negative values for counters. Incrementing the counter with a negative number + // will cause the exporter to panic. Instead we will warn and continue to the next event. + if thisEvent.Value() < 0.0 { + level.Debug(b.Logger).Log("msg", "counter must be non-negative value", "metric", metricName, "event_value", thisEvent.Value()) + errorEventStats.WithLabelValues("illegal_negative_counter").Inc() + return + } + + counter, err := b.Registry.GetCounter(metricName, prometheusLabels, help, mapping, metricsCount) + if err == nil { + counter.Add(thisEvent.Value()) + eventStats.WithLabelValues("counter").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("counter").Inc() + } + + case *event.GaugeEvent: + gauge, err := b.Registry.GetGauge(metricName, prometheusLabels, help, mapping, metricsCount) + + if err == nil { + if ev.GRelative { + gauge.Add(thisEvent.Value()) + } else { + gauge.Set(thisEvent.Value()) + } + eventStats.WithLabelValues("gauge").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("gauge").Inc() + } + + case *event.TimerEvent: + t := mapper.TimerTypeDefault + if mapping != nil { + t = mapping.TimerType + } + if t == mapper.TimerTypeDefault { + t = b.Mapper.Defaults.TimerType + } + + switch t { + case mapper.TimerTypeHistogram: + histogram, err := b.Registry.GetHistogram(metricName, prometheusLabels, help, mapping, metricsCount) + if err == nil { + histogram.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond + eventStats.WithLabelValues("timer").Inc() + } else { + 
level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("timer").Inc() + } + + case mapper.TimerTypeDefault, mapper.TimerTypeSummary: + summary, err := b.Registry.GetSummary(metricName, prometheusLabels, help, mapping, metricsCount) + if err == nil { + summary.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond + eventStats.WithLabelValues("timer").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("timer").Inc() + } + + default: + level.Error(b.Logger).Log("msg", "unknown timer type", "type", t) + os.Exit(1) + } + + default: + level.Debug(b.Logger).Log("msg", "Unsupported event type") + eventStats.WithLabelValues("illegal").Inc() + } +} + +func NewExporter(mapper *mapper.MetricMapper, logger log.Logger) *Exporter { + return &Exporter{ + Mapper: mapper, + Registry: registry.NewRegistry(mapper), + Logger: logger, + } +} diff --git a/pkg/exporter/exporter.go~ b/pkg/exporter/exporter.go~ new file mode 100644 index 0000000..6fcd288 --- /dev/null +++ b/pkg/exporter/exporter.go~ @@ -0,0 +1,172 @@ +package exporter + +import ( + "os" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/prometheus/statsd_exporter/pkg/registry" +) + +const ( + defaultHelp = "Metric autogenerated by statsd_exporter." + regErrF = "Failed to update metric" +) + +type Exporter struct { + Mapper *mapper.MetricMapper + Registry *registry.Registry + Logger log.Logger +} + +// Listen handles all events sent to the given channel sequentially. It +// terminates when the channel is closed. 
+func (b *Exporter) Listen(e <-chan event.Events, thisEvent event.Event, eventsActions prometheus.GaugeVec, eventsUnmapped prometheus.Gauge, + errorEventStats prometheus.GaugeVec, eventStats prometheus.GaugeVec, conflictingEventStats prometheus.GaugeVec, metricsCount prometheus.GaugeVec, l func(string, log.Logger)) { + removeStaleMetricsTicker := clock.NewTicker(time.Second) + + for { + select { + case <-removeStaleMetricsTicker.C: + b.Registry.RemoveStaleMetrics() + case events, ok := <-e: + if !ok { + level.Debug(b.Logger).Log("msg", "Channel is closed. Break out of Exporter.Listener.") + removeStaleMetricsTicker.Stop() + return + } + for _, event := range events { + b.handleEvent(event, eventsActions, eventsUnmapped, errorEventStats, eventStats, conflictingEventStats, metricsCount, l) + } + } + } +} + +// handleEvent processes a single Event according to the configured mapping. +func (b *Exporter) handleEvent(thisEvent event.Event, eventsActions prometheus.GaugeVec, eventsUnmapped prometheus.Gauge, + errorEventStats prometheus.GaugeVec, eventStats prometheus.GaugeVec, conflictingEventStats prometheus.GaugeVec, metricsCount prometheus.GaugeVec, l func(string, log.Logger)) { + + mapping, labels, present := b.Mapper.GetMapping(thisEvent.MetricName(), thisEvent.MetricType()) + if mapping == nil { + mapping = &mapper.MetricMapping{} + if b.Mapper.Defaults.Ttl != 0 { + mapping.Ttl = b.Mapper.Defaults.Ttl + } + } + + if mapping.Action == mapper.ActionTypeDrop { + eventsActions.WithLabelValues("drop").Inc() + return + } + + metricName := "" + + help := defaultHelp + if mapping.HelpText != "" { + help = mapping.HelpText + } + + prometheusLabels := thisEvent.Labels() + if present { + if mapping.Name == "" { + level.Debug(b.Logger).Log("msg", "The mapping generates an empty metric name", "metric_name", thisEvent.MetricName(), "match", mapping.Match) + errorEventStats.WithLabelValues("empty_metric_name").Inc() + return + } + metricName = 
mapper.EscapeMetricName(mapping.Name) + for label, value := range labels { + prometheusLabels[label] = value + } + eventsActions.WithLabelValues(string(mapping.Action)).Inc() + } else { + eventsUnmapped.Inc() + metricName = mapper.EscapeMetricName(thisEvent.MetricName()) + } + + switch ev := thisEvent.(type) { + case *event.CounterEvent: + // We don't accept negative values for counters. Incrementing the counter with a negative number + // will cause the exporter to panic. Instead we will warn and continue to the next event. + if thisEvent.Value() < 0.0 { + level.Debug(b.Logger).Log("msg", "counter must be non-negative value", "metric", metricName, "event_value", thisEvent.Value()) + errorEventStats.WithLabelValues("illegal_negative_counter").Inc() + return + } + + counter, err := b.Registry.GetCounter(metricName, prometheusLabels, help, mapping, &metricsCount) + if err == nil { + counter.Add(thisEvent.Value()) + eventStats.WithLabelValues("counter").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("counter").Inc() + } + + case *event.GaugeEvent: + gauge, err := b.Registry.GetGauge(metricName, prometheusLabels, help, mapping, &metricsCount) + + if err == nil { + if ev.GRelative { + gauge.Add(thisEvent.Value()) + } else { + gauge.Set(thisEvent.Value()) + } + eventStats.WithLabelValues("gauge").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("gauge").Inc() + } + + case *event.TimerEvent: + t := mapper.TimerTypeDefault + if mapping != nil { + t = mapping.TimerType + } + if t == mapper.TimerTypeDefault { + t = b.Mapper.Defaults.TimerType + } + + switch t { + case mapper.TimerTypeHistogram: + histogram, err := b.Registry.GetHistogram(metricName, prometheusLabels, help, mapping, &metricsCount) + if err == nil { + histogram.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd 
millisecond + eventStats.WithLabelValues("timer").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("timer").Inc() + } + + case mapper.TimerTypeDefault, mapper.TimerTypeSummary: + summary, err := b.Registry.GetSummary(metricName, prometheusLabels, help, mapping, &metricsCount) + if err == nil { + summary.Observe(thisEvent.Value() / 1000) // prometheus presumes seconds, statsd millisecond + eventStats.WithLabelValues("timer").Inc() + } else { + level.Debug(b.Logger).Log("msg", regErrF, "metric", metricName, "error", err) + conflictingEventStats.WithLabelValues("timer").Inc() + } + + default: + level.Error(b.Logger).Log("msg", "unknown timer type", "type", t) + os.Exit(1) + } + + default: + level.Debug(b.Logger).Log("msg", "Unsupported event type") + eventStats.WithLabelValues("illegal").Inc() + } +} + +func NewExporter(mapper *mapper.MetricMapper, logger log.Logger) *Exporter { + return &Exporter{ + Mapper: mapper, + Registry: registry.NewRegistry(mapper), + Logger: logger, + } +} diff --git a/pkg/line/line.go b/pkg/line/line.go new file mode 100644 index 0000000..5a0a6c3 --- /dev/null +++ b/pkg/line/line.go @@ -0,0 +1,241 @@ +package line + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (event.Event, error) { + switch statType { + case "c": + return &event.CounterEvent{ + CMetricName: metric, + CValue: float64(value), + CLabels: labels, + }, nil + case "g": + return &event.GaugeEvent{ + GMetricName: metric, + GValue: float64(value), + GRelative: relative, + GLabels: labels, + }, nil + case "ms", "h", "d": + return &event.TimerEvent{ + 
TMetricName: metric, + TValue: float64(value), + TLabels: labels, + }, nil + case "s": + return nil, fmt.Errorf("no support for StatsD sets") + default: + return nil, fmt.Errorf("bad stat type %s", statType) + } +} + +func parseTag(component, tag string, separator rune, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) { + // Entirely empty tag is an error + if len(tag) == 0 { + tagErrors.Inc() + level.Debug(logger).Log("msg", "Empty name tag", "component", component) + return + } + + for i, c := range tag { + if c == separator { + k := tag[:i] + v := tag[i+1:] + + if len(k) == 0 || len(v) == 0 { + // Empty key or value is an error + tagErrors.Inc() + level.Debug(logger).Log("msg", "Malformed name tag", "k", k, "v", v, "component", component) + } else { + labels[mapper.EscapeMetricName(k)] = v + } + return + } + } + + // Missing separator (no value) is an error + tagErrors.Inc() + level.Debug(logger).Log("msg", "Malformed name tag", "tag", tag, "component", component) +} + +func parseNameTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) { + lastTagEndIndex := 0 + for i, c := range component { + if c == ',' { + tag := component[lastTagEndIndex:i] + lastTagEndIndex = i + 1 + parseTag(component, tag, '=', labels, tagErrors, logger) + } + } + + // If we're not off the end of the string, add the last tag + if lastTagEndIndex < len(component) { + tag := component[lastTagEndIndex:] + parseTag(component, tag, '=', labels, tagErrors, logger) + } +} + +func trimLeftHash(s string) string { + if s != "" && s[0] == '#' { + return s[1:] + } + return s +} + +func ParseDogStatsDTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) { + lastTagEndIndex := 0 + for i, c := range component { + if c == ',' { + tag := component[lastTagEndIndex:i] + lastTagEndIndex = i + 1 + parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger) + } + } + + // If we're not 
off the end of the string, add the last tag + if lastTagEndIndex < len(component) { + tag := component[lastTagEndIndex:] + parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger) + } +} + +func parseNameAndTags(name string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) string { + for i, c := range name { + // `#` delimits start of tags by Librato + // https://www.librato.com/docs/kb/collect/collection_agents/stastd/#stat-level-tags + // `,` delimits start of tags by InfluxDB + // https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd + if c == '#' || c == ',' { + parseNameTags(name[i+1:], labels, tagErrors, logger) + return name[:i] + } + } + return name +} + +func LineToEvents(line string, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter, logger log.Logger) event.Events { + events := event.Events{} + if line == "" { + return events + } + + elements := strings.SplitN(line, ":", 2) + if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) { + sampleErrors.WithLabelValues("malformed_line").Inc() + level.Debug(logger).Log("msg", "Bad line from StatsD", "line", line) + return events + } + + labels := map[string]string{} + metric := parseNameAndTags(elements[0], labels, tagErrors, logger) + + var samples []string + if strings.Contains(elements[1], "|#") { + // using DogStatsD tags + + // don't allow mixed tagging styles + if len(labels) > 0 { + sampleErrors.WithLabelValues("mixed_tagging_styles").Inc() + level.Debug(logger).Log("msg", "Bad line (multiple tagging styles) from StatsD", "line", line) + return events + } + + // disable multi-metrics + samples = elements[1:] + } else { + samples = strings.Split(elements[1], ":") + } + +samples: + for _, sample := range samples { + samplesReceived.Inc() + components := strings.Split(sample, "|") + samplingFactor := 
1.0 + if len(components) < 2 || len(components) > 4 { + sampleErrors.WithLabelValues("malformed_component").Inc() + level.Debug(logger).Log("msg", "Bad component", "line", line) + continue + } + valueStr, statType := components[0], components[1] + + var relative = false + if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 { + relative = true + } + + value, err := strconv.ParseFloat(valueStr, 64) + if err != nil { + level.Debug(logger).Log("msg", "Bad value", "value", valueStr, "line", line) + sampleErrors.WithLabelValues("malformed_value").Inc() + continue + } + + multiplyEvents := 1 + if len(components) >= 3 { + for _, component := range components[2:] { + if len(component) == 0 { + level.Debug(logger).Log("msg", "Empty component", "line", line) + sampleErrors.WithLabelValues("malformed_component").Inc() + continue samples + } + } + + for _, component := range components[2:] { + switch component[0] { + case '@': + + samplingFactor, err = strconv.ParseFloat(component[1:], 64) + if err != nil { + level.Debug(logger).Log("msg", "Invalid sampling factor", "component", component[1:], "line", line) + sampleErrors.WithLabelValues("invalid_sample_factor").Inc() + } + if samplingFactor == 0 { + samplingFactor = 1 + } + + if statType == "g" { + continue + } else if statType == "c" { + value /= samplingFactor + } else if statType == "ms" || statType == "h" || statType == "d" { + multiplyEvents = int(1 / samplingFactor) + } + case '#': + ParseDogStatsDTags(component[1:], labels, tagErrors, logger) + default: + level.Debug(logger).Log("msg", "Invalid sampling factor or tag section", "component", components[2], "line", line) + sampleErrors.WithLabelValues("invalid_sample_factor").Inc() + continue + } + } + } + + if len(labels) > 0 { + tagsReceived.Inc() + } + + for i := 0; i < multiplyEvents; i++ { + event, err := buildEvent(statType, metric, value, relative, labels) + if err != nil { + level.Debug(logger).Log("msg", "Error building event", "line", line, 
"error", err) + sampleErrors.WithLabelValues("illegal_event").Inc() + continue + } + events = append(events, event) + } + } + return events +} diff --git a/pkg/line/line.go~ b/pkg/line/line.go~ new file mode 100644 index 0000000..af7a7bb --- /dev/null +++ b/pkg/line/line.go~ @@ -0,0 +1,241 @@ +package line + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/mapper" +) + +func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (event.Event, error) { + switch statType { + case "c": + return &event.CounterEvent{ + CMetricName: metric, + CValue: float64(value), + CLabels: labels, + }, nil + case "g": + return &event.GaugeEvent{ + GMetricName: metric, + GValue: float64(value), + GRelative: relative, + GLabels: labels, + }, nil + case "ms", "h", "d": + return &event.TimerEvent{ + TMetricName: metric, + TValue: float64(value), + TLabels: labels, + }, nil + case "s": + return nil, fmt.Errorf("no support for StatsD sets") + default: + return nil, fmt.Errorf("bad stat type %s", statType) + } +} + +func parseTag(component, tag string, separator rune, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) { + // Entirely empty tag is an error + if len(tag) == 0 { + tagErrors.Inc() + level.Debug(logger).Log("msg", "Empty name tag", "component", component) + return + } + + for i, c := range tag { + if c == separator { + k := tag[:i] + v := tag[i+1:] + + if len(k) == 0 || len(v) == 0 { + // Empty key or value is an error + tagErrors.Inc() + level.Debug(logger).Log("msg", "Malformed name tag", "k", k, "v", v, "component", component) + } else { + labels[mapper.EscapeMetricName(k)] = v + } + return + } + } + + // Missing separator (no value) is an error + tagErrors.Inc() + 
level.Debug(logger).Log("msg", "Malformed name tag", "tag", tag, "component", component) +} + +func parseNameTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) { + lastTagEndIndex := 0 + for i, c := range component { + if c == ',' { + tag := component[lastTagEndIndex:i] + lastTagEndIndex = i + 1 + parseTag(component, tag, '=', labels, tagErrors, logger) + } + } + + // If we're not off the end of the string, add the last tag + if lastTagEndIndex < len(component) { + tag := component[lastTagEndIndex:] + parseTag(component, tag, '=', labels, tagErrors, logger) + } +} + +func trimLeftHash(s string) string { + if s != "" && s[0] == '#' { + return s[1:] + } + return s +} + +func parseDogStatsDTags(component string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) { + lastTagEndIndex := 0 + for i, c := range component { + if c == ',' { + tag := component[lastTagEndIndex:i] + lastTagEndIndex = i + 1 + parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger) + } + } + + // If we're not off the end of the string, add the last tag + if lastTagEndIndex < len(component) { + tag := component[lastTagEndIndex:] + parseTag(component, trimLeftHash(tag), ':', labels, tagErrors, logger) + } +} + +func parseNameAndTags(name string, labels map[string]string, tagErrors prometheus.Counter, logger log.Logger) string { + for i, c := range name { + // `#` delimits start of tags by Librato + // https://www.librato.com/docs/kb/collect/collection_agents/stastd/#stat-level-tags + // `,` delimits start of tags by InfluxDB + // https://www.influxdata.com/blog/getting-started-with-sending-statsd-metrics-to-telegraf-influxdb/#introducing-influx-statsd + if c == '#' || c == ',' { + parseNameTags(name[i+1:], labels, tagErrors, logger) + return name[:i] + } + } + return name +} + +func LineToEvents(line string, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, 
tagsReceived prometheus.Counter, logger log.Logger) event.Events { + events := event.Events{} + if line == "" { + return events + } + + elements := strings.SplitN(line, ":", 2) + if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) { + sampleErrors.WithLabelValues("malformed_line").Inc() + level.Debug(logger).Log("msg", "Bad line from StatsD", "line", line) + return events + } + + labels := map[string]string{} + metric := parseNameAndTags(elements[0], labels, tagErrors, logger) + + var samples []string + if strings.Contains(elements[1], "|#") { + // using DogStatsD tags + + // don't allow mixed tagging styles + if len(labels) > 0 { + sampleErrors.WithLabelValues("mixed_tagging_styles").Inc() + level.Debug(logger).Log("msg", "Bad line (multiple tagging styles) from StatsD", "line", line) + return events + } + + // disable multi-metrics + samples = elements[1:] + } else { + samples = strings.Split(elements[1], ":") + } + +samples: + for _, sample := range samples { + samplesReceived.Inc() + components := strings.Split(sample, "|") + samplingFactor := 1.0 + if len(components) < 2 || len(components) > 4 { + sampleErrors.WithLabelValues("malformed_component").Inc() + level.Debug(logger).Log("msg", "Bad component", "line", line) + continue + } + valueStr, statType := components[0], components[1] + + var relative = false + if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 { + relative = true + } + + value, err := strconv.ParseFloat(valueStr, 64) + if err != nil { + level.Debug(logger).Log("msg", "Bad value", "value", valueStr, "line", line) + sampleErrors.WithLabelValues("malformed_value").Inc() + continue + } + + multiplyEvents := 1 + if len(components) >= 3 { + for _, component := range components[2:] { + if len(component) == 0 { + level.Debug(logger).Log("msg", "Empty component", "line", line) + sampleErrors.WithLabelValues("malformed_component").Inc() + continue samples + } + } + + for _, component := range components[2:] { 
+ switch component[0] { + case '@': + + samplingFactor, err = strconv.ParseFloat(component[1:], 64) + if err != nil { + level.Debug(logger).Log("msg", "Invalid sampling factor", "component", component[1:], "line", line) + sampleErrors.WithLabelValues("invalid_sample_factor").Inc() + } + if samplingFactor == 0 { + samplingFactor = 1 + } + + if statType == "g" { + continue + } else if statType == "c" { + value /= samplingFactor + } else if statType == "ms" || statType == "h" || statType == "d" { + multiplyEvents = int(1 / samplingFactor) + } + case '#': + parseDogStatsDTags(component[1:], labels, tagErrors, logger) + default: + level.Debug(logger).Log("msg", "Invalid sampling factor or tag section", "component", components[2], "line", line) + sampleErrors.WithLabelValues("invalid_sample_factor").Inc() + continue + } + } + } + + if len(labels) > 0 { + tagsReceived.Inc() + } + + for i := 0; i < multiplyEvents; i++ { + event, err := buildEvent(statType, metric, value, relative, labels) + if err != nil { + level.Debug(logger).Log("msg", "Error building event", "line", line, "error", err) + sampleErrors.WithLabelValues("illegal_event").Inc() + continue + } + events = append(events, event) + } + } + return events +} diff --git a/pkg/listener/listener.go b/pkg/listener/listener.go new file mode 100644 index 0000000..5261bd2 --- /dev/null +++ b/pkg/listener/listener.go @@ -0,0 +1,138 @@ +package listener + +import ( + "bufio" + "io" + "net" + "os" + "strings" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/event" + pkgLine "github.com/prometheus/statsd_exporter/pkg/line" +) + +type StatsDUDPListener struct { + Conn *net.UDPConn + EventHandler event.EventHandler + Logger log.Logger +} + +func (l *StatsDUDPListener) SetEventHandler(eh event.EventHandler) { + l.EventHandler = eh +} + +func (l *StatsDUDPListener) Listen(udpPackets prometheus.Counter, linesReceived 
prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + buf := make([]byte, 65535) + for { + n, _, err := l.Conn.ReadFromUDP(buf) + if err != nil { + // https://github.com/golang/go/issues/4373 + // ignore net: errClosing error as it will occur during shutdown + if strings.HasSuffix(err.Error(), "use of closed network connection") { + return + } + level.Error(l.Logger).Log("error", err) + return + } + l.HandlePacket(buf[0:n], udpPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived) + } +} + +func (l *StatsDUDPListener) HandlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + udpPackets.Inc() + lines := strings.Split(string(packet), "\n") + for _, line := range lines { + linesReceived.Inc() + l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed) + } +} + +type StatsDTCPListener struct { + Conn *net.TCPListener + EventHandler event.EventHandler + Logger log.Logger +} + +func (l *StatsDTCPListener) SetEventHandler(eh event.EventHandler) { + l.EventHandler = eh +} + +func (l *StatsDTCPListener) Listen(linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + for { + c, err := l.Conn.AcceptTCP() + if err != nil { + // https://github.com/golang/go/issues/4373 + // ignore net: errClosing error as it will occur during shutdown + if strings.HasSuffix(err.Error(), "use of 
closed network connection") { + return + } + level.Error(l.Logger).Log("msg", "AcceptTCP failed", "error", err) + os.Exit(1) + } + go l.HandleConn(c, linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, sampleErrors, samplesReceived, tagErrors, tagsReceived) + } +} + +func (l *StatsDTCPListener) HandleConn(c *net.TCPConn, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + defer c.Close() + + tcpConnections.Inc() + + r := bufio.NewReader(c) + for { + line, isPrefix, err := r.ReadLine() + if err != nil { + if err != io.EOF { + tcpErrors.Inc() + level.Debug(l.Logger).Log("msg", "Read failed", "addr", c.RemoteAddr(), "error", err) + } + break + } + if isPrefix { + tcpLineTooLong.Inc() + level.Debug(l.Logger).Log("msg", "Read failed: line too long", "addr", c.RemoteAddr()) + break + } + linesReceived.Inc() + l.EventHandler.Queue(pkgLine.LineToEvents(string(line), sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed) + } +} + +type StatsDUnixgramListener struct { + Conn *net.UnixConn + EventHandler event.EventHandler + Logger log.Logger +} + +func (l *StatsDUnixgramListener) SetEventHandler(eh event.EventHandler) { + l.EventHandler = eh +} + +func (l *StatsDUnixgramListener) Listen(unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + buf := make([]byte, 65535) + for { + n, _, err := l.Conn.ReadFromUnix(buf) + if err != nil { + // https://github.com/golang/go/issues/4373 + // ignore net: errClosing error as it will occur during shutdown + if strings.HasSuffix(err.Error(), "use of closed 
network connection") { + return + } + level.Error(l.Logger).Log(err) + os.Exit(1) + } + l.HandlePacket(buf[:n], unixgramPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived) + } +} + +func (l *StatsDUnixgramListener) HandlePacket(packet []byte, unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + unixgramPackets.Inc() + lines := strings.Split(string(packet), "\n") + for _, line := range lines { + linesReceived.Inc() + l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed) + } +} diff --git a/pkg/listener/listener.go~ b/pkg/listener/listener.go~ new file mode 100644 index 0000000..de8e968 --- /dev/null +++ b/pkg/listener/listener.go~ @@ -0,0 +1,138 @@ +package listener + +import ( + "bufio" + "io" + "net" + "os" + "strings" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/statsd_exporter/pkg/event" + pkgLine "github.com/prometheus/statsd_exporter/pkg/line" +) + +type StatsDUDPListener struct { + Conn *net.UDPConn + EventHandler event.EventHandler + Logger log.Logger +} + +func (l *StatsDUDPListener) SetEventHandler(eh event.EventHandler) { + l.EventHandler = eh +} + +func (l *StatsDUDPListener) Listen(udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + buf := make([]byte, 65535) + for { + n, _, err := l.Conn.ReadFromUDP(buf) + if err != nil { + // https://github.com/golang/go/issues/4373 + // ignore net: errClosing error as it will occur during shutdown + if strings.HasSuffix(err.Error(), 
"use of closed network connection") { + return + } + level.Error(l.Logger).Log("error", err) + return + } + l.handlePacket(buf[0:n], udpPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived) + } +} + +func (l *StatsDUDPListener) handlePacket(packet []byte, udpPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + udpPackets.Inc() + lines := strings.Split(string(packet), "\n") + for _, line := range lines { + linesReceived.Inc() + l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed) + } +} + +type StatsDTCPListener struct { + Conn *net.TCPListener + EventHandler event.EventHandler + Logger log.Logger +} + +func (l *StatsDTCPListener) SetEventHandler(eh event.EventHandler) { + l.EventHandler = eh +} + +func (l *StatsDTCPListener) Listen(linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + for { + c, err := l.Conn.AcceptTCP() + if err != nil { + // https://github.com/golang/go/issues/4373 + // ignore net: errClosing error as it will occur during shutdown + if strings.HasSuffix(err.Error(), "use of closed network connection") { + return + } + level.Error(l.Logger).Log("msg", "AcceptTCP failed", "error", err) + os.Exit(1) + } + go l.handleConn(c, linesReceived, eventsFlushed, tcpConnections, tcpErrors, tcpLineTooLong, sampleErrors, samplesReceived, tagErrors, tagsReceived) + } +} + +func (l *StatsDTCPListener) handleConn(c *net.TCPConn, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, tcpConnections 
prometheus.Counter, tcpErrors prometheus.Counter, tcpLineTooLong prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + defer c.Close() + + tcpConnections.Inc() + + r := bufio.NewReader(c) + for { + line, isPrefix, err := r.ReadLine() + if err != nil { + if err != io.EOF { + tcpErrors.Inc() + level.Debug(l.Logger).Log("msg", "Read failed", "addr", c.RemoteAddr(), "error", err) + } + break + } + if isPrefix { + tcpLineTooLong.Inc() + level.Debug(l.Logger).Log("msg", "Read failed: line too long", "addr", c.RemoteAddr()) + break + } + linesReceived.Inc() + l.EventHandler.Queue(pkgLine.LineToEvents(string(line), sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed) + } +} + +type StatsDUnixgramListener struct { + Conn *net.UnixConn + EventHandler event.EventHandler + Logger log.Logger +} + +func (l *StatsDUnixgramListener) SetEventHandler(eh event.EventHandler) { + l.EventHandler = eh +} + +func (l *StatsDUnixgramListener) Listen(unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + buf := make([]byte, 65535) + for { + n, _, err := l.Conn.ReadFromUnix(buf) + if err != nil { + // https://github.com/golang/go/issues/4373 + // ignore net: errClosing error as it will occur during shutdown + if strings.HasSuffix(err.Error(), "use of closed network connection") { + return + } + level.Error(l.Logger).Log(err) + os.Exit(1) + } + l.handlePacket(buf[:n], unixgramPackets, linesReceived, eventsFlushed, sampleErrors, samplesReceived, tagErrors, tagsReceived) + } +} + +func (l *StatsDUnixgramListener) handlePacket(packet []byte, unixgramPackets prometheus.Counter, linesReceived prometheus.Counter, eventsFlushed prometheus.Counter, sampleErrors prometheus.CounterVec, 
samplesReceived prometheus.Counter, tagErrors prometheus.Counter, tagsReceived prometheus.Counter) { + unixgramPackets.Inc() + lines := strings.Split(string(packet), "\n") + for _, line := range lines { + linesReceived.Inc() + l.EventHandler.Queue(pkgLine.LineToEvents(line, sampleErrors, samplesReceived, tagErrors, tagsReceived, l.Logger), &eventsFlushed) + } +} diff --git a/pkg/mapper/action.go b/pkg/mapper/action.go index b8c0977..e0773f0 100644 --- a/pkg/mapper/action.go +++ b/pkg/mapper/action.go @@ -1,42 +1,42 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapper - -import "fmt" - -type ActionType string - -const ( - ActionTypeMap ActionType = "map" - ActionTypeDrop ActionType = "drop" - ActionTypeDefault ActionType = "" -) - -func (t *ActionType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var v string - - if err := unmarshal(&v); err != nil { - return err - } - - switch ActionType(v) { - case ActionTypeDrop: - *t = ActionTypeDrop - case ActionTypeMap, ActionTypeDefault: - *t = ActionTypeMap - default: - return fmt.Errorf("invalid action type %q", v) - } - return nil -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import "fmt" + +type ActionType string + +const ( + ActionTypeMap ActionType = "map" + ActionTypeDrop ActionType = "drop" + ActionTypeDefault ActionType = "" +) + +func (t *ActionType) UnmarshalYAML(unmarshal func(interface{}) error) error { + var v string + + if err := unmarshal(&v); err != nil { + return err + } + + switch ActionType(v) { + case ActionTypeDrop: + *t = ActionTypeDrop + case ActionTypeMap, ActionTypeDefault: + *t = ActionTypeMap + default: + return fmt.Errorf("invalid action type %q", v) + } + return nil +} diff --git a/pkg/mapper/escape.go b/pkg/mapper/escape.go index fc8d194..4b27cdd 100644 --- a/pkg/mapper/escape.go +++ b/pkg/mapper/escape.go @@ -1,74 +1,74 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mapper - -import ( - "strings" - "unicode/utf8" -) - -// EscapeMetricName replaces invalid characters in the metric name with "_" -// Valid characters are a-z, A-Z, 0-9, and _ -func EscapeMetricName(metricName string) string { - metricLen := len(metricName) - if metricLen == 0 { - return "" - } - - escaped := false - var sb strings.Builder - // If a metric starts with a digit, allocate the memory and prepend an - // underscore. - if metricName[0] >= '0' && metricName[0] <= '9' { - escaped = true - sb.Grow(metricLen + 1) - sb.WriteByte('_') - } - - // This is an character replacement method optimized for this limited - // use case. It is much faster than using a regex. - offset := 0 - for i, c := range metricName { - // Seek forward, skipping valid characters until we find one that needs - // to be replaced, then add all the characters we've seen so far to the - // string.Builder. - if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') || (c == '_') { - // Character is valid, so skip over it without doing anything. - } else { - if !escaped { - // Up until now we've been lazy and avoided actually allocating - // memory. Unfortunately we've now determined this string needs - // escaping, so allocate the buffer for the whole string. - escaped = true - sb.Grow(metricLen) - } - sb.WriteString(metricName[offset:i]) - offset = i + utf8.RuneLen(c) - sb.WriteByte('_') - } - } - - if !escaped { - // This is the happy path where nothing had to be escaped, so we can - // avoid doing anything. - return metricName - } - - if offset < metricLen { - sb.WriteString(metricName[offset:]) - } - - return sb.String() -} +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import ( + "strings" + "unicode/utf8" +) + +// EscapeMetricName replaces invalid characters in the metric name with "_" +// Valid characters are a-z, A-Z, 0-9, and _ +func EscapeMetricName(metricName string) string { + metricLen := len(metricName) + if metricLen == 0 { + return "" + } + + escaped := false + var sb strings.Builder + // If a metric starts with a digit, allocate the memory and prepend an + // underscore. + if metricName[0] >= '0' && metricName[0] <= '9' { + escaped = true + sb.Grow(metricLen + 1) + sb.WriteByte('_') + } + + // This is an character replacement method optimized for this limited + // use case. It is much faster than using a regex. + offset := 0 + for i, c := range metricName { + // Seek forward, skipping valid characters until we find one that needs + // to be replaced, then add all the characters we've seen so far to the + // string.Builder. + if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || (c == '_') { + // Character is valid, so skip over it without doing anything. + } else { + if !escaped { + // Up until now we've been lazy and avoided actually allocating + // memory. Unfortunately we've now determined this string needs + // escaping, so allocate the buffer for the whole string. + escaped = true + sb.Grow(metricLen) + } + sb.WriteString(metricName[offset:i]) + offset = i + utf8.RuneLen(c) + sb.WriteByte('_') + } + } + + if !escaped { + // This is the happy path where nothing had to be escaped, so we can + // avoid doing anything. 
+ return metricName + } + + if offset < metricLen { + sb.WriteString(metricName[offset:]) + } + + return sb.String() +} diff --git a/pkg/mapper/escape_test.go b/pkg/mapper/escape_test.go index 336692d..d76cdc1 100644 --- a/pkg/mapper/escape_test.go +++ b/pkg/mapper/escape_test.go @@ -1,56 +1,56 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapper - -import "testing" - -func TestEscapeMetricName(t *testing.T) { - scenarios := map[string]string{ - "clean": "clean", - "0starts_with_digit": "_0starts_with_digit", - "with_underscore": "with_underscore", - "with.dot": "with_dot", - "with😱emoji": "with_emoji", - "with.*.multiple": "with___multiple", - "test.web-server.foo.bar": "test_web_server_foo_bar", - "": "", - } - - for in, want := range scenarios { - if got := EscapeMetricName(in); want != got { - t.Errorf("expected `%s` to be escaped to `%s`, got `%s`", in, want, got) - } - } -} - -func BenchmarkEscapeMetricName(b *testing.B) { - scenarios := []string{ - "clean", - "0starts_with_digit", - "with_underscore", - "with.dot", - "with😱emoji", - "with.*.multiple", - "test.web-server.foo.bar", - "", - } - - for _, s := range scenarios { - b.Run(s, func(b *testing.B) { - for n := 0; n < b.N; n++ { - EscapeMetricName(s) - } - }) - } -} +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import "testing" + +func TestEscapeMetricName(t *testing.T) { + scenarios := map[string]string{ + "clean": "clean", + "0starts_with_digit": "_0starts_with_digit", + "with_underscore": "with_underscore", + "with.dot": "with_dot", + "with😱emoji": "with_emoji", + "with.*.multiple": "with___multiple", + "test.web-server.foo.bar": "test_web_server_foo_bar", + "": "", + } + + for in, want := range scenarios { + if got := EscapeMetricName(in); want != got { + t.Errorf("expected `%s` to be escaped to `%s`, got `%s`", in, want, got) + } + } +} + +func BenchmarkEscapeMetricName(b *testing.B) { + scenarios := []string{ + "clean", + "0starts_with_digit", + "with_underscore", + "with.dot", + "with😱emoji", + "with.*.multiple", + "test.web-server.foo.bar", + "", + } + + for _, s := range scenarios { + b.Run(s, func(b *testing.B) { + for n := 0; n < b.N; n++ { + EscapeMetricName(s) + } + }) + } +} diff --git a/pkg/mapper/fsm/README.md b/pkg/mapper/fsm/README.md index 722ee21..c078240 100644 --- a/pkg/mapper/fsm/README.md +++ b/pkg/mapper/fsm/README.md @@ -1,132 +1,132 @@ -# FSM Mapping - -## Overview - -This package implements a fast and efficient algorithm for generic glob style -string matching using a finite state machine (FSM). 
- -### Source Hierachy - -``` - '-- fsm - '-- dump.go // functionality to dump the FSM to Dot file - '-- formatter.go // format glob templates using captured * groups - '-- fsm.go // manipulating and searching of FSM - '-- minmax.go // min() max() function for interger -``` - -## FSM Explained - -Per [Wikipedia](https://en.wikipedia.org/wiki/Finite-state_machine): - -> A finite-state machine (FSM) or finite-state automaton (FSA, plural: automata), -> finite automaton, or simply a state machine, is a mathematical model of -> computation. It is an abstract machine that can be in exactly one of a finite -> number of states at any given time. The FSM can change from one state to -> another in response to some external inputs; the change from one state to -> another is called a transition. An FSM is defined by a list of its states, its -> initial state, and the conditions for each transition. - -In our use case, each *state* is a substring after the input StatsD metric name is splitted by `.`. - -### Add state to FSM - -`func (f *FSM) AddState(match string, matchMetricType string, -maxPossibleTransitions int, result interface{}) int` - -At first, the FSM only contains three states, representing three possible metric types: - - ____ [gauge] - / - (start)---- [counter] - \ - '--- [ timer ] - - -Adding a rule `client.*.request.count` with type `counter` will make the FSM to be: - - - ____ [gauge] - / - (start)---- [counter] -- [client] -- [*] -- [request] -- [count] -- {R1} - \ - '--- [timer] - -`{R1}` is short for result 1, which is the match result for `client.*.request.count`. 
- -Adding a rule `client.*.*.size` with type `counter` will make the FSM to be: - - ____ [gauge] __ [request] -- [count] -- {R1} - / / - (start)---- [counter] -- [client] -- [*] - \ \__ [*] -- [size] -- {R2} - '--- [timer] - - -### Finding a result state in FSM - -`func (f *FSM) GetMapping(statsdMetric string, statsdMetricType string) -(*mappingState, []string)` - -For example, when mapping `client.aaa.request.count` with `counter` type in the -FSM, the `^1` to `^7` symbols indicate how FSM will traversal in its tree: - - - ____ [gauge] __ [request] -- [count] -- {R1} - / / ^5 ^6 ^7 - (start)---- [counter] -- [client] -- [*] - ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2} - '--- [timer] ^4 - - -To map `client.bbb.request.size`, FSM will do a backtracking: - - - ____ [gauge] __ [request] -- [count] -- {R1} - / / ^5 ^6 - (start)---- [counter] -- [client] -- [*] - ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2} - '--- [timer] ^4 - ^7 ^8 ^9 - - -## Debugging - -To see all the states of the current FSM, use `func (f *FSM) DumpFSM(w io.Writer)` -to dump into a Dot file. The Dot file can be further renderer into image using: - -```shell -$ dot -Tpng dump.dot > dump.png -``` - -In StatsD exporter, one could use the following: - -```shell -$ statsd_exporter --statsd.mapping-config=statsd.rules --debug.dump-fsm=dump.dot -$ dot -Tpng dump.dot > dump.png -``` - -For example, the following rules: - -```yaml -mappings: -- match: client.*.request.count - name: request_count - match_metric_type: counter - labels: - client: $1 - -- match: client.*.*.size - name: sizes - match_metric_type: counter - labels: - client: $1 - direction: $2 -``` - -will be rendered as: - -![FSM](fsm.png) - -The `dot` program is part of [Graphviz](https://www.graphviz.org/) and is -available in most of popular operating systems. +# FSM Mapping + +## Overview + +This package implements a fast and efficient algorithm for generic glob style +string matching using a finite state machine (FSM). 
+ +### Source Hierachy + +``` + '-- fsm + '-- dump.go // functionality to dump the FSM to Dot file + '-- formatter.go // format glob templates using captured * groups + '-- fsm.go // manipulating and searching of FSM + '-- minmax.go // min() max() function for interger +``` + +## FSM Explained + +Per [Wikipedia](https://en.wikipedia.org/wiki/Finite-state_machine): + +> A finite-state machine (FSM) or finite-state automaton (FSA, plural: automata), +> finite automaton, or simply a state machine, is a mathematical model of +> computation. It is an abstract machine that can be in exactly one of a finite +> number of states at any given time. The FSM can change from one state to +> another in response to some external inputs; the change from one state to +> another is called a transition. An FSM is defined by a list of its states, its +> initial state, and the conditions for each transition. + +In our use case, each *state* is a substring after the input StatsD metric name is splitted by `.`. + +### Add state to FSM + +`func (f *FSM) AddState(match string, matchMetricType string, +maxPossibleTransitions int, result interface{}) int` + +At first, the FSM only contains three states, representing three possible metric types: + + ____ [gauge] + / + (start)---- [counter] + \ + '--- [ timer ] + + +Adding a rule `client.*.request.count` with type `counter` will make the FSM to be: + + + ____ [gauge] + / + (start)---- [counter] -- [client] -- [*] -- [request] -- [count] -- {R1} + \ + '--- [timer] + +`{R1}` is short for result 1, which is the match result for `client.*.request.count`. 
+ +Adding a rule `client.*.*.size` with type `counter` will make the FSM to be: + + ____ [gauge] __ [request] -- [count] -- {R1} + / / + (start)---- [counter] -- [client] -- [*] + \ \__ [*] -- [size] -- {R2} + '--- [timer] + + +### Finding a result state in FSM + +`func (f *FSM) GetMapping(statsdMetric string, statsdMetricType string) +(*mappingState, []string)` + +For example, when mapping `client.aaa.request.count` with `counter` type in the +FSM, the `^1` to `^7` symbols indicate how FSM will traversal in its tree: + + + ____ [gauge] __ [request] -- [count] -- {R1} + / / ^5 ^6 ^7 + (start)---- [counter] -- [client] -- [*] + ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2} + '--- [timer] ^4 + + +To map `client.bbb.request.size`, FSM will do a backtracking: + + + ____ [gauge] __ [request] -- [count] -- {R1} + / / ^5 ^6 + (start)---- [counter] -- [client] -- [*] + ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2} + '--- [timer] ^4 + ^7 ^8 ^9 + + +## Debugging + +To see all the states of the current FSM, use `func (f *FSM) DumpFSM(w io.Writer)` +to dump into a Dot file. The Dot file can be further renderer into image using: + +```shell +$ dot -Tpng dump.dot > dump.png +``` + +In StatsD exporter, one could use the following: + +```shell +$ statsd_exporter --statsd.mapping-config=statsd.rules --debug.dump-fsm=dump.dot +$ dot -Tpng dump.dot > dump.png +``` + +For example, the following rules: + +```yaml +mappings: +- match: client.*.request.count + name: request_count + match_metric_type: counter + labels: + client: $1 + +- match: client.*.*.size + name: sizes + match_metric_type: counter + labels: + client: $1 + direction: $2 +``` + +will be rendered as: + +![FSM](fsm.png) + +The `dot` program is part of [Graphviz](https://www.graphviz.org/) and is +available in most of popular operating systems. 
diff --git a/pkg/mapper/fsm/dump.go b/pkg/mapper/fsm/dump.go index d91e2cf..0725d60 100644 --- a/pkg/mapper/fsm/dump.go +++ b/pkg/mapper/fsm/dump.go @@ -1,48 +1,48 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fsm - -import ( - "fmt" - "io" -) - -// DumpFSM accepts a io.writer and write the current FSM into dot file format. -func (f *FSM) DumpFSM(w io.Writer) { - idx := 0 - states := make(map[int]*mappingState) - states[idx] = f.root - - w.Write([]byte("digraph g {\n")) - w.Write([]byte("rankdir=LR\n")) // make it vertical - w.Write([]byte("node [ label=\"\",style=filled,fillcolor=white,shape=circle ]\n")) // remove label of node - - for idx < len(states) { - for field, transition := range states[idx].transitions { - states[len(states)] = transition - w.Write([]byte(fmt.Sprintf("%d -> %d [label = \"%s\"];\n", idx, len(states)-1, field))) - if idx == 0 { - // color for metric types - w.Write([]byte(fmt.Sprintf("%d [color=\"#D6B656\",fillcolor=\"#FFF2CC\"];\n", len(states)-1))) - } else if transition.transitions == nil || len(transition.transitions) == 0 { - // color for end state - w.Write([]byte(fmt.Sprintf("%d [color=\"#82B366\",fillcolor=\"#D5E8D4\"];\n", len(states)-1))) - } - } - idx++ - } - // color for start state - w.Write([]byte(fmt.Sprintf("0 [color=\"#a94442\",fillcolor=\"#f2dede\"];\n"))) - w.Write([]byte("}")) -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsm + +import ( + "fmt" + "io" +) + +// DumpFSM accepts a io.writer and write the current FSM into dot file format. +func (f *FSM) DumpFSM(w io.Writer) { + idx := 0 + states := make(map[int]*mappingState) + states[idx] = f.root + + w.Write([]byte("digraph g {\n")) + w.Write([]byte("rankdir=LR\n")) // make it vertical + w.Write([]byte("node [ label=\"\",style=filled,fillcolor=white,shape=circle ]\n")) // remove label of node + + for idx < len(states) { + for field, transition := range states[idx].transitions { + states[len(states)] = transition + w.Write([]byte(fmt.Sprintf("%d -> %d [label = \"%s\"];\n", idx, len(states)-1, field))) + if idx == 0 { + // color for metric types + w.Write([]byte(fmt.Sprintf("%d [color=\"#D6B656\",fillcolor=\"#FFF2CC\"];\n", len(states)-1))) + } else if transition.transitions == nil || len(transition.transitions) == 0 { + // color for end state + w.Write([]byte(fmt.Sprintf("%d [color=\"#82B366\",fillcolor=\"#D5E8D4\"];\n", len(states)-1))) + } + } + idx++ + } + // color for start state + w.Write([]byte(fmt.Sprintf("0 [color=\"#a94442\",fillcolor=\"#f2dede\"];\n"))) + w.Write([]byte("}")) +} diff --git a/pkg/mapper/fsm/formatter.go b/pkg/mapper/fsm/formatter.go index 567bbc2..3864443 100644 --- a/pkg/mapper/fsm/formatter.go +++ b/pkg/mapper/fsm/formatter.go @@ -1,76 +1,76 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fsm - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -var ( - templateReplaceCaptureRE = regexp.MustCompile(`\$\{?([a-zA-Z0-9_\$]+)\}?`) -) - -type TemplateFormatter struct { - captureIndexes []int - captureCount int - fmtString string -} - -// NewTemplateFormatter instantiates a TemplateFormatter -// from given template string and the maximum amount of captures. -func NewTemplateFormatter(template string, captureCount int) *TemplateFormatter { - matches := templateReplaceCaptureRE.FindAllStringSubmatch(template, -1) - if len(matches) == 0 { - // if no regex reference found, keep it as it is - return &TemplateFormatter{captureCount: 0, fmtString: template} - } - - var indexes []int - valueFormatter := template - for _, match := range matches { - idx, err := strconv.Atoi(match[len(match)-1]) - if err != nil || idx > captureCount || idx < 1 { - // if index larger than captured count or using unsupported named capture group, - // replace with empty string - valueFormatter = strings.Replace(valueFormatter, match[0], "", -1) - } else { - valueFormatter = strings.Replace(valueFormatter, match[0], "%s", -1) - // note: the regex reference variable $? starts from 1 - indexes = append(indexes, idx-1) - } - } - return &TemplateFormatter{ - captureIndexes: indexes, - captureCount: len(indexes), - fmtString: valueFormatter, - } -} - -// Format accepts a list containing captured strings and returns the formatted -// string using the template stored in current TemplateFormatter. 
-func (formatter *TemplateFormatter) Format(captures []string) string { - if formatter.captureCount == 0 { - // no label substitution, keep as it is - return formatter.fmtString - } - indexes := formatter.captureIndexes - vargs := make([]interface{}, formatter.captureCount) - for i, idx := range indexes { - vargs[i] = captures[idx] - } - return fmt.Sprintf(formatter.fmtString, vargs...) -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsm + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +var ( + templateReplaceCaptureRE = regexp.MustCompile(`\$\{?([a-zA-Z0-9_\$]+)\}?`) +) + +type TemplateFormatter struct { + captureIndexes []int + captureCount int + fmtString string +} + +// NewTemplateFormatter instantiates a TemplateFormatter +// from given template string and the maximum amount of captures. 
+func NewTemplateFormatter(template string, captureCount int) *TemplateFormatter { + matches := templateReplaceCaptureRE.FindAllStringSubmatch(template, -1) + if len(matches) == 0 { + // if no regex reference found, keep it as it is + return &TemplateFormatter{captureCount: 0, fmtString: template} + } + + var indexes []int + valueFormatter := template + for _, match := range matches { + idx, err := strconv.Atoi(match[len(match)-1]) + if err != nil || idx > captureCount || idx < 1 { + // if index larger than captured count or using unsupported named capture group, + // replace with empty string + valueFormatter = strings.Replace(valueFormatter, match[0], "", -1) + } else { + valueFormatter = strings.Replace(valueFormatter, match[0], "%s", -1) + // note: the regex reference variable $? starts from 1 + indexes = append(indexes, idx-1) + } + } + return &TemplateFormatter{ + captureIndexes: indexes, + captureCount: len(indexes), + fmtString: valueFormatter, + } +} + +// Format accepts a list containing captured strings and returns the formatted +// string using the template stored in current TemplateFormatter. +func (formatter *TemplateFormatter) Format(captures []string) string { + if formatter.captureCount == 0 { + // no label substitution, keep as it is + return formatter.fmtString + } + indexes := formatter.captureIndexes + vargs := make([]interface{}, formatter.captureCount) + for i, idx := range indexes { + vargs[i] = captures[idx] + } + return fmt.Sprintf(formatter.fmtString, vargs...) +} diff --git a/pkg/mapper/fsm/fsm.go b/pkg/mapper/fsm/fsm.go index cf8c00e..a57f9a2 100644 --- a/pkg/mapper/fsm/fsm.go +++ b/pkg/mapper/fsm/fsm.go @@ -1,326 +1,326 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fsm - -import ( - "regexp" - "strings" - - "github.com/prometheus/common/log" -) - -type mappingState struct { - transitions map[string]*mappingState - minRemainingLength int - maxRemainingLength int - // result* members are nil unless there's a metric ends with this state - Result interface{} - ResultPriority int -} - -type fsmBacktrackStackCursor struct { - fieldIndex int - captureIndex int - currentCapture string - state *mappingState - prev *fsmBacktrackStackCursor - next *fsmBacktrackStackCursor -} - -type FSM struct { - root *mappingState - metricTypes []string - statesCount int - BacktrackingNeeded bool - OrderingDisabled bool -} - -// NewFSM creates a new FSM instance -func NewFSM(metricTypes []string, maxPossibleTransitions int, orderingDisabled bool) *FSM { - fsm := FSM{} - root := &mappingState{} - root.transitions = make(map[string]*mappingState, len(metricTypes)) - - for _, field := range metricTypes { - state := &mappingState{} - (*state).transitions = make(map[string]*mappingState, maxPossibleTransitions) - root.transitions[string(field)] = state - } - fsm.OrderingDisabled = orderingDisabled - fsm.metricTypes = metricTypes - fsm.statesCount = 0 - fsm.root = root - return &fsm -} - -// AddState adds a mapping rule into the existing FSM. -// The maxPossibleTransitions parameter sets the expected count of transitions left. -// The result parameter sets the generic type to be returned when fsm found a match in GetMapping. 
-func (f *FSM) AddState(match string, matchMetricType string, maxPossibleTransitions int, result interface{}) int { - // first split by "." - matchFields := strings.Split(match, ".") - // fill into our FSM - roots := []*mappingState{} - // first state is the metric type - if matchMetricType == "" { - // if metricType not specified, connect the start state from all three types - for _, metricType := range f.metricTypes { - roots = append(roots, f.root.transitions[string(metricType)]) - } - } else { - roots = append(roots, f.root.transitions[matchMetricType]) - } - var captureCount int - var finalStates []*mappingState - // iterating over different start state (different metric types) - for _, root := range roots { - captureCount = 0 - // for each start state, connect from start state to end state - for i, field := range matchFields { - state, prs := root.transitions[field] - if !prs { - // create a state if it's not exist in the fsm - state = &mappingState{} - (*state).transitions = make(map[string]*mappingState, maxPossibleTransitions) - (*state).maxRemainingLength = len(matchFields) - i - 1 - (*state).minRemainingLength = len(matchFields) - i - 1 - root.transitions[field] = state - // if this is last field, set result to currentMapping instance - if i == len(matchFields)-1 { - root.transitions[field].Result = result - } - } else { - (*state).maxRemainingLength = max(len(matchFields)-i-1, (*state).maxRemainingLength) - (*state).minRemainingLength = min(len(matchFields)-i-1, (*state).minRemainingLength) - } - if field == "*" { - captureCount++ - } - - // goto next state - root = state - } - finalStates = append(finalStates, root) - } - - for _, state := range finalStates { - state.ResultPriority = f.statesCount - } - - f.statesCount++ - - return captureCount -} - -// GetMapping using the fsm to find matching rules according to given statsdMetric and statsdMetricType. 
-// If it finds a match, the final state and the captured strings are returned; -// if there's no match found, nil and a empty list will be returned. -func (f *FSM) GetMapping(statsdMetric string, statsdMetricType string) (*mappingState, []string) { - matchFields := strings.Split(statsdMetric, ".") - currentState := f.root.transitions[statsdMetricType] - - // the cursor/pointer in the backtrack stack implemented as a double-linked list - var backtrackCursor *fsmBacktrackStackCursor - resumeFromBacktrack := false - - // the return variable - var finalState *mappingState - - captures := make([]string, len(matchFields)) - finalCaptures := make([]string, len(matchFields)) - // keep track of captured group so we don't need to do append() on captures - captureIdx := 0 - filedsCount := len(matchFields) - i := 0 - var state *mappingState - for { // the loop for backtracking - for { // the loop for a single "depth only" search - var present bool - // if we resume from backtrack, we should skip this branch in this case - // since the state that were saved at the end of this branch - if !resumeFromBacktrack { - if len(currentState.transitions) > 0 { - field := matchFields[i] - state, present = currentState.transitions[field] - fieldsLeft := filedsCount - i - 1 - // also compare length upfront to avoid unnecessary loop or backtrack - if !present || fieldsLeft > state.maxRemainingLength || fieldsLeft < state.minRemainingLength { - state, present = currentState.transitions["*"] - if !present || fieldsLeft > state.maxRemainingLength || fieldsLeft < state.minRemainingLength { - break - } else { - captures[captureIdx] = field - captureIdx++ - } - } else if f.BacktrackingNeeded { - // if backtracking is needed, also check for alternative transition, i.e. 
* - altState, present := currentState.transitions["*"] - if !present || fieldsLeft > altState.maxRemainingLength || fieldsLeft < altState.minRemainingLength { - } else { - // push to backtracking stack - newCursor := fsmBacktrackStackCursor{prev: backtrackCursor, state: altState, - fieldIndex: i, - captureIndex: captureIdx, currentCapture: field, - } - // if this is not the first time, connect to the previous cursor - if backtrackCursor != nil { - backtrackCursor.next = &newCursor - } - backtrackCursor = &newCursor - } - } - } else { - // no more transitions for this state - break - } - } // backtrack will resume from here - - // do we reach a final state? - if state.Result != nil && i == filedsCount-1 { - if f.OrderingDisabled { - finalState = state - return finalState, captures - } else if finalState == nil || finalState.ResultPriority > state.ResultPriority { - // if we care about ordering, try to find a result with highest prioity - finalState = state - // do a deep copy to preserve current captures - copy(finalCaptures, captures) - } - break - } - - i++ - if i >= filedsCount { - break - } - - resumeFromBacktrack = false - currentState = state - } - if backtrackCursor == nil { - // if we are not doing backtracking or all path has been travesaled - break - } else { - // pop one from stack - state = backtrackCursor.state - currentState = state - i = backtrackCursor.fieldIndex - captureIdx = backtrackCursor.captureIndex + 1 - // put the * capture back - captures[captureIdx-1] = backtrackCursor.currentCapture - backtrackCursor = backtrackCursor.prev - if backtrackCursor != nil { - // deref for GC - backtrackCursor.next = nil - } - resumeFromBacktrack = true - } - } - return finalState, finalCaptures -} - -// TestIfNeedBacktracking tests if backtrack is needed for given list of mappings -// and whether ordering is disabled. 
-func TestIfNeedBacktracking(mappings []string, orderingDisabled bool) bool { - backtrackingNeeded := false - // A has * in rules, but there's other transisitions at the same state, - // this makes A the cause of backtracking - ruleByLength := make(map[int][]string) - ruleREByLength := make(map[int][]*regexp.Regexp) - - // first sort rules by length - for _, mapping := range mappings { - l := len(strings.Split(mapping, ".")) - ruleByLength[l] = append(ruleByLength[l], mapping) - - metricRe := strings.Replace(mapping, ".", "\\.", -1) - metricRe = strings.Replace(metricRe, "*", "([^.]*)", -1) - regex, err := regexp.Compile("^" + metricRe + "$") - if err != nil { - log.Warnf("invalid match %s. cannot compile regex in mapping: %v", mapping, err) - } - // put into array no matter there's error or not, we will skip later if regex is nil - ruleREByLength[l] = append(ruleREByLength[l], regex) - } - - for l, rules := range ruleByLength { - if len(rules) == 1 { - continue - } - rulesRE := ruleREByLength[l] - for i1, r1 := range rules { - currentRuleNeedBacktrack := false - re1 := rulesRE[i1] - if re1 == nil || !strings.Contains(r1, "*") { - continue - } - // if rule r1 is A.B.C.*.E.*, is there a rule r2 is A.B.C.D.x.x or A.B.C.*.E.F ? (x is any string or *) - // if such r2 exists, then to match r1 we will need backtracking - for index := 0; index < len(r1); index++ { - if r1[index] != '*' { - continue - } - // translate the substring of r1 from 0 to the index of current * into regex - // A.B.C.*.E.* will becomes ^A\.B\.C\. and ^A\.B\.C\.\*\.E\. 
- reStr := strings.Replace(r1[:index], ".", "\\.", -1) - reStr = strings.Replace(reStr, "*", "\\*", -1) - re := regexp.MustCompile("^" + reStr) - for i2, r2 := range rules { - if i2 == i1 { - continue - } - if len(re.FindStringSubmatchIndex(r2)) > 0 { - currentRuleNeedBacktrack = true - break - } - } - } - - for i2, r2 := range rules { - if i2 != i1 && len(re1.FindStringSubmatchIndex(r2)) > 0 { - // log if we care about ordering and the superset occurs before - if !orderingDisabled && i1 < i2 { - log.Warnf("match \"%s\" is a super set of match \"%s\" but in a lower order, "+ - "the first will never be matched", r1, r2) - } - currentRuleNeedBacktrack = false - } - } - for i2, re2 := range rulesRE { - if i2 == i1 || re2 == nil { - continue - } - // if r1 is a subset of other rule, we don't need backtrack - // because either we turned on ordering - // or we disabled ordering and can't match it even with backtrack - if len(re2.FindStringSubmatchIndex(r1)) > 0 { - currentRuleNeedBacktrack = false - } - } - - if currentRuleNeedBacktrack { - log.Warnf("backtracking required because of match \"%s\", "+ - "matching performance may be degraded", r1) - backtrackingNeeded = true - } - } - } - - // backtracking will always be needed if ordering of rules is not disabled - // since transistions are stored in (unordered) map - // note: don't move this branch to the beginning of this function - // since we need logs for superset rules - - return !orderingDisabled || backtrackingNeeded -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fsm + +import ( + "regexp" + "strings" + + "github.com/prometheus/common/log" +) + +type mappingState struct { + transitions map[string]*mappingState + minRemainingLength int + maxRemainingLength int + // result* members are nil unless there's a metric ends with this state + Result interface{} + ResultPriority int +} + +type fsmBacktrackStackCursor struct { + fieldIndex int + captureIndex int + currentCapture string + state *mappingState + prev *fsmBacktrackStackCursor + next *fsmBacktrackStackCursor +} + +type FSM struct { + root *mappingState + metricTypes []string + statesCount int + BacktrackingNeeded bool + OrderingDisabled bool +} + +// NewFSM creates a new FSM instance +func NewFSM(metricTypes []string, maxPossibleTransitions int, orderingDisabled bool) *FSM { + fsm := FSM{} + root := &mappingState{} + root.transitions = make(map[string]*mappingState, len(metricTypes)) + + for _, field := range metricTypes { + state := &mappingState{} + (*state).transitions = make(map[string]*mappingState, maxPossibleTransitions) + root.transitions[string(field)] = state + } + fsm.OrderingDisabled = orderingDisabled + fsm.metricTypes = metricTypes + fsm.statesCount = 0 + fsm.root = root + return &fsm +} + +// AddState adds a mapping rule into the existing FSM. +// The maxPossibleTransitions parameter sets the expected count of transitions left. +// The result parameter sets the generic type to be returned when fsm found a match in GetMapping. +func (f *FSM) AddState(match string, matchMetricType string, maxPossibleTransitions int, result interface{}) int { + // first split by "." 
+ matchFields := strings.Split(match, ".") + // fill into our FSM + roots := []*mappingState{} + // first state is the metric type + if matchMetricType == "" { + // if metricType not specified, connect the start state from all three types + for _, metricType := range f.metricTypes { + roots = append(roots, f.root.transitions[string(metricType)]) + } + } else { + roots = append(roots, f.root.transitions[matchMetricType]) + } + var captureCount int + var finalStates []*mappingState + // iterating over different start state (different metric types) + for _, root := range roots { + captureCount = 0 + // for each start state, connect from start state to end state + for i, field := range matchFields { + state, prs := root.transitions[field] + if !prs { + // create a state if it's not exist in the fsm + state = &mappingState{} + (*state).transitions = make(map[string]*mappingState, maxPossibleTransitions) + (*state).maxRemainingLength = len(matchFields) - i - 1 + (*state).minRemainingLength = len(matchFields) - i - 1 + root.transitions[field] = state + // if this is last field, set result to currentMapping instance + if i == len(matchFields)-1 { + root.transitions[field].Result = result + } + } else { + (*state).maxRemainingLength = max(len(matchFields)-i-1, (*state).maxRemainingLength) + (*state).minRemainingLength = min(len(matchFields)-i-1, (*state).minRemainingLength) + } + if field == "*" { + captureCount++ + } + + // goto next state + root = state + } + finalStates = append(finalStates, root) + } + + for _, state := range finalStates { + state.ResultPriority = f.statesCount + } + + f.statesCount++ + + return captureCount +} + +// GetMapping using the fsm to find matching rules according to given statsdMetric and statsdMetricType. +// If it finds a match, the final state and the captured strings are returned; +// if there's no match found, nil and a empty list will be returned. 
+func (f *FSM) GetMapping(statsdMetric string, statsdMetricType string) (*mappingState, []string) { + matchFields := strings.Split(statsdMetric, ".") + currentState := f.root.transitions[statsdMetricType] + + // the cursor/pointer in the backtrack stack implemented as a double-linked list + var backtrackCursor *fsmBacktrackStackCursor + resumeFromBacktrack := false + + // the return variable + var finalState *mappingState + + captures := make([]string, len(matchFields)) + finalCaptures := make([]string, len(matchFields)) + // keep track of captured group so we don't need to do append() on captures + captureIdx := 0 + filedsCount := len(matchFields) + i := 0 + var state *mappingState + for { // the loop for backtracking + for { // the loop for a single "depth only" search + var present bool + // if we resume from backtrack, we should skip this branch in this case + // since the state that were saved at the end of this branch + if !resumeFromBacktrack { + if len(currentState.transitions) > 0 { + field := matchFields[i] + state, present = currentState.transitions[field] + fieldsLeft := filedsCount - i - 1 + // also compare length upfront to avoid unnecessary loop or backtrack + if !present || fieldsLeft > state.maxRemainingLength || fieldsLeft < state.minRemainingLength { + state, present = currentState.transitions["*"] + if !present || fieldsLeft > state.maxRemainingLength || fieldsLeft < state.minRemainingLength { + break + } else { + captures[captureIdx] = field + captureIdx++ + } + } else if f.BacktrackingNeeded { + // if backtracking is needed, also check for alternative transition, i.e. 
* + altState, present := currentState.transitions["*"] + if !present || fieldsLeft > altState.maxRemainingLength || fieldsLeft < altState.minRemainingLength { + } else { + // push to backtracking stack + newCursor := fsmBacktrackStackCursor{prev: backtrackCursor, state: altState, + fieldIndex: i, + captureIndex: captureIdx, currentCapture: field, + } + // if this is not the first time, connect to the previous cursor + if backtrackCursor != nil { + backtrackCursor.next = &newCursor + } + backtrackCursor = &newCursor + } + } + } else { + // no more transitions for this state + break + } + } // backtrack will resume from here + + // do we reach a final state? + if state.Result != nil && i == filedsCount-1 { + if f.OrderingDisabled { + finalState = state + return finalState, captures + } else if finalState == nil || finalState.ResultPriority > state.ResultPriority { + // if we care about ordering, try to find a result with highest prioity + finalState = state + // do a deep copy to preserve current captures + copy(finalCaptures, captures) + } + break + } + + i++ + if i >= filedsCount { + break + } + + resumeFromBacktrack = false + currentState = state + } + if backtrackCursor == nil { + // if we are not doing backtracking or all path has been travesaled + break + } else { + // pop one from stack + state = backtrackCursor.state + currentState = state + i = backtrackCursor.fieldIndex + captureIdx = backtrackCursor.captureIndex + 1 + // put the * capture back + captures[captureIdx-1] = backtrackCursor.currentCapture + backtrackCursor = backtrackCursor.prev + if backtrackCursor != nil { + // deref for GC + backtrackCursor.next = nil + } + resumeFromBacktrack = true + } + } + return finalState, finalCaptures +} + +// TestIfNeedBacktracking tests if backtrack is needed for given list of mappings +// and whether ordering is disabled. 
+func TestIfNeedBacktracking(mappings []string, orderingDisabled bool) bool { + backtrackingNeeded := false + // A has * in rules, but there's other transisitions at the same state, + // this makes A the cause of backtracking + ruleByLength := make(map[int][]string) + ruleREByLength := make(map[int][]*regexp.Regexp) + + // first sort rules by length + for _, mapping := range mappings { + l := len(strings.Split(mapping, ".")) + ruleByLength[l] = append(ruleByLength[l], mapping) + + metricRe := strings.Replace(mapping, ".", "\\.", -1) + metricRe = strings.Replace(metricRe, "*", "([^.]*)", -1) + regex, err := regexp.Compile("^" + metricRe + "$") + if err != nil { + log.Warnf("invalid match %s. cannot compile regex in mapping: %v", mapping, err) + } + // put into array no matter there's error or not, we will skip later if regex is nil + ruleREByLength[l] = append(ruleREByLength[l], regex) + } + + for l, rules := range ruleByLength { + if len(rules) == 1 { + continue + } + rulesRE := ruleREByLength[l] + for i1, r1 := range rules { + currentRuleNeedBacktrack := false + re1 := rulesRE[i1] + if re1 == nil || !strings.Contains(r1, "*") { + continue + } + // if rule r1 is A.B.C.*.E.*, is there a rule r2 is A.B.C.D.x.x or A.B.C.*.E.F ? (x is any string or *) + // if such r2 exists, then to match r1 we will need backtracking + for index := 0; index < len(r1); index++ { + if r1[index] != '*' { + continue + } + // translate the substring of r1 from 0 to the index of current * into regex + // A.B.C.*.E.* will becomes ^A\.B\.C\. and ^A\.B\.C\.\*\.E\. 
+ reStr := strings.Replace(r1[:index], ".", "\\.", -1) + reStr = strings.Replace(reStr, "*", "\\*", -1) + re := regexp.MustCompile("^" + reStr) + for i2, r2 := range rules { + if i2 == i1 { + continue + } + if len(re.FindStringSubmatchIndex(r2)) > 0 { + currentRuleNeedBacktrack = true + break + } + } + } + + for i2, r2 := range rules { + if i2 != i1 && len(re1.FindStringSubmatchIndex(r2)) > 0 { + // log if we care about ordering and the superset occurs before + if !orderingDisabled && i1 < i2 { + log.Warnf("match \"%s\" is a super set of match \"%s\" but in a lower order, "+ + "the first will never be matched", r1, r2) + } + currentRuleNeedBacktrack = false + } + } + for i2, re2 := range rulesRE { + if i2 == i1 || re2 == nil { + continue + } + // if r1 is a subset of other rule, we don't need backtrack + // because either we turned on ordering + // or we disabled ordering and can't match it even with backtrack + if len(re2.FindStringSubmatchIndex(r1)) > 0 { + currentRuleNeedBacktrack = false + } + } + + if currentRuleNeedBacktrack { + log.Warnf("backtracking required because of match \"%s\", "+ + "matching performance may be degraded", r1) + backtrackingNeeded = true + } + } + } + + // backtracking will always be needed if ordering of rules is not disabled + // since transistions are stored in (unordered) map + // note: don't move this branch to the beginning of this function + // since we need logs for superset rules + + return !orderingDisabled || backtrackingNeeded +} diff --git a/pkg/mapper/fsm/minmax.go b/pkg/mapper/fsm/minmax.go index 95bd9c5..c60695f 100644 --- a/pkg/mapper/fsm/minmax.go +++ b/pkg/mapper/fsm/minmax.go @@ -1,30 +1,30 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fsm - -// min and max implementation for integer - -func min(x, y int) int { - if x < y { - return x - } - return y -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsm + +// min and max implementation for integer + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} diff --git a/pkg/mapper/mapper.go b/pkg/mapper/mapper.go index 8aac9d1..4f14573 100644 --- a/pkg/mapper/mapper.go +++ b/pkg/mapper/mapper.go @@ -1,386 +1,386 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapper - -import ( - "fmt" - "io/ioutil" - "regexp" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" - "github.com/prometheus/statsd_exporter/pkg/mapper/fsm" - yaml "gopkg.in/yaml.v2" -) - -var ( - statsdMetricRE = `[a-zA-Z_](-?[a-zA-Z0-9_])+` - templateReplaceRE = `(\$\{?\d+\}?)` - - metricLineRE = regexp.MustCompile(`^(\*\.|` + statsdMetricRE + `\.)+(\*|` + statsdMetricRE + `)$`) - metricNameRE = regexp.MustCompile(`^([a-zA-Z_]|` + templateReplaceRE + `)([a-zA-Z0-9_]|` + templateReplaceRE + `)*$`) - labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`) -) - -type mapperConfigDefaults struct { - TimerType TimerType `yaml:"timer_type"` - Buckets []float64 `yaml:"buckets"` - Quantiles []metricObjective `yaml:"quantiles"` - MatchType MatchType `yaml:"match_type"` - GlobDisableOrdering bool `yaml:"glob_disable_ordering"` - Ttl time.Duration `yaml:"ttl"` -} - -type MetricMapper struct { - Defaults mapperConfigDefaults `yaml:"defaults"` - Mappings []MetricMapping `yaml:"mappings"` - FSM *fsm.FSM - doFSM bool - doRegex bool - cache MetricMapperCache - mutex sync.RWMutex - - MappingsCount prometheus.Gauge -} - -type MetricMapping struct { - Match string `yaml:"match"` - Name string `yaml:"name"` - nameFormatter *fsm.TemplateFormatter - regex *regexp.Regexp - Labels prometheus.Labels `yaml:"labels"` - labelKeys []string - labelFormatters []*fsm.TemplateFormatter - TimerType TimerType `yaml:"timer_type"` - LegacyBuckets []float64 `yaml:"buckets"` - LegacyQuantiles []metricObjective 
`yaml:"quantiles"` - MatchType MatchType `yaml:"match_type"` - HelpText string `yaml:"help"` - Action ActionType `yaml:"action"` - MatchMetricType MetricType `yaml:"match_metric_type"` - Ttl time.Duration `yaml:"ttl"` - SummaryOptions *SummaryOptions `yaml:"summary_options"` - HistogramOptions *HistogramOptions `yaml:"histogram_options"` -} - -type SummaryOptions struct { - Quantiles []metricObjective `yaml:"quantiles"` - MaxAge time.Duration `yaml:"max_age"` - AgeBuckets uint32 `yaml:"age_buckets"` - BufCap uint32 `yaml:"buf_cap"` -} - -type HistogramOptions struct { - Buckets []float64 `yaml:"buckets"` -} - -type metricObjective struct { - Quantile float64 `yaml:"quantile"` - Error float64 `yaml:"error"` -} - -var defaultQuantiles = []metricObjective{ - {Quantile: 0.5, Error: 0.05}, - {Quantile: 0.9, Error: 0.01}, - {Quantile: 0.99, Error: 0.001}, -} - -func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, options ...CacheOption) error { - var n MetricMapper - - if err := yaml.Unmarshal([]byte(fileContents), &n); err != nil { - return err - } - - if n.Defaults.Buckets == nil || len(n.Defaults.Buckets) == 0 { - n.Defaults.Buckets = prometheus.DefBuckets - } - - if n.Defaults.Quantiles == nil || len(n.Defaults.Quantiles) == 0 { - n.Defaults.Quantiles = defaultQuantiles - } - - if n.Defaults.MatchType == MatchTypeDefault { - n.Defaults.MatchType = MatchTypeGlob - } - - remainingMappingsCount := len(n.Mappings) - - n.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeTimer)}, - remainingMappingsCount, n.Defaults.GlobDisableOrdering) - - for i := range n.Mappings { - remainingMappingsCount-- - - currentMapping := &n.Mappings[i] - - // check that label is correct - for k := range currentMapping.Labels { - if !labelNameRE.MatchString(k) { - return fmt.Errorf("invalid label key: %s", k) - } - } - - if currentMapping.Name == "" { - return fmt.Errorf("line %d: metric mapping didn't set a metric name", i) - 
} - - if !metricNameRE.MatchString(currentMapping.Name) { - return fmt.Errorf("metric name '%s' doesn't match regex '%s'", currentMapping.Name, metricNameRE) - } - - if currentMapping.MatchType == "" { - currentMapping.MatchType = n.Defaults.MatchType - } - - if currentMapping.Action == "" { - currentMapping.Action = ActionTypeMap - } - - if currentMapping.MatchType == MatchTypeGlob { - n.doFSM = true - if !metricLineRE.MatchString(currentMapping.Match) { - return fmt.Errorf("invalid match: %s", currentMapping.Match) - } - - captureCount := n.FSM.AddState(currentMapping.Match, string(currentMapping.MatchMetricType), - remainingMappingsCount, currentMapping) - - currentMapping.nameFormatter = fsm.NewTemplateFormatter(currentMapping.Name, captureCount) - - labelKeys := make([]string, len(currentMapping.Labels)) - labelFormatters := make([]*fsm.TemplateFormatter, len(currentMapping.Labels)) - labelIndex := 0 - for label, valueExpr := range currentMapping.Labels { - labelKeys[labelIndex] = label - labelFormatters[labelIndex] = fsm.NewTemplateFormatter(valueExpr, captureCount) - labelIndex++ - } - currentMapping.labelFormatters = labelFormatters - currentMapping.labelKeys = labelKeys - - } else { - if regex, err := regexp.Compile(currentMapping.Match); err != nil { - return fmt.Errorf("invalid regex %s in mapping: %v", currentMapping.Match, err) - } else { - currentMapping.regex = regex - } - n.doRegex = true - } - - if currentMapping.TimerType == "" { - currentMapping.TimerType = n.Defaults.TimerType - } - - if currentMapping.LegacyQuantiles != nil && - (currentMapping.SummaryOptions == nil || currentMapping.SummaryOptions.Quantiles != nil) { - log.Warn("using the top level quantiles is deprecated. Please use quantiles in the summary_options hierarchy") - } - - if currentMapping.LegacyBuckets != nil && - (currentMapping.HistogramOptions == nil || currentMapping.HistogramOptions.Buckets != nil) { - log.Warn("using the top level buckets is deprecated. 
Please use buckets in the histogram_options hierarchy") - } - - if currentMapping.SummaryOptions != nil && - currentMapping.LegacyQuantiles != nil && - currentMapping.SummaryOptions.Quantiles != nil { - return fmt.Errorf("cannot use quantiles in both the top level and summary options at the same time in %s", currentMapping.Match) - } - - if currentMapping.HistogramOptions != nil && - currentMapping.LegacyBuckets != nil && - currentMapping.HistogramOptions.Buckets != nil { - return fmt.Errorf("cannot use buckets in both the top level and histogram options at the same time in %s", currentMapping.Match) - } - - if currentMapping.TimerType == TimerTypeHistogram { - if currentMapping.SummaryOptions != nil { - return fmt.Errorf("cannot use histogram timer and summary options at the same time") - } - if currentMapping.HistogramOptions == nil { - currentMapping.HistogramOptions = &HistogramOptions{} - } - if currentMapping.LegacyBuckets != nil && len(currentMapping.LegacyBuckets) != 0 { - currentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets - } - if currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 { - currentMapping.HistogramOptions.Buckets = n.Defaults.Buckets - } - } - - if currentMapping.TimerType == TimerTypeSummary { - if currentMapping.HistogramOptions != nil { - return fmt.Errorf("cannot use summary timer and histogram options at the same time") - } - if currentMapping.SummaryOptions == nil { - currentMapping.SummaryOptions = &SummaryOptions{} - } - if currentMapping.LegacyQuantiles != nil && len(currentMapping.LegacyQuantiles) != 0 { - currentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles - } - if currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 { - currentMapping.SummaryOptions.Quantiles = n.Defaults.Quantiles - } - } - - if currentMapping.Ttl == 0 && n.Defaults.Ttl > 0 { - currentMapping.Ttl = n.Defaults.Ttl - } - - } - - 
m.mutex.Lock() - defer m.mutex.Unlock() - - m.Defaults = n.Defaults - m.Mappings = n.Mappings - m.InitCache(cacheSize, options...) - - if n.doFSM { - var mappings []string - for _, mapping := range n.Mappings { - if mapping.MatchType == MatchTypeGlob { - mappings = append(mappings, mapping.Match) - } - } - n.FSM.BacktrackingNeeded = fsm.TestIfNeedBacktracking(mappings, n.FSM.OrderingDisabled) - - m.FSM = n.FSM - m.doRegex = n.doRegex - } - m.doFSM = n.doFSM - - if m.MappingsCount != nil { - m.MappingsCount.Set(float64(len(n.Mappings))) - } - return nil -} - -func (m *MetricMapper) InitFromFile(fileName string, cacheSize int, options ...CacheOption) error { - mappingStr, err := ioutil.ReadFile(fileName) - if err != nil { - return err - } - - return m.InitFromYAMLString(string(mappingStr), cacheSize, options...) -} - -func (m *MetricMapper) InitCache(cacheSize int, options ...CacheOption) { - if cacheSize == 0 { - m.cache = NewMetricMapperNoopCache() - } else { - o := cacheOptions{ - cacheType: "lru", - } - for _, f := range options { - f(&o) - } - - var ( - cache MetricMapperCache - err error - ) - switch o.cacheType { - case "lru": - cache, err = NewMetricMapperCache(cacheSize) - case "random": - cache, err = NewMetricMapperRRCache(cacheSize) - default: - err = fmt.Errorf("unsupported cache type %q", o.cacheType) - } - - if err != nil { - log.Fatalf("Unable to setup metric cache. 
Caused by: %s", err) - } - m.cache = cache - } -} - -func (m *MetricMapper) GetMapping(statsdMetric string, statsdMetricType MetricType) (*MetricMapping, prometheus.Labels, bool) { - m.mutex.RLock() - defer m.mutex.RUnlock() - result, cached := m.cache.Get(statsdMetric, statsdMetricType) - if cached { - return result.Mapping, result.Labels, result.Matched - } - // glob matching - if m.doFSM { - finalState, captures := m.FSM.GetMapping(statsdMetric, string(statsdMetricType)) - if finalState != nil && finalState.Result != nil { - v := finalState.Result.(*MetricMapping) - result := copyMetricMapping(v) - result.Name = result.nameFormatter.Format(captures) - - labels := prometheus.Labels{} - for index, formatter := range result.labelFormatters { - labels[result.labelKeys[index]] = formatter.Format(captures) - } - - m.cache.AddMatch(statsdMetric, statsdMetricType, result, labels) - - return result, labels, true - } else if !m.doRegex { - // if there's no regex match type, return immediately - m.cache.AddMiss(statsdMetric, statsdMetricType) - return nil, nil, false - } - } - - // regex matching - for _, mapping := range m.Mappings { - // if a rule don't have regex matching type, the regex field is unset - if mapping.regex == nil { - continue - } - matches := mapping.regex.FindStringSubmatchIndex(statsdMetric) - if len(matches) == 0 { - continue - } - - mapping.Name = string(mapping.regex.ExpandString( - []byte{}, - mapping.Name, - statsdMetric, - matches, - )) - - if mt := mapping.MatchMetricType; mt != "" && mt != statsdMetricType { - continue - } - - labels := prometheus.Labels{} - for label, valueExpr := range mapping.Labels { - value := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches) - labels[label] = string(value) - } - - m.cache.AddMatch(statsdMetric, statsdMetricType, &mapping, labels) - - return &mapping, labels, true - } - - m.cache.AddMiss(statsdMetric, statsdMetricType) - return nil, nil, false -} - -// make a shallow copy so that we do 
not overwrite name -// as multiple names can be matched by same mapping -func copyMetricMapping(in *MetricMapping) *MetricMapping { - var out MetricMapping - out = *in - return &out -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import ( + "fmt" + "io/ioutil" + "regexp" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/log" + "github.com/prometheus/statsd_exporter/pkg/mapper/fsm" + yaml "gopkg.in/yaml.v2" +) + +var ( + statsdMetricRE = `[a-zA-Z_](-?[a-zA-Z0-9_])+` + templateReplaceRE = `(\$\{?\d+\}?)` + + metricLineRE = regexp.MustCompile(`^(\*\.|` + statsdMetricRE + `\.)+(\*|` + statsdMetricRE + `)$`) + metricNameRE = regexp.MustCompile(`^([a-zA-Z_]|` + templateReplaceRE + `)([a-zA-Z0-9_]|` + templateReplaceRE + `)*$`) + labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`) +) + +type mapperConfigDefaults struct { + TimerType TimerType `yaml:"timer_type"` + Buckets []float64 `yaml:"buckets"` + Quantiles []metricObjective `yaml:"quantiles"` + MatchType MatchType `yaml:"match_type"` + GlobDisableOrdering bool `yaml:"glob_disable_ordering"` + Ttl time.Duration `yaml:"ttl"` +} + +type MetricMapper struct { + Defaults mapperConfigDefaults `yaml:"defaults"` + Mappings []MetricMapping `yaml:"mappings"` + FSM *fsm.FSM + doFSM bool + doRegex bool + cache MetricMapperCache + mutex sync.RWMutex + + MappingsCount prometheus.Gauge +} + +type 
MetricMapping struct { + Match string `yaml:"match"` + Name string `yaml:"name"` + nameFormatter *fsm.TemplateFormatter + regex *regexp.Regexp + Labels prometheus.Labels `yaml:"labels"` + labelKeys []string + labelFormatters []*fsm.TemplateFormatter + TimerType TimerType `yaml:"timer_type"` + LegacyBuckets []float64 `yaml:"buckets"` + LegacyQuantiles []metricObjective `yaml:"quantiles"` + MatchType MatchType `yaml:"match_type"` + HelpText string `yaml:"help"` + Action ActionType `yaml:"action"` + MatchMetricType MetricType `yaml:"match_metric_type"` + Ttl time.Duration `yaml:"ttl"` + SummaryOptions *SummaryOptions `yaml:"summary_options"` + HistogramOptions *HistogramOptions `yaml:"histogram_options"` +} + +type SummaryOptions struct { + Quantiles []metricObjective `yaml:"quantiles"` + MaxAge time.Duration `yaml:"max_age"` + AgeBuckets uint32 `yaml:"age_buckets"` + BufCap uint32 `yaml:"buf_cap"` +} + +type HistogramOptions struct { + Buckets []float64 `yaml:"buckets"` +} + +type metricObjective struct { + Quantile float64 `yaml:"quantile"` + Error float64 `yaml:"error"` +} + +var defaultQuantiles = []metricObjective{ + {Quantile: 0.5, Error: 0.05}, + {Quantile: 0.9, Error: 0.01}, + {Quantile: 0.99, Error: 0.001}, +} + +func (m *MetricMapper) InitFromYAMLString(fileContents string, cacheSize int, options ...CacheOption) error { + var n MetricMapper + + if err := yaml.Unmarshal([]byte(fileContents), &n); err != nil { + return err + } + + if n.Defaults.Buckets == nil || len(n.Defaults.Buckets) == 0 { + n.Defaults.Buckets = prometheus.DefBuckets + } + + if n.Defaults.Quantiles == nil || len(n.Defaults.Quantiles) == 0 { + n.Defaults.Quantiles = defaultQuantiles + } + + if n.Defaults.MatchType == MatchTypeDefault { + n.Defaults.MatchType = MatchTypeGlob + } + + remainingMappingsCount := len(n.Mappings) + + n.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeTimer)}, + remainingMappingsCount, n.Defaults.GlobDisableOrdering) + + 
for i := range n.Mappings { + remainingMappingsCount-- + + currentMapping := &n.Mappings[i] + + // check that label is correct + for k := range currentMapping.Labels { + if !labelNameRE.MatchString(k) { + return fmt.Errorf("invalid label key: %s", k) + } + } + + if currentMapping.Name == "" { + return fmt.Errorf("line %d: metric mapping didn't set a metric name", i) + } + + if !metricNameRE.MatchString(currentMapping.Name) { + return fmt.Errorf("metric name '%s' doesn't match regex '%s'", currentMapping.Name, metricNameRE) + } + + if currentMapping.MatchType == "" { + currentMapping.MatchType = n.Defaults.MatchType + } + + if currentMapping.Action == "" { + currentMapping.Action = ActionTypeMap + } + + if currentMapping.MatchType == MatchTypeGlob { + n.doFSM = true + if !metricLineRE.MatchString(currentMapping.Match) { + return fmt.Errorf("invalid match: %s", currentMapping.Match) + } + + captureCount := n.FSM.AddState(currentMapping.Match, string(currentMapping.MatchMetricType), + remainingMappingsCount, currentMapping) + + currentMapping.nameFormatter = fsm.NewTemplateFormatter(currentMapping.Name, captureCount) + + labelKeys := make([]string, len(currentMapping.Labels)) + labelFormatters := make([]*fsm.TemplateFormatter, len(currentMapping.Labels)) + labelIndex := 0 + for label, valueExpr := range currentMapping.Labels { + labelKeys[labelIndex] = label + labelFormatters[labelIndex] = fsm.NewTemplateFormatter(valueExpr, captureCount) + labelIndex++ + } + currentMapping.labelFormatters = labelFormatters + currentMapping.labelKeys = labelKeys + + } else { + if regex, err := regexp.Compile(currentMapping.Match); err != nil { + return fmt.Errorf("invalid regex %s in mapping: %v", currentMapping.Match, err) + } else { + currentMapping.regex = regex + } + n.doRegex = true + } + + if currentMapping.TimerType == "" { + currentMapping.TimerType = n.Defaults.TimerType + } + + if currentMapping.LegacyQuantiles != nil && + (currentMapping.SummaryOptions == nil || 
currentMapping.SummaryOptions.Quantiles != nil) { + log.Warn("using the top level quantiles is deprecated. Please use quantiles in the summary_options hierarchy") + } + + if currentMapping.LegacyBuckets != nil && + (currentMapping.HistogramOptions == nil || currentMapping.HistogramOptions.Buckets != nil) { + log.Warn("using the top level buckets is deprecated. Please use buckets in the histogram_options hierarchy") + } + + if currentMapping.SummaryOptions != nil && + currentMapping.LegacyQuantiles != nil && + currentMapping.SummaryOptions.Quantiles != nil { + return fmt.Errorf("cannot use quantiles in both the top level and summary options at the same time in %s", currentMapping.Match) + } + + if currentMapping.HistogramOptions != nil && + currentMapping.LegacyBuckets != nil && + currentMapping.HistogramOptions.Buckets != nil { + return fmt.Errorf("cannot use buckets in both the top level and histogram options at the same time in %s", currentMapping.Match) + } + + if currentMapping.TimerType == TimerTypeHistogram { + if currentMapping.SummaryOptions != nil { + return fmt.Errorf("cannot use histogram timer and summary options at the same time") + } + if currentMapping.HistogramOptions == nil { + currentMapping.HistogramOptions = &HistogramOptions{} + } + if currentMapping.LegacyBuckets != nil && len(currentMapping.LegacyBuckets) != 0 { + currentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets + } + if currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 { + currentMapping.HistogramOptions.Buckets = n.Defaults.Buckets + } + } + + if currentMapping.TimerType == TimerTypeSummary { + if currentMapping.HistogramOptions != nil { + return fmt.Errorf("cannot use summary timer and histogram options at the same time") + } + if currentMapping.SummaryOptions == nil { + currentMapping.SummaryOptions = &SummaryOptions{} + } + if currentMapping.LegacyQuantiles != nil && len(currentMapping.LegacyQuantiles) != 0 { + 
currentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles + } + if currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 { + currentMapping.SummaryOptions.Quantiles = n.Defaults.Quantiles + } + } + + if currentMapping.Ttl == 0 && n.Defaults.Ttl > 0 { + currentMapping.Ttl = n.Defaults.Ttl + } + + } + + m.mutex.Lock() + defer m.mutex.Unlock() + + m.Defaults = n.Defaults + m.Mappings = n.Mappings + m.InitCache(cacheSize, options...) + + if n.doFSM { + var mappings []string + for _, mapping := range n.Mappings { + if mapping.MatchType == MatchTypeGlob { + mappings = append(mappings, mapping.Match) + } + } + n.FSM.BacktrackingNeeded = fsm.TestIfNeedBacktracking(mappings, n.FSM.OrderingDisabled) + + m.FSM = n.FSM + m.doRegex = n.doRegex + } + m.doFSM = n.doFSM + + if m.MappingsCount != nil { + m.MappingsCount.Set(float64(len(n.Mappings))) + } + return nil +} + +func (m *MetricMapper) InitFromFile(fileName string, cacheSize int, options ...CacheOption) error { + mappingStr, err := ioutil.ReadFile(fileName) + if err != nil { + return err + } + + return m.InitFromYAMLString(string(mappingStr), cacheSize, options...) +} + +func (m *MetricMapper) InitCache(cacheSize int, options ...CacheOption) { + if cacheSize == 0 { + m.cache = NewMetricMapperNoopCache() + } else { + o := cacheOptions{ + cacheType: "lru", + } + for _, f := range options { + f(&o) + } + + var ( + cache MetricMapperCache + err error + ) + switch o.cacheType { + case "lru": + cache, err = NewMetricMapperCache(cacheSize) + case "random": + cache, err = NewMetricMapperRRCache(cacheSize) + default: + err = fmt.Errorf("unsupported cache type %q", o.cacheType) + } + + if err != nil { + log.Fatalf("Unable to setup metric cache. 
Caused by: %s", err) + } + m.cache = cache + } +} + +func (m *MetricMapper) GetMapping(statsdMetric string, statsdMetricType MetricType) (*MetricMapping, prometheus.Labels, bool) { + m.mutex.RLock() + defer m.mutex.RUnlock() + result, cached := m.cache.Get(statsdMetric, statsdMetricType) + if cached { + return result.Mapping, result.Labels, result.Matched + } + // glob matching + if m.doFSM { + finalState, captures := m.FSM.GetMapping(statsdMetric, string(statsdMetricType)) + if finalState != nil && finalState.Result != nil { + v := finalState.Result.(*MetricMapping) + result := copyMetricMapping(v) + result.Name = result.nameFormatter.Format(captures) + + labels := prometheus.Labels{} + for index, formatter := range result.labelFormatters { + labels[result.labelKeys[index]] = formatter.Format(captures) + } + + m.cache.AddMatch(statsdMetric, statsdMetricType, result, labels) + + return result, labels, true + } else if !m.doRegex { + // if there's no regex match type, return immediately + m.cache.AddMiss(statsdMetric, statsdMetricType) + return nil, nil, false + } + } + + // regex matching + for _, mapping := range m.Mappings { + // if a rule don't have regex matching type, the regex field is unset + if mapping.regex == nil { + continue + } + matches := mapping.regex.FindStringSubmatchIndex(statsdMetric) + if len(matches) == 0 { + continue + } + + mapping.Name = string(mapping.regex.ExpandString( + []byte{}, + mapping.Name, + statsdMetric, + matches, + )) + + if mt := mapping.MatchMetricType; mt != "" && mt != statsdMetricType { + continue + } + + labels := prometheus.Labels{} + for label, valueExpr := range mapping.Labels { + value := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches) + labels[label] = string(value) + } + + m.cache.AddMatch(statsdMetric, statsdMetricType, &mapping, labels) + + return &mapping, labels, true + } + + m.cache.AddMiss(statsdMetric, statsdMetricType) + return nil, nil, false +} + +// make a shallow copy so that we do 
not overwrite name +// as multiple names can be matched by same mapping +func copyMetricMapping(in *MetricMapping) *MetricMapping { + var out MetricMapping + out = *in + return &out +} diff --git a/pkg/mapper/mapper_benchmark_test.go b/pkg/mapper/mapper_benchmark_test.go index 9210571..4c5c6ae 100644 --- a/pkg/mapper/mapper_benchmark_test.go +++ b/pkg/mapper/mapper_benchmark_test.go @@ -1,1000 +1,1000 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mapper - -import ( - "fmt" - "math/rand" - "testing" -) - -var ( - ruleTemplateSingleMatchGlob = ` -- match: metric%d.* - name: "metric_single" - labels: - name: "$1" -` - ruleTemplateSingleMatchRegex = ` -- match: metric%d\.([^.]*) - name: "metric_single" - labels: - name: "$1" -` - - ruleTemplateMultipleMatchGlob = ` -- match: metric%d.*.*.*.*.*.*.*.*.*.*.*.* - name: "metric_multi" - labels: - name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" -` - - ruleTemplateMultipleMatchRegex = ` -- match: metric%d\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) - name: "metric_multi" - labels: - name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" -` -) - -func duplicateRules(count int, template string) string { - rules := "" - for i := 0; i < count; i++ { - rules += fmt.Sprintf(template, i) - } - return rules -} - -func BenchmarkGlob(b *testing.B) { - config := `--- -mappings: -- match: test.dispatcher.*.*.succeeded - name: "dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "succeeded" - job: "test_dispatcher" -- match: test.my-dispatch-host01.name.dispatcher.*.*.* - name: "host_dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* - name: "tyk_http_request" - labels: - method_and_path: "${1}" - response_code: "${2}" - apikey: "${3}" - apiversion: "${4}" - apiname: "${5}" - apiid: "${6}" - ipv4_t1: "${7}" - ipv4_t2: "${8}" - ipv4_t3: "${9}" - ipv4_t4: "${10}" - orgid: "${11}" - oauthid: "${12}" -- match: "*.*" - name: "catchall" - labels: - first: "$1" - second: "$2" - third: "$3" - job: "-" - ` - mappings := []string{ - "test.dispatcher.FooProcessor.send.succeeded", - "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", - "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", - "foo.bar", - 
"foo.bar.baz", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlobNoOrdering(b *testing.B) { - config := `--- -defaults: - glob_disable_ordering: true -mappings: -- match: test.dispatcher.*.*.succeeded - name: "dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "succeeded" - job: "test_dispatcher" -- match: test.my-dispatch-host01.name.dispatcher.*.*.* - name: "host_dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* - name: "tyk_http_request" - labels: - method_and_path: "${1}" - response_code: "${2}" - apikey: "${3}" - apiversion: "${4}" - apiname: "${5}" - apiid: "${6}" - ipv4_t1: "${7}" - ipv4_t2: "${8}" - ipv4_t3: "${9}" - ipv4_t4: "${10}" - orgid: "${11}" - oauthid: "${12}" -- match: "*.*" - name: "catchall" - labels: - first: "$1" - second: "$2" - third: "$3" - job: "-" - ` - mappings := []string{ - "test.dispatcher.FooProcessor.send.succeeded", - "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", - "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", - "foo.bar", - "foo.bar.baz", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlobNoOrderingWithBacktracking(b *testing.B) { - config := `--- -defaults: - glob_disable_ordering: true -mappings: -- match: test.dispatcher.*.*.succeeded - name: "dispatch_events" - labels: - 
processor: "$1" - action: "$2" - result: "succeeded" - job: "test_dispatcher" -- match: test.dispatcher.*.received.* - name: "dispatch_events_wont_match" - labels: - processor: "$1" - action: "received" - result: "$2" - job: "test_dispatcher" -- match: test.my-dispatch-host01.name.dispatcher.*.*.* - name: "host_dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* - name: "tyk_http_request" - labels: - method_and_path: "${1}" - response_code: "${2}" - apikey: "${3}" - apiversion: "${4}" - apiname: "${5}" - apiid: "${6}" - ipv4_t1: "${7}" - ipv4_t2: "${8}" - ipv4_t3: "${9}" - ipv4_t4: "${10}" - orgid: "${11}" - oauthid: "${12}" -- match: "*.*" - name: "catchall" - labels: - first: "$1" - second: "$2" - third: "$3" - job: "-" - ` - mappings := []string{ - "test.dispatcher.FooProcessor.send.succeeded", - "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", - "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", - "foo.bar", - "foo.bar.baz", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegex(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings: -- match: test\.dispatcher\.([^.]*)\.([^.]*)\.([^.]*) - name: "dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: test.my-dispatch-host01.name.dispatcher\.([^.]*)\.([^.]*)\.([^.]*) - name: "host_dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: 
request_time\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) - name: "tyk_http_request" - labels: - method_and_path: "${1}" - response_code: "${2}" - apikey: "${3}" - apiversion: "${4}" - apiname: "${5}" - apiid: "${6}" - ipv4_t1: "${7}" - ipv4_t2: "${8}" - ipv4_t3: "${9}" - ipv4_t4: "${10}" - orgid: "${11}" - oauthid: "${12}" -- match: \.([^.]*)\.([^.]*) - name: "catchall" - labels: - first: "$1" - second: "$2" - third: "$3" - job: "-" - ` - mappings := []string{ - "test.dispatcher.FooProcessor.send.succeeded", - "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", - "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", - "foo.bar", - "foo.bar.baz", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlobSingleMatch(b *testing.B) { - config := `--- -mappings: -- match: metric.* - name: "metric_one" - labels: - name: "$1" - ` - mappings := []string{ - "metric.aaa", - "metric.bbb", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegexSingleMatch(b *testing.B) { - config := `--- -mappings: -- match: metric\.([^.]*) - name: "metric_one" - match_type: regex - labels: - name: "$1" - ` - mappings := []string{ - "metric.aaa", - "metric.bbb", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - 
- b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlobMultipleCaptures(b *testing.B) { - config := `--- -mappings: -- match: metric.*.*.*.*.*.*.*.*.*.*.*.* - name: "metric_multi" - labels: - name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" - ` - mappings := []string{ - "metric.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegexMultipleCaptures(b *testing.B) { - config := `--- -mappings: -- match: metric\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) - name: "metric_multi" - match_type: regex - labels: - name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" - ` - mappings := []string{ - "metric.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlobMultipleCapturesNoFormat(b *testing.B) { - config := `--- -mappings: -- match: metric.*.*.*.*.*.*.*.*.*.*.*.* - name: "metric_multi" - labels: - name: "not_relevant" - ` - mappings := []string{ - "metric.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegexMultipleCapturesNoFormat(b *testing.B) { - config := `--- 
-mappings: -- match: metric\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) - name: "metric_multi" - match_type: regex - labels: - name: "not_relevant" - ` - mappings := []string{ - "metric.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlobMultipleCapturesDifferentLabels(b *testing.B) { - config := `--- -mappings: -- match: metric.*.*.*.*.*.*.*.*.*.*.*.* - name: "metric_multi" - labels: - label1: "$1" - label2: "$2" - label3: "$3" - label4: "$4" - label5: "$5" - label6: "$6" - label7: "$7" - label8: "$8" - label9: "$9" - label10: "$10" - label11: "$11" - label12: "$12" - ` - mappings := []string{ - "metric.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegexMultipleCapturesDifferentLabels(b *testing.B) { - config := `--- -mappings: -- match: metric\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) - name: "metric_multi" - match_type: regex - labels: - label1: "$1" - label2: "$2" - label3: "$3" - label4: "$4" - label5: "$5" - label6: "$6" - label7: "$7" - label8: "$8" - label9: "$9" - label10: "$10" - label11: "$11" - label12: "$12" - ` - mappings := []string{ - "metric.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; 
j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlob10Rules(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - mappings := []string{ - "metric100.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlob10RulesCached(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - mappings := []string{ - "metric100.a", - } - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func BenchmarkRegex10RulesAverage(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(10, ruleTemplateSingleMatchRegex) - mappings := []string{ - "metric5.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegex10RulesAverageCached(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(10, ruleTemplateSingleMatchRegex) - mappings := []string{ - "metric5.a", - } - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := 
MetricMapper{} - err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func BenchmarkGlob100Rules(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - mappings := []string{ - "metric100.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlob100RulesCached(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - mappings := []string{ - "metric100.a", - } - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func BenchmarkGlob100RulesNoMatch(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - mappings := []string{ - "metricnomatchy.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlob100RulesNoOrderingNoMatch(b *testing.B) { - config := `--- -defaults: - glob_disable_ordering: true -mappings:` + 
duplicateRules(100, ruleTemplateSingleMatchGlob) - mappings := []string{ - "metricnomatchy.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegex100RulesAverage(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(100, ruleTemplateSingleMatchRegex) - mappings := []string{ - "metric50.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegex100RulesWorst(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(100, ruleTemplateSingleMatchRegex) - mappings := []string{ - "metric100.a", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlob100RulesMultipleCaptures(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateMultipleMatchGlob) - mappings := []string{ - "metric50.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkGlob100RulesMultipleCapturesCached(b *testing.B) { - config := `--- -mappings:` + 
duplicateRules(100, ruleTemplateMultipleMatchGlob) - mappings := []string{ - "metric50.a.b.c.d.e.f.g.h.i.j.k.l", - } - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func BenchmarkRegex100RulesMultipleCapturesAverage(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(100, ruleTemplateMultipleMatchRegex) - mappings := []string{ - "metric50.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegex100RulesMultipleCapturesWorst(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(100, ruleTemplateMultipleMatchRegex) - mappings := []string{ - "metric100.a.b.c.d.e.f.g.h.i.j.k.l", - } - - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } -} - -func BenchmarkRegex100RulesMultipleCapturesWorstCached(b *testing.B) { - config := `--- -defaults: - match_type: regex -mappings:` + duplicateRules(100, ruleTemplateMultipleMatchRegex) - mappings := []string{ - "metric100.a.b.c.d.e.f.g.h.i.j.k.l", - } - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := 
MetricMapper{} - err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func duplicateMetrics(count int, template string) []string { - var out []string - for i := 0; i < count; i++ { - out = append(out, fmt.Sprintf(template, i)) - } - return out -} - -func BenchmarkGlob100RulesCached100Metrics(b *testing.B) { - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - - mappings := duplicateMetrics(100, "metric100") - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func BenchmarkGlob100RulesCached100MetricsSmallCache(b *testing.B) { - // This benchmark is the worst case for the LRU cache. - // The cache is smaller than the total number of metrics and - // we iterate linearly through the metrics, so we will - // constantly evict cache entries. 
- config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - - mappings := duplicateMetrics(100, "metric100") - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 50, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} - -func BenchmarkGlob100RulesCached100MetricsRandomSmallCache(b *testing.B) { - // Slighly more realistic benchmark with a smaller cache. - // Randomly match metrics so we should have some cache hits. - config := `--- -mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) - - base := duplicateMetrics(100, "metric100") - var mappings []string - for i := 0; i < 10; i++ { - mappings = append(mappings, base...) - } - - r := rand.New(rand.NewSource(42)) - r.Shuffle(len(mappings), func(i, j int) { - mappings[i], mappings[j] = mappings[j], mappings[i] - }) - - for _, cacheType := range []string{"lru", "random"} { - b.Run(cacheType, func(b *testing.B) { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 50, WithCacheType(cacheType)) - if err != nil { - b.Fatalf("Config load error: %s %s", config, err) - } - - b.ResetTimer() - for j := 0; j < b.N; j++ { - for _, metric := range mappings { - mapper.GetMapping(metric, MetricTypeCounter) - } - } - }) - } -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import ( + "fmt" + "math/rand" + "testing" +) + +var ( + ruleTemplateSingleMatchGlob = ` +- match: metric%d.* + name: "metric_single" + labels: + name: "$1" +` + ruleTemplateSingleMatchRegex = ` +- match: metric%d\.([^.]*) + name: "metric_single" + labels: + name: "$1" +` + + ruleTemplateMultipleMatchGlob = ` +- match: metric%d.*.*.*.*.*.*.*.*.*.*.*.* + name: "metric_multi" + labels: + name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" +` + + ruleTemplateMultipleMatchRegex = ` +- match: metric%d\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) + name: "metric_multi" + labels: + name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" +` +) + +func duplicateRules(count int, template string) string { + rules := "" + for i := 0; i < count; i++ { + rules += fmt.Sprintf(template, i) + } + return rules +} + +func BenchmarkGlob(b *testing.B) { + config := `--- +mappings: +- match: test.dispatcher.*.*.succeeded + name: "dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "succeeded" + job: "test_dispatcher" +- match: test.my-dispatch-host01.name.dispatcher.*.*.* + name: "host_dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* + name: "tyk_http_request" + labels: + method_and_path: "${1}" + response_code: "${2}" + apikey: "${3}" + apiversion: "${4}" + apiname: "${5}" + apiid: "${6}" + ipv4_t1: "${7}" + ipv4_t2: "${8}" + ipv4_t3: "${9}" + ipv4_t4: "${10}" + orgid: "${11}" + 
oauthid: "${12}" +- match: "*.*" + name: "catchall" + labels: + first: "$1" + second: "$2" + third: "$3" + job: "-" + ` + mappings := []string{ + "test.dispatcher.FooProcessor.send.succeeded", + "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", + "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", + "foo.bar", + "foo.bar.baz", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlobNoOrdering(b *testing.B) { + config := `--- +defaults: + glob_disable_ordering: true +mappings: +- match: test.dispatcher.*.*.succeeded + name: "dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "succeeded" + job: "test_dispatcher" +- match: test.my-dispatch-host01.name.dispatcher.*.*.* + name: "host_dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* + name: "tyk_http_request" + labels: + method_and_path: "${1}" + response_code: "${2}" + apikey: "${3}" + apiversion: "${4}" + apiname: "${5}" + apiid: "${6}" + ipv4_t1: "${7}" + ipv4_t2: "${8}" + ipv4_t3: "${9}" + ipv4_t4: "${10}" + orgid: "${11}" + oauthid: "${12}" +- match: "*.*" + name: "catchall" + labels: + first: "$1" + second: "$2" + third: "$3" + job: "-" + ` + mappings := []string{ + "test.dispatcher.FooProcessor.send.succeeded", + "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", + "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", + "foo.bar", + "foo.bar.baz", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) 
+ if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlobNoOrderingWithBacktracking(b *testing.B) { + config := `--- +defaults: + glob_disable_ordering: true +mappings: +- match: test.dispatcher.*.*.succeeded + name: "dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "succeeded" + job: "test_dispatcher" +- match: test.dispatcher.*.received.* + name: "dispatch_events_wont_match" + labels: + processor: "$1" + action: "received" + result: "$2" + job: "test_dispatcher" +- match: test.my-dispatch-host01.name.dispatcher.*.*.* + name: "host_dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* + name: "tyk_http_request" + labels: + method_and_path: "${1}" + response_code: "${2}" + apikey: "${3}" + apiversion: "${4}" + apiname: "${5}" + apiid: "${6}" + ipv4_t1: "${7}" + ipv4_t2: "${8}" + ipv4_t3: "${9}" + ipv4_t4: "${10}" + orgid: "${11}" + oauthid: "${12}" +- match: "*.*" + name: "catchall" + labels: + first: "$1" + second: "$2" + third: "$3" + job: "-" + ` + mappings := []string{ + "test.dispatcher.FooProcessor.send.succeeded", + "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", + "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", + "foo.bar", + "foo.bar.baz", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegex(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings: +- match: 
test\.dispatcher\.([^.]*)\.([^.]*)\.([^.]*) + name: "dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: test.my-dispatch-host01.name.dispatcher\.([^.]*)\.([^.]*)\.([^.]*) + name: "host_dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: request_time\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) + name: "tyk_http_request" + labels: + method_and_path: "${1}" + response_code: "${2}" + apikey: "${3}" + apiversion: "${4}" + apiname: "${5}" + apiid: "${6}" + ipv4_t1: "${7}" + ipv4_t2: "${8}" + ipv4_t3: "${9}" + ipv4_t4: "${10}" + orgid: "${11}" + oauthid: "${12}" +- match: \.([^.]*)\.([^.]*) + name: "catchall" + labels: + first: "$1" + second: "$2" + third: "$3" + job: "-" + ` + mappings := []string{ + "test.dispatcher.FooProcessor.send.succeeded", + "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", + "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", + "foo.bar", + "foo.bar.baz", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlobSingleMatch(b *testing.B) { + config := `--- +mappings: +- match: metric.* + name: "metric_one" + labels: + name: "$1" + ` + mappings := []string{ + "metric.aaa", + "metric.bbb", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func 
BenchmarkRegexSingleMatch(b *testing.B) { + config := `--- +mappings: +- match: metric\.([^.]*) + name: "metric_one" + match_type: regex + labels: + name: "$1" + ` + mappings := []string{ + "metric.aaa", + "metric.bbb", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlobMultipleCaptures(b *testing.B) { + config := `--- +mappings: +- match: metric.*.*.*.*.*.*.*.*.*.*.*.* + name: "metric_multi" + labels: + name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" + ` + mappings := []string{ + "metric.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegexMultipleCaptures(b *testing.B) { + config := `--- +mappings: +- match: metric\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) + name: "metric_multi" + match_type: regex + labels: + name: "$1-$2-$3.$4-$5-$6.$7-$8-$9.$10-$11-$12" + ` + mappings := []string{ + "metric.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlobMultipleCapturesNoFormat(b *testing.B) { + config := `--- +mappings: +- match: metric.*.*.*.*.*.*.*.*.*.*.*.* + name: "metric_multi" + labels: + name: "not_relevant" + ` + mappings := []string{ + "metric.a.b.c.d.e.f.g.h.i.j.k.l", 
+ } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegexMultipleCapturesNoFormat(b *testing.B) { + config := `--- +mappings: +- match: metric\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) + name: "metric_multi" + match_type: regex + labels: + name: "not_relevant" + ` + mappings := []string{ + "metric.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlobMultipleCapturesDifferentLabels(b *testing.B) { + config := `--- +mappings: +- match: metric.*.*.*.*.*.*.*.*.*.*.*.* + name: "metric_multi" + labels: + label1: "$1" + label2: "$2" + label3: "$3" + label4: "$4" + label5: "$5" + label6: "$6" + label7: "$7" + label8: "$8" + label9: "$9" + label10: "$10" + label11: "$11" + label12: "$12" + ` + mappings := []string{ + "metric.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegexMultipleCapturesDifferentLabels(b *testing.B) { + config := `--- +mappings: +- match: metric\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*)\.([^.]*) + name: "metric_multi" + match_type: regex + labels: + label1: "$1" + label2: "$2" + label3: "$3" + label4: "$4" + 
label5: "$5" + label6: "$6" + label7: "$7" + label8: "$8" + label9: "$9" + label10: "$10" + label11: "$11" + label12: "$12" + ` + mappings := []string{ + "metric.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlob10Rules(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + mappings := []string{ + "metric100.a", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlob10RulesCached(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + mappings := []string{ + "metric100.a", + } + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func BenchmarkRegex10RulesAverage(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(10, ruleTemplateSingleMatchRegex) + mappings := []string{ + "metric5.a", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + 
mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegex10RulesAverageCached(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(10, ruleTemplateSingleMatchRegex) + mappings := []string{ + "metric5.a", + } + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func BenchmarkGlob100Rules(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + mappings := []string{ + "metric100.a", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlob100RulesCached(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + mappings := []string{ + "metric100.a", + } + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func BenchmarkGlob100RulesNoMatch(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + mappings := []string{ + "metricnomatchy.a", + } + + mapper := MetricMapper{} + err := 
mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlob100RulesNoOrderingNoMatch(b *testing.B) { + config := `--- +defaults: + glob_disable_ordering: true +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + mappings := []string{ + "metricnomatchy.a", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegex100RulesAverage(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(100, ruleTemplateSingleMatchRegex) + mappings := []string{ + "metric50.a", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegex100RulesWorst(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(100, ruleTemplateSingleMatchRegex) + mappings := []string{ + "metric100.a", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlob100RulesMultipleCaptures(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateMultipleMatchGlob) + mappings := []string{ + "metric50.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper 
:= MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkGlob100RulesMultipleCapturesCached(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateMultipleMatchGlob) + mappings := []string{ + "metric50.a.b.c.d.e.f.g.h.i.j.k.l", + } + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func BenchmarkRegex100RulesMultipleCapturesAverage(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(100, ruleTemplateMultipleMatchRegex) + mappings := []string{ + "metric50.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } +} + +func BenchmarkRegex100RulesMultipleCapturesWorst(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(100, ruleTemplateMultipleMatchRegex) + mappings := []string{ + "metric100.a.b.c.d.e.f.g.h.i.j.k.l", + } + + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } 
+} + +func BenchmarkRegex100RulesMultipleCapturesWorstCached(b *testing.B) { + config := `--- +defaults: + match_type: regex +mappings:` + duplicateRules(100, ruleTemplateMultipleMatchRegex) + mappings := []string{ + "metric100.a.b.c.d.e.f.g.h.i.j.k.l", + } + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func duplicateMetrics(count int, template string) []string { + var out []string + for i := 0; i < count; i++ { + out = append(out, fmt.Sprintf(template, i)) + } + return out +} + +func BenchmarkGlob100RulesCached100Metrics(b *testing.B) { + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + + mappings := duplicateMetrics(100, "metric100") + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 1000, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func BenchmarkGlob100RulesCached100MetricsSmallCache(b *testing.B) { + // This benchmark is the worst case for the LRU cache. + // The cache is smaller than the total number of metrics and + // we iterate linearly through the metrics, so we will + // constantly evict cache entries. 
+ config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + + mappings := duplicateMetrics(100, "metric100") + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 50, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} + +func BenchmarkGlob100RulesCached100MetricsRandomSmallCache(b *testing.B) { + // Slighly more realistic benchmark with a smaller cache. + // Randomly match metrics so we should have some cache hits. + config := `--- +mappings:` + duplicateRules(100, ruleTemplateSingleMatchGlob) + + base := duplicateMetrics(100, "metric100") + var mappings []string + for i := 0; i < 10; i++ { + mappings = append(mappings, base...) + } + + r := rand.New(rand.NewSource(42)) + r.Shuffle(len(mappings), func(i, j int) { + mappings[i], mappings[j] = mappings[j], mappings[i] + }) + + for _, cacheType := range []string{"lru", "random"} { + b.Run(cacheType, func(b *testing.B) { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 50, WithCacheType(cacheType)) + if err != nil { + b.Fatalf("Config load error: %s %s", config, err) + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + for _, metric := range mappings { + mapper.GetMapping(metric, MetricTypeCounter) + } + } + }) + } +} diff --git a/pkg/mapper/mapper_cache.go b/pkg/mapper/mapper_cache.go index 5b252a7..db127a7 100644 --- a/pkg/mapper/mapper_cache.go +++ b/pkg/mapper/mapper_cache.go @@ -1,198 +1,198 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapper - -import ( - "sync" - - lru "github.com/hashicorp/golang-lru" - "github.com/prometheus/client_golang/prometheus" -) - -var ( - cacheLength = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "statsd_metric_mapper_cache_length", - Help: "The count of unique metrics currently cached.", - }, - ) - cacheGetsTotal = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "statsd_metric_mapper_cache_gets_total", - Help: "The count of total metric cache gets.", - }, - ) - cacheHitsTotal = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "statsd_metric_mapper_cache_hits_total", - Help: "The count of total metric cache hits.", - }, - ) -) - -type cacheOptions struct { - cacheType string -} - -type CacheOption func(*cacheOptions) - -func WithCacheType(cacheType string) CacheOption { - return func(o *cacheOptions) { - o.cacheType = cacheType - } -} - -type MetricMapperCacheResult struct { - Mapping *MetricMapping - Matched bool - Labels prometheus.Labels -} - -type MetricMapperCache interface { - Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) - AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) - AddMiss(metricString string, metricType MetricType) -} - -type MetricMapperLRUCache struct { - MetricMapperCache - cache *lru.Cache -} - -type MetricMapperNoopCache struct { - MetricMapperCache -} - -func NewMetricMapperCache(size int) (*MetricMapperLRUCache, error) { - cacheLength.Set(0) - cache, err := lru.New(size) - if err != nil { - return 
&MetricMapperLRUCache{}, err - } - return &MetricMapperLRUCache{cache: cache}, nil -} - -func (m *MetricMapperLRUCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { - cacheGetsTotal.Inc() - if result, ok := m.cache.Get(formatKey(metricString, metricType)); ok { - cacheHitsTotal.Inc() - return result.(*MetricMapperCacheResult), true - } else { - return nil, false - } -} - -func (m *MetricMapperLRUCache) AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) { - go m.trackCacheLength() - m.cache.Add(formatKey(metricString, metricType), &MetricMapperCacheResult{Mapping: mapping, Matched: true, Labels: labels}) -} - -func (m *MetricMapperLRUCache) AddMiss(metricString string, metricType MetricType) { - go m.trackCacheLength() - m.cache.Add(formatKey(metricString, metricType), &MetricMapperCacheResult{Matched: false}) -} - -func (m *MetricMapperLRUCache) trackCacheLength() { - cacheLength.Set(float64(m.cache.Len())) -} - -func formatKey(metricString string, metricType MetricType) string { - return string(metricType) + "." 
+ metricString -} - -func NewMetricMapperNoopCache() *MetricMapperNoopCache { - cacheLength.Set(0) - return &MetricMapperNoopCache{} -} - -func (m *MetricMapperNoopCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { - return nil, false -} - -func (m *MetricMapperNoopCache) AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) { - return -} - -func (m *MetricMapperNoopCache) AddMiss(metricString string, metricType MetricType) { - return -} - -type MetricMapperRRCache struct { - MetricMapperCache - lock sync.RWMutex - size int - items map[string]*MetricMapperCacheResult -} - -func NewMetricMapperRRCache(size int) (*MetricMapperRRCache, error) { - cacheLength.Set(0) - c := &MetricMapperRRCache{ - items: make(map[string]*MetricMapperCacheResult, size+1), - size: size, - } - return c, nil -} - -func (m *MetricMapperRRCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { - key := formatKey(metricString, metricType) - - m.lock.RLock() - result, ok := m.items[key] - m.lock.RUnlock() - - return result, ok -} - -func (m *MetricMapperRRCache) addItem(metricString string, metricType MetricType, result *MetricMapperCacheResult) { - go m.trackCacheLength() - - key := formatKey(metricString, metricType) - - m.lock.Lock() - - m.items[key] = result - - // evict an item if needed - if len(m.items) > m.size { - for k := range m.items { - delete(m.items, k) - break - } - } - - m.lock.Unlock() -} - -func (m *MetricMapperRRCache) AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) { - e := &MetricMapperCacheResult{Mapping: mapping, Matched: true, Labels: labels} - m.addItem(metricString, metricType, e) -} - -func (m *MetricMapperRRCache) AddMiss(metricString string, metricType MetricType) { - e := &MetricMapperCacheResult{Matched: false} - m.addItem(metricString, metricType, e) -} - -func (m *MetricMapperRRCache) 
trackCacheLength() { - m.lock.RLock() - length := len(m.items) - m.lock.RUnlock() - cacheLength.Set(float64(length)) -} - -func init() { - prometheus.MustRegister(cacheLength) - prometheus.MustRegister(cacheGetsTotal) - prometheus.MustRegister(cacheHitsTotal) -} +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import ( + "sync" + + lru "github.com/hashicorp/golang-lru" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + cacheLength = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "statsd_metric_mapper_cache_length", + Help: "The count of unique metrics currently cached.", + }, + ) + cacheGetsTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_metric_mapper_cache_gets_total", + Help: "The count of total metric cache gets.", + }, + ) + cacheHitsTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_metric_mapper_cache_hits_total", + Help: "The count of total metric cache hits.", + }, + ) +) + +type cacheOptions struct { + cacheType string +} + +type CacheOption func(*cacheOptions) + +func WithCacheType(cacheType string) CacheOption { + return func(o *cacheOptions) { + o.cacheType = cacheType + } +} + +type MetricMapperCacheResult struct { + Mapping *MetricMapping + Matched bool + Labels prometheus.Labels +} + +type MetricMapperCache interface { + Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) + 
AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) + AddMiss(metricString string, metricType MetricType) +} + +type MetricMapperLRUCache struct { + MetricMapperCache + cache *lru.Cache +} + +type MetricMapperNoopCache struct { + MetricMapperCache +} + +func NewMetricMapperCache(size int) (*MetricMapperLRUCache, error) { + cacheLength.Set(0) + cache, err := lru.New(size) + if err != nil { + return &MetricMapperLRUCache{}, err + } + return &MetricMapperLRUCache{cache: cache}, nil +} + +func (m *MetricMapperLRUCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { + cacheGetsTotal.Inc() + if result, ok := m.cache.Get(formatKey(metricString, metricType)); ok { + cacheHitsTotal.Inc() + return result.(*MetricMapperCacheResult), true + } else { + return nil, false + } +} + +func (m *MetricMapperLRUCache) AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) { + go m.trackCacheLength() + m.cache.Add(formatKey(metricString, metricType), &MetricMapperCacheResult{Mapping: mapping, Matched: true, Labels: labels}) +} + +func (m *MetricMapperLRUCache) AddMiss(metricString string, metricType MetricType) { + go m.trackCacheLength() + m.cache.Add(formatKey(metricString, metricType), &MetricMapperCacheResult{Matched: false}) +} + +func (m *MetricMapperLRUCache) trackCacheLength() { + cacheLength.Set(float64(m.cache.Len())) +} + +func formatKey(metricString string, metricType MetricType) string { + return string(metricType) + "." 
+ metricString +} + +func NewMetricMapperNoopCache() *MetricMapperNoopCache { + cacheLength.Set(0) + return &MetricMapperNoopCache{} +} + +func (m *MetricMapperNoopCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { + return nil, false +} + +func (m *MetricMapperNoopCache) AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) { + return +} + +func (m *MetricMapperNoopCache) AddMiss(metricString string, metricType MetricType) { + return +} + +type MetricMapperRRCache struct { + MetricMapperCache + lock sync.RWMutex + size int + items map[string]*MetricMapperCacheResult +} + +func NewMetricMapperRRCache(size int) (*MetricMapperRRCache, error) { + cacheLength.Set(0) + c := &MetricMapperRRCache{ + items: make(map[string]*MetricMapperCacheResult, size+1), + size: size, + } + return c, nil +} + +func (m *MetricMapperRRCache) Get(metricString string, metricType MetricType) (*MetricMapperCacheResult, bool) { + key := formatKey(metricString, metricType) + + m.lock.RLock() + result, ok := m.items[key] + m.lock.RUnlock() + + return result, ok +} + +func (m *MetricMapperRRCache) addItem(metricString string, metricType MetricType, result *MetricMapperCacheResult) { + go m.trackCacheLength() + + key := formatKey(metricString, metricType) + + m.lock.Lock() + + m.items[key] = result + + // evict an item if needed + if len(m.items) > m.size { + for k := range m.items { + delete(m.items, k) + break + } + } + + m.lock.Unlock() +} + +func (m *MetricMapperRRCache) AddMatch(metricString string, metricType MetricType, mapping *MetricMapping, labels prometheus.Labels) { + e := &MetricMapperCacheResult{Mapping: mapping, Matched: true, Labels: labels} + m.addItem(metricString, metricType, e) +} + +func (m *MetricMapperRRCache) AddMiss(metricString string, metricType MetricType) { + e := &MetricMapperCacheResult{Matched: false} + m.addItem(metricString, metricType, e) +} + +func (m *MetricMapperRRCache) 
trackCacheLength() { + m.lock.RLock() + length := len(m.items) + m.lock.RUnlock() + cacheLength.Set(float64(length)) +} + +func init() { + prometheus.MustRegister(cacheLength) + prometheus.MustRegister(cacheGetsTotal) + prometheus.MustRegister(cacheHitsTotal) +} diff --git a/pkg/mapper/mapper_test.go b/pkg/mapper/mapper_test.go index dc8a45d..c3748bf 100644 --- a/pkg/mapper/mapper_test.go +++ b/pkg/mapper/mapper_test.go @@ -1,1032 +1,1032 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapper - -import ( - "testing" - "time" -) - -type mappings []struct { - statsdMetric string - name string - labels map[string]string - quantiles []metricObjective - notPresent bool - ttl time.Duration - metricType MetricType - maxAge time.Duration - ageBuckets uint32 - bufCap uint32 -} - -func TestMetricMapperYAML(t *testing.T) { - scenarios := []struct { - config string - configBad bool - mappings mappings - }{ - // Empty config. - {}, - // Config with several mapping definitions. 
- { - config: `--- -mappings: -- match: test.dispatcher.*.*.* - name: "dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: test.my-dispatch-host01.name.dispatcher.*.*.* - name: "host_dispatch_events" - labels: - processor: "$1" - action: "$2" - result: "$3" - job: "test_dispatcher" -- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* - name: "tyk_http_request" - labels: - method_and_path: "${1}" - response_code: "${2}" - apikey: "${3}" - apiversion: "${4}" - apiname: "${5}" - apiid: "${6}" - ipv4_t1: "${7}" - ipv4_t2: "${8}" - ipv4_t3: "${9}" - ipv4_t4: "${10}" - orgid: "${11}" - oauthid: "${12}" -- match: "*.*" - name: "catchall" - labels: - first: "$1" - second: "$2" - third: "$3" - job: "$1-$2-$3" -- match: (.*)\.(.*)-(.*)\.(.*) - match_type: regex - name: "proxy_requests_total" - labels: - job: "$1" - protocol: "$2" - endpoint: "$3" - result: "$4" - - `, - mappings: mappings{ - { - statsdMetric: "test.dispatcher.FooProcessor.send.succeeded", - name: "dispatch_events", - labels: map[string]string{ - "processor": "FooProcessor", - "action": "send", - "result": "succeeded", - "job": "test_dispatcher", - }, - }, - { - statsdMetric: "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", - name: "host_dispatch_events", - labels: map[string]string{ - "processor": "FooProcessor", - "action": "send", - "result": "succeeded", - "job": "test_dispatcher", - }, - }, - { - statsdMetric: "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", - name: "tyk_http_request", - labels: map[string]string{ - "method_and_path": "get/threads/1/posts", - "response_code": "200", - "apikey": "00000000", - "apiversion": "nonversioned", - "apiname": "discussions", - "apiid": "a11bbcdf0ac64ec243658dc64b7100fb", - "ipv4_t1": "172", - "ipv4_t2": "20", - "ipv4_t3": "0", - "ipv4_t4": "1", - "orgid": "12ba97b7eaa1a50001000001", - "oauthid": 
"", - }, - }, - { - statsdMetric: "foo.bar", - name: "catchall", - labels: map[string]string{ - "first": "foo", - "second": "bar", - "third": "", - "job": "foo-bar-", - }, - }, - { - statsdMetric: "foo.bar.baz", - }, - { - statsdMetric: "proxy-1.http-goober.success", - name: "proxy_requests_total", - labels: map[string]string{ - "job": "proxy-1", - "protocol": "http", - "endpoint": "goober", - "result": "success", - }, - }, - }, - }, - //Config with backtracking - { - config: ` -defaults: - glob_disable_ordering: true -mappings: -- match: backtrack.*.bbb - name: "testb" - labels: - label: "${1}_foo" -- match: backtrack.justatest.aaa - name: "testa" - labels: - label: "${1}_foo" - `, - mappings: mappings{ - { - statsdMetric: "backtrack.good.bbb", - name: "testb", - labels: map[string]string{ - "label": "good_foo", - }, - }, - { - statsdMetric: "backtrack.justatest.bbb", - name: "testb", - labels: map[string]string{ - "label": "justatest_foo", - }, - }, - { - statsdMetric: "backtrack.justatest.aaa", - name: "testa", - labels: map[string]string{ - "label": "_foo", - }, - }, - }, - }, - //Config with backtracking, the non-matched rule has star(s) - // A metric like full.name.anothertest will first match full.name.* and then tries - // to match *.dummy.* and then failed. - // This test case makes sure the captures in the non-matched later rule - // doesn't affect the captures in the first matched rule. 
- { - config: ` -defaults: - glob_disable_ordering: false -mappings: -- match: '*.dummy.*' - name: metric_one - labels: - system: $1 - attribute: $2 -- match: 'full.name.*' - name: metric_two - labels: - system: static - attribute: $1 -`, - mappings: mappings{ - { - statsdMetric: "whatever.dummy.test", - name: "metric_one", - labels: map[string]string{ - "system": "whatever", - "attribute": "test", - }, - }, - { - statsdMetric: "full.name.anothertest", - name: "metric_two", - labels: map[string]string{ - "system": "static", - "attribute": "anothertest", - }, - }, - }, - }, - //Config with super sets, disables ordering - { - config: ` -defaults: - glob_disable_ordering: true -mappings: -- match: noorder.*.* - name: "testa" - labels: - label: "${1}_foo" -- match: noorder.*.bbb - name: "testb" - labels: - label: "${1}_foo" -- match: noorder.ccc.bbb - name: "testc" - labels: - label: "ccc_foo" - `, - mappings: mappings{ - { - statsdMetric: "noorder.good.bbb", - name: "testb", - labels: map[string]string{ - "label": "good_foo", - }, - }, - { - statsdMetric: "noorder.ccc.bbb", - name: "testc", - labels: map[string]string{ - "label": "ccc_foo", - }, - }, - }, - }, - //Config with super sets, keeps ordering - { - config: ` -defaults: - glob_disable_ordering: false -mappings: -- match: order.*.* - name: "testa" - labels: - label: "${1}_foo" -- match: order.*.bbb - name: "testb" - labels: - label: "${1}_foo" - `, - mappings: mappings{ - { - statsdMetric: "order.good.bbb", - name: "testa", - labels: map[string]string{ - "label": "good_foo", - }, - }, - }, - }, - // Config with bad regex reference. - { - config: `--- -mappings: -- match: test.* - name: "name" - labels: - label: "$1_foo" - `, - mappings: mappings{ - { - statsdMetric: "test.a", - name: "name", - labels: map[string]string{ - "label": "", - }, - }, - }, - }, - // Config with good regex reference. 
- { - config: ` -mappings: -- match: test.* - name: "name" - labels: - label: "${1}_foo" - `, - mappings: mappings{ - { - statsdMetric: "test.a", - name: "name", - labels: map[string]string{ - "label": "a_foo", - }, - }, - }, - }, - // Config with bad metric line. - { - config: `--- -mappings: -- match: bad--metric-line.*.* - name: "foo" - labels: {} - `, - configBad: true, - }, - // Config with dynamic metric name. - { - config: `--- -mappings: -- match: test1.*.* - name: "$1" - labels: {} -- match: test2.*.* - name: "${1}_$2" - labels: {} -- match: test3\.(\w+)\.(\w+) - match_type: regex - name: "${2}_$1" - labels: {} - `, - mappings: mappings{ - { - statsdMetric: "test1.total_requests.count", - name: "total_requests", - }, - { - statsdMetric: "test2.total_requests.count", - name: "total_requests_count", - }, - { - statsdMetric: "test3.total_requests.count", - name: "count_total_requests", - }, - }, - }, - // Config with bad metric name. - { - config: `--- -mappings: -- match: test.*.* - name: "0foo" - labels: {} - `, - configBad: true, - }, - // Config with no metric name. - { - config: `--- -mappings: -- match: test.*.* - labels: - this: "$1" - `, - configBad: true, - }, - // Config with no mappings. - { - config: ``, - mappings: mappings{}, - }, - // Config without a trailing newline. - { - config: `mappings: -- match: test.* - name: "name" - labels: - label: "${1}_foo"`, - mappings: mappings{ - { - statsdMetric: "test.a", - name: "name", - labels: map[string]string{ - "label": "a_foo", - }, - }, - }, - }, - // Config with an improperly escaped *. - { - config: ` -mappings: -- match: *.test.* - name: "name" - labels: - label: "${1}_foo"`, - configBad: true, - }, - // Config with a properly escaped *. 
- { - config: ` -mappings: -- match: "*.test.*" - name: "name" - labels: - label: "${2}_foo"`, - mappings: mappings{ - { - statsdMetric: "foo.test.a", - name: "name", - labels: map[string]string{ - "label": "a_foo", - }, - }, - }, - }, - // Config with good timer type. - { - config: `--- -mappings: -- match: test.*.* - timer_type: summary - name: "foo" - labels: {} - quantiles: - - quantile: 0.42 - error: 0.04 - - quantile: 0.7 - error: 0.002 - `, - mappings: mappings{ - { - statsdMetric: "test.*.*", - name: "foo", - labels: map[string]string{}, - quantiles: []metricObjective{ - {Quantile: 0.42, Error: 0.04}, - {Quantile: 0.7, Error: 0.002}, - }, - }, - }, - }, - { - config: `--- -mappings: -- match: test1.*.* - timer_type: summary - name: "foo" - labels: {} - `, - mappings: mappings{ - { - statsdMetric: "test1.*.*", - name: "foo", - labels: map[string]string{}, - quantiles: []metricObjective{ - {Quantile: 0.5, Error: 0.05}, - {Quantile: 0.9, Error: 0.01}, - {Quantile: 0.99, Error: 0.001}, - }, - }, - }, - }, - // Config with bad timer type. - { - config: `--- -mappings: -- match: test.*.* - timer_type: wrong - name: "foo" - labels: {} - `, - configBad: true, - }, - // new style quantiles - { - config: `--- -mappings: -- match: test.*.* - timer_type: summary - name: "foo" - labels: {} - summary_options: - quantiles: - - quantile: 0.42 - error: 0.04 - - quantile: 0.7 - error: 0.002 - `, - mappings: mappings{ - { - statsdMetric: "test.*.*", - name: "foo", - labels: map[string]string{}, - quantiles: []metricObjective{ - {Quantile: 0.42, Error: 0.04}, - {Quantile: 0.7, Error: 0.002}, - }, - }, - }, - }, - // Config with summary configuration. 
- { - config: `--- -mappings: -- match: test.*.* - timer_type: summary - name: "foo" - labels: {} - summary_options: - quantiles: - - quantile: 0.42 - error: 0.04 - - quantile: 0.7 - error: 0.002 - max_age: 5m - age_buckets: 2 - buf_cap: 1000 - `, - mappings: mappings{ - { - statsdMetric: "test.*.*", - name: "foo", - labels: map[string]string{}, - quantiles: []metricObjective{ - {Quantile: 0.42, Error: 0.04}, - {Quantile: 0.7, Error: 0.002}, - }, - maxAge: 5 * time.Minute, - ageBuckets: 2, - bufCap: 1000, - }, - }, - }, - // duplicate quantiles are bad - { - config: `--- -mappings: -- match: test.*.* - timer_type: summary - name: "foo" - labels: {} - quantiles: - - quantile: 0.42 - error: 0.04 - summary_options: - quantiles: - - quantile: 0.42 - error: 0.04 - `, - configBad: true, - }, - // Config with good metric type. - { - config: `--- -mappings: -- match: test.*.* - match_metric_type: counter - name: "foo" - labels: {} - `, - }, - // Config with bad metric type matcher. - { - config: `--- -mappings: -- match: test.*.* - match_metric_type: wrong - name: "foo" - labels: {} - `, - configBad: true, - }, - // Config with multiple explicit metric types - { - config: `--- -mappings: -- match: test.foo.* - name: "test_foo_sum" - match_metric_type: counter -- match: test.foo.* - name: "test_foo_current" - match_metric_type: gauge - `, - mappings: mappings{ - { - statsdMetric: "test.foo.test", - name: "test_foo_sum", - metricType: MetricTypeCounter, - }, - { - statsdMetric: "test.foo.test", - name: "test_foo_current", - metricType: MetricTypeGauge, - }, - }, - }, - //Config with uncompilable regex. - { - config: `--- -mappings: -- match: "*\\.foo" - match_type: regex - name: "foo" - labels: {} - `, - configBad: true, - }, - //Config with non-matched metric. 
- { - config: `--- -mappings: -- match: foo.*.* - timer_type: summary - name: "foo" - labels: {} - `, - mappings: mappings{ - { - statsdMetric: "test.1.2", - name: "test_1_2", - labels: map[string]string{}, - notPresent: true, - }, - }, - }, - //Config with no name. - { - config: `--- -mappings: -- match: *\.foo - match_type: regex - labels: - bar: "foo" - `, - configBad: true, - }, - // Example from the README. - { - config: ` -mappings: -- match: test.dispatcher.*.*.* - name: "dispatcher_events_total" - labels: - processor: "$1" - action: "$2" - outcome: "$3" - job: "test_dispatcher" -- match: "*.signup.*.*" - name: "signup_events_total" - labels: - provider: "$2" - outcome: "$3" - job: "${1}_server" -`, - mappings: mappings{ - { - statsdMetric: "test.dispatcher.FooProcessor.send.success", - name: "dispatcher_events_total", - labels: map[string]string{ - "processor": "FooProcessor", - "action": "send", - "outcome": "success", - "job": "test_dispatcher", - }, - }, - { - statsdMetric: "foo_product.signup.facebook.failure", - name: "signup_events_total", - labels: map[string]string{ - "provider": "facebook", - "outcome": "failure", - "job": "foo_product_server", - }, - }, - { - statsdMetric: "test.web-server.foo.bar", - name: "test_web_server_foo_bar", - labels: map[string]string{}, - }, - }, - }, - // Config that drops all. - { - config: `mappings: -- match: . - match_type: regex - name: "drop" - action: drop`, - mappings: mappings{ - { - statsdMetric: "test.a", - }, - { - statsdMetric: "abc", - }, - }, - }, - // Config that has a catch-all to drop all. - { - config: `mappings: -- match: web.* - name: "web" - labels: - site: "$1" -- match: . - match_type: regex - name: "drop" - action: drop`, - mappings: mappings{ - { - statsdMetric: "test.a", - }, - { - statsdMetric: "web.localhost", - name: "web", - labels: map[string]string{ - "site": "localhost", - }, - }, - }, - }, - // Config that has a ttl. 
- { - config: `mappings: -- match: web.* - name: "web" - ttl: 10s - labels: - site: "$1"`, - mappings: mappings{ - { - statsdMetric: "test.a", - }, - { - statsdMetric: "web.localhost", - name: "web", - labels: map[string]string{ - "site": "localhost", - }, - ttl: time.Second * 10, - }, - }, - }, - // Config that has a default ttl. - { - config: `defaults: - ttl: 1m2s -mappings: -- match: web.* - name: "web" - labels: - site: "$1"`, - mappings: mappings{ - { - statsdMetric: "test.a", - }, - { - statsdMetric: "web.localhost", - name: "web", - labels: map[string]string{ - "site": "localhost", - }, - ttl: time.Minute + time.Second*2, - }, - }, - }, - // Config that override a default ttl. - { - config: `defaults: - ttl: 1m2s -mappings: -- match: web.* - name: "web" - ttl: 5s - labels: - site: "$1"`, - mappings: mappings{ - { - statsdMetric: "test.a", - }, - { - statsdMetric: "web.localhost", - name: "web", - labels: map[string]string{ - "site": "localhost", - }, - ttl: time.Second * 5, - }, - }, - }, - } - - mapper := MetricMapper{} - for i, scenario := range scenarios { - err := mapper.InitFromYAMLString(scenario.config, 1000) - if err != nil && !scenario.configBad { - t.Fatalf("%d. Config load error: %s %s", i, scenario.config, err) - } - if err == nil && scenario.configBad { - t.Fatalf("%d. 
Expected bad config, but loaded ok: %s", i, scenario.config) - } - - for metric, mapping := range scenario.mappings { - // exporter will call mapper.GetMapping with valid MetricType - // so we also pass a sane MetricType in testing if it's not specified - mapType := mapping.metricType - if mapType == "" { - mapType = MetricTypeCounter - } - m, labels, present := mapper.GetMapping(mapping.statsdMetric, mapType) - if present && mapping.name != "" && m.Name != mapping.name { - t.Fatalf("%d.%q: Expected name %v, got %v", i, metric, m.Name, mapping.name) - } - if mapping.notPresent && present { - t.Fatalf("%d.%q: Expected metric to not be present", i, metric) - } - if len(labels) != len(mapping.labels) { - t.Fatalf("%d.%q: Expected %d labels, got %d", i, metric, len(mapping.labels), len(labels)) - } - for label, value := range labels { - if mapping.labels[label] != value { - t.Fatalf("%d.%q: Expected labels %v, got %v", i, metric, mapping, labels) - } - } - if mapping.ttl > 0 && mapping.ttl != m.Ttl { - t.Fatalf("%d.%q: Expected ttl of %s, got %s", i, metric, mapping.ttl.String(), m.Ttl.String()) - } - if mapping.metricType != "" && mapType != m.MatchMetricType { - t.Fatalf("%d.%q: Expected match metric of %s, got %s", i, metric, mapType, m.MatchMetricType) - } - - if len(mapping.quantiles) != 0 { - if len(mapping.quantiles) != len(m.SummaryOptions.Quantiles) { - t.Fatalf("%d.%q: Expected %d quantiles, got %d", i, metric, len(mapping.quantiles), len(m.SummaryOptions.Quantiles)) - } - for i, quantile := range mapping.quantiles { - if quantile.Quantile != m.SummaryOptions.Quantiles[i].Quantile { - t.Fatalf("%d.%q: Expected quantile %v, got %v", i, metric, m.SummaryOptions.Quantiles[i].Quantile, quantile.Quantile) - } - if quantile.Error != m.SummaryOptions.Quantiles[i].Error { - t.Fatalf("%d.%q: Expected Error margin %v, got %v", i, metric, m.SummaryOptions.Quantiles[i].Error, quantile.Error) - } - } - } - if mapping.maxAge != 0 && mapping.maxAge != 
m.SummaryOptions.MaxAge { - t.Fatalf("%d.%q: Expected max age %v, got %v", i, metric, mapping.maxAge, m.SummaryOptions.MaxAge) - } - if mapping.ageBuckets != 0 && mapping.ageBuckets != m.SummaryOptions.AgeBuckets { - t.Fatalf("%d.%q: Expected max age %v, got %v", i, metric, mapping.ageBuckets, m.SummaryOptions.AgeBuckets) - } - if mapping.bufCap != 0 && mapping.bufCap != m.SummaryOptions.BufCap { - t.Fatalf("%d.%q: Expected max age %v, got %v", i, metric, mapping.bufCap, m.SummaryOptions.BufCap) - } - } - } -} - -func TestAction(t *testing.T) { - scenarios := []struct { - config string - configBad bool - expectedAction ActionType - }{ - { - // no action set - config: `--- -mappings: -- match: test.*.* - name: "foo" -`, - configBad: false, - expectedAction: ActionTypeMap, - }, - { - // map action set - config: `--- -mappings: -- match: test.*.* - name: "foo" - action: map -`, - configBad: false, - expectedAction: ActionTypeMap, - }, - { - // drop action set - config: `--- -mappings: -- match: test.*.* - name: "foo" - action: drop -`, - configBad: false, - expectedAction: ActionTypeDrop, - }, - { - // invalid action set - config: `--- -mappings: -- match: test.*.* - name: "foo" - action: xyz -`, - configBad: true, - expectedAction: ActionTypeDrop, - }, - { - // valid yaml example - config: `--- -mappings: -- match: "test\\.(\\w+)\\.(\\w+)\\.counter" - match_type: regex - name: "${2}_total" - labels: - provider: "$1" -`, - configBad: false, - expectedAction: ActionTypeMap, - }, - { - // invalid yaml example - config: `--- -mappings: -- match: "test\.(\w+)\.(\w+)\.counter" - match_type: regex - name: "${2}_total" - labels: - provider: "$1" -`, - configBad: true, - }, - } - - for i, scenario := range scenarios { - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(scenario.config, 0) - if err != nil && !scenario.configBad { - t.Fatalf("%d. Config load error: %s %s", i, scenario.config, err) - } - if err == nil && scenario.configBad { - t.Fatalf("%d. 
Expected bad config, but loaded ok: %s", i, scenario.config) - } - - if !scenario.configBad { - a := mapper.Mappings[0].Action - if scenario.expectedAction != a { - t.Fatalf("%d: Expected action %v, got %v", i, scenario.expectedAction, a) - } - } - } -} - -// Test for https://github.com/prometheus/statsd_exporter/issues/273 -// Corrupt cache for multiple names matching in fsm -func TestMultipleMatches(t *testing.T) { - config := `--- -mappings: -- match: aa.bb.*.* - name: "aa_bb_${1}_total" - labels: - app: "$2" -` - mapper := MetricMapper{} - err := mapper.InitFromYAMLString(config, 0) - if err != nil { - t.Fatalf("config load error: %s ", err) - } - - names := map[string]string{ - "aa.bb.aa.myapp": "aa_bb_aa_total", - "aa.bb.bb.myapp": "aa_bb_bb_total", - "aa.bb.cc.myapp": "aa_bb_cc_total", - "aa.bb.dd.myapp": "aa_bb_dd_total", - } - - scenarios := []struct { - cacheSize int - }{ - { - cacheSize: 0, - }, - { - cacheSize: len(names), - }, - } - - for i, scenario := range scenarios { - mapper.InitCache(scenario.cacheSize) - - // run multiple times to ensure cache works as expected - for j := 0; j < 10; j++ { - for name, expected := range names { - m, _, ok := mapper.GetMapping(name, MetricTypeCounter) - if !ok { - t.Fatalf("%d:%d Did not find match for %s", i, j, name) - } - if m.Name != expected { - t.Fatalf("%d:%d Expected name %s, got %s", i, j, expected, m.Name) - } - } - } - } - -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mapper + +import ( + "testing" + "time" +) + +type mappings []struct { + statsdMetric string + name string + labels map[string]string + quantiles []metricObjective + notPresent bool + ttl time.Duration + metricType MetricType + maxAge time.Duration + ageBuckets uint32 + bufCap uint32 +} + +func TestMetricMapperYAML(t *testing.T) { + scenarios := []struct { + config string + configBad bool + mappings mappings + }{ + // Empty config. + {}, + // Config with several mapping definitions. + { + config: `--- +mappings: +- match: test.dispatcher.*.*.* + name: "dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: test.my-dispatch-host01.name.dispatcher.*.*.* + name: "host_dispatch_events" + labels: + processor: "$1" + action: "$2" + result: "$3" + job: "test_dispatcher" +- match: request_time.*.*.*.*.*.*.*.*.*.*.*.* + name: "tyk_http_request" + labels: + method_and_path: "${1}" + response_code: "${2}" + apikey: "${3}" + apiversion: "${4}" + apiname: "${5}" + apiid: "${6}" + ipv4_t1: "${7}" + ipv4_t2: "${8}" + ipv4_t3: "${9}" + ipv4_t4: "${10}" + orgid: "${11}" + oauthid: "${12}" +- match: "*.*" + name: "catchall" + labels: + first: "$1" + second: "$2" + third: "$3" + job: "$1-$2-$3" +- match: (.*)\.(.*)-(.*)\.(.*) + match_type: regex + name: "proxy_requests_total" + labels: + job: "$1" + protocol: "$2" + endpoint: "$3" + result: "$4" + + `, + mappings: mappings{ + { + statsdMetric: "test.dispatcher.FooProcessor.send.succeeded", + name: "dispatch_events", + labels: map[string]string{ + "processor": "FooProcessor", + "action": "send", + "result": "succeeded", + "job": "test_dispatcher", + }, + }, + { + statsdMetric: "test.my-dispatch-host01.name.dispatcher.FooProcessor.send.succeeded", + name: "host_dispatch_events", + labels: map[string]string{ + "processor": "FooProcessor", + "action": "send", + 
"result": "succeeded", + "job": "test_dispatcher", + }, + }, + { + statsdMetric: "request_time.get/threads/1/posts.200.00000000.nonversioned.discussions.a11bbcdf0ac64ec243658dc64b7100fb.172.20.0.1.12ba97b7eaa1a50001000001.", + name: "tyk_http_request", + labels: map[string]string{ + "method_and_path": "get/threads/1/posts", + "response_code": "200", + "apikey": "00000000", + "apiversion": "nonversioned", + "apiname": "discussions", + "apiid": "a11bbcdf0ac64ec243658dc64b7100fb", + "ipv4_t1": "172", + "ipv4_t2": "20", + "ipv4_t3": "0", + "ipv4_t4": "1", + "orgid": "12ba97b7eaa1a50001000001", + "oauthid": "", + }, + }, + { + statsdMetric: "foo.bar", + name: "catchall", + labels: map[string]string{ + "first": "foo", + "second": "bar", + "third": "", + "job": "foo-bar-", + }, + }, + { + statsdMetric: "foo.bar.baz", + }, + { + statsdMetric: "proxy-1.http-goober.success", + name: "proxy_requests_total", + labels: map[string]string{ + "job": "proxy-1", + "protocol": "http", + "endpoint": "goober", + "result": "success", + }, + }, + }, + }, + //Config with backtracking + { + config: ` +defaults: + glob_disable_ordering: true +mappings: +- match: backtrack.*.bbb + name: "testb" + labels: + label: "${1}_foo" +- match: backtrack.justatest.aaa + name: "testa" + labels: + label: "${1}_foo" + `, + mappings: mappings{ + { + statsdMetric: "backtrack.good.bbb", + name: "testb", + labels: map[string]string{ + "label": "good_foo", + }, + }, + { + statsdMetric: "backtrack.justatest.bbb", + name: "testb", + labels: map[string]string{ + "label": "justatest_foo", + }, + }, + { + statsdMetric: "backtrack.justatest.aaa", + name: "testa", + labels: map[string]string{ + "label": "_foo", + }, + }, + }, + }, + //Config with backtracking, the non-matched rule has star(s) + // A metric like full.name.anothertest will first match full.name.* and then tries + // to match *.dummy.* and then failed. 
+ // This test case makes sure the captures in the non-matched later rule + // doesn't affect the captures in the first matched rule. + { + config: ` +defaults: + glob_disable_ordering: false +mappings: +- match: '*.dummy.*' + name: metric_one + labels: + system: $1 + attribute: $2 +- match: 'full.name.*' + name: metric_two + labels: + system: static + attribute: $1 +`, + mappings: mappings{ + { + statsdMetric: "whatever.dummy.test", + name: "metric_one", + labels: map[string]string{ + "system": "whatever", + "attribute": "test", + }, + }, + { + statsdMetric: "full.name.anothertest", + name: "metric_two", + labels: map[string]string{ + "system": "static", + "attribute": "anothertest", + }, + }, + }, + }, + //Config with super sets, disables ordering + { + config: ` +defaults: + glob_disable_ordering: true +mappings: +- match: noorder.*.* + name: "testa" + labels: + label: "${1}_foo" +- match: noorder.*.bbb + name: "testb" + labels: + label: "${1}_foo" +- match: noorder.ccc.bbb + name: "testc" + labels: + label: "ccc_foo" + `, + mappings: mappings{ + { + statsdMetric: "noorder.good.bbb", + name: "testb", + labels: map[string]string{ + "label": "good_foo", + }, + }, + { + statsdMetric: "noorder.ccc.bbb", + name: "testc", + labels: map[string]string{ + "label": "ccc_foo", + }, + }, + }, + }, + //Config with super sets, keeps ordering + { + config: ` +defaults: + glob_disable_ordering: false +mappings: +- match: order.*.* + name: "testa" + labels: + label: "${1}_foo" +- match: order.*.bbb + name: "testb" + labels: + label: "${1}_foo" + `, + mappings: mappings{ + { + statsdMetric: "order.good.bbb", + name: "testa", + labels: map[string]string{ + "label": "good_foo", + }, + }, + }, + }, + // Config with bad regex reference. 
+ { + config: `--- +mappings: +- match: test.* + name: "name" + labels: + label: "$1_foo" + `, + mappings: mappings{ + { + statsdMetric: "test.a", + name: "name", + labels: map[string]string{ + "label": "", + }, + }, + }, + }, + // Config with good regex reference. + { + config: ` +mappings: +- match: test.* + name: "name" + labels: + label: "${1}_foo" + `, + mappings: mappings{ + { + statsdMetric: "test.a", + name: "name", + labels: map[string]string{ + "label": "a_foo", + }, + }, + }, + }, + // Config with bad metric line. + { + config: `--- +mappings: +- match: bad--metric-line.*.* + name: "foo" + labels: {} + `, + configBad: true, + }, + // Config with dynamic metric name. + { + config: `--- +mappings: +- match: test1.*.* + name: "$1" + labels: {} +- match: test2.*.* + name: "${1}_$2" + labels: {} +- match: test3\.(\w+)\.(\w+) + match_type: regex + name: "${2}_$1" + labels: {} + `, + mappings: mappings{ + { + statsdMetric: "test1.total_requests.count", + name: "total_requests", + }, + { + statsdMetric: "test2.total_requests.count", + name: "total_requests_count", + }, + { + statsdMetric: "test3.total_requests.count", + name: "count_total_requests", + }, + }, + }, + // Config with bad metric name. + { + config: `--- +mappings: +- match: test.*.* + name: "0foo" + labels: {} + `, + configBad: true, + }, + // Config with no metric name. + { + config: `--- +mappings: +- match: test.*.* + labels: + this: "$1" + `, + configBad: true, + }, + // Config with no mappings. + { + config: ``, + mappings: mappings{}, + }, + // Config without a trailing newline. + { + config: `mappings: +- match: test.* + name: "name" + labels: + label: "${1}_foo"`, + mappings: mappings{ + { + statsdMetric: "test.a", + name: "name", + labels: map[string]string{ + "label": "a_foo", + }, + }, + }, + }, + // Config with an improperly escaped *. 
+ { + config: ` +mappings: +- match: *.test.* + name: "name" + labels: + label: "${1}_foo"`, + configBad: true, + }, + // Config with a properly escaped *. + { + config: ` +mappings: +- match: "*.test.*" + name: "name" + labels: + label: "${2}_foo"`, + mappings: mappings{ + { + statsdMetric: "foo.test.a", + name: "name", + labels: map[string]string{ + "label": "a_foo", + }, + }, + }, + }, + // Config with good timer type. + { + config: `--- +mappings: +- match: test.*.* + timer_type: summary + name: "foo" + labels: {} + quantiles: + - quantile: 0.42 + error: 0.04 + - quantile: 0.7 + error: 0.002 + `, + mappings: mappings{ + { + statsdMetric: "test.*.*", + name: "foo", + labels: map[string]string{}, + quantiles: []metricObjective{ + {Quantile: 0.42, Error: 0.04}, + {Quantile: 0.7, Error: 0.002}, + }, + }, + }, + }, + { + config: `--- +mappings: +- match: test1.*.* + timer_type: summary + name: "foo" + labels: {} + `, + mappings: mappings{ + { + statsdMetric: "test1.*.*", + name: "foo", + labels: map[string]string{}, + quantiles: []metricObjective{ + {Quantile: 0.5, Error: 0.05}, + {Quantile: 0.9, Error: 0.01}, + {Quantile: 0.99, Error: 0.001}, + }, + }, + }, + }, + // Config with bad timer type. + { + config: `--- +mappings: +- match: test.*.* + timer_type: wrong + name: "foo" + labels: {} + `, + configBad: true, + }, + // new style quantiles + { + config: `--- +mappings: +- match: test.*.* + timer_type: summary + name: "foo" + labels: {} + summary_options: + quantiles: + - quantile: 0.42 + error: 0.04 + - quantile: 0.7 + error: 0.002 + `, + mappings: mappings{ + { + statsdMetric: "test.*.*", + name: "foo", + labels: map[string]string{}, + quantiles: []metricObjective{ + {Quantile: 0.42, Error: 0.04}, + {Quantile: 0.7, Error: 0.002}, + }, + }, + }, + }, + // Config with summary configuration. 
+ { + config: `--- +mappings: +- match: test.*.* + timer_type: summary + name: "foo" + labels: {} + summary_options: + quantiles: + - quantile: 0.42 + error: 0.04 + - quantile: 0.7 + error: 0.002 + max_age: 5m + age_buckets: 2 + buf_cap: 1000 + `, + mappings: mappings{ + { + statsdMetric: "test.*.*", + name: "foo", + labels: map[string]string{}, + quantiles: []metricObjective{ + {Quantile: 0.42, Error: 0.04}, + {Quantile: 0.7, Error: 0.002}, + }, + maxAge: 5 * time.Minute, + ageBuckets: 2, + bufCap: 1000, + }, + }, + }, + // duplicate quantiles are bad + { + config: `--- +mappings: +- match: test.*.* + timer_type: summary + name: "foo" + labels: {} + quantiles: + - quantile: 0.42 + error: 0.04 + summary_options: + quantiles: + - quantile: 0.42 + error: 0.04 + `, + configBad: true, + }, + // Config with good metric type. + { + config: `--- +mappings: +- match: test.*.* + match_metric_type: counter + name: "foo" + labels: {} + `, + }, + // Config with bad metric type matcher. + { + config: `--- +mappings: +- match: test.*.* + match_metric_type: wrong + name: "foo" + labels: {} + `, + configBad: true, + }, + // Config with multiple explicit metric types + { + config: `--- +mappings: +- match: test.foo.* + name: "test_foo_sum" + match_metric_type: counter +- match: test.foo.* + name: "test_foo_current" + match_metric_type: gauge + `, + mappings: mappings{ + { + statsdMetric: "test.foo.test", + name: "test_foo_sum", + metricType: MetricTypeCounter, + }, + { + statsdMetric: "test.foo.test", + name: "test_foo_current", + metricType: MetricTypeGauge, + }, + }, + }, + //Config with uncompilable regex. + { + config: `--- +mappings: +- match: "*\\.foo" + match_type: regex + name: "foo" + labels: {} + `, + configBad: true, + }, + //Config with non-matched metric. 
+ { + config: `--- +mappings: +- match: foo.*.* + timer_type: summary + name: "foo" + labels: {} + `, + mappings: mappings{ + { + statsdMetric: "test.1.2", + name: "test_1_2", + labels: map[string]string{}, + notPresent: true, + }, + }, + }, + //Config with no name. + { + config: `--- +mappings: +- match: *\.foo + match_type: regex + labels: + bar: "foo" + `, + configBad: true, + }, + // Example from the README. + { + config: ` +mappings: +- match: test.dispatcher.*.*.* + name: "dispatcher_events_total" + labels: + processor: "$1" + action: "$2" + outcome: "$3" + job: "test_dispatcher" +- match: "*.signup.*.*" + name: "signup_events_total" + labels: + provider: "$2" + outcome: "$3" + job: "${1}_server" +`, + mappings: mappings{ + { + statsdMetric: "test.dispatcher.FooProcessor.send.success", + name: "dispatcher_events_total", + labels: map[string]string{ + "processor": "FooProcessor", + "action": "send", + "outcome": "success", + "job": "test_dispatcher", + }, + }, + { + statsdMetric: "foo_product.signup.facebook.failure", + name: "signup_events_total", + labels: map[string]string{ + "provider": "facebook", + "outcome": "failure", + "job": "foo_product_server", + }, + }, + { + statsdMetric: "test.web-server.foo.bar", + name: "test_web_server_foo_bar", + labels: map[string]string{}, + }, + }, + }, + // Config that drops all. + { + config: `mappings: +- match: . + match_type: regex + name: "drop" + action: drop`, + mappings: mappings{ + { + statsdMetric: "test.a", + }, + { + statsdMetric: "abc", + }, + }, + }, + // Config that has a catch-all to drop all. + { + config: `mappings: +- match: web.* + name: "web" + labels: + site: "$1" +- match: . + match_type: regex + name: "drop" + action: drop`, + mappings: mappings{ + { + statsdMetric: "test.a", + }, + { + statsdMetric: "web.localhost", + name: "web", + labels: map[string]string{ + "site": "localhost", + }, + }, + }, + }, + // Config that has a ttl. 
+ { + config: `mappings: +- match: web.* + name: "web" + ttl: 10s + labels: + site: "$1"`, + mappings: mappings{ + { + statsdMetric: "test.a", + }, + { + statsdMetric: "web.localhost", + name: "web", + labels: map[string]string{ + "site": "localhost", + }, + ttl: time.Second * 10, + }, + }, + }, + // Config that has a default ttl. + { + config: `defaults: + ttl: 1m2s +mappings: +- match: web.* + name: "web" + labels: + site: "$1"`, + mappings: mappings{ + { + statsdMetric: "test.a", + }, + { + statsdMetric: "web.localhost", + name: "web", + labels: map[string]string{ + "site": "localhost", + }, + ttl: time.Minute + time.Second*2, + }, + }, + }, + // Config that override a default ttl. + { + config: `defaults: + ttl: 1m2s +mappings: +- match: web.* + name: "web" + ttl: 5s + labels: + site: "$1"`, + mappings: mappings{ + { + statsdMetric: "test.a", + }, + { + statsdMetric: "web.localhost", + name: "web", + labels: map[string]string{ + "site": "localhost", + }, + ttl: time.Second * 5, + }, + }, + }, + } + + mapper := MetricMapper{} + for i, scenario := range scenarios { + err := mapper.InitFromYAMLString(scenario.config, 1000) + if err != nil && !scenario.configBad { + t.Fatalf("%d. Config load error: %s %s", i, scenario.config, err) + } + if err == nil && scenario.configBad { + t.Fatalf("%d. 
Expected bad config, but loaded ok: %s", i, scenario.config) + } + + for metric, mapping := range scenario.mappings { + // exporter will call mapper.GetMapping with valid MetricType + // so we also pass a sane MetricType in testing if it's not specified + mapType := mapping.metricType + if mapType == "" { + mapType = MetricTypeCounter + } + m, labels, present := mapper.GetMapping(mapping.statsdMetric, mapType) + if present && mapping.name != "" && m.Name != mapping.name { + t.Fatalf("%d.%q: Expected name %v, got %v", i, metric, m.Name, mapping.name) + } + if mapping.notPresent && present { + t.Fatalf("%d.%q: Expected metric to not be present", i, metric) + } + if len(labels) != len(mapping.labels) { + t.Fatalf("%d.%q: Expected %d labels, got %d", i, metric, len(mapping.labels), len(labels)) + } + for label, value := range labels { + if mapping.labels[label] != value { + t.Fatalf("%d.%q: Expected labels %v, got %v", i, metric, mapping, labels) + } + } + if mapping.ttl > 0 && mapping.ttl != m.Ttl { + t.Fatalf("%d.%q: Expected ttl of %s, got %s", i, metric, mapping.ttl.String(), m.Ttl.String()) + } + if mapping.metricType != "" && mapType != m.MatchMetricType { + t.Fatalf("%d.%q: Expected match metric of %s, got %s", i, metric, mapType, m.MatchMetricType) + } + + if len(mapping.quantiles) != 0 { + if len(mapping.quantiles) != len(m.SummaryOptions.Quantiles) { + t.Fatalf("%d.%q: Expected %d quantiles, got %d", i, metric, len(mapping.quantiles), len(m.SummaryOptions.Quantiles)) + } + for i, quantile := range mapping.quantiles { + if quantile.Quantile != m.SummaryOptions.Quantiles[i].Quantile { + t.Fatalf("%d.%q: Expected quantile %v, got %v", i, metric, m.SummaryOptions.Quantiles[i].Quantile, quantile.Quantile) + } + if quantile.Error != m.SummaryOptions.Quantiles[i].Error { + t.Fatalf("%d.%q: Expected Error margin %v, got %v", i, metric, m.SummaryOptions.Quantiles[i].Error, quantile.Error) + } + } + } + if mapping.maxAge != 0 && mapping.maxAge != 
m.SummaryOptions.MaxAge { + t.Fatalf("%d.%q: Expected max age %v, got %v", i, metric, mapping.maxAge, m.SummaryOptions.MaxAge) + } + if mapping.ageBuckets != 0 && mapping.ageBuckets != m.SummaryOptions.AgeBuckets { + t.Fatalf("%d.%q: Expected max age %v, got %v", i, metric, mapping.ageBuckets, m.SummaryOptions.AgeBuckets) + } + if mapping.bufCap != 0 && mapping.bufCap != m.SummaryOptions.BufCap { + t.Fatalf("%d.%q: Expected max age %v, got %v", i, metric, mapping.bufCap, m.SummaryOptions.BufCap) + } + } + } +} + +func TestAction(t *testing.T) { + scenarios := []struct { + config string + configBad bool + expectedAction ActionType + }{ + { + // no action set + config: `--- +mappings: +- match: test.*.* + name: "foo" +`, + configBad: false, + expectedAction: ActionTypeMap, + }, + { + // map action set + config: `--- +mappings: +- match: test.*.* + name: "foo" + action: map +`, + configBad: false, + expectedAction: ActionTypeMap, + }, + { + // drop action set + config: `--- +mappings: +- match: test.*.* + name: "foo" + action: drop +`, + configBad: false, + expectedAction: ActionTypeDrop, + }, + { + // invalid action set + config: `--- +mappings: +- match: test.*.* + name: "foo" + action: xyz +`, + configBad: true, + expectedAction: ActionTypeDrop, + }, + { + // valid yaml example + config: `--- +mappings: +- match: "test\\.(\\w+)\\.(\\w+)\\.counter" + match_type: regex + name: "${2}_total" + labels: + provider: "$1" +`, + configBad: false, + expectedAction: ActionTypeMap, + }, + { + // invalid yaml example + config: `--- +mappings: +- match: "test\.(\w+)\.(\w+)\.counter" + match_type: regex + name: "${2}_total" + labels: + provider: "$1" +`, + configBad: true, + }, + } + + for i, scenario := range scenarios { + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(scenario.config, 0) + if err != nil && !scenario.configBad { + t.Fatalf("%d. Config load error: %s %s", i, scenario.config, err) + } + if err == nil && scenario.configBad { + t.Fatalf("%d. 
Expected bad config, but loaded ok: %s", i, scenario.config) + } + + if !scenario.configBad { + a := mapper.Mappings[0].Action + if scenario.expectedAction != a { + t.Fatalf("%d: Expected action %v, got %v", i, scenario.expectedAction, a) + } + } + } +} + +// Test for https://github.com/prometheus/statsd_exporter/issues/273 +// Corrupt cache for multiple names matching in fsm +func TestMultipleMatches(t *testing.T) { + config := `--- +mappings: +- match: aa.bb.*.* + name: "aa_bb_${1}_total" + labels: + app: "$2" +` + mapper := MetricMapper{} + err := mapper.InitFromYAMLString(config, 0) + if err != nil { + t.Fatalf("config load error: %s ", err) + } + + names := map[string]string{ + "aa.bb.aa.myapp": "aa_bb_aa_total", + "aa.bb.bb.myapp": "aa_bb_bb_total", + "aa.bb.cc.myapp": "aa_bb_cc_total", + "aa.bb.dd.myapp": "aa_bb_dd_total", + } + + scenarios := []struct { + cacheSize int + }{ + { + cacheSize: 0, + }, + { + cacheSize: len(names), + }, + } + + for i, scenario := range scenarios { + mapper.InitCache(scenario.cacheSize) + + // run multiple times to ensure cache works as expected + for j := 0; j < 10; j++ { + for name, expected := range names { + m, _, ok := mapper.GetMapping(name, MetricTypeCounter) + if !ok { + t.Fatalf("%d:%d Did not find match for %s", i, j, name) + } + if m.Name != expected { + t.Fatalf("%d:%d Expected name %s, got %s", i, j, expected, m.Name) + } + } + } + } + +} diff --git a/pkg/mapper/match.go b/pkg/mapper/match.go index 12d5e8d..ff6d1b6 100644 --- a/pkg/mapper/match.go +++ b/pkg/mapper/match.go @@ -1,41 +1,41 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapper - -import "fmt" - -type MatchType string - -const ( - MatchTypeGlob MatchType = "glob" - MatchTypeRegex MatchType = "regex" - MatchTypeDefault MatchType = "" -) - -func (t *MatchType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var v string - if err := unmarshal(&v); err != nil { - return err - } - - switch MatchType(v) { - case MatchTypeRegex: - *t = MatchTypeRegex - case MatchTypeGlob, MatchTypeDefault: - *t = MatchTypeGlob - default: - return fmt.Errorf("invalid match type %q", v) - } - return nil -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mapper + +import "fmt" + +type MatchType string + +const ( + MatchTypeGlob MatchType = "glob" + MatchTypeRegex MatchType = "regex" + MatchTypeDefault MatchType = "" +) + +func (t *MatchType) UnmarshalYAML(unmarshal func(interface{}) error) error { + var v string + if err := unmarshal(&v); err != nil { + return err + } + + switch MatchType(v) { + case MatchTypeRegex: + *t = MatchTypeRegex + case MatchTypeGlob, MatchTypeDefault: + *t = MatchTypeGlob + default: + return fmt.Errorf("invalid match type %q", v) + } + return nil +} diff --git a/pkg/mapper/metric_type.go b/pkg/mapper/metric_type.go index 0a0810f..af53b59 100644 --- a/pkg/mapper/metric_type.go +++ b/pkg/mapper/metric_type.go @@ -1,43 +1,43 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mapper - -import "fmt" - -type MetricType string - -const ( - MetricTypeCounter MetricType = "counter" - MetricTypeGauge MetricType = "gauge" - MetricTypeTimer MetricType = "timer" -) - -func (m *MetricType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var v string - if err := unmarshal(&v); err != nil { - return err - } - - switch MetricType(v) { - case MetricTypeCounter: - *m = MetricTypeCounter - case MetricTypeGauge: - *m = MetricTypeGauge - case MetricTypeTimer: - *m = MetricTypeTimer - default: - return fmt.Errorf("invalid metric type '%s'", v) - } - return nil -} +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mapper + +import "fmt" + +type MetricType string + +const ( + MetricTypeCounter MetricType = "counter" + MetricTypeGauge MetricType = "gauge" + MetricTypeTimer MetricType = "timer" +) + +func (m *MetricType) UnmarshalYAML(unmarshal func(interface{}) error) error { + var v string + if err := unmarshal(&v); err != nil { + return err + } + + switch MetricType(v) { + case MetricTypeCounter: + *m = MetricTypeCounter + case MetricTypeGauge: + *m = MetricTypeGauge + case MetricTypeTimer: + *m = MetricTypeTimer + default: + return fmt.Errorf("invalid metric type '%s'", v) + } + return nil +} diff --git a/pkg/mapper/timer.go b/pkg/mapper/timer.go index f1d2fb7..60ccb01 100644 --- a/pkg/mapper/timer.go +++ b/pkg/mapper/timer.go @@ -1,41 +1,41 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mapper - -import "fmt" - -type TimerType string - -const ( - TimerTypeHistogram TimerType = "histogram" - TimerTypeSummary TimerType = "summary" - TimerTypeDefault TimerType = "" -) - -func (t *TimerType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var v string - if err := unmarshal(&v); err != nil { - return err - } - - switch TimerType(v) { - case TimerTypeHistogram: - *t = TimerTypeHistogram - case TimerTypeSummary, TimerTypeDefault: - *t = TimerTypeSummary - default: - return fmt.Errorf("invalid timer type '%s'", v) - } - return nil -} +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mapper + +import "fmt" + +type TimerType string + +const ( + TimerTypeHistogram TimerType = "histogram" + TimerTypeSummary TimerType = "summary" + TimerTypeDefault TimerType = "" +) + +func (t *TimerType) UnmarshalYAML(unmarshal func(interface{}) error) error { + var v string + if err := unmarshal(&v); err != nil { + return err + } + + switch TimerType(v) { + case TimerTypeHistogram: + *t = TimerTypeHistogram + case TimerTypeSummary, TimerTypeDefault: + *t = TimerTypeSummary + default: + return fmt.Errorf("invalid timer type '%s'", v) + } + return nil +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 0000000..e655233 --- /dev/null +++ b/pkg/metrics/metrics.go @@ -0,0 +1,54 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type MetricType int + +const ( + CounterMetricType MetricType = iota + GaugeMetricType + SummaryMetricType + HistogramMetricType +) + +type NameHash uint64 + +type ValueHash uint64 + +type LabelHash struct { + // This is a hash over the label names + Names NameHash + // This is a hash over the label names + label values + Values ValueHash +} + +type MetricHolder interface{} + +type VectorHolder interface { + Delete(label prometheus.Labels) bool +} + +type Vector struct { + Holder VectorHolder + RefCount uint64 +} + +type Metric struct { + MetricType MetricType + // Vectors key is the hash of the label names + Vectors map[NameHash]*Vector + // Metrics key is a hash of the label names + label values + Metrics map[ValueHash]*RegisteredMetric +} + +type RegisteredMetric struct { + LastRegisteredAt time.Time + Labels prometheus.Labels + TTL time.Duration + Metric MetricHolder + VecKey NameHash +} diff --git a/pkg/metrics/metrics.go~ b/pkg/metrics/metrics.go~ new file mode 100644 index 0000000..b379eeb --- /dev/null +++ b/pkg/metrics/metrics.go~ @@ -0,0 +1,42 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +type 
metricType int + +const ( + CounterMetricType metricType = iota + GaugeMetricType + SummaryMetricType + HistogramMetricType +) + +type nameHash uint64 +type valueHash uint64 +type labelHash struct { + // This is a hash over the label names + names nameHash + // This is a hash over the label names + label values + values valueHash +} + +type metricHolder interface{} + +type vectorHolder interface { + Delete(label prometheus.Labels) bool +} + +type vector struct { + holder vectorHolder + refCount uint64 +} + +type metric struct { + metricType metricType + // Vectors key is the hash of the label names + vectors map[nameHash]*vector + // Metrics key is a hash of the label names + label values + metrics map[valueHash]*registeredMetric +} diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go new file mode 100644 index 0000000..e4f44fc --- /dev/null +++ b/pkg/registry/registry.go @@ -0,0 +1,370 @@ +package registry + +import ( + "bytes" + "fmt" + "hash" + "hash/fnv" + "sort" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/statsd_exporter/pkg/clock" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/prometheus/statsd_exporter/pkg/metrics" +) + +// uncheckedCollector wraps a Collector but its Describe method yields no Desc. +// This allows incoming metrics to have inconsistent label sets +type uncheckedCollector struct { + c prometheus.Collector +} + +func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {} +func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) { + u.c.Collect(c) +} + +type Registry struct { + Metrics map[string]metrics.Metric + Mapper *mapper.MetricMapper + // The below value and label variables are allocated in the registry struct + // so that we don't have to allocate them every time have to compute a label + // hash. 
+ ValueBuf, NameBuf bytes.Buffer + Hasher hash.Hash64 +} + +func NewRegistry(mapper *mapper.MetricMapper) *Registry { + return &Registry{ + Metrics: make(map[string]metrics.Metric), + Mapper: mapper, + Hasher: fnv.New64a(), + } +} + +func (r *Registry) MetricConflicts(metricName string, metricType metrics.MetricType) bool { + vector, hasMetrics := r.Metrics[metricName] + if !hasMetrics { + // No metrics.Metric with this name exists + return false + } + + if vector.MetricType == metricType { + // We've found a copy of this metrics.Metric with this type, but different + // labels, so it's safe to create a new one. + return false + } + + // The metrics.Metric exists, but it's of a different type than we're trying to + // create. + return true +} + +func (r *Registry) StoreCounter(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.CounterVec, c prometheus.Counter, ttl time.Duration) { + r.Store(metricName, hash, labels, vec, c, metrics.CounterMetricType, ttl) +} + +func (r *Registry) StoreGauge(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.GaugeVec, g prometheus.Counter, ttl time.Duration) { + r.Store(metricName, hash, labels, vec, g, metrics.GaugeMetricType, ttl) +} + +func (r *Registry) StoreHistogram(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.HistogramVec, o prometheus.Observer, ttl time.Duration) { + r.Store(metricName, hash, labels, vec, o, metrics.HistogramMetricType, ttl) +} + +func (r *Registry) StoreSummary(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vec *prometheus.SummaryVec, o prometheus.Observer, ttl time.Duration) { + r.Store(metricName, hash, labels, vec, o, metrics.SummaryMetricType, ttl) +} + +func (r *Registry) Store(metricName string, hash metrics.LabelHash, labels prometheus.Labels, vh metrics.VectorHolder, mh metrics.MetricHolder, metricType metrics.MetricType, ttl time.Duration) { + metric, hasMetrics := 
r.Metrics[metricName] + if !hasMetrics { + metric.MetricType = metricType + metric.Vectors = make(map[metrics.NameHash]*metrics.Vector) + metric.Metrics = make(map[metrics.ValueHash]*metrics.RegisteredMetric) + + r.Metrics[metricName] = metric + } + + v, ok := metric.Vectors[hash.Names] + if !ok { + v = &metrics.Vector{Holder: vh} + metric.Vectors[hash.Names] = v + } + + rm, ok := metric.Metrics[hash.Values] + if !ok { + rm = &metrics.RegisteredMetric{ + Labels: labels, + TTL: ttl, + Metric: mh, + VecKey: hash.Names, + } + metric.Metrics[hash.Values] = rm + v.RefCount++ + } + now := clock.Now() + rm.LastRegisteredAt = now + // Update ttl from mapping + rm.TTL = ttl +} + +func (r *Registry) Get(metricName string, hash metrics.LabelHash, metricType metrics.MetricType) (metrics.VectorHolder, metrics.MetricHolder) { + metric, hasMetric := r.Metrics[metricName] + + if !hasMetric { + return nil, nil + } + if metric.MetricType != metricType { + return nil, nil + } + + rm, ok := metric.Metrics[hash.Values] + if ok { + now := clock.Now() + rm.LastRegisteredAt = now + return metric.Vectors[hash.Names].Holder, rm.Metric + } + + vector, ok := metric.Vectors[hash.Names] + if ok { + return vector.Holder, nil + } + + return nil, nil +} + +func (r *Registry) GetCounter(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Counter, error) { + hash, labelNames := r.HashLabels(labels) + vh, mh := r.Get(metricName, hash, metrics.CounterMetricType) + if mh != nil { + return mh.(prometheus.Counter), nil + } + + if r.MetricConflicts(metricName, metrics.CounterMetricType) { + return nil, fmt.Errorf("Metric with name %s is already registered", metricName) + } + + var counterVec *prometheus.CounterVec + if vh == nil { + metricsCount.WithLabelValues("counter").Inc() + counterVec = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: metricName, + Help: help, + }, labelNames) + + if err := 
prometheus.Register(uncheckedCollector{counterVec}); err != nil { + return nil, err + } + } else { + counterVec = vh.(*prometheus.CounterVec) + } + + var counter prometheus.Counter + var err error + if counter, err = counterVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.StoreCounter(metricName, hash, labels, counterVec, counter, mapping.Ttl) + + return counter, nil +} + +func (r *Registry) GetGauge(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Gauge, error) { + hash, labelNames := r.HashLabels(labels) + vh, mh := r.Get(metricName, hash, metrics.GaugeMetricType) + if mh != nil { + return mh.(prometheus.Gauge), nil + } + + if r.MetricConflicts(metricName, metrics.GaugeMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) + } + + var gaugeVec *prometheus.GaugeVec + if vh == nil { + metricsCount.WithLabelValues("gauge").Inc() + gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metricName, + Help: help, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{gaugeVec}); err != nil { + return nil, err + } + } else { + gaugeVec = vh.(*prometheus.GaugeVec) + } + + var gauge prometheus.Gauge + var err error + if gauge, err = gaugeVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.StoreGauge(metricName, hash, labels, gaugeVec, gauge, mapping.Ttl) + + return gauge, nil +} + +func (r *Registry) GetHistogram(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Observer, error) { + hash, labelNames := r.HashLabels(labels) + vh, mh := r.Get(metricName, hash, metrics.HistogramMetricType) + if mh != nil { + return mh.(prometheus.Observer), nil + } + + if r.MetricConflicts(metricName, metrics.HistogramMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) 
+ } + if r.MetricConflicts(metricName+"_sum", metrics.HistogramMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) + } + if r.MetricConflicts(metricName+"_count", metrics.HistogramMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) + } + if r.MetricConflicts(metricName+"_bucket", metrics.HistogramMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) + } + + var histogramVec *prometheus.HistogramVec + if vh == nil { + metricsCount.WithLabelValues("histogram").Inc() + buckets := r.Mapper.Defaults.Buckets + if mapping.HistogramOptions != nil && len(mapping.HistogramOptions.Buckets) > 0 { + buckets = mapping.HistogramOptions.Buckets + } + histogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: metricName, + Help: help, + Buckets: buckets, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{histogramVec}); err != nil { + return nil, err + } + } else { + histogramVec = vh.(*prometheus.HistogramVec) + } + + var observer prometheus.Observer + var err error + if observer, err = histogramVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.StoreHistogram(metricName, hash, labels, histogramVec, observer, mapping.Ttl) + + return observer, nil +} + +func (r *Registry) GetSummary(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping, metricsCount *prometheus.GaugeVec) (prometheus.Observer, error) { + hash, labelNames := r.HashLabels(labels) + vh, mh := r.Get(metricName, hash, metrics.SummaryMetricType) + if mh != nil { + return mh.(prometheus.Observer), nil + } + + if r.MetricConflicts(metricName, metrics.SummaryMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) + } + if r.MetricConflicts(metricName+"_sum", metrics.SummaryMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is 
already registered", metricName) + } + if r.MetricConflicts(metricName+"_count", metrics.SummaryMetricType) { + return nil, fmt.Errorf("metrics.Metric with name %s is already registered", metricName) + } + + var summaryVec *prometheus.SummaryVec + if vh == nil { + metricsCount.WithLabelValues("summary").Inc() + quantiles := r.Mapper.Defaults.Quantiles + if mapping != nil && mapping.SummaryOptions != nil && len(mapping.SummaryOptions.Quantiles) > 0 { + quantiles = mapping.SummaryOptions.Quantiles + } + summaryOptions := mapper.SummaryOptions{} + if mapping != nil && mapping.SummaryOptions != nil { + summaryOptions = *mapping.SummaryOptions + } + objectives := make(map[float64]float64) + for _, q := range quantiles { + objectives[q.Quantile] = q.Error + } + // In the case of no mapping file, explicitly define the default quantiles + if len(objectives) == 0 { + objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + } + summaryVec = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: metricName, + Help: help, + Objectives: objectives, + MaxAge: summaryOptions.MaxAge, + AgeBuckets: summaryOptions.AgeBuckets, + BufCap: summaryOptions.BufCap, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{summaryVec}); err != nil { + return nil, err + } + } else { + summaryVec = vh.(*prometheus.SummaryVec) + } + + var observer prometheus.Observer + var err error + if observer, err = summaryVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.StoreSummary(metricName, hash, labels, summaryVec, observer, mapping.Ttl) + + return observer, nil +} + +func (r *Registry) RemoveStaleMetrics() { + now := clock.Now() + // delete timeseries with expired ttl + for _, metric := range r.Metrics { + for hash, rm := range metric.Metrics { + if rm.TTL == 0 { + continue + } + if rm.LastRegisteredAt.Add(rm.TTL).Before(now) { + metric.Vectors[rm.VecKey].Holder.Delete(rm.Labels) + metric.Vectors[rm.VecKey].RefCount-- + delete(metric.Metrics, hash) + } + } 
+ } +} + +// Calculates a hash of both the label names and the label names and values. +func (r *Registry) HashLabels(labels prometheus.Labels) (metrics.LabelHash, []string) { + r.Hasher.Reset() + r.NameBuf.Reset() + r.ValueBuf.Reset() + labelNames := make([]string, 0, len(labels)) + + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + r.ValueBuf.WriteByte(model.SeparatorByte) + for _, labelName := range labelNames { + r.ValueBuf.WriteString(labels[labelName]) + r.ValueBuf.WriteByte(model.SeparatorByte) + + r.NameBuf.WriteString(labelName) + r.NameBuf.WriteByte(model.SeparatorByte) + } + + lh := metrics.LabelHash{} + r.Hasher.Write(r.NameBuf.Bytes()) + lh.Names = metrics.NameHash(r.Hasher.Sum64()) + + // Now add the values to the names we've already hashed. + r.Hasher.Write(r.ValueBuf.Bytes()) + lh.Values = metrics.ValueHash(r.Hasher.Sum64()) + + return lh, labelNames +} diff --git a/pkg/registry/registry.go~ b/pkg/registry/registry.go~ new file mode 100644 index 0000000..96da234 --- /dev/null +++ b/pkg/registry/registry.go~ @@ -0,0 +1,356 @@ +package registry + +import ( + "github.com/prometheus/statsd_exporter/pkg/metrics" +) + +type RegisteredMetric struct { + lastRegisteredAt time.Time + labels prometheus.Labels + ttl time.Duration + metric metricHolder + vecKey nameHash +} + +type Registry struct { + metrics map[string]metric + mapper *mapper.MetricMapper + // The below value and label variables are allocated in the registry struct + // so that we don't have to allocate them every time have to compute a label + // hash. 
+ valueBuf, nameBuf bytes.Buffer + hasher hash.Hash64 +} + +func NewRegistry(mapper *mapper.MetricMapper) *registry { + return ®istry{ + metrics: make(map[string]metric), + mapper: mapper, + hasher: fnv.New64a(), + } +} + +func (r *registry) MetricConflicts(metricName string, metricType metricType) bool { + vector, hasMetric := r.metrics[metricName] + if !hasMetric { + // No metric with this name exists + return false + } + + if vector.metricType == metricType { + // We've found a copy of this metric with this type, but different + // labels, so it's safe to create a new one. + return false + } + + // The metric exists, but it's of a different type than we're trying to + // create. + return true +} + +func (r *registry) StoreCounter(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.CounterVec, c prometheus.Counter, ttl time.Duration) { + r.store(metricName, hash, labels, vec, c, CounterMetricType, ttl) +} + +func (r *registry) StoreGauge(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.GaugeVec, g prometheus.Counter, ttl time.Duration) { + r.store(metricName, hash, labels, vec, g, GaugeMetricType, ttl) +} + +func (r *registry) StoreHistogram(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.HistogramVec, o prometheus.Observer, ttl time.Duration) { + r.store(metricName, hash, labels, vec, o, HistogramMetricType, ttl) +} + +func (r *registry) StoreSummary(metricName string, hash labelHash, labels prometheus.Labels, vec *prometheus.SummaryVec, o prometheus.Observer, ttl time.Duration) { + r.store(metricName, hash, labels, vec, o, SummaryMetricType, ttl) +} + +func (r *registry) Store(metricName string, hash labelHash, labels prometheus.Labels, vh vectorHolder, mh metricHolder, metricType metricType, ttl time.Duration) { + metric, hasMetric := r.metrics[metricName] + if !hasMetric { + metric.metricType = metricType + metric.vectors = make(map[nameHash]*vector) + metric.metrics = 
make(map[valueHash]*registeredMetric) + + r.metrics[metricName] = metric + } + + v, ok := metric.vectors[hash.names] + if !ok { + v = &vector{holder: vh} + metric.vectors[hash.names] = v + } + + rm, ok := metric.metrics[hash.values] + if !ok { + rm = ®isteredMetric{ + labels: labels, + ttl: ttl, + metric: mh, + vecKey: hash.names, + } + metric.metrics[hash.values] = rm + v.refCount++ + } + now := clock.Now() + rm.lastRegisteredAt = now + // Update ttl from mapping + rm.ttl = ttl +} + +func (r *registry) Get(metricName string, hash labelHash, metricType metricType) (vectorHolder, metricHolder) { + metric, hasMetric := r.metrics[metricName] + + if !hasMetric { + return nil, nil + } + if metric.metricType != metricType { + return nil, nil + } + + rm, ok := metric.metrics[hash.values] + if ok { + now := clock.Now() + rm.lastRegisteredAt = now + return metric.vectors[hash.names].holder, rm.metric + } + + vector, ok := metric.vectors[hash.names] + if ok { + return vector.holder, nil + } + + return nil, nil +} + +func (r *registry) GetCounter(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Counter, error) { + hash, labelNames := r.hashLabels(labels) + vh, mh := r.get(metricName, hash, CounterMetricType) + if mh != nil { + return mh.(prometheus.Counter), nil + } + + if r.metricConflicts(metricName, CounterMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + + var counterVec *prometheus.CounterVec + if vh == nil { + metricsCount.WithLabelValues("counter").Inc() + counterVec = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: metricName, + Help: help, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{counterVec}); err != nil { + return nil, err + } + } else { + counterVec = vh.(*prometheus.CounterVec) + } + + var counter prometheus.Counter + var err error + if counter, err = counterVec.GetMetricWith(labels); err != nil { + return nil, err + } + 
r.storeCounter(metricName, hash, labels, counterVec, counter, mapping.Ttl) + + return counter, nil +} + +func (r *registry) GetGauge(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Gauge, error) { + hash, labelNames := r.hashLabels(labels) + vh, mh := r.get(metricName, hash, GaugeMetricType) + if mh != nil { + return mh.(prometheus.Gauge), nil + } + + if r.metricConflicts(metricName, GaugeMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + + var gaugeVec *prometheus.GaugeVec + if vh == nil { + metricsCount.WithLabelValues("gauge").Inc() + gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metricName, + Help: help, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{gaugeVec}); err != nil { + return nil, err + } + } else { + gaugeVec = vh.(*prometheus.GaugeVec) + } + + var gauge prometheus.Gauge + var err error + if gauge, err = gaugeVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.storeGauge(metricName, hash, labels, gaugeVec, gauge, mapping.Ttl) + + return gauge, nil +} + +func (r *registry) GetHistogram(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Observer, error) { + hash, labelNames := r.hashLabels(labels) + vh, mh := r.get(metricName, hash, HistogramMetricType) + if mh != nil { + return mh.(prometheus.Observer), nil + } + + if r.metricConflicts(metricName, HistogramMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + if r.metricConflicts(metricName+"_sum", HistogramMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + if r.metricConflicts(metricName+"_count", HistogramMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + if r.metricConflicts(metricName+"_bucket", HistogramMetricType) { + return nil, 
fmt.Errorf("metric with name %s is already registered", metricName) + } + + var histogramVec *prometheus.HistogramVec + if vh == nil { + metricsCount.WithLabelValues("histogram").Inc() + buckets := r.mapper.Defaults.Buckets + if mapping.HistogramOptions != nil && len(mapping.HistogramOptions.Buckets) > 0 { + buckets = mapping.HistogramOptions.Buckets + } + histogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: metricName, + Help: help, + Buckets: buckets, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{histogramVec}); err != nil { + return nil, err + } + } else { + histogramVec = vh.(*prometheus.HistogramVec) + } + + var observer prometheus.Observer + var err error + if observer, err = histogramVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.storeHistogram(metricName, hash, labels, histogramVec, observer, mapping.Ttl) + + return observer, nil +} + +func (r *registry) GetSummary(metricName string, labels prometheus.Labels, help string, mapping *mapper.MetricMapping) (prometheus.Observer, error) { + hash, labelNames := r.hashLabels(labels) + vh, mh := r.get(metricName, hash, SummaryMetricType) + if mh != nil { + return mh.(prometheus.Observer), nil + } + + if r.metricConflicts(metricName, SummaryMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + if r.metricConflicts(metricName+"_sum", SummaryMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + if r.metricConflicts(metricName+"_count", SummaryMetricType) { + return nil, fmt.Errorf("metric with name %s is already registered", metricName) + } + + var summaryVec *prometheus.SummaryVec + if vh == nil { + metricsCount.WithLabelValues("summary").Inc() + quantiles := r.mapper.Defaults.Quantiles + if mapping != nil && mapping.SummaryOptions != nil && len(mapping.SummaryOptions.Quantiles) > 0 { + quantiles = mapping.SummaryOptions.Quantiles + } + summaryOptions := 
mapper.SummaryOptions{} + if mapping != nil && mapping.SummaryOptions != nil { + summaryOptions = *mapping.SummaryOptions + } + objectives := make(map[float64]float64) + for _, q := range quantiles { + objectives[q.Quantile] = q.Error + } + // In the case of no mapping file, explicitly define the default quantiles + if len(objectives) == 0 { + objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + } + summaryVec = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: metricName, + Help: help, + Objectives: objectives, + MaxAge: summaryOptions.MaxAge, + AgeBuckets: summaryOptions.AgeBuckets, + BufCap: summaryOptions.BufCap, + }, labelNames) + + if err := prometheus.Register(uncheckedCollector{summaryVec}); err != nil { + return nil, err + } + } else { + summaryVec = vh.(*prometheus.SummaryVec) + } + + var observer prometheus.Observer + var err error + if observer, err = summaryVec.GetMetricWith(labels); err != nil { + return nil, err + } + r.storeSummary(metricName, hash, labels, summaryVec, observer, mapping.Ttl) + + return observer, nil +} + +func (r *registry) RemoveStaleMetrics() { + now := clock.Now() + // delete timeseries with expired ttl + for _, metric := range r.metrics { + for hash, rm := range metric.metrics { + if rm.ttl == 0 { + continue + } + if rm.lastRegisteredAt.Add(rm.ttl).Before(now) { + metric.vectors[rm.vecKey].holder.Delete(rm.labels) + metric.vectors[rm.vecKey].refCount-- + delete(metric.metrics, hash) + } + } + } +} + +// Calculates a hash of both the label names and the label names and values. 
+func (r *registry) HashLabels(labels prometheus.Labels) (labelHash, []string) { + r.hasher.Reset() + r.nameBuf.Reset() + r.valueBuf.Reset() + labelNames := make([]string, 0, len(labels)) + + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + r.valueBuf.WriteByte(model.SeparatorByte) + for _, labelName := range labelNames { + r.valueBuf.WriteString(labels[labelName]) + r.valueBuf.WriteByte(model.SeparatorByte) + + r.nameBuf.WriteString(labelName) + r.nameBuf.WriteByte(model.SeparatorByte) + } + + lh := labelHash{} + r.hasher.Write(r.nameBuf.Bytes()) + lh.names = nameHash(r.hasher.Sum64()) + + // Now add the values to the names we've already hashed. + r.hasher.Write(r.valueBuf.Bytes()) + lh.values = valueHash(r.hasher.Sum64()) + + return lh, labelNames +} diff --git a/pkg/util/util.go b/pkg/util/util.go new file mode 100644 index 0000000..d5a3c12 --- /dev/null +++ b/pkg/util/util.go @@ -0,0 +1,53 @@ +package util + +import ( + "fmt" + "net" + "strconv" +) + +func IPPortFromString(addr string) (*net.IPAddr, int, error) { + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, 0, fmt.Errorf("bad StatsD listening address: %s", addr) + } + + if host == "" { + host = "0.0.0.0" + } + ip, err := net.ResolveIPAddr("ip", host) + if err != nil { + return nil, 0, fmt.Errorf("Unable to resolve %s: %s", host, err) + } + + port, err := strconv.Atoi(portStr) + if err != nil || port < 0 || port > 65535 { + return nil, 0, fmt.Errorf("Bad port %s: %s", portStr, err) + } + + return ip, port, nil +} + +func UDPAddrFromString(addr string) (*net.UDPAddr, error) { + ip, port, err := IPPortFromString(addr) + if err != nil { + return nil, err + } + return &net.UDPAddr{ + IP: ip.IP, + Port: port, + Zone: ip.Zone, + }, nil +} + +func TCPAddrFromString(addr string) (*net.TCPAddr, error) { + ip, port, err := IPPortFromString(addr) + if err != nil { + return nil, err + } + return &net.TCPAddr{ + IP: ip.IP, + 
Port: port, + Zone: ip.Zone, + }, nil +} diff --git a/pkg/util/util.go~ b/pkg/util/util.go~ new file mode 100644 index 0000000..3dc88da --- /dev/null +++ b/pkg/util/util.go~ @@ -0,0 +1,53 @@ +package util + +import ( + "fmt" + "net" + "strconv" +) + +func IPPortFromString(addr string) (*net.IPAddr, int, error) { + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, 0, fmt.Errorf("bad StatsD listening address: %s", addr) + } + + if host == "" { + host = "0.0.0.0" + } + ip, err := net.ResolveIPAddr("ip", host) + if err != nil { + return nil, 0, fmt.Errorf("Unable to resolve %s: %s", host, err) + } + + port, err := strconv.Atoi(portStr) + if err != nil || port < 0 || port > 65535 { + return nil, 0, fmt.Errorf("Bad port %s: %s", portStr, err) + } + + return ip, port, nil +} + +func UDPAddrFromString(addr string) (*net.UDPAddr, error) { + ip, port, err := ipPortFromString(addr) + if err != nil { + return nil, err + } + return &net.UDPAddr{ + IP: ip.IP, + Port: port, + Zone: ip.Zone, + }, nil +} + +func TCPAddrFromString(addr string) (*net.TCPAddr, error) { + ip, port, err := ipPortFromString(addr) + if err != nil { + return nil, err + } + return &net.TCPAddr{ + IP: ip.IP, + Port: port, + Zone: ip.Zone, + }, nil +} diff --git a/statsd_exporter.exe b/statsd_exporter.exe new file mode 100644 index 0000000..974a50a Binary files /dev/null and b/statsd_exporter.exe differ diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE index 7448756..32d347f 100644 --- a/vendor/github.com/alecthomas/template/LICENSE +++ b/vendor/github.com/alecthomas/template/LICENSE @@ -1,27 +1,27 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md index ef6a8ee..211008a 100644 --- a/vendor/github.com/alecthomas/template/README.md +++ b/vendor/github.com/alecthomas/template/README.md @@ -1,25 +1,25 @@ -# Go's `text/template` package with newline elision - -This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. - -eg. - -``` -{{if true}}\ -hello -{{end}}\ -``` - -Will result in: - -``` -hello\n -``` - -Rather than: - -``` -\n -hello\n -\n -``` +# Go's `text/template` package with newline elision + +This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. + +eg. 
+ +``` +{{if true}}\ +hello +{{end}}\ +``` + +Will result in: + +``` +hello\n +``` + +Rather than: + +``` +\n +hello\n +\n +``` diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go index 223c595..554ceb7 100644 --- a/vendor/github.com/alecthomas/template/doc.go +++ b/vendor/github.com/alecthomas/template/doc.go @@ -1,406 +1,406 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package template implements data-driven templates for generating textual output. - -To generate HTML output, see package html/template, which has the same interface -as this package but automatically secures HTML output against certain attacks. - -Templates are executed by applying them to a data structure. Annotations in the -template refer to elements of the data structure (typically a field of a struct -or a key in a map) to control execution and derive values to be displayed. -Execution of the template walks the structure and sets the cursor, represented -by a period '.' and called "dot", to the value at the current location in the -structure as execution proceeds. - -The input text for a template is UTF-8-encoded text in any format. -"Actions"--data evaluations or control structures--are delimited by -"{{" and "}}"; all text outside actions is copied to the output unchanged. -Actions may not span newlines, although comments can. - -Once parsed, a template may be executed safely in parallel. - -Here is a trivial example that prints "17 items are made of wool". - - type Inventory struct { - Material string - Count uint - } - sweaters := Inventory{"wool", 17} - tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") - if err != nil { panic(err) } - err = tmpl.Execute(os.Stdout, sweaters) - if err != nil { panic(err) } - -More intricate examples appear below. 
- -Actions - -Here is the list of actions. "Arguments" and "pipelines" are evaluations of -data, defined in detail below. - -*/ -// {{/* a comment */}} -// A comment; discarded. May contain newlines. -// Comments do not nest and must start and end at the -// delimiters, as shown here. -/* - - {{pipeline}} - The default textual representation of the value of the pipeline - is copied to the output. - - {{if pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, T1 is executed. The empty values are false, 0, any - nil pointer or interface value, and any array, slice, map, or - string of length zero. - Dot is unaffected. - - {{if pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, T0 is executed; - otherwise, T1 is executed. Dot is unaffected. - - {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} - To simplify the appearance of if-else chains, the else action - of an if may include another if directly; the effect is exactly - the same as writing - {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} - - {{range pipeline}} T1 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, nothing is output; - otherwise, dot is set to the successive elements of the array, - slice, or map and T1 is executed. If the value is a map and the - keys are of basic type with a defined order ("comparable"), the - elements will be visited in sorted key order. - - {{range pipeline}} T1 {{else}} T0 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, dot is unaffected and - T0 is executed; otherwise, dot is set to the successive elements - of the array, slice, or map and T1 is executed. - - {{template "name"}} - The template with the specified name is executed with nil data. 
- - {{template "name" pipeline}} - The template with the specified name is executed with dot set - to the value of the pipeline. - - {{with pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, dot is set to the value of the pipeline and T1 is - executed. - - {{with pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, dot is unaffected and T0 - is executed; otherwise, dot is set to the value of the pipeline - and T1 is executed. - -Arguments - -An argument is a simple value, denoted by one of the following. - - - A boolean, string, character, integer, floating-point, imaginary - or complex constant in Go syntax. These behave like Go's untyped - constants, although raw strings may not span newlines. - - The keyword nil, representing an untyped Go nil. - - The character '.' (period): - . - The result is the value of dot. - - A variable name, which is a (possibly empty) alphanumeric string - preceded by a dollar sign, such as - $piOver2 - or - $ - The result is the value of the variable. - Variables are described below. - - The name of a field of the data, which must be a struct, preceded - by a period, such as - .Field - The result is the value of the field. Field invocations may be - chained: - .Field1.Field2 - Fields can also be evaluated on variables, including chaining: - $x.Field1.Field2 - - The name of a key of the data, which must be a map, preceded - by a period, such as - .Key - The result is the map element value indexed by the key. - Key invocations may be chained and combined with fields to any - depth: - .Field1.Key1.Field2.Key2 - Although the key must be an alphanumeric identifier, unlike with - field names they do not need to start with an upper case letter. 
- Keys can also be evaluated on variables, including chaining: - $x.key1.key2 - - The name of a niladic method of the data, preceded by a period, - such as - .Method - The result is the value of invoking the method with dot as the - receiver, dot.Method(). Such a method must have one return value (of - any type) or two return values, the second of which is an error. - If it has two and the returned error is non-nil, execution terminates - and an error is returned to the caller as the value of Execute. - Method invocations may be chained and combined with fields and keys - to any depth: - .Field1.Key1.Method1.Field2.Key2.Method2 - Methods can also be evaluated on variables, including chaining: - $x.Method1.Field - - The name of a niladic function, such as - fun - The result is the value of invoking the function, fun(). The return - types and values behave as in methods. Functions and function - names are described below. - - A parenthesized instance of one the above, for grouping. The result - may be accessed by a field or map key invocation. - print (.F1 arg1) (.F2 arg2) - (.StructValuedMethod "arg").Field - -Arguments may evaluate to any type; if they are pointers the implementation -automatically indirects to the base type when required. -If an evaluation yields a function value, such as a function-valued -field of a struct, the function is not invoked automatically, but it -can be used as a truth value for an if action and the like. To invoke -it, use the call function, defined below. - -A pipeline is a possibly chained sequence of "commands". A command is a simple -value (argument) or a function or method call, possibly with multiple arguments: - - Argument - The result is the value of evaluating the argument. - .Method [Argument...] - The method can be alone or the last element of a chain but, - unlike methods in the middle of a chain, it can take arguments. - The result is the value of calling the method with the - arguments: - dot.Method(Argument1, etc.) 
- functionName [Argument...] - The result is the value of calling the function associated - with the name: - function(Argument1, etc.) - Functions and function names are described below. - -Pipelines - -A pipeline may be "chained" by separating a sequence of commands with pipeline -characters '|'. In a chained pipeline, the result of the each command is -passed as the last argument of the following command. The output of the final -command in the pipeline is the value of the pipeline. - -The output of a command will be either one value or two values, the second of -which has type error. If that second value is present and evaluates to -non-nil, execution terminates and the error is returned to the caller of -Execute. - -Variables - -A pipeline inside an action may initialize a variable to capture the result. -The initialization has syntax - - $variable := pipeline - -where $variable is the name of the variable. An action that declares a -variable produces no output. - -If a "range" action initializes a variable, the variable is set to the -successive elements of the iteration. Also, a "range" may declare two -variables, separated by a comma: - - range $index, $element := pipeline - -in which case $index and $element are set to the successive values of the -array/slice index or map key and element, respectively. Note that if there is -only one variable, it is assigned the element; this is opposite to the -convention in Go range clauses. - -A variable's scope extends to the "end" action of the control structure ("if", -"with", or "range") in which it is declared, or to the end of the template if -there is no such control structure. A template invocation does not inherit -variables from the point of its invocation. - -When execution begins, $ is set to the data argument passed to Execute, that is, -to the starting value of dot. - -Examples - -Here are some example one-line templates demonstrating pipelines and variables. 
-All produce the quoted word "output": - - {{"\"output\""}} - A string constant. - {{`"output"`}} - A raw string constant. - {{printf "%q" "output"}} - A function call. - {{"output" | printf "%q"}} - A function call whose final argument comes from the previous - command. - {{printf "%q" (print "out" "put")}} - A parenthesized argument. - {{"put" | printf "%s%s" "out" | printf "%q"}} - A more elaborate call. - {{"output" | printf "%s" | printf "%q"}} - A longer chain. - {{with "output"}}{{printf "%q" .}}{{end}} - A with action using dot. - {{with $x := "output" | printf "%q"}}{{$x}}{{end}} - A with action that creates and uses a variable. - {{with $x := "output"}}{{printf "%q" $x}}{{end}} - A with action that uses the variable in another action. - {{with $x := "output"}}{{$x | printf "%q"}}{{end}} - The same, but pipelined. - -Functions - -During execution functions are found in two function maps: first in the -template, then in the global function map. By default, no functions are defined -in the template but the Funcs method can be used to add them. - -Predefined global functions are named as follows. - - and - Returns the boolean AND of its arguments by returning the - first empty argument or the last argument, that is, - "and x y" behaves as "if x then y else x". All the - arguments are evaluated. - call - Returns the result of calling the first argument, which - must be a function, with the remaining arguments as parameters. - Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where - Y is a func-valued field, map entry, or the like. - The first argument must be the result of an evaluation - that yields a value of function type (as distinct from - a predefined function such as print). The function must - return either one or two result values, the second of which - is of type error. If the arguments don't match the function - or the returned error value is non-nil, execution stops. 
- html - Returns the escaped HTML equivalent of the textual - representation of its arguments. - index - Returns the result of indexing its first argument by the - following arguments. Thus "index x 1 2 3" is, in Go syntax, - x[1][2][3]. Each indexed item must be a map, slice, or array. - js - Returns the escaped JavaScript equivalent of the textual - representation of its arguments. - len - Returns the integer length of its argument. - not - Returns the boolean negation of its single argument. - or - Returns the boolean OR of its arguments by returning the - first non-empty argument or the last argument, that is, - "or x y" behaves as "if x then x else y". All the - arguments are evaluated. - print - An alias for fmt.Sprint - printf - An alias for fmt.Sprintf - println - An alias for fmt.Sprintln - urlquery - Returns the escaped value of the textual representation of - its arguments in a form suitable for embedding in a URL query. - -The boolean functions take any zero value to be false and a non-zero -value to be true. - -There is also a set of binary comparison operators defined as -functions: - - eq - Returns the boolean truth of arg1 == arg2 - ne - Returns the boolean truth of arg1 != arg2 - lt - Returns the boolean truth of arg1 < arg2 - le - Returns the boolean truth of arg1 <= arg2 - gt - Returns the boolean truth of arg1 > arg2 - ge - Returns the boolean truth of arg1 >= arg2 - -For simpler multi-way equality tests, eq (only) accepts two or more -arguments and compares the second and subsequent to the first, -returning in effect - - arg1==arg2 || arg1==arg3 || arg1==arg4 ... - -(Unlike with || in Go, however, eq is a function call and all the -arguments will be evaluated.) - -The comparison functions work on basic types only (or named basic -types, such as "type Celsius float32"). 
They implement the Go rules -for comparison of values, except that size and exact type are -ignored, so any integer value, signed or unsigned, may be compared -with any other integer value. (The arithmetic value is compared, -not the bit pattern, so all negative integers are less than all -unsigned integers.) However, as usual, one may not compare an int -with a float32 and so on. - -Associated templates - -Each template is named by a string specified when it is created. Also, each -template is associated with zero or more other templates that it may invoke by -name; such associations are transitive and form a name space of templates. - -A template may use a template invocation to instantiate another associated -template; see the explanation of the "template" action above. The name must be -that of a template associated with the template that contains the invocation. - -Nested template definitions - -When parsing a template, another template may be defined and associated with the -template being parsed. Template definitions must appear at the top level of the -template, much like global variables in a Go program. - -The syntax of such definitions is to surround each template declaration with a -"define" and "end" action. - -The define action names the template being created by providing a string -constant. Here is a simple example: - - `{{define "T1"}}ONE{{end}} - {{define "T2"}}TWO{{end}} - {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} - {{template "T3"}}` - -This defines two templates, T1 and T2, and a third T3 that invokes the other two -when it is executed. Finally it invokes T3. If executed this template will -produce the text - - ONE TWO - -By construction, a template may reside in only one association. If it's -necessary to have a template addressable from multiple associations, the -template definition must be parsed multiple times to create distinct *Template -values, or must be copied with the Clone or AddParseTree method. 
- -Parse may be called multiple times to assemble the various associated templates; -see the ParseFiles and ParseGlob functions and methods for simple ways to parse -related templates stored in files. - -A template may be executed directly or through ExecuteTemplate, which executes -an associated template identified by name. To invoke our example above, we -might write, - - err := tmpl.Execute(os.Stdout, "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -or to invoke a particular template explicitly by name, - - err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -*/ -package template +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package template implements data-driven templates for generating textual output. + +To generate HTML output, see package html/template, which has the same interface +as this package but automatically secures HTML output against certain attacks. + +Templates are executed by applying them to a data structure. Annotations in the +template refer to elements of the data structure (typically a field of a struct +or a key in a map) to control execution and derive values to be displayed. +Execution of the template walks the structure and sets the cursor, represented +by a period '.' and called "dot", to the value at the current location in the +structure as execution proceeds. + +The input text for a template is UTF-8-encoded text in any format. +"Actions"--data evaluations or control structures--are delimited by +"{{" and "}}"; all text outside actions is copied to the output unchanged. +Actions may not span newlines, although comments can. + +Once parsed, a template may be executed safely in parallel. + +Here is a trivial example that prints "17 items are made of wool". 
+ + type Inventory struct { + Material string + Count uint + } + sweaters := Inventory{"wool", 17} + tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") + if err != nil { panic(err) } + err = tmpl.Execute(os.Stdout, sweaters) + if err != nil { panic(err) } + +More intricate examples appear below. + +Actions + +Here is the list of actions. "Arguments" and "pipelines" are evaluations of +data, defined in detail below. + +*/ +// {{/* a comment */}} +// A comment; discarded. May contain newlines. +// Comments do not nest and must start and end at the +// delimiters, as shown here. +/* + + {{pipeline}} + The default textual representation of the value of the pipeline + is copied to the output. + + {{if pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, T1 is executed. The empty values are false, 0, any + nil pointer or interface value, and any array, slice, map, or + string of length zero. + Dot is unaffected. + + {{if pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, T0 is executed; + otherwise, T1 is executed. Dot is unaffected. + + {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} + To simplify the appearance of if-else chains, the else action + of an if may include another if directly; the effect is exactly + the same as writing + {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} + + {{range pipeline}} T1 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, nothing is output; + otherwise, dot is set to the successive elements of the array, + slice, or map and T1 is executed. If the value is a map and the + keys are of basic type with a defined order ("comparable"), the + elements will be visited in sorted key order. + + {{range pipeline}} T1 {{else}} T0 {{end}} + The value of the pipeline must be an array, slice, map, or channel. 
+ If the value of the pipeline has length zero, dot is unaffected and + T0 is executed; otherwise, dot is set to the successive elements + of the array, slice, or map and T1 is executed. + + {{template "name"}} + The template with the specified name is executed with nil data. + + {{template "name" pipeline}} + The template with the specified name is executed with dot set + to the value of the pipeline. + + {{with pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, dot is set to the value of the pipeline and T1 is + executed. + + {{with pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, dot is unaffected and T0 + is executed; otherwise, dot is set to the value of the pipeline + and T1 is executed. + +Arguments + +An argument is a simple value, denoted by one of the following. + + - A boolean, string, character, integer, floating-point, imaginary + or complex constant in Go syntax. These behave like Go's untyped + constants, although raw strings may not span newlines. + - The keyword nil, representing an untyped Go nil. + - The character '.' (period): + . + The result is the value of dot. + - A variable name, which is a (possibly empty) alphanumeric string + preceded by a dollar sign, such as + $piOver2 + or + $ + The result is the value of the variable. + Variables are described below. + - The name of a field of the data, which must be a struct, preceded + by a period, such as + .Field + The result is the value of the field. Field invocations may be + chained: + .Field1.Field2 + Fields can also be evaluated on variables, including chaining: + $x.Field1.Field2 + - The name of a key of the data, which must be a map, preceded + by a period, such as + .Key + The result is the map element value indexed by the key. 
+ Key invocations may be chained and combined with fields to any + depth: + .Field1.Key1.Field2.Key2 + Although the key must be an alphanumeric identifier, unlike with + field names they do not need to start with an upper case letter. + Keys can also be evaluated on variables, including chaining: + $x.key1.key2 + - The name of a niladic method of the data, preceded by a period, + such as + .Method + The result is the value of invoking the method with dot as the + receiver, dot.Method(). Such a method must have one return value (of + any type) or two return values, the second of which is an error. + If it has two and the returned error is non-nil, execution terminates + and an error is returned to the caller as the value of Execute. + Method invocations may be chained and combined with fields and keys + to any depth: + .Field1.Key1.Method1.Field2.Key2.Method2 + Methods can also be evaluated on variables, including chaining: + $x.Method1.Field + - The name of a niladic function, such as + fun + The result is the value of invoking the function, fun(). The return + types and values behave as in methods. Functions and function + names are described below. + - A parenthesized instance of one the above, for grouping. The result + may be accessed by a field or map key invocation. + print (.F1 arg1) (.F2 arg2) + (.StructValuedMethod "arg").Field + +Arguments may evaluate to any type; if they are pointers the implementation +automatically indirects to the base type when required. +If an evaluation yields a function value, such as a function-valued +field of a struct, the function is not invoked automatically, but it +can be used as a truth value for an if action and the like. To invoke +it, use the call function, defined below. + +A pipeline is a possibly chained sequence of "commands". A command is a simple +value (argument) or a function or method call, possibly with multiple arguments: + + Argument + The result is the value of evaluating the argument. 
+ .Method [Argument...] + The method can be alone or the last element of a chain but, + unlike methods in the middle of a chain, it can take arguments. + The result is the value of calling the method with the + arguments: + dot.Method(Argument1, etc.) + functionName [Argument...] + The result is the value of calling the function associated + with the name: + function(Argument1, etc.) + Functions and function names are described below. + +Pipelines + +A pipeline may be "chained" by separating a sequence of commands with pipeline +characters '|'. In a chained pipeline, the result of the each command is +passed as the last argument of the following command. The output of the final +command in the pipeline is the value of the pipeline. + +The output of a command will be either one value or two values, the second of +which has type error. If that second value is present and evaluates to +non-nil, execution terminates and the error is returned to the caller of +Execute. + +Variables + +A pipeline inside an action may initialize a variable to capture the result. +The initialization has syntax + + $variable := pipeline + +where $variable is the name of the variable. An action that declares a +variable produces no output. + +If a "range" action initializes a variable, the variable is set to the +successive elements of the iteration. Also, a "range" may declare two +variables, separated by a comma: + + range $index, $element := pipeline + +in which case $index and $element are set to the successive values of the +array/slice index or map key and element, respectively. Note that if there is +only one variable, it is assigned the element; this is opposite to the +convention in Go range clauses. + +A variable's scope extends to the "end" action of the control structure ("if", +"with", or "range") in which it is declared, or to the end of the template if +there is no such control structure. A template invocation does not inherit +variables from the point of its invocation. 
+ +When execution begins, $ is set to the data argument passed to Execute, that is, +to the starting value of dot. + +Examples + +Here are some example one-line templates demonstrating pipelines and variables. +All produce the quoted word "output": + + {{"\"output\""}} + A string constant. + {{`"output"`}} + A raw string constant. + {{printf "%q" "output"}} + A function call. + {{"output" | printf "%q"}} + A function call whose final argument comes from the previous + command. + {{printf "%q" (print "out" "put")}} + A parenthesized argument. + {{"put" | printf "%s%s" "out" | printf "%q"}} + A more elaborate call. + {{"output" | printf "%s" | printf "%q"}} + A longer chain. + {{with "output"}}{{printf "%q" .}}{{end}} + A with action using dot. + {{with $x := "output" | printf "%q"}}{{$x}}{{end}} + A with action that creates and uses a variable. + {{with $x := "output"}}{{printf "%q" $x}}{{end}} + A with action that uses the variable in another action. + {{with $x := "output"}}{{$x | printf "%q"}}{{end}} + The same, but pipelined. + +Functions + +During execution functions are found in two function maps: first in the +template, then in the global function map. By default, no functions are defined +in the template but the Funcs method can be used to add them. + +Predefined global functions are named as follows. + + and + Returns the boolean AND of its arguments by returning the + first empty argument or the last argument, that is, + "and x y" behaves as "if x then y else x". All the + arguments are evaluated. + call + Returns the result of calling the first argument, which + must be a function, with the remaining arguments as parameters. + Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where + Y is a func-valued field, map entry, or the like. + The first argument must be the result of an evaluation + that yields a value of function type (as distinct from + a predefined function such as print). 
The function must + return either one or two result values, the second of which + is of type error. If the arguments don't match the function + or the returned error value is non-nil, execution stops. + html + Returns the escaped HTML equivalent of the textual + representation of its arguments. + index + Returns the result of indexing its first argument by the + following arguments. Thus "index x 1 2 3" is, in Go syntax, + x[1][2][3]. Each indexed item must be a map, slice, or array. + js + Returns the escaped JavaScript equivalent of the textual + representation of its arguments. + len + Returns the integer length of its argument. + not + Returns the boolean negation of its single argument. + or + Returns the boolean OR of its arguments by returning the + first non-empty argument or the last argument, that is, + "or x y" behaves as "if x then x else y". All the + arguments are evaluated. + print + An alias for fmt.Sprint + printf + An alias for fmt.Sprintf + println + An alias for fmt.Sprintln + urlquery + Returns the escaped value of the textual representation of + its arguments in a form suitable for embedding in a URL query. + +The boolean functions take any zero value to be false and a non-zero +value to be true. + +There is also a set of binary comparison operators defined as +functions: + + eq + Returns the boolean truth of arg1 == arg2 + ne + Returns the boolean truth of arg1 != arg2 + lt + Returns the boolean truth of arg1 < arg2 + le + Returns the boolean truth of arg1 <= arg2 + gt + Returns the boolean truth of arg1 > arg2 + ge + Returns the boolean truth of arg1 >= arg2 + +For simpler multi-way equality tests, eq (only) accepts two or more +arguments and compares the second and subsequent to the first, +returning in effect + + arg1==arg2 || arg1==arg3 || arg1==arg4 ... + +(Unlike with || in Go, however, eq is a function call and all the +arguments will be evaluated.) 
+ +The comparison functions work on basic types only (or named basic +types, such as "type Celsius float32"). They implement the Go rules +for comparison of values, except that size and exact type are +ignored, so any integer value, signed or unsigned, may be compared +with any other integer value. (The arithmetic value is compared, +not the bit pattern, so all negative integers are less than all +unsigned integers.) However, as usual, one may not compare an int +with a float32 and so on. + +Associated templates + +Each template is named by a string specified when it is created. Also, each +template is associated with zero or more other templates that it may invoke by +name; such associations are transitive and form a name space of templates. + +A template may use a template invocation to instantiate another associated +template; see the explanation of the "template" action above. The name must be +that of a template associated with the template that contains the invocation. + +Nested template definitions + +When parsing a template, another template may be defined and associated with the +template being parsed. Template definitions must appear at the top level of the +template, much like global variables in a Go program. + +The syntax of such definitions is to surround each template declaration with a +"define" and "end" action. + +The define action names the template being created by providing a string +constant. Here is a simple example: + + `{{define "T1"}}ONE{{end}} + {{define "T2"}}TWO{{end}} + {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} + {{template "T3"}}` + +This defines two templates, T1 and T2, and a third T3 that invokes the other two +when it is executed. Finally it invokes T3. If executed this template will +produce the text + + ONE TWO + +By construction, a template may reside in only one association. 
If it's +necessary to have a template addressable from multiple associations, the +template definition must be parsed multiple times to create distinct *Template +values, or must be copied with the Clone or AddParseTree method. + +Parse may be called multiple times to assemble the various associated templates; +see the ParseFiles and ParseGlob functions and methods for simple ways to parse +related templates stored in files. + +A template may be executed directly or through ExecuteTemplate, which executes +an associated template identified by name. To invoke our example above, we +might write, + + err := tmpl.Execute(os.Stdout, "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +or to invoke a particular template explicitly by name, + + err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +*/ +package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go index c3078e5..48843f8 100644 --- a/vendor/github.com/alecthomas/template/exec.go +++ b/vendor/github.com/alecthomas/template/exec.go @@ -1,845 +1,845 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "fmt" - "io" - "reflect" - "runtime" - "sort" - "strings" - - "github.com/alecthomas/template/parse" -) - -// state represents the state of an execution. It's not part of the -// template so that multiple executions of the same template -// can execute in parallel. -type state struct { - tmpl *Template - wr io.Writer - node parse.Node // current node, for errors - vars []variable // push-down stack of variable values. -} - -// variable holds the dynamic value of a variable such as $, $x etc. 
-type variable struct { - name string - value reflect.Value -} - -// push pushes a new variable on the stack. -func (s *state) push(name string, value reflect.Value) { - s.vars = append(s.vars, variable{name, value}) -} - -// mark returns the length of the variable stack. -func (s *state) mark() int { - return len(s.vars) -} - -// pop pops the variable stack up to the mark. -func (s *state) pop(mark int) { - s.vars = s.vars[0:mark] -} - -// setVar overwrites the top-nth variable on the stack. Used by range iterations. -func (s *state) setVar(n int, value reflect.Value) { - s.vars[len(s.vars)-n].value = value -} - -// varValue returns the value of the named variable. -func (s *state) varValue(name string) reflect.Value { - for i := s.mark() - 1; i >= 0; i-- { - if s.vars[i].name == name { - return s.vars[i].value - } - } - s.errorf("undefined variable: %s", name) - return zero -} - -var zero reflect.Value - -// at marks the state to be on node n, for error reporting. -func (s *state) at(node parse.Node) { - s.node = node -} - -// doublePercent returns the string with %'s replaced by %%, if necessary, -// so it can be used safely inside a Printf format string. -func doublePercent(str string) string { - if strings.Contains(str, "%") { - str = strings.Replace(str, "%", "%%", -1) - } - return str -} - -// errorf formats the error and terminates processing. -func (s *state) errorf(format string, args ...interface{}) { - name := doublePercent(s.tmpl.Name()) - if s.node == nil { - format = fmt.Sprintf("template: %s: %s", name, format) - } else { - location, context := s.tmpl.ErrorContext(s.node) - format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) - } - panic(fmt.Errorf(format, args...)) -} - -// errRecover is the handler that turns panics into returns from the top -// level of Parse. 
-func errRecover(errp *error) { - e := recover() - if e != nil { - switch err := e.(type) { - case runtime.Error: - panic(e) - case error: - *errp = err - default: - panic(e) - } - } -} - -// ExecuteTemplate applies the template associated with t that has the given name -// to the specified data object and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - tmpl := t.tmpl[name] - if tmpl == nil { - return fmt.Errorf("template: no template %q associated with template %q", name, t.name) - } - return tmpl.Execute(wr, data) -} - -// Execute applies a parsed template to the specified data object, -// and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { - defer errRecover(&err) - value := reflect.ValueOf(data) - state := &state{ - tmpl: t, - wr: wr, - vars: []variable{{"$", value}}, - } - t.init() - if t.Tree == nil || t.Root == nil { - var b bytes.Buffer - for name, tmpl := range t.tmpl { - if tmpl.Tree == nil || tmpl.Root == nil { - continue - } - if b.Len() > 0 { - b.WriteString(", ") - } - fmt.Fprintf(&b, "%q", name) - } - var s string - if b.Len() > 0 { - s = "; defined templates are: " + b.String() - } - state.errorf("%q is an incomplete or empty template%s", t.Name(), s) - } - state.walk(value, t.Root) - return -} - -// Walk functions step through the major pieces of the template structure, -// generating output as they go. 
-func (s *state) walk(dot reflect.Value, node parse.Node) { - s.at(node) - switch node := node.(type) { - case *parse.ActionNode: - // Do not pop variables so they persist until next end. - // Also, if the action declares variables, don't print the result. - val := s.evalPipeline(dot, node.Pipe) - if len(node.Pipe.Decl) == 0 { - s.printValue(node, val) - } - case *parse.IfNode: - s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) - case *parse.ListNode: - for _, node := range node.Nodes { - s.walk(dot, node) - } - case *parse.RangeNode: - s.walkRange(dot, node) - case *parse.TemplateNode: - s.walkTemplate(dot, node) - case *parse.TextNode: - if _, err := s.wr.Write(node.Text); err != nil { - s.errorf("%s", err) - } - case *parse.WithNode: - s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) - default: - s.errorf("unknown node: %s", node) - } -} - -// walkIfOrWith walks an 'if' or 'with' node. The two control structures -// are identical in behavior except that 'with' sets dot. -func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { - defer s.pop(s.mark()) - val := s.evalPipeline(dot, pipe) - truth, ok := isTrue(val) - if !ok { - s.errorf("if/with can't use %v", val) - } - if truth { - if typ == parse.NodeWith { - s.walk(val, list) - } else { - s.walk(dot, list) - } - } else if elseList != nil { - s.walk(dot, elseList) - } -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. 
- return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} - -func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { - s.at(r) - defer s.pop(s.mark()) - val, _ := indirect(s.evalPipeline(dot, r.Pipe)) - // mark top of stack before any variables in the body are pushed. - mark := s.mark() - oneIteration := func(index, elem reflect.Value) { - // Set top var (lexically the second if there are two) to the element. - if len(r.Pipe.Decl) > 0 { - s.setVar(1, elem) - } - // Set next var (lexically the first if there are two) to the index. - if len(r.Pipe.Decl) > 1 { - s.setVar(2, index) - } - s.walk(elem, r.List) - s.pop(mark) - } - switch val.Kind() { - case reflect.Array, reflect.Slice: - if val.Len() == 0 { - break - } - for i := 0; i < val.Len(); i++ { - oneIteration(reflect.ValueOf(i), val.Index(i)) - } - return - case reflect.Map: - if val.Len() == 0 { - break - } - for _, key := range sortKeys(val.MapKeys()) { - oneIteration(key, val.MapIndex(key)) - } - return - case reflect.Chan: - if val.IsNil() { - break - } - i := 0 - for ; ; i++ { - elem, ok := val.Recv() - if !ok { - break - } - oneIteration(reflect.ValueOf(i), elem) - } - if i == 0 { - break - } - return - case reflect.Invalid: - break // An invalid value is likely a nil map, etc. 
and acts like an empty map. - default: - s.errorf("range can't iterate over %v", val) - } - if r.ElseList != nil { - s.walk(dot, r.ElseList) - } -} - -func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { - s.at(t) - tmpl := s.tmpl.tmpl[t.Name] - if tmpl == nil { - s.errorf("template %q not defined", t.Name) - } - // Variables declared by the pipeline persist. - dot = s.evalPipeline(dot, t.Pipe) - newState := *s - newState.tmpl = tmpl - // No dynamic scoping: template invocations inherit no variables. - newState.vars = []variable{{"$", dot}} - newState.walk(dot, tmpl.Root) -} - -// Eval functions evaluate pipelines, commands, and their elements and extract -// values from the data structure by examining fields, calling methods, and so on. -// The printing of those values happens only through walk functions. - -// evalPipeline returns the value acquired by evaluating a pipeline. If the -// pipeline has a variable declaration, the variable will be pushed on the -// stack. Callers should therefore pop the stack after they are finished -// executing commands depending on the pipeline value. -func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { - if pipe == nil { - return - } - s.at(pipe) - for _, cmd := range pipe.Cmds { - value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. - // If the object has type interface{}, dig down one level to the thing inside. - if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { - value = reflect.ValueOf(value.Interface()) // lovely! 
- } - } - for _, variable := range pipe.Decl { - s.push(variable.Ident[0], value) - } - return value -} - -func (s *state) notAFunction(args []parse.Node, final reflect.Value) { - if len(args) > 1 || final.IsValid() { - s.errorf("can't give argument to non-function %s", args[0]) - } -} - -func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { - firstWord := cmd.Args[0] - switch n := firstWord.(type) { - case *parse.FieldNode: - return s.evalFieldNode(dot, n, cmd.Args, final) - case *parse.ChainNode: - return s.evalChainNode(dot, n, cmd.Args, final) - case *parse.IdentifierNode: - // Must be a function. - return s.evalFunction(dot, n, cmd, cmd.Args, final) - case *parse.PipeNode: - // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. - return s.evalPipeline(dot, n) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, cmd.Args, final) - } - s.at(firstWord) - s.notAFunction(cmd.Args, final) - switch word := firstWord.(type) { - case *parse.BoolNode: - return reflect.ValueOf(word.True) - case *parse.DotNode: - return dot - case *parse.NilNode: - s.errorf("nil is not a command") - case *parse.NumberNode: - return s.idealConstant(word) - case *parse.StringNode: - return reflect.ValueOf(word.Text) - } - s.errorf("can't evaluate command %q", firstWord) - panic("not reached") -} - -// idealConstant is called to return the value of a number in a context where -// we don't know the type. In that case, the syntax of the number tells us -// its type, and we use Go rules to resolve. Note there is no such thing as -// a uint ideal constant in this situation - the value must be of int type. -func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { - // These are ideal constants but we don't know the type - // and we have no context. (If it was a method argument, - // we'd know what we need.) The syntax guides us to some extent. 
- s.at(constant) - switch { - case constant.IsComplex: - return reflect.ValueOf(constant.Complex128) // incontrovertible. - case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: - return reflect.ValueOf(constant.Float64) - case constant.IsInt: - n := int(constant.Int64) - if int64(n) != constant.Int64 { - s.errorf("%s overflows int", constant.Text) - } - return reflect.ValueOf(n) - case constant.IsUint: - s.errorf("%s overflows int", constant.Text) - } - return zero -} - -func isHexConstant(s string) bool { - return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') -} - -func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(field) - return s.evalFieldChain(dot, dot, field, field.Ident, args, final) -} - -func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(chain) - // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. - pipe := s.evalArg(dot, nil, chain.Node) - if len(chain.Field) == 0 { - s.errorf("internal error: no fields in evalChainNode") - } - return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) -} - -func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { - // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. - s.at(variable) - value := s.varValue(variable.Ident[0]) - if len(variable.Ident) == 1 { - s.notAFunction(args, final) - return value - } - return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) -} - -// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. -// dot is the environment in which to evaluate arguments, while -// receiver is the value being walked along the chain. 
-func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { - n := len(ident) - for i := 0; i < n-1; i++ { - receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) - } - // Now if it's a method, it gets the arguments. - return s.evalField(dot, ident[n-1], node, args, final, receiver) -} - -func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { - s.at(node) - name := node.Ident - function, ok := findFunction(name, s.tmpl) - if !ok { - s.errorf("%q is not a defined function", name) - } - return s.evalCall(dot, function, cmd, name, args, final) -} - -// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). -// The 'final' argument represents the return value from the preceding -// value of the pipeline, if any. -func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { - if !receiver.IsValid() { - return zero - } - typ := receiver.Type() - receiver, _ = indirect(receiver) - // Unless it's an interface, need to get to a value of type *T to guarantee - // we see all methods of T and *T. - ptr := receiver - if ptr.Kind() != reflect.Interface && ptr.CanAddr() { - ptr = ptr.Addr() - } - if method := ptr.MethodByName(fieldName); method.IsValid() { - return s.evalCall(dot, method, node, fieldName, args, final) - } - hasArgs := len(args) > 1 || final.IsValid() - // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. 
- receiver, isNil := indirect(receiver) - if isNil { - s.errorf("nil pointer evaluating %s.%s", typ, fieldName) - } - switch receiver.Kind() { - case reflect.Struct: - tField, ok := receiver.Type().FieldByName(fieldName) - if ok { - field := receiver.FieldByIndex(tField.Index) - if tField.PkgPath != "" { // field is unexported - s.errorf("%s is an unexported field of struct type %s", fieldName, typ) - } - // If it's a function, we must call it. - if hasArgs { - s.errorf("%s has arguments but cannot be invoked as function", fieldName) - } - return field - } - s.errorf("%s is not a field of struct type %s", fieldName, typ) - case reflect.Map: - // If it's a map, attempt to use the field name as a key. - nameVal := reflect.ValueOf(fieldName) - if nameVal.Type().AssignableTo(receiver.Type().Key()) { - if hasArgs { - s.errorf("%s is not a method but has arguments", fieldName) - } - return receiver.MapIndex(nameVal) - } - } - s.errorf("can't evaluate field %s in type %s", fieldName, typ) - panic("not reached") -} - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so -// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] -// as the function itself. -func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { - if args != nil { - args = args[1:] // Zeroth arg is function name/node; not passed to function. - } - typ := fun.Type() - numIn := len(args) - if final.IsValid() { - numIn++ - } - numFixed := len(args) - if typ.IsVariadic() { - numFixed = typ.NumIn() - 1 // last arg is the variadic one. 
- if numIn < numFixed { - s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) - } - } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { - s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) - } - if !goodFunc(typ) { - // TODO: This could still be a confusing error; maybe goodFunc should provide info. - s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) - } - // Build the arg list. - argv := make([]reflect.Value, numIn) - // Args must be evaluated. Fixed args first. - i := 0 - for ; i < numFixed && i < len(args); i++ { - argv[i] = s.evalArg(dot, typ.In(i), args[i]) - } - // Now the ... args. - if typ.IsVariadic() { - argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. - for ; i < len(args); i++ { - argv[i] = s.evalArg(dot, argType, args[i]) - } - } - // Add final value if necessary. - if final.IsValid() { - t := typ.In(typ.NumIn() - 1) - if typ.IsVariadic() { - t = t.Elem() - } - argv[i] = s.validateType(final, t) - } - result := fun.Call(argv) - // If we have an error that is not nil, stop execution and return that error to the caller. - if len(result) == 2 && !result[1].IsNil() { - s.at(node) - s.errorf("error calling %s: %s", name, result[1].Interface().(error)) - } - return result[0] -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// validateType guarantees that the value is valid and assignable to the type. -func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { - if !value.IsValid() { - if typ == nil || canBeNil(typ) { - // An untyped nil interface{}. Accept as a proper nil value. 
- return reflect.Zero(typ) - } - s.errorf("invalid value; expected %s", typ) - } - if typ != nil && !value.Type().AssignableTo(typ) { - if value.Kind() == reflect.Interface && !value.IsNil() { - value = value.Elem() - if value.Type().AssignableTo(typ) { - return value - } - // fallthrough - } - // Does one dereference or indirection work? We could do more, as we - // do with method receivers, but that gets messy and method receivers - // are much more constrained, so it makes more sense there than here. - // Besides, one is almost always all you need. - switch { - case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): - value = value.Elem() - if !value.IsValid() { - s.errorf("dereference of nil pointer of type %s", typ) - } - case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): - value = value.Addr() - default: - s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) - } - } - return value -} - -func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - switch arg := n.(type) { - case *parse.DotNode: - return s.validateType(dot, typ) - case *parse.NilNode: - if canBeNil(typ) { - return reflect.Zero(typ) - } - s.errorf("cannot assign nil to %s", typ) - case *parse.FieldNode: - return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) - case *parse.VariableNode: - return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) - case *parse.PipeNode: - return s.validateType(s.evalPipeline(dot, arg), typ) - case *parse.IdentifierNode: - return s.evalFunction(dot, arg, arg, nil, zero) - case *parse.ChainNode: - return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) - } - switch typ.Kind() { - case reflect.Bool: - return s.evalBool(typ, n) - case reflect.Complex64, reflect.Complex128: - return s.evalComplex(typ, n) - case reflect.Float32, reflect.Float64: - return s.evalFloat(typ, n) - case reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64: - return s.evalInteger(typ, n) - case reflect.Interface: - if typ.NumMethod() == 0 { - return s.evalEmptyInterface(dot, n) - } - case reflect.String: - return s.evalString(typ, n) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return s.evalUnsignedInteger(typ, n) - } - s.errorf("can't handle %s for arg of type %s", n, typ) - panic("not reached") -} - -func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.BoolNode); ok { - value := reflect.New(typ).Elem() - value.SetBool(n.True) - return value - } - s.errorf("expected bool; found %s", n) - panic("not reached") -} - -func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.StringNode); ok { - value := reflect.New(typ).Elem() - value.SetString(n.Text) - return value - } - s.errorf("expected string; found %s", n) - panic("not reached") -} - -func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsInt { - value := reflect.New(typ).Elem() - value.SetInt(n.Int64) - return value - } - s.errorf("expected integer; found %s", n) - panic("not reached") -} - -func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsUint { - value := reflect.New(typ).Elem() - value.SetUint(n.Uint64) - return value - } - s.errorf("expected unsigned integer; found %s", n) - panic("not reached") -} - -func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { - value := reflect.New(typ).Elem() - value.SetFloat(n.Float64) - return value - } - s.errorf("expected float; found %s", n) - panic("not reached") -} - -func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { - if n, ok := n.(*parse.NumberNode); ok 
&& n.IsComplex { - value := reflect.New(typ).Elem() - value.SetComplex(n.Complex128) - return value - } - s.errorf("expected complex; found %s", n) - panic("not reached") -} - -func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { - s.at(n) - switch n := n.(type) { - case *parse.BoolNode: - return reflect.ValueOf(n.True) - case *parse.DotNode: - return dot - case *parse.FieldNode: - return s.evalFieldNode(dot, n, nil, zero) - case *parse.IdentifierNode: - return s.evalFunction(dot, n, n, nil, zero) - case *parse.NilNode: - // NilNode is handled in evalArg, the only place that calls here. - s.errorf("evalEmptyInterface: nil (can't happen)") - case *parse.NumberNode: - return s.idealConstant(n) - case *parse.StringNode: - return reflect.ValueOf(n.Text) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, nil, zero) - case *parse.PipeNode: - return s.evalPipeline(dot, n) - } - s.errorf("can't handle assignment of %s to empty interface argument", n) - panic("not reached") -} - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printValue writes the textual representation of the value to the output of -// the template. -func (s *state) printValue(n parse.Node, v reflect.Value) { - s.at(n) - iface, ok := printableValue(v) - if !ok { - s.errorf("can't print %s of type %s", n, v.Type()) - } - fmt.Fprint(s.wr, iface) -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. 
-func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// Types to help sort the keys in a map for reproducible output. - -type rvs []reflect.Value - -func (x rvs) Len() int { return len(x) } -func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -type rvInts struct{ rvs } - -func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } - -type rvUints struct{ rvs } - -func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } - -type rvFloats struct{ rvs } - -func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } - -type rvStrings struct{ rvs } - -func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } - -// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. -func sortKeys(v []reflect.Value) []reflect.Value { - if len(v) <= 1 { - return v - } - switch v[0].Kind() { - case reflect.Float32, reflect.Float64: - sort.Sort(rvFloats{v}) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - sort.Sort(rvInts{v}) - case reflect.String: - sort.Sort(rvStrings{v}) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - sort.Sort(rvUints{v}) - } - return v -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package template + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/alecthomas/template/parse" +) + +// state represents the state of an execution. It's not part of the +// template so that multiple executions of the same template +// can execute in parallel. +type state struct { + tmpl *Template + wr io.Writer + node parse.Node // current node, for errors + vars []variable // push-down stack of variable values. +} + +// variable holds the dynamic value of a variable such as $, $x etc. +type variable struct { + name string + value reflect.Value +} + +// push pushes a new variable on the stack. +func (s *state) push(name string, value reflect.Value) { + s.vars = append(s.vars, variable{name, value}) +} + +// mark returns the length of the variable stack. +func (s *state) mark() int { + return len(s.vars) +} + +// pop pops the variable stack up to the mark. +func (s *state) pop(mark int) { + s.vars = s.vars[0:mark] +} + +// setVar overwrites the top-nth variable on the stack. Used by range iterations. +func (s *state) setVar(n int, value reflect.Value) { + s.vars[len(s.vars)-n].value = value +} + +// varValue returns the value of the named variable. +func (s *state) varValue(name string) reflect.Value { + for i := s.mark() - 1; i >= 0; i-- { + if s.vars[i].name == name { + return s.vars[i].value + } + } + s.errorf("undefined variable: %s", name) + return zero +} + +var zero reflect.Value + +// at marks the state to be on node n, for error reporting. +func (s *state) at(node parse.Node) { + s.node = node +} + +// doublePercent returns the string with %'s replaced by %%, if necessary, +// so it can be used safely inside a Printf format string. +func doublePercent(str string) string { + if strings.Contains(str, "%") { + str = strings.Replace(str, "%", "%%", -1) + } + return str +} + +// errorf formats the error and terminates processing. 
+func (s *state) errorf(format string, args ...interface{}) { + name := doublePercent(s.tmpl.Name()) + if s.node == nil { + format = fmt.Sprintf("template: %s: %s", name, format) + } else { + location, context := s.tmpl.ErrorContext(s.node) + format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) + } + panic(fmt.Errorf(format, args...)) +} + +// errRecover is the handler that turns panics into returns from the top +// level of Parse. +func errRecover(errp *error) { + e := recover() + if e != nil { + switch err := e.(type) { + case runtime.Error: + panic(e) + case error: + *errp = err + default: + panic(e) + } + } +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.tmpl[name] + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.name) + } + return tmpl.Execute(wr, data) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. 
+func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + defer errRecover(&err) + value := reflect.ValueOf(data) + state := &state{ + tmpl: t, + wr: wr, + vars: []variable{{"$", value}}, + } + t.init() + if t.Tree == nil || t.Root == nil { + var b bytes.Buffer + for name, tmpl := range t.tmpl { + if tmpl.Tree == nil || tmpl.Root == nil { + continue + } + if b.Len() > 0 { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%q", name) + } + var s string + if b.Len() > 0 { + s = "; defined templates are: " + b.String() + } + state.errorf("%q is an incomplete or empty template%s", t.Name(), s) + } + state.walk(value, t.Root) + return +} + +// Walk functions step through the major pieces of the template structure, +// generating output as they go. +func (s *state) walk(dot reflect.Value, node parse.Node) { + s.at(node) + switch node := node.(type) { + case *parse.ActionNode: + // Do not pop variables so they persist until next end. + // Also, if the action declares variables, don't print the result. + val := s.evalPipeline(dot, node.Pipe) + if len(node.Pipe.Decl) == 0 { + s.printValue(node, val) + } + case *parse.IfNode: + s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) + case *parse.ListNode: + for _, node := range node.Nodes { + s.walk(dot, node) + } + case *parse.RangeNode: + s.walkRange(dot, node) + case *parse.TemplateNode: + s.walkTemplate(dot, node) + case *parse.TextNode: + if _, err := s.wr.Write(node.Text); err != nil { + s.errorf("%s", err) + } + case *parse.WithNode: + s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) + default: + s.errorf("unknown node: %s", node) + } +} + +// walkIfOrWith walks an 'if' or 'with' node. The two control structures +// are identical in behavior except that 'with' sets dot. 
+func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { + defer s.pop(s.mark()) + val := s.evalPipeline(dot, pipe) + truth, ok := isTrue(val) + if !ok { + s.errorf("if/with can't use %v", val) + } + if truth { + if typ == parse.NodeWith { + s.walk(val, list) + } else { + s.walk(dot, list) + } + } else if elseList != nil { + s.walk(dot, elseList) + } +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} + +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { + s.at(r) + defer s.pop(s.mark()) + val, _ := indirect(s.evalPipeline(dot, r.Pipe)) + // mark top of stack before any variables in the body are pushed. + mark := s.mark() + oneIteration := func(index, elem reflect.Value) { + // Set top var (lexically the second if there are two) to the element. + if len(r.Pipe.Decl) > 0 { + s.setVar(1, elem) + } + // Set next var (lexically the first if there are two) to the index. 
+ if len(r.Pipe.Decl) > 1 { + s.setVar(2, index) + } + s.walk(elem, r.List) + s.pop(mark) + } + switch val.Kind() { + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + break + } + for i := 0; i < val.Len(); i++ { + oneIteration(reflect.ValueOf(i), val.Index(i)) + } + return + case reflect.Map: + if val.Len() == 0 { + break + } + for _, key := range sortKeys(val.MapKeys()) { + oneIteration(key, val.MapIndex(key)) + } + return + case reflect.Chan: + if val.IsNil() { + break + } + i := 0 + for ; ; i++ { + elem, ok := val.Recv() + if !ok { + break + } + oneIteration(reflect.ValueOf(i), elem) + } + if i == 0 { + break + } + return + case reflect.Invalid: + break // An invalid value is likely a nil map, etc. and acts like an empty map. + default: + s.errorf("range can't iterate over %v", val) + } + if r.ElseList != nil { + s.walk(dot, r.ElseList) + } +} + +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { + s.at(t) + tmpl := s.tmpl.tmpl[t.Name] + if tmpl == nil { + s.errorf("template %q not defined", t.Name) + } + // Variables declared by the pipeline persist. + dot = s.evalPipeline(dot, t.Pipe) + newState := *s + newState.tmpl = tmpl + // No dynamic scoping: template invocations inherit no variables. + newState.vars = []variable{{"$", dot}} + newState.walk(dot, tmpl.Root) +} + +// Eval functions evaluate pipelines, commands, and their elements and extract +// values from the data structure by examining fields, calling methods, and so on. +// The printing of those values happens only through walk functions. + +// evalPipeline returns the value acquired by evaluating a pipeline. If the +// pipeline has a variable declaration, the variable will be pushed on the +// stack. Callers should therefore pop the stack after they are finished +// executing commands depending on the pipeline value. 
+func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { + if pipe == nil { + return + } + s.at(pipe) + for _, cmd := range pipe.Cmds { + value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. + // If the object has type interface{}, dig down one level to the thing inside. + if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { + value = reflect.ValueOf(value.Interface()) // lovely! + } + } + for _, variable := range pipe.Decl { + s.push(variable.Ident[0], value) + } + return value +} + +func (s *state) notAFunction(args []parse.Node, final reflect.Value) { + if len(args) > 1 || final.IsValid() { + s.errorf("can't give argument to non-function %s", args[0]) + } +} + +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { + firstWord := cmd.Args[0] + switch n := firstWord.(type) { + case *parse.FieldNode: + return s.evalFieldNode(dot, n, cmd.Args, final) + case *parse.ChainNode: + return s.evalChainNode(dot, n, cmd.Args, final) + case *parse.IdentifierNode: + // Must be a function. + return s.evalFunction(dot, n, cmd, cmd.Args, final) + case *parse.PipeNode: + // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. + return s.evalPipeline(dot, n) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, cmd.Args, final) + } + s.at(firstWord) + s.notAFunction(cmd.Args, final) + switch word := firstWord.(type) { + case *parse.BoolNode: + return reflect.ValueOf(word.True) + case *parse.DotNode: + return dot + case *parse.NilNode: + s.errorf("nil is not a command") + case *parse.NumberNode: + return s.idealConstant(word) + case *parse.StringNode: + return reflect.ValueOf(word.Text) + } + s.errorf("can't evaluate command %q", firstWord) + panic("not reached") +} + +// idealConstant is called to return the value of a number in a context where +// we don't know the type. 
In that case, the syntax of the number tells us +// its type, and we use Go rules to resolve. Note there is no such thing as +// a uint ideal constant in this situation - the value must be of int type. +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { + // These are ideal constants but we don't know the type + // and we have no context. (If it was a method argument, + // we'd know what we need.) The syntax guides us to some extent. + s.at(constant) + switch { + case constant.IsComplex: + return reflect.ValueOf(constant.Complex128) // incontrovertible. + case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: + return reflect.ValueOf(constant.Float64) + case constant.IsInt: + n := int(constant.Int64) + if int64(n) != constant.Int64 { + s.errorf("%s overflows int", constant.Text) + } + return reflect.ValueOf(n) + case constant.IsUint: + s.errorf("%s overflows int", constant.Text) + } + return zero +} + +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(field) + return s.evalFieldChain(dot, dot, field, field.Ident, args, final) +} + +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(chain) + // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. + pipe := s.evalArg(dot, nil, chain.Node) + if len(chain.Field) == 0 { + s.errorf("internal error: no fields in evalChainNode") + } + return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) +} + +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { + // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. 
+ s.at(variable) + value := s.varValue(variable.Ident[0]) + if len(variable.Ident) == 1 { + s.notAFunction(args, final) + return value + } + return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) +} + +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. +// dot is the environment in which to evaluate arguments, while +// receiver is the value being walked along the chain. +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { + n := len(ident) + for i := 0; i < n-1; i++ { + receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) + } + // Now if it's a method, it gets the arguments. + return s.evalField(dot, ident[n-1], node, args, final, receiver) +} + +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { + s.at(node) + name := node.Ident + function, ok := findFunction(name, s.tmpl) + if !ok { + s.errorf("%q is not a defined function", name) + } + return s.evalCall(dot, function, cmd, name, args, final) +} + +// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). +// The 'final' argument represents the return value from the preceding +// value of the pipeline, if any. +func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { + if !receiver.IsValid() { + return zero + } + typ := receiver.Type() + receiver, _ = indirect(receiver) + // Unless it's an interface, need to get to a value of type *T to guarantee + // we see all methods of T and *T. 
+ ptr := receiver + if ptr.Kind() != reflect.Interface && ptr.CanAddr() { + ptr = ptr.Addr() + } + if method := ptr.MethodByName(fieldName); method.IsValid() { + return s.evalCall(dot, method, node, fieldName, args, final) + } + hasArgs := len(args) > 1 || final.IsValid() + // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. + receiver, isNil := indirect(receiver) + if isNil { + s.errorf("nil pointer evaluating %s.%s", typ, fieldName) + } + switch receiver.Kind() { + case reflect.Struct: + tField, ok := receiver.Type().FieldByName(fieldName) + if ok { + field := receiver.FieldByIndex(tField.Index) + if tField.PkgPath != "" { // field is unexported + s.errorf("%s is an unexported field of struct type %s", fieldName, typ) + } + // If it's a function, we must call it. + if hasArgs { + s.errorf("%s has arguments but cannot be invoked as function", fieldName) + } + return field + } + s.errorf("%s is not a field of struct type %s", fieldName, typ) + case reflect.Map: + // If it's a map, attempt to use the field name as a key. + nameVal := reflect.ValueOf(fieldName) + if nameVal.Type().AssignableTo(receiver.Type().Key()) { + if hasArgs { + s.errorf("%s is not a method but has arguments", fieldName) + } + return receiver.MapIndex(nameVal) + } + } + s.errorf("can't evaluate field %s in type %s", fieldName, typ) + panic("not reached") +} + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so +// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] +// as the function itself. 
+func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { + if args != nil { + args = args[1:] // Zeroth arg is function name/node; not passed to function. + } + typ := fun.Type() + numIn := len(args) + if final.IsValid() { + numIn++ + } + numFixed := len(args) + if typ.IsVariadic() { + numFixed = typ.NumIn() - 1 // last arg is the variadic one. + if numIn < numFixed { + s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) + } + } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { + s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) + } + if !goodFunc(typ) { + // TODO: This could still be a confusing error; maybe goodFunc should provide info. + s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) + } + // Build the arg list. + argv := make([]reflect.Value, numIn) + // Args must be evaluated. Fixed args first. + i := 0 + for ; i < numFixed && i < len(args); i++ { + argv[i] = s.evalArg(dot, typ.In(i), args[i]) + } + // Now the ... args. + if typ.IsVariadic() { + argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. + for ; i < len(args); i++ { + argv[i] = s.evalArg(dot, argType, args[i]) + } + } + // Add final value if necessary. + if final.IsValid() { + t := typ.In(typ.NumIn() - 1) + if typ.IsVariadic() { + t = t.Elem() + } + argv[i] = s.validateType(final, t) + } + result := fun.Call(argv) + // If we have an error that is not nil, stop execution and return that error to the caller. + if len(result) == 2 && !result[1].IsNil() { + s.at(node) + s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + } + return result[0] +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. 
+func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// validateType guarantees that the value is valid and assignable to the type. +func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { + if !value.IsValid() { + if typ == nil || canBeNil(typ) { + // An untyped nil interface{}. Accept as a proper nil value. + return reflect.Zero(typ) + } + s.errorf("invalid value; expected %s", typ) + } + if typ != nil && !value.Type().AssignableTo(typ) { + if value.Kind() == reflect.Interface && !value.IsNil() { + value = value.Elem() + if value.Type().AssignableTo(typ) { + return value + } + // fallthrough + } + // Does one dereference or indirection work? We could do more, as we + // do with method receivers, but that gets messy and method receivers + // are much more constrained, so it makes more sense there than here. + // Besides, one is almost always all you need. 
+ switch { + case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): + value = value.Elem() + if !value.IsValid() { + s.errorf("dereference of nil pointer of type %s", typ) + } + case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): + value = value.Addr() + default: + s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) + } + } + return value +} + +func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + switch arg := n.(type) { + case *parse.DotNode: + return s.validateType(dot, typ) + case *parse.NilNode: + if canBeNil(typ) { + return reflect.Zero(typ) + } + s.errorf("cannot assign nil to %s", typ) + case *parse.FieldNode: + return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) + case *parse.VariableNode: + return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) + case *parse.PipeNode: + return s.validateType(s.evalPipeline(dot, arg), typ) + case *parse.IdentifierNode: + return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) + } + switch typ.Kind() { + case reflect.Bool: + return s.evalBool(typ, n) + case reflect.Complex64, reflect.Complex128: + return s.evalComplex(typ, n) + case reflect.Float32, reflect.Float64: + return s.evalFloat(typ, n) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return s.evalInteger(typ, n) + case reflect.Interface: + if typ.NumMethod() == 0 { + return s.evalEmptyInterface(dot, n) + } + case reflect.String: + return s.evalString(typ, n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return s.evalUnsignedInteger(typ, n) + } + s.errorf("can't handle %s for arg of type %s", n, typ) + panic("not reached") +} + +func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.BoolNode); ok { + 
value := reflect.New(typ).Elem() + value.SetBool(n.True) + return value + } + s.errorf("expected bool; found %s", n) + panic("not reached") +} + +func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.StringNode); ok { + value := reflect.New(typ).Elem() + value.SetString(n.Text) + return value + } + s.errorf("expected string; found %s", n) + panic("not reached") +} + +func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsInt { + value := reflect.New(typ).Elem() + value.SetInt(n.Int64) + return value + } + s.errorf("expected integer; found %s", n) + panic("not reached") +} + +func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsUint { + value := reflect.New(typ).Elem() + value.SetUint(n.Uint64) + return value + } + s.errorf("expected unsigned integer; found %s", n) + panic("not reached") +} + +func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { + value := reflect.New(typ).Elem() + value.SetFloat(n.Float64) + return value + } + s.errorf("expected float; found %s", n) + panic("not reached") +} + +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { + if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { + value := reflect.New(typ).Elem() + value.SetComplex(n.Complex128) + return value + } + s.errorf("expected complex; found %s", n) + panic("not reached") +} + +func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { + s.at(n) + switch n := n.(type) { + case *parse.BoolNode: + return reflect.ValueOf(n.True) + case *parse.DotNode: + return dot + case *parse.FieldNode: + return s.evalFieldNode(dot, n, nil, zero) + case *parse.IdentifierNode: + return s.evalFunction(dot, n, n, nil, zero) + case *parse.NilNode: + // 
NilNode is handled in evalArg, the only place that calls here. + s.errorf("evalEmptyInterface: nil (can't happen)") + case *parse.NumberNode: + return s.idealConstant(n) + case *parse.StringNode: + return reflect.ValueOf(n.Text) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, nil, zero) + case *parse.PipeNode: + return s.evalPipeline(dot, n) + } + s.errorf("can't handle assignment of %s to empty interface argument", n) + panic("not reached") +} + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printValue writes the textual representation of the value to the output of +// the template. +func (s *state) printValue(n parse.Node, v reflect.Value) { + s.at(n) + iface, ok := printableValue(v) + if !ok { + s.errorf("can't print %s of type %s", n, v.Type()) + } + fmt.Fprint(s.wr, iface) +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// Types to help sort the keys in a map for reproducible output. 
+ +type rvs []reflect.Value + +func (x rvs) Len() int { return len(x) } +func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +type rvInts struct{ rvs } + +func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } + +type rvUints struct{ rvs } + +func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } + +type rvFloats struct{ rvs } + +func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } + +type rvStrings struct{ rvs } + +func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } + +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. +func sortKeys(v []reflect.Value) []reflect.Value { + if len(v) <= 1 { + return v + } + switch v[0].Kind() { + case reflect.Float32, reflect.Float64: + sort.Sort(rvFloats{v}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(rvInts{v}) + case reflect.String: + sort.Sort(rvStrings{v}) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + sort.Sort(rvUints{v}) + } + return v +} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go index 39ee5ed..f5093a4 100644 --- a/vendor/github.com/alecthomas/template/funcs.go +++ b/vendor/github.com/alecthomas/template/funcs.go @@ -1,598 +1,598 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. 
In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. -type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// addFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. 
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) { - if tmpl != nil && tmpl.common != nil { - if fn := tmpl.execFuncs[name]; fn.IsValid() { - return fn, true - } - } - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. 
-func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. 
- -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. - -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... 
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. 
- switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. - lessThan, err := lt(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return eq(arg1, arg2) -} - -// gt evaluates the comparison a > b. -func gt(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := le(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// ge evaluates the comparison a >= b. -func ge(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. - lessThan, err := lt(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} - -// HTML escaping. - -var ( - htmlQuot = []byte(""") // shorter than """ - htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 - htmlAmp = []byte("&") - htmlLt = []byte("<") - htmlGt = []byte(">") -) - -// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. 
-func HTMLEscape(w io.Writer, b []byte) { - last := 0 - for i, c := range b { - var html []byte - switch c { - case '"': - html = htmlQuot - case '\'': - html = htmlApos - case '&': - html = htmlAmp - case '<': - html = htmlLt - case '>': - html = htmlGt - default: - continue - } - w.Write(b[last:i]) - w.Write(html) - last = i + 1 - } - w.Write(b[last:]) -} - -// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. -func HTMLEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexAny(s, `'"&<>`) < 0 { - return s - } - var b bytes.Buffer - HTMLEscape(&b, []byte(s)) - return b.String() -} - -// HTMLEscaper returns the escaped HTML equivalent of the textual -// representation of its arguments. -func HTMLEscaper(args ...interface{}) string { - return HTMLEscapeString(evalArgs(args)) -} - -// JavaScript escaping. - -var ( - jsLowUni = []byte(`\u00`) - hex = []byte("0123456789ABCDEF") - - jsBackslash = []byte(`\\`) - jsApos = []byte(`\'`) - jsQuot = []byte(`\"`) - jsLt = []byte(`\x3C`) - jsGt = []byte(`\x3E`) -) - -// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. -func JSEscape(w io.Writer, b []byte) { - last := 0 - for i := 0; i < len(b); i++ { - c := b[i] - - if !jsIsSpecial(rune(c)) { - // fast path: nothing to do - continue - } - w.Write(b[last:i]) - - if c < utf8.RuneSelf { - // Quotes, slashes and angle brackets get quoted. - // Control characters get written as \u00XX. - switch c { - case '\\': - w.Write(jsBackslash) - case '\'': - w.Write(jsApos) - case '"': - w.Write(jsQuot) - case '<': - w.Write(jsLt) - case '>': - w.Write(jsGt) - default: - w.Write(jsLowUni) - t, b := c>>4, c&0x0f - w.Write(hex[t : t+1]) - w.Write(hex[b : b+1]) - } - } else { - // Unicode rune. 
- r, size := utf8.DecodeRune(b[i:]) - if unicode.IsPrint(r) { - w.Write(b[i : i+size]) - } else { - fmt.Fprintf(w, "\\u%04X", r) - } - i += size - 1 - } - last = i + 1 - } - w.Write(b[last:]) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexFunc(s, jsIsSpecial) < 0 { - return s - } - var b bytes.Buffer - JSEscape(&b, []byte(s)) - return b.String() -} - -func jsIsSpecial(r rune) bool { - switch r { - case '\\', '\'', '"', '<', '>': - return true - } - return r < ' ' || utf8.RuneSelf <= r -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. -func JSEscaper(args ...interface{}) string { - return JSEscapeString(evalArgs(args)) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return url.QueryEscape(evalArgs(args)) -} - -// evalArgs formats the list of arguments into a string. It is therefore equivalent to -// fmt.Sprint(args...) -// except that each argument is indirected (if a pointer), as required, -// using the same rules as the default string evaluation during template -// execution. -func evalArgs(args []interface{}) string { - ok := false - var s string - // Fast path for simple common case. - if len(args) == 1 { - s, ok = args[0].(string) - } - if !ok { - for i, arg := range args { - a, ok := printableValue(reflect.ValueOf(arg)) - if ok { - args[i] = a - } // else left fmt do its thing - } - s = fmt.Sprint(args...) - } - return s -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// addFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. 
+func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string, tmpl *Template) (reflect.Value, bool) { + if tmpl != nil && tmpl.common != nil { + if fn := tmpl.execFuncs[name]; fn.IsValid() { + return fn, true + } + } + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + 
return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. 
+ var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. 
+ +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. 
+ switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } + } + if truth { + return true, nil + } + } + return false, nil +} + +// ne evaluates the comparison a != b. +func ne(arg1, arg2 interface{}) (bool, error) { + // != is the inverse of ==. + equal, err := eq(arg1, arg2) + return !equal, err +} + +// lt evaluates the comparison a < b. +func lt(arg1, arg2 interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + v2 := reflect.ValueOf(arg2) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind, complexKind: + return false, errBadComparisonType + case floatKind: + truth = v1.Float() < v2.Float() + case intKind: + truth = v1.Int() < v2.Int() + case stringKind: + truth = v1.String() < v2.String() + case uintKind: + truth = v1.Uint() < v2.Uint() + default: + panic("invalid kind") + } + } + return truth, nil +} + +// le evaluates the comparison <= b. +func le(arg1, arg2 interface{}) (bool, error) { + // <= is < or ==. 
+ lessThan, err := lt(arg1, arg2) + if lessThan || err != nil { + return lessThan, err + } + return eq(arg1, arg2) +} + +// gt evaluates the comparison a > b. +func gt(arg1, arg2 interface{}) (bool, error) { + // > is the inverse of <=. + lessOrEqual, err := le(arg1, arg2) + if err != nil { + return false, err + } + return !lessOrEqual, nil +} + +// ge evaluates the comparison a >= b. +func ge(arg1, arg2 interface{}) (bool, error) { + // >= is the inverse of <. + lessThan, err := lt(arg1, arg2) + if err != nil { + return false, err + } + return !lessThan, nil +} + +// HTML escaping. + +var ( + htmlQuot = []byte(""") // shorter than """ + htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 + htmlAmp = []byte("&") + htmlLt = []byte("<") + htmlGt = []byte(">") +) + +// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. +func HTMLEscape(w io.Writer, b []byte) { + last := 0 + for i, c := range b { + var html []byte + switch c { + case '"': + html = htmlQuot + case '\'': + html = htmlApos + case '&': + html = htmlAmp + case '<': + html = htmlLt + case '>': + html = htmlGt + default: + continue + } + w.Write(b[last:i]) + w.Write(html) + last = i + 1 + } + w.Write(b[last:]) +} + +// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. +func HTMLEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexAny(s, `'"&<>`) < 0 { + return s + } + var b bytes.Buffer + HTMLEscape(&b, []byte(s)) + return b.String() +} + +// HTMLEscaper returns the escaped HTML equivalent of the textual +// representation of its arguments. +func HTMLEscaper(args ...interface{}) string { + return HTMLEscapeString(evalArgs(args)) +} + +// JavaScript escaping. 
+ +var ( + jsLowUni = []byte(`\u00`) + hex = []byte("0123456789ABCDEF") + + jsBackslash = []byte(`\\`) + jsApos = []byte(`\'`) + jsQuot = []byte(`\"`) + jsLt = []byte(`\x3C`) + jsGt = []byte(`\x3E`) +) + +// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. +func JSEscape(w io.Writer, b []byte) { + last := 0 + for i := 0; i < len(b); i++ { + c := b[i] + + if !jsIsSpecial(rune(c)) { + // fast path: nothing to do + continue + } + w.Write(b[last:i]) + + if c < utf8.RuneSelf { + // Quotes, slashes and angle brackets get quoted. + // Control characters get written as \u00XX. + switch c { + case '\\': + w.Write(jsBackslash) + case '\'': + w.Write(jsApos) + case '"': + w.Write(jsQuot) + case '<': + w.Write(jsLt) + case '>': + w.Write(jsGt) + default: + w.Write(jsLowUni) + t, b := c>>4, c&0x0f + w.Write(hex[t : t+1]) + w.Write(hex[b : b+1]) + } + } else { + // Unicode rune. + r, size := utf8.DecodeRune(b[i:]) + if unicode.IsPrint(r) { + w.Write(b[i : i+size]) + } else { + fmt.Fprintf(w, "\\u%04X", r) + } + i += size - 1 + } + last = i + 1 + } + w.Write(b[last:]) +} + +// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. +func JSEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexFunc(s, jsIsSpecial) < 0 { + return s + } + var b bytes.Buffer + JSEscape(&b, []byte(s)) + return b.String() +} + +func jsIsSpecial(r rune) bool { + switch r { + case '\\', '\'', '"', '<', '>': + return true + } + return r < ' ' || utf8.RuneSelf <= r +} + +// JSEscaper returns the escaped JavaScript equivalent of the textual +// representation of its arguments. +func JSEscaper(args ...interface{}) string { + return JSEscapeString(evalArgs(args)) +} + +// URLQueryEscaper returns the escaped value of the textual representation of +// its arguments in a form suitable for embedding in a URL query. 
+func URLQueryEscaper(args ...interface{}) string { + return url.QueryEscape(evalArgs(args)) +} + +// evalArgs formats the list of arguments into a string. It is therefore equivalent to +// fmt.Sprint(args...) +// except that each argument is indirected (if a pointer), as required, +// using the same rules as the default string evaluation during template +// execution. +func evalArgs(args []interface{}) string { + ok := false + var s string + // Fast path for simple common case. + if len(args) == 1 { + s, ok = args[0].(string) + } + if !ok { + for i, arg := range args { + a, ok := printableValue(reflect.ValueOf(arg)) + if ok { + args[i] = a + } // else left fmt do its thing + } + s = fmt.Sprint(args...) + } + return s +} diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod index a70670a..e597ea7 100644 --- a/vendor/github.com/alecthomas/template/go.mod +++ b/vendor/github.com/alecthomas/template/go.mod @@ -1 +1 @@ -module github.com/alecthomas/template +module github.com/alecthomas/template diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go index 3636fb5..a333435 100644 --- a/vendor/github.com/alecthomas/template/helper.go +++ b/vendor/github.com/alecthomas/template/helper.go @@ -1,108 +1,108 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Helper functions to make constructing templates easier. - -package template - -import ( - "fmt" - "io/ioutil" - "path/filepath" -) - -// Functions and methods to parse templates. - -// Must is a helper that wraps a call to a function returning (*Template, error) -// and panics if the error is non-nil. 
It is intended for use in variable -// initializations such as -// var t = template.Must(template.New("name").Parse("text")) -func Must(t *Template, err error) *Template { - if err != nil { - panic(err) - } - return t -} - -// ParseFiles creates a new Template and parses the template definitions from -// the named files. The returned template's name will have the (base) name and -// (parsed) contents of the first file. There must be at least one file. -// If an error occurs, parsing stops and the returned *Template is nil. -func ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(nil, filenames...) -} - -// ParseFiles parses the named files and associates the resulting templates with -// t. If an error occurs, parsing stops and the returned template is nil; -// otherwise it is t. There must be at least one file. -func (t *Template) ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(t, filenames...) -} - -// parseFiles is the helper for the method and function. If the argument -// template is nil, it is created from the first file. -func parseFiles(t *Template, filenames ...string) (*Template, error) { - if len(filenames) == 0 { - // Not really a problem, but be consistent. - return nil, fmt.Errorf("template: no files named in call to ParseFiles") - } - for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - s := string(b) - name := filepath.Base(filename) - // First template becomes return value if not already defined, - // and we use that one for subsequent New calls to associate - // all the templates together. Also, if this file has the same name - // as t, this file becomes the contents of t, so - // t, err := New(name).Funcs(xxx).ParseFiles(name) - // works. Otherwise we create a new template associated with t. 
- var tmpl *Template - if t == nil { - t = New(name) - } - if name == t.Name() { - tmpl = t - } else { - tmpl = t.New(name) - } - _, err = tmpl.Parse(s) - if err != nil { - return nil, err - } - } - return t, nil -} - -// ParseGlob creates a new Template and parses the template definitions from the -// files identified by the pattern, which must match at least one file. The -// returned template will have the (base) name and (parsed) contents of the -// first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. -func ParseGlob(pattern string) (*Template, error) { - return parseGlob(nil, pattern) -} - -// ParseGlob parses the template definitions in the files identified by the -// pattern and associates the resulting templates with t. The pattern is -// processed by filepath.Glob and must match at least one file. ParseGlob is -// equivalent to calling t.ParseFiles with the list of files matched by the -// pattern. -func (t *Template) ParseGlob(pattern string) (*Template, error) { - return parseGlob(t, pattern) -} - -// parseGlob is the implementation of the function and method ParseGlob. -func parseGlob(t *Template, pattern string) (*Template, error) { - filenames, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if len(filenames) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - return parseFiles(t, filenames...) -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper functions to make constructing templates easier. + +package template + +import ( + "fmt" + "io/ioutil" + "path/filepath" +) + +// Functions and methods to parse templates. + +// Must is a helper that wraps a call to a function returning (*Template, error) +// and panics if the error is non-nil. 
It is intended for use in variable +// initializations such as +// var t = template.Must(template.New("name").Parse("text")) +func Must(t *Template, err error) *Template { + if err != nil { + panic(err) + } + return t +} + +// ParseFiles creates a new Template and parses the template definitions from +// the named files. The returned template's name will have the (base) name and +// (parsed) contents of the first file. There must be at least one file. +// If an error occurs, parsing stops and the returned *Template is nil. +func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(t, filenames...) +} + +// parseFiles is the helper for the method and function. If the argument +// template is nil, it is created from the first file. +func parseFiles(t *Template, filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + s := string(b) + name := filepath.Base(filename) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. 
+ var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// ParseGlob creates a new Template and parses the template definitions from the +// files identified by the pattern, which must match at least one file. The +// returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The pattern is +// processed by filepath.Glob and must match at least one file. ParseGlob is +// equivalent to calling t.ParseFiles with the list of files matched by the +// pattern. +func (t *Template) ParseGlob(pattern string) (*Template, error) { + return parseGlob(t, pattern) +} + +// parseGlob is the implementation of the function and method ParseGlob. +func parseGlob(t *Template, pattern string) (*Template, error) { + filenames, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if len(filenames) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + return parseFiles(t, filenames...) +} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go index 55f1c05..0128deb 100644 --- a/vendor/github.com/alecthomas/template/parse/lex.go +++ b/vendor/github.com/alecthomas/template/parse/lex.go @@ -1,556 +1,556 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package parse - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos Pos // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case i.typ > itemKeyword: - return fmt.Sprintf("<%s>", i.val) - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemBool // boolean constant - itemChar // printable ASCII character; grab bag for comma etc. - itemCharConstant // character constant - itemComplex // complex constant (1+2i); imaginary is just a number - itemColonEquals // colon-equals (':=') introducing a declaration - itemEOF - itemField // alphanumeric identifier starting with '.' - itemIdentifier // alphanumeric identifier not starting with '.' - itemLeftDelim // left action delimiter - itemLeftParen // '(' inside action - itemNumber // simple number, including imaginary - itemPipe // pipe symbol - itemRawString // raw quoted string (includes quotes) - itemRightDelim // right action delimiter - itemElideNewline // elide newline after right delim - itemRightParen // ')' inside action - itemSpace // run of spaces separating arguments - itemString // quoted string (includes quotes) - itemText // plain text - itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' - // Keywords appear after all the rest. - itemKeyword // used only to delimit the keywords - itemDot // the cursor, spelled '.' 
- itemDefine // define keyword - itemElse // else keyword - itemEnd // end keyword - itemIf // if keyword - itemNil // the untyped nil constant, easiest to treat as a keyword - itemRange // range keyword - itemTemplate // template keyword - itemWith // with keyword -) - -var key = map[string]itemType{ - ".": itemDot, - "define": itemDefine, - "else": itemElse, - "end": itemEnd, - "if": itemIf, - "range": itemRange, - "nil": itemNil, - "template": itemTemplate, - "with": itemWith, -} - -const eof = -1 - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - name string // the name of the input; used only for error reports - input string // the string being scanned - leftDelim string // start of action - rightDelim string // end of action - state stateFn // the next lexing function to enter - pos Pos // current position in the input - start Pos // start position of this item - width Pos // width of last rune read from input - lastPos Pos // position of most recent item returned by nextItem - items chan item // channel of scanned items - parenDepth int // nesting depth of ( ) exprs -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if int(l.pos) >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} - l.start = l.pos -} - -// ignore skips over the pending input before this point. 
-func (l *lexer) ignore() { - l.start = l.pos -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.IndexRune(valid, l.next()) >= 0 { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.IndexRune(valid, l.next()) >= 0 { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - item := <-l.items - l.lastPos = item.pos - return item -} - -// lex creates a new scanner for the input string. -func lex(name, input, left, right string) *lexer { - if left == "" { - left = leftDelim - } - if right == "" { - right = rightDelim - } - l := &lexer{ - name: name, - input: input, - leftDelim: left, - rightDelim: right, - items: make(chan item), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexText; l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -const ( - leftDelim = "{{" - rightDelim = "}}" - leftComment = "/*" - rightComment = "*/" -) - -// lexText scans until an opening action delimiter, "{{". 
-func lexText(l *lexer) stateFn { - for { - if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { - if l.pos > l.start { - l.emit(itemText) - } - return lexLeftDelim - } - if l.next() == eof { - break - } - } - // Correctly reached EOF. - if l.pos > l.start { - l.emit(itemText) - } - l.emit(itemEOF) - return nil -} - -// lexLeftDelim scans the left delimiter, which is known to be present. -func lexLeftDelim(l *lexer) stateFn { - l.pos += Pos(len(l.leftDelim)) - if strings.HasPrefix(l.input[l.pos:], leftComment) { - return lexComment - } - l.emit(itemLeftDelim) - l.parenDepth = 0 - return lexInsideAction -} - -// lexComment scans a comment. The left comment marker is known to be present. -func lexComment(l *lexer) stateFn { - l.pos += Pos(len(leftComment)) - i := strings.Index(l.input[l.pos:], rightComment) - if i < 0 { - return l.errorf("unclosed comment") - } - l.pos += Pos(i + len(rightComment)) - if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - return l.errorf("comment ends before closing delimiter") - - } - l.pos += Pos(len(l.rightDelim)) - l.ignore() - return lexText -} - -// lexRightDelim scans the right delimiter, which is known to be present. -func lexRightDelim(l *lexer) stateFn { - l.pos += Pos(len(l.rightDelim)) - l.emit(itemRightDelim) - if l.peek() == '\\' { - l.pos++ - l.emit(itemElideNewline) - } - return lexText -} - -// lexInsideAction scans the elements inside action delimiters. -func lexInsideAction(l *lexer) stateFn { - // Either number, quoted string, or identifier. - // Spaces separate arguments; runs of spaces turn into itemSpace. - // Pipe symbols separate and are emitted. 
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - if l.parenDepth == 0 { - return lexRightDelim - } - return l.errorf("unclosed left paren") - } - switch r := l.next(); { - case r == eof || isEndOfLine(r): - return l.errorf("unclosed action") - case isSpace(r): - return lexSpace - case r == ':': - if l.next() != '=' { - return l.errorf("expected :=") - } - l.emit(itemColonEquals) - case r == '|': - l.emit(itemPipe) - case r == '"': - return lexQuote - case r == '`': - return lexRawQuote - case r == '$': - return lexVariable - case r == '\'': - return lexChar - case r == '.': - // special look-ahead for ".field" so we don't break l.backup(). - if l.pos < Pos(len(l.input)) { - r := l.input[l.pos] - if r < '0' || '9' < r { - return lexField - } - } - fallthrough // '.' can start a number. - case r == '+' || r == '-' || ('0' <= r && r <= '9'): - l.backup() - return lexNumber - case isAlphaNumeric(r): - l.backup() - return lexIdentifier - case r == '(': - l.emit(itemLeftParen) - l.parenDepth++ - return lexInsideAction - case r == ')': - l.emit(itemRightParen) - l.parenDepth-- - if l.parenDepth < 0 { - return l.errorf("unexpected right paren %#U", r) - } - return lexInsideAction - case r <= unicode.MaxASCII && unicode.IsPrint(r): - l.emit(itemChar) - return lexInsideAction - default: - return l.errorf("unrecognized character in action: %#U", r) - } - return lexInsideAction -} - -// lexSpace scans a run of space characters. -// One space has already been seen. -func lexSpace(l *lexer) stateFn { - for isSpace(l.peek()) { - l.next() - } - l.emit(itemSpace) - return lexInsideAction -} - -// lexIdentifier scans an alphanumeric. -func lexIdentifier(l *lexer) stateFn { -Loop: - for { - switch r := l.next(); { - case isAlphaNumeric(r): - // absorb. 
- default: - l.backup() - word := l.input[l.start:l.pos] - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - switch { - case key[word] > itemKeyword: - l.emit(key[word]) - case word[0] == '.': - l.emit(itemField) - case word == "true", word == "false": - l.emit(itemBool) - default: - l.emit(itemIdentifier) - } - break Loop - } - } - return lexInsideAction -} - -// lexField scans a field: .Alphanumeric. -// The . has been scanned. -func lexField(l *lexer) stateFn { - return lexFieldOrVariable(l, itemField) -} - -// lexVariable scans a Variable: $Alphanumeric. -// The $ has been scanned. -func lexVariable(l *lexer) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "$". - l.emit(itemVariable) - return lexInsideAction - } - return lexFieldOrVariable(l, itemVariable) -} - -// lexVariable scans a field or variable: [.$]Alphanumeric. -// The . or $ has been scanned. -func lexFieldOrVariable(l *lexer, typ itemType) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "." or "$". - if typ == itemVariable { - l.emit(itemVariable) - } else { - l.emit(itemDot) - } - return lexInsideAction - } - var r rune - for { - r = l.next() - if !isAlphaNumeric(r) { - l.backup() - break - } - } - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - l.emit(typ) - return lexInsideAction -} - -// atTerminator reports whether the input is at valid termination character to -// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases -// like "$x+2" not being acceptable without a space, in case we decide one -// day to implement arithmetic. -func (l *lexer) atTerminator() bool { - r := l.peek() - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '|', ':', ')', '(': - return true - } - // Does r start the delimiter? 
This can be ambiguous (with delim=="//", $x/2 will - // succeed but should fail) but only in extremely rare cases caused by willfully - // bad choice of delimiter. - if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { - return true - } - return false -} - -// lexChar scans a character constant. The initial quote is already -// scanned. Syntax checking is done by the parser. -func lexChar(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated character constant") - case '\'': - break Loop - } - } - l.emit(itemCharConstant) - return lexInsideAction -} - -// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This -// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" -// and "089" - but when it's wrong the input is invalid and the parser (via -// strconv) will notice. -func lexNumber(l *lexer) stateFn { - if !l.scanNumber() { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - if sign := l.peek(); sign == '+' || sign == '-' { - // Complex: 1+2i. No spaces, must end in 'i'. - if !l.scanNumber() || l.input[l.pos-1] != 'i' { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(itemComplex) - } else { - l.emit(itemNumber) - } - return lexInsideAction -} - -func (l *lexer) scanNumber() bool { - // Optional leading sign. - l.accept("+-") - // Is it hex? - digits := "0123456789" - if l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" - } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") - } - // Is it imaginary? - l.accept("i") - // Next thing mustn't be alphanumeric. - if isAlphaNumeric(l.peek()) { - l.next() - return false - } - return true -} - -// lexQuote scans a quoted string. 
-func lexQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated quoted string") - case '"': - break Loop - } - } - l.emit(itemString) - return lexInsideAction -} - -// lexRawQuote scans a raw quoted string. -func lexRawQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case eof, '\n': - return l.errorf("unterminated raw quoted string") - case '`': - break Loop - } - } - l.emit(itemRawString) - return lexInsideAction -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package parse + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos Pos // The starting position, in bytes, of this item in the input string. + val string // The value of this item. +} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case i.typ > itemKeyword: + return fmt.Sprintf("<%s>", i.val) + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. 
+type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemBool // boolean constant + itemChar // printable ASCII character; grab bag for comma etc. + itemCharConstant // character constant + itemComplex // complex constant (1+2i); imaginary is just a number + itemColonEquals // colon-equals (':=') introducing a declaration + itemEOF + itemField // alphanumeric identifier starting with '.' + itemIdentifier // alphanumeric identifier not starting with '.' + itemLeftDelim // left action delimiter + itemLeftParen // '(' inside action + itemNumber // simple number, including imaginary + itemPipe // pipe symbol + itemRawString // raw quoted string (includes quotes) + itemRightDelim // right action delimiter + itemElideNewline // elide newline after right delim + itemRightParen // ')' inside action + itemSpace // run of spaces separating arguments + itemString // quoted string (includes quotes) + itemText // plain text + itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' + // Keywords appear after all the rest. + itemKeyword // used only to delimit the keywords + itemDot // the cursor, spelled '.' + itemDefine // define keyword + itemElse // else keyword + itemEnd // end keyword + itemIf // if keyword + itemNil // the untyped nil constant, easiest to treat as a keyword + itemRange // range keyword + itemTemplate // template keyword + itemWith // with keyword +) + +var key = map[string]itemType{ + ".": itemDot, + "define": itemDefine, + "else": itemElse, + "end": itemEnd, + "if": itemIf, + "range": itemRange, + "nil": itemNil, + "template": itemTemplate, + "with": itemWith, +} + +const eof = -1 + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. 
+type lexer struct { + name string // the name of the input; used only for error reports + input string // the string being scanned + leftDelim string // start of action + rightDelim string // end of action + state stateFn // the next lexing function to enter + pos Pos // current position in the input + start Pos // start position of this item + width Pos // width of last rune read from input + lastPos Pos // position of most recent item returned by nextItem + items chan item // channel of scanned items + parenDepth int // nesting depth of ( ) exprs +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. 
+func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(name, input, left, right string) *lexer { + if left == "" { + left = leftDelim + } + if right == "" { + right = rightDelim + } + l := &lexer{ + name: name, + input: input, + leftDelim: left, + rightDelim: right, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexText; l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +const ( + leftDelim = "{{" + rightDelim = "}}" + leftComment = "/*" + rightComment = "*/" +) + +// lexText scans until an opening action delimiter, "{{". +func lexText(l *lexer) stateFn { + for { + if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { + if l.pos > l.start { + l.emit(itemText) + } + return lexLeftDelim + } + if l.next() == eof { + break + } + } + // Correctly reached EOF. + if l.pos > l.start { + l.emit(itemText) + } + l.emit(itemEOF) + return nil +} + +// lexLeftDelim scans the left delimiter, which is known to be present. +func lexLeftDelim(l *lexer) stateFn { + l.pos += Pos(len(l.leftDelim)) + if strings.HasPrefix(l.input[l.pos:], leftComment) { + return lexComment + } + l.emit(itemLeftDelim) + l.parenDepth = 0 + return lexInsideAction +} + +// lexComment scans a comment. The left comment marker is known to be present. 
+func lexComment(l *lexer) stateFn { + l.pos += Pos(len(leftComment)) + i := strings.Index(l.input[l.pos:], rightComment) + if i < 0 { + return l.errorf("unclosed comment") + } + l.pos += Pos(i + len(rightComment)) + if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + return l.errorf("comment ends before closing delimiter") + + } + l.pos += Pos(len(l.rightDelim)) + l.ignore() + return lexText +} + +// lexRightDelim scans the right delimiter, which is known to be present. +func lexRightDelim(l *lexer) stateFn { + l.pos += Pos(len(l.rightDelim)) + l.emit(itemRightDelim) + if l.peek() == '\\' { + l.pos++ + l.emit(itemElideNewline) + } + return lexText +} + +// lexInsideAction scans the elements inside action delimiters. +func lexInsideAction(l *lexer) stateFn { + // Either number, quoted string, or identifier. + // Spaces separate arguments; runs of spaces turn into itemSpace. + // Pipe symbols separate and are emitted. + if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + if l.parenDepth == 0 { + return lexRightDelim + } + return l.errorf("unclosed left paren") + } + switch r := l.next(); { + case r == eof || isEndOfLine(r): + return l.errorf("unclosed action") + case isSpace(r): + return lexSpace + case r == ':': + if l.next() != '=' { + return l.errorf("expected :=") + } + l.emit(itemColonEquals) + case r == '|': + l.emit(itemPipe) + case r == '"': + return lexQuote + case r == '`': + return lexRawQuote + case r == '$': + return lexVariable + case r == '\'': + return lexChar + case r == '.': + // special look-ahead for ".field" so we don't break l.backup(). + if l.pos < Pos(len(l.input)) { + r := l.input[l.pos] + if r < '0' || '9' < r { + return lexField + } + } + fallthrough // '.' can start a number. 
+ case r == '+' || r == '-' || ('0' <= r && r <= '9'): + l.backup() + return lexNumber + case isAlphaNumeric(r): + l.backup() + return lexIdentifier + case r == '(': + l.emit(itemLeftParen) + l.parenDepth++ + return lexInsideAction + case r == ')': + l.emit(itemRightParen) + l.parenDepth-- + if l.parenDepth < 0 { + return l.errorf("unexpected right paren %#U", r) + } + return lexInsideAction + case r <= unicode.MaxASCII && unicode.IsPrint(r): + l.emit(itemChar) + return lexInsideAction + default: + return l.errorf("unrecognized character in action: %#U", r) + } + return lexInsideAction +} + +// lexSpace scans a run of space characters. +// One space has already been seen. +func lexSpace(l *lexer) stateFn { + for isSpace(l.peek()) { + l.next() + } + l.emit(itemSpace) + return lexInsideAction +} + +// lexIdentifier scans an alphanumeric. +func lexIdentifier(l *lexer) stateFn { +Loop: + for { + switch r := l.next(); { + case isAlphaNumeric(r): + // absorb. + default: + l.backup() + word := l.input[l.start:l.pos] + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + switch { + case key[word] > itemKeyword: + l.emit(key[word]) + case word[0] == '.': + l.emit(itemField) + case word == "true", word == "false": + l.emit(itemBool) + default: + l.emit(itemIdentifier) + } + break Loop + } + } + return lexInsideAction +} + +// lexField scans a field: .Alphanumeric. +// The . has been scanned. +func lexField(l *lexer) stateFn { + return lexFieldOrVariable(l, itemField) +} + +// lexVariable scans a Variable: $Alphanumeric. +// The $ has been scanned. +func lexVariable(l *lexer) stateFn { + if l.atTerminator() { // Nothing interesting follows -> "$". + l.emit(itemVariable) + return lexInsideAction + } + return lexFieldOrVariable(l, itemVariable) +} + +// lexVariable scans a field or variable: [.$]Alphanumeric. +// The . or $ has been scanned. +func lexFieldOrVariable(l *lexer, typ itemType) stateFn { + if l.atTerminator() { // Nothing interesting follows -> "." 
or "$". + if typ == itemVariable { + l.emit(itemVariable) + } else { + l.emit(itemDot) + } + return lexInsideAction + } + var r rune + for { + r = l.next() + if !isAlphaNumeric(r) { + l.backup() + break + } + } + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + l.emit(typ) + return lexInsideAction +} + +// atTerminator reports whether the input is at valid termination character to +// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases +// like "$x+2" not being acceptable without a space, in case we decide one +// day to implement arithmetic. +func (l *lexer) atTerminator() bool { + r := l.peek() + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '|', ':', ')', '(': + return true + } + // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will + // succeed but should fail) but only in extremely rare cases caused by willfully + // bad choice of delimiter. + if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { + return true + } + return false +} + +// lexChar scans a character constant. The initial quote is already +// scanned. Syntax checking is done by the parser. +func lexChar(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated character constant") + case '\'': + break Loop + } + } + l.emit(itemCharConstant) + return lexInsideAction +} + +// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This +// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" +// and "089" - but when it's wrong the input is invalid and the parser (via +// strconv) will notice. +func lexNumber(l *lexer) stateFn { + if !l.scanNumber() { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + if sign := l.peek(); sign == '+' || sign == '-' { + // Complex: 1+2i. No spaces, must end in 'i'. 
+ if !l.scanNumber() || l.input[l.pos-1] != 'i' { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(itemComplex) + } else { + l.emit(itemNumber) + } + return lexInsideAction +} + +func (l *lexer) scanNumber() bool { + // Optional leading sign. + l.accept("+-") + // Is it hex? + digits := "0123456789" + if l.accept("0") && l.accept("xX") { + digits = "0123456789abcdefABCDEF" + } + l.acceptRun(digits) + if l.accept(".") { + l.acceptRun(digits) + } + if l.accept("eE") { + l.accept("+-") + l.acceptRun("0123456789") + } + // Is it imaginary? + l.accept("i") + // Next thing mustn't be alphanumeric. + if isAlphaNumeric(l.peek()) { + l.next() + return false + } + return true +} + +// lexQuote scans a quoted string. +func lexQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated quoted string") + case '"': + break Loop + } + } + l.emit(itemString) + return lexInsideAction +} + +// lexRawQuote scans a raw quoted string. +func lexRawQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case eof, '\n': + return l.errorf("unterminated raw quoted string") + case '`': + break Loop + } + } + l.emit(itemRawString) + return lexInsideAction +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. 
+func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go index 55c37f6..81d4ccc 100644 --- a/vendor/github.com/alecthomas/template/parse/node.go +++ b/vendor/github.com/alecthomas/template/parse/node.go @@ -1,834 +1,834 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Parse nodes. - -package parse - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -var textFormat = "%s" // Changed to "%q" in tests for better error messages. - -// A Node is an element in the parse tree. The interface is trivial. -// The interface contains an unexported method so that only -// types local to this package can satisfy it. -type Node interface { - Type() NodeType - String() string - // Copy does a deep copy of the Node and all its components. - // To avoid type assertions, some XxxNodes also have specialized - // CopyXxx methods that return *XxxNode. - Copy() Node - Position() Pos // byte position of start of node in full original input string - // tree returns the containing *Tree. - // It is unexported so all implementations of Node are in this package. - tree() *Tree -} - -// NodeType identifies the type of a parse tree node. -type NodeType int - -// Pos represents a byte position in the original input text from which -// this template was parsed. -type Pos int - -func (p Pos) Position() Pos { - return p -} - -// Type returns itself and provides an easy default implementation -// for embedding in a Node. Embedded in all non-trivial Nodes. -func (t NodeType) Type() NodeType { - return t -} - -const ( - NodeText NodeType = iota // Plain text. - NodeAction // A non-control action such as a field evaluation. - NodeBool // A boolean constant. - NodeChain // A sequence of field accesses. 
- NodeCommand // An element of a pipeline. - NodeDot // The cursor, dot. - nodeElse // An else action. Not added to tree. - nodeEnd // An end action. Not added to tree. - NodeField // A field or method name. - NodeIdentifier // An identifier; always a function name. - NodeIf // An if action. - NodeList // A list of Nodes. - NodeNil // An untyped nil constant. - NodeNumber // A numerical constant. - NodePipe // A pipeline of commands. - NodeRange // A range action. - NodeString // A string constant. - NodeTemplate // A template invocation action. - NodeVariable // A $ variable. - NodeWith // A with action. -) - -// Nodes. - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Pos - tr *Tree - Nodes []Node // The element nodes in lexical order. -} - -func (t *Tree) newList(pos Pos) *ListNode { - return &ListNode{tr: t, NodeType: NodeList, Pos: pos} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) tree() *Tree { - return l.tr -} - -func (l *ListNode) String() string { - b := new(bytes.Buffer) - for _, n := range l.Nodes { - fmt.Fprint(b, n) - } - return b.String() -} - -func (l *ListNode) CopyList() *ListNode { - if l == nil { - return l - } - n := l.tr.newList(l.Pos) - for _, elem := range l.Nodes { - n.append(elem.Copy()) - } - return n -} - -func (l *ListNode) Copy() Node { - return l.CopyList() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Pos - tr *Tree - Text []byte // The text; may span newlines. 
-} - -func (t *Tree) newText(pos Pos, text string) *TextNode { - return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} -} - -func (t *TextNode) String() string { - return fmt.Sprintf(textFormat, t.Text) -} - -func (t *TextNode) tree() *Tree { - return t.tr -} - -func (t *TextNode) Copy() Node { - return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} -} - -// PipeNode holds a pipeline with optional declaration -type PipeNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Decl []*VariableNode // Variable declarations in lexical order. - Cmds []*CommandNode // The commands in lexical order. -} - -func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { - return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} -} - -func (p *PipeNode) append(command *CommandNode) { - p.Cmds = append(p.Cmds, command) -} - -func (p *PipeNode) String() string { - s := "" - if len(p.Decl) > 0 { - for i, v := range p.Decl { - if i > 0 { - s += ", " - } - s += v.String() - } - s += " := " - } - for i, c := range p.Cmds { - if i > 0 { - s += " | " - } - s += c.String() - } - return s -} - -func (p *PipeNode) tree() *Tree { - return p.tr -} - -func (p *PipeNode) CopyPipe() *PipeNode { - if p == nil { - return p - } - var decl []*VariableNode - for _, d := range p.Decl { - decl = append(decl, d.Copy().(*VariableNode)) - } - n := p.tr.newPipeline(p.Pos, p.Line, decl) - for _, c := range p.Cmds { - n.append(c.Copy().(*CommandNode)) - } - return n -} - -func (p *PipeNode) Copy() Node { - return p.CopyPipe() -} - -// ActionNode holds an action (something bounded by delimiters). -// Control actions have their own nodes; ActionNode represents simple -// ones such as field evaluations and parenthesized pipelines. 
-type ActionNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline in the action. -} - -func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { - return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} -} - -func (a *ActionNode) String() string { - return fmt.Sprintf("{{%s}}", a.Pipe) - -} - -func (a *ActionNode) tree() *Tree { - return a.tr -} - -func (a *ActionNode) Copy() Node { - return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) - -} - -// CommandNode holds a command (a pipeline inside an evaluating action). -type CommandNode struct { - NodeType - Pos - tr *Tree - Args []Node // Arguments in lexical order: Identifier, field, or constant. -} - -func (t *Tree) newCommand(pos Pos) *CommandNode { - return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} -} - -func (c *CommandNode) append(arg Node) { - c.Args = append(c.Args, arg) -} - -func (c *CommandNode) String() string { - s := "" - for i, arg := range c.Args { - if i > 0 { - s += " " - } - if arg, ok := arg.(*PipeNode); ok { - s += "(" + arg.String() + ")" - continue - } - s += arg.String() - } - return s -} - -func (c *CommandNode) tree() *Tree { - return c.tr -} - -func (c *CommandNode) Copy() Node { - if c == nil { - return c - } - n := c.tr.newCommand(c.Pos) - for _, c := range c.Args { - n.append(c.Copy()) - } - return n -} - -// IdentifierNode holds an identifier. -type IdentifierNode struct { - NodeType - Pos - tr *Tree - Ident string // The identifier's name. -} - -// NewIdentifier returns a new IdentifierNode with the given identifier name. -func NewIdentifier(ident string) *IdentifierNode { - return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} -} - -// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? 
-func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { - i.Pos = pos - return i -} - -// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { - i.tr = t - return i -} - -func (i *IdentifierNode) String() string { - return i.Ident -} - -func (i *IdentifierNode) tree() *Tree { - return i.tr -} - -func (i *IdentifierNode) Copy() Node { - return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) -} - -// VariableNode holds a list of variable names, possibly with chained field -// accesses. The dollar sign is part of the (first) name. -type VariableNode struct { - NodeType - Pos - tr *Tree - Ident []string // Variable name and fields in lexical order. -} - -func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { - return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} -} - -func (v *VariableNode) String() string { - s := "" - for i, id := range v.Ident { - if i > 0 { - s += "." - } - s += id - } - return s -} - -func (v *VariableNode) tree() *Tree { - return v.tr -} - -func (v *VariableNode) Copy() Node { - return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} -} - -// DotNode holds the special identifier '.'. -type DotNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newDot(pos Pos) *DotNode { - return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} -} - -func (d *DotNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeDot -} - -func (d *DotNode) String() string { - return "." 
-} - -func (d *DotNode) tree() *Tree { - return d.tr -} - -func (d *DotNode) Copy() Node { - return d.tr.newDot(d.Pos) -} - -// NilNode holds the special identifier 'nil' representing an untyped nil constant. -type NilNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newNil(pos Pos) *NilNode { - return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} -} - -func (n *NilNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeNil -} - -func (n *NilNode) String() string { - return "nil" -} - -func (n *NilNode) tree() *Tree { - return n.tr -} - -func (n *NilNode) Copy() Node { - return n.tr.newNil(n.Pos) -} - -// FieldNode holds a field (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The period is dropped from each ident. -type FieldNode struct { - NodeType - Pos - tr *Tree - Ident []string // The identifiers in lexical order. -} - -func (t *Tree) newField(pos Pos, ident string) *FieldNode { - return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period -} - -func (f *FieldNode) String() string { - s := "" - for _, id := range f.Ident { - s += "." + id - } - return s -} - -func (f *FieldNode) tree() *Tree { - return f.tr -} - -func (f *FieldNode) Copy() Node { - return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} -} - -// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The periods are dropped from each ident. -type ChainNode struct { - NodeType - Pos - tr *Tree - Node Node - Field []string // The identifiers in lexical order. 
-} - -func (t *Tree) newChain(pos Pos, node Node) *ChainNode { - return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} -} - -// Add adds the named field (which should start with a period) to the end of the chain. -func (c *ChainNode) Add(field string) { - if len(field) == 0 || field[0] != '.' { - panic("no dot in field") - } - field = field[1:] // Remove leading dot. - if field == "" { - panic("empty field") - } - c.Field = append(c.Field, field) -} - -func (c *ChainNode) String() string { - s := c.Node.String() - if _, ok := c.Node.(*PipeNode); ok { - s = "(" + s + ")" - } - for _, field := range c.Field { - s += "." + field - } - return s -} - -func (c *ChainNode) tree() *Tree { - return c.tr -} - -func (c *ChainNode) Copy() Node { - return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} -} - -// BoolNode holds a boolean constant. -type BoolNode struct { - NodeType - Pos - tr *Tree - True bool // The value of the boolean constant. -} - -func (t *Tree) newBool(pos Pos, true bool) *BoolNode { - return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} -} - -func (b *BoolNode) String() string { - if b.True { - return "true" - } - return "false" -} - -func (b *BoolNode) tree() *Tree { - return b.tr -} - -func (b *BoolNode) Copy() Node { - return b.tr.newBool(b.Pos, b.True) -} - -// NumberNode holds a number: signed or unsigned integer, float, or complex. -// The value is parsed and stored under all the types that can represent the value. -// This simulates in a small amount of code the behavior of Go's ideal constants. -type NumberNode struct { - NodeType - Pos - tr *Tree - IsInt bool // Number has an integral value. - IsUint bool // Number has an unsigned integral value. - IsFloat bool // Number has a floating-point value. - IsComplex bool // Number is complex. - Int64 int64 // The signed integer value. - Uint64 uint64 // The unsigned integer value. 
- Float64 float64 // The floating-point value. - Complex128 complex128 // The complex value. - Text string // The original textual representation from the input. -} - -func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { - n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} - switch typ { - case itemCharConstant: - rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) - if err != nil { - return nil, err - } - if tail != "'" { - return nil, fmt.Errorf("malformed character constant: %s", text) - } - n.Int64 = int64(rune) - n.IsInt = true - n.Uint64 = uint64(rune) - n.IsUint = true - n.Float64 = float64(rune) // odd but those are the rules. - n.IsFloat = true - return n, nil - case itemComplex: - // fmt.Sscan can parse the pair, so let it do the work. - if _, err := fmt.Sscan(text, &n.Complex128); err != nil { - return nil, err - } - n.IsComplex = true - n.simplifyComplex() - return n, nil - } - // Imaginary constants can only be complex unless they are zero. - if len(text) > 0 && text[len(text)-1] == 'i' { - f, err := strconv.ParseFloat(text[:len(text)-1], 64) - if err == nil { - n.IsComplex = true - n.Complex128 = complex(0, f) - n.simplifyComplex() - return n, nil - } - } - // Do integer test first so we get 0x123 etc. - u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. - if err == nil { - n.IsUint = true - n.Uint64 = u - } - i, err := strconv.ParseInt(text, 0, 64) - if err == nil { - n.IsInt = true - n.Int64 = i - if i == 0 { - n.IsUint = true // in case of -0. - n.Uint64 = u - } - } - // If an integer extraction succeeded, promote the float. - if n.IsInt { - n.IsFloat = true - n.Float64 = float64(n.Int64) - } else if n.IsUint { - n.IsFloat = true - n.Float64 = float64(n.Uint64) - } else { - f, err := strconv.ParseFloat(text, 64) - if err == nil { - n.IsFloat = true - n.Float64 = f - // If a floating-point extraction succeeded, extract the int if needed. 
- if !n.IsInt && float64(int64(f)) == f { - n.IsInt = true - n.Int64 = int64(f) - } - if !n.IsUint && float64(uint64(f)) == f { - n.IsUint = true - n.Uint64 = uint64(f) - } - } - } - if !n.IsInt && !n.IsUint && !n.IsFloat { - return nil, fmt.Errorf("illegal number syntax: %q", text) - } - return n, nil -} - -// simplifyComplex pulls out any other types that are represented by the complex number. -// These all require that the imaginary part be zero. -func (n *NumberNode) simplifyComplex() { - n.IsFloat = imag(n.Complex128) == 0 - if n.IsFloat { - n.Float64 = real(n.Complex128) - n.IsInt = float64(int64(n.Float64)) == n.Float64 - if n.IsInt { - n.Int64 = int64(n.Float64) - } - n.IsUint = float64(uint64(n.Float64)) == n.Float64 - if n.IsUint { - n.Uint64 = uint64(n.Float64) - } - } -} - -func (n *NumberNode) String() string { - return n.Text -} - -func (n *NumberNode) tree() *Tree { - return n.tr -} - -func (n *NumberNode) Copy() Node { - nn := new(NumberNode) - *nn = *n // Easy, fast, correct. - return nn -} - -// StringNode holds a string constant. The value has been "unquoted". -type StringNode struct { - NodeType - Pos - tr *Tree - Quoted string // The original text of the string, with quotes. - Text string // The string, after quote processing. -} - -func (t *Tree) newString(pos Pos, orig, text string) *StringNode { - return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} -} - -func (s *StringNode) String() string { - return s.Quoted -} - -func (s *StringNode) tree() *Tree { - return s.tr -} - -func (s *StringNode) Copy() Node { - return s.tr.newString(s.Pos, s.Quoted, s.Text) -} - -// endNode represents an {{end}} action. -// It does not appear in the final parse tree. 
-type endNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newEnd(pos Pos) *endNode { - return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} -} - -func (e *endNode) String() string { - return "{{end}}" -} - -func (e *endNode) tree() *Tree { - return e.tr -} - -func (e *endNode) Copy() Node { - return e.tr.newEnd(e.Pos) -} - -// elseNode represents an {{else}} action. Does not appear in the final tree. -type elseNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) -} - -func (t *Tree) newElse(pos Pos, line int) *elseNode { - return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} -} - -func (e *elseNode) Type() NodeType { - return nodeElse -} - -func (e *elseNode) String() string { - return "{{else}}" -} - -func (e *elseNode) tree() *Tree { - return e.tr -} - -func (e *elseNode) Copy() Node { - return e.tr.newElse(e.Pos, e.Line) -} - -// BranchNode is the common representation of if, range, and with. -type BranchNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline to be evaluated. - List *ListNode // What to execute if the value is non-empty. - ElseList *ListNode // What to execute if the value is empty (nil if absent). 
-} - -func (b *BranchNode) String() string { - name := "" - switch b.NodeType { - case NodeIf: - name = "if" - case NodeRange: - name = "range" - case NodeWith: - name = "with" - default: - panic("unknown branch type") - } - if b.ElseList != nil { - return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) - } - return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) -} - -func (b *BranchNode) tree() *Tree { - return b.tr -} - -func (b *BranchNode) Copy() Node { - switch b.NodeType { - case NodeIf: - return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeRange: - return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeWith: - return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - default: - panic("unknown branch type") - } -} - -// IfNode represents an {{if}} action and its commands. -type IfNode struct { - BranchNode -} - -func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { - return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (i *IfNode) Copy() Node { - return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) -} - -// RangeNode represents a {{range}} action and its commands. -type RangeNode struct { - BranchNode -} - -func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { - return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (r *RangeNode) Copy() Node { - return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) -} - -// WithNode represents a {{with}} action and its commands. 
-type WithNode struct { - BranchNode -} - -func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { - return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (w *WithNode) Copy() Node { - return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) -} - -// TemplateNode represents a {{template}} action. -type TemplateNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Name string // The name of the template (unquoted). - Pipe *PipeNode // The command to evaluate as dot for the template. -} - -func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { - return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} -} - -func (t *TemplateNode) String() string { - if t.Pipe == nil { - return fmt.Sprintf("{{template %q}}", t.Name) - } - return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) -} - -func (t *TemplateNode) tree() *Tree { - return t.tr -} - -func (t *TemplateNode) Copy() Node { - return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse nodes. + +package parse + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +var textFormat = "%s" // Changed to "%q" in tests for better error messages. + +// A Node is an element in the parse tree. The interface is trivial. +// The interface contains an unexported method so that only +// types local to this package can satisfy it. +type Node interface { + Type() NodeType + String() string + // Copy does a deep copy of the Node and all its components. 
+ // To avoid type assertions, some XxxNodes also have specialized + // CopyXxx methods that return *XxxNode. + Copy() Node + Position() Pos // byte position of start of node in full original input string + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree +} + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Pos represents a byte position in the original input text from which +// this template was parsed. +type Pos int + +func (p Pos) Position() Pos { + return p +} + +// Type returns itself and provides an easy default implementation +// for embedding in a Node. Embedded in all non-trivial Nodes. +func (t NodeType) Type() NodeType { + return t +} + +const ( + NodeText NodeType = iota // Plain text. + NodeAction // A non-control action such as a field evaluation. + NodeBool // A boolean constant. + NodeChain // A sequence of field accesses. + NodeCommand // An element of a pipeline. + NodeDot // The cursor, dot. + nodeElse // An else action. Not added to tree. + nodeEnd // An end action. Not added to tree. + NodeField // A field or method name. + NodeIdentifier // An identifier; always a function name. + NodeIf // An if action. + NodeList // A list of Nodes. + NodeNil // An untyped nil constant. + NodeNumber // A numerical constant. + NodePipe // A pipeline of commands. + NodeRange // A range action. + NodeString // A string constant. + NodeTemplate // A template invocation action. + NodeVariable // A $ variable. + NodeWith // A with action. +) + +// Nodes. + +// ListNode holds a sequence of nodes. +type ListNode struct { + NodeType + Pos + tr *Tree + Nodes []Node // The element nodes in lexical order. 
+} + +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) tree() *Tree { + return l.tr +} + +func (l *ListNode) String() string { + b := new(bytes.Buffer) + for _, n := range l.Nodes { + fmt.Fprint(b, n) + } + return b.String() +} + +func (l *ListNode) CopyList() *ListNode { + if l == nil { + return l + } + n := l.tr.newList(l.Pos) + for _, elem := range l.Nodes { + n.append(elem.Copy()) + } + return n +} + +func (l *ListNode) Copy() Node { + return l.CopyList() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Pos + tr *Tree + Text []byte // The text; may span newlines. +} + +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf(textFormat, t.Text) +} + +func (t *TextNode) tree() *Tree { + return t.tr +} + +func (t *TextNode) Copy() Node { + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} +} + +// PipeNode holds a pipeline with optional declaration +type PipeNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Decl []*VariableNode // Variable declarations in lexical order. + Cmds []*CommandNode // The commands in lexical order. 
+} + +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +} + +func (p *PipeNode) append(command *CommandNode) { + p.Cmds = append(p.Cmds, command) +} + +func (p *PipeNode) String() string { + s := "" + if len(p.Decl) > 0 { + for i, v := range p.Decl { + if i > 0 { + s += ", " + } + s += v.String() + } + s += " := " + } + for i, c := range p.Cmds { + if i > 0 { + s += " | " + } + s += c.String() + } + return s +} + +func (p *PipeNode) tree() *Tree { + return p.tr +} + +func (p *PipeNode) CopyPipe() *PipeNode { + if p == nil { + return p + } + var decl []*VariableNode + for _, d := range p.Decl { + decl = append(decl, d.Copy().(*VariableNode)) + } + n := p.tr.newPipeline(p.Pos, p.Line, decl) + for _, c := range p.Cmds { + n.append(c.Copy().(*CommandNode)) + } + return n +} + +func (p *PipeNode) Copy() Node { + return p.CopyPipe() +} + +// ActionNode holds an action (something bounded by delimiters). +// Control actions have their own nodes; ActionNode represents simple +// ones such as field evaluations and parenthesized pipelines. +type ActionNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline in the action. +} + +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +} + +func (a *ActionNode) String() string { + return fmt.Sprintf("{{%s}}", a.Pipe) + +} + +func (a *ActionNode) tree() *Tree { + return a.tr +} + +func (a *ActionNode) Copy() Node { + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + +} + +// CommandNode holds a command (a pipeline inside an evaluating action). +type CommandNode struct { + NodeType + Pos + tr *Tree + Args []Node // Arguments in lexical order: Identifier, field, or constant. 
+} + +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} +} + +func (c *CommandNode) append(arg Node) { + c.Args = append(c.Args, arg) +} + +func (c *CommandNode) String() string { + s := "" + for i, arg := range c.Args { + if i > 0 { + s += " " + } + if arg, ok := arg.(*PipeNode); ok { + s += "(" + arg.String() + ")" + continue + } + s += arg.String() + } + return s +} + +func (c *CommandNode) tree() *Tree { + return c.tr +} + +func (c *CommandNode) Copy() Node { + if c == nil { + return c + } + n := c.tr.newCommand(c.Pos) + for _, c := range c.Args { + n.append(c.Copy()) + } + return n +} + +// IdentifierNode holds an identifier. +type IdentifierNode struct { + NodeType + Pos + tr *Tree + Ident string // The identifier's name. +} + +// NewIdentifier returns a new IdentifierNode with the given identifier name. +func NewIdentifier(ident string) *IdentifierNode { + return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} +} + +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { + i.Pos = pos + return i +} + +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + +func (i *IdentifierNode) String() string { + return i.Ident +} + +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + +func (i *IdentifierNode) Copy() Node { + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) +} + +// VariableNode holds a list of variable names, possibly with chained field +// accesses. The dollar sign is part of the (first) name. +type VariableNode struct { + NodeType + Pos + tr *Tree + Ident []string // Variable name and fields in lexical order. 
+} + +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +} + +func (v *VariableNode) String() string { + s := "" + for i, id := range v.Ident { + if i > 0 { + s += "." + } + s += id + } + return s +} + +func (v *VariableNode) tree() *Tree { + return v.tr +} + +func (v *VariableNode) Copy() Node { + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} +} + +// DotNode holds the special identifier '.'. +type DotNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} +} + +func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeDot +} + +func (d *DotNode) String() string { + return "." +} + +func (d *DotNode) tree() *Tree { + return d.tr +} + +func (d *DotNode) Copy() Node { + return d.tr.newDot(d.Pos) +} + +// NilNode holds the special identifier 'nil' representing an untyped nil constant. +type NilNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} +} + +func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeNil +} + +func (n *NilNode) String() string { + return "nil" +} + +func (n *NilNode) tree() *Tree { + return n.tr +} + +func (n *NilNode) Copy() Node { + return n.tr.newNil(n.Pos) +} + +// FieldNode holds a field (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The period is dropped from each ident. 
+type FieldNode struct { + NodeType + Pos + tr *Tree + Ident []string // The identifiers in lexical order. +} + +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +} + +func (f *FieldNode) String() string { + s := "" + for _, id := range f.Ident { + s += "." + id + } + return s +} + +func (f *FieldNode) tree() *Tree { + return f.tr +} + +func (f *FieldNode) Copy() Node { + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} +} + +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The periods are dropped from each ident. +type ChainNode struct { + NodeType + Pos + tr *Tree + Node Node + Field []string // The identifiers in lexical order. +} + +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} +} + +// Add adds the named field (which should start with a period) to the end of the chain. +func (c *ChainNode) Add(field string) { + if len(field) == 0 || field[0] != '.' { + panic("no dot in field") + } + field = field[1:] // Remove leading dot. + if field == "" { + panic("empty field") + } + c.Field = append(c.Field, field) +} + +func (c *ChainNode) String() string { + s := c.Node.String() + if _, ok := c.Node.(*PipeNode); ok { + s = "(" + s + ")" + } + for _, field := range c.Field { + s += "." + field + } + return s +} + +func (c *ChainNode) tree() *Tree { + return c.tr +} + +func (c *ChainNode) Copy() Node { + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} +} + +// BoolNode holds a boolean constant. +type BoolNode struct { + NodeType + Pos + tr *Tree + True bool // The value of the boolean constant. 
+} + +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} +} + +func (b *BoolNode) String() string { + if b.True { + return "true" + } + return "false" +} + +func (b *BoolNode) tree() *Tree { + return b.tr +} + +func (b *BoolNode) Copy() Node { + return b.tr.newBool(b.Pos, b.True) +} + +// NumberNode holds a number: signed or unsigned integer, float, or complex. +// The value is parsed and stored under all the types that can represent the value. +// This simulates in a small amount of code the behavior of Go's ideal constants. +type NumberNode struct { + NodeType + Pos + tr *Tree + IsInt bool // Number has an integral value. + IsUint bool // Number has an unsigned integral value. + IsFloat bool // Number has a floating-point value. + IsComplex bool // Number is complex. + Int64 int64 // The signed integer value. + Uint64 uint64 // The unsigned integer value. + Float64 float64 // The floating-point value. + Complex128 complex128 // The complex value. + Text string // The original textual representation from the input. +} + +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} + switch typ { + case itemCharConstant: + rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) + if err != nil { + return nil, err + } + if tail != "'" { + return nil, fmt.Errorf("malformed character constant: %s", text) + } + n.Int64 = int64(rune) + n.IsInt = true + n.Uint64 = uint64(rune) + n.IsUint = true + n.Float64 = float64(rune) // odd but those are the rules. + n.IsFloat = true + return n, nil + case itemComplex: + // fmt.Sscan can parse the pair, so let it do the work. + if _, err := fmt.Sscan(text, &n.Complex128); err != nil { + return nil, err + } + n.IsComplex = true + n.simplifyComplex() + return n, nil + } + // Imaginary constants can only be complex unless they are zero. 
+ if len(text) > 0 && text[len(text)-1] == 'i' { + f, err := strconv.ParseFloat(text[:len(text)-1], 64) + if err == nil { + n.IsComplex = true + n.Complex128 = complex(0, f) + n.simplifyComplex() + return n, nil + } + } + // Do integer test first so we get 0x123 etc. + u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. + if err == nil { + n.IsUint = true + n.Uint64 = u + } + i, err := strconv.ParseInt(text, 0, 64) + if err == nil { + n.IsInt = true + n.Int64 = i + if i == 0 { + n.IsUint = true // in case of -0. + n.Uint64 = u + } + } + // If an integer extraction succeeded, promote the float. + if n.IsInt { + n.IsFloat = true + n.Float64 = float64(n.Int64) + } else if n.IsUint { + n.IsFloat = true + n.Float64 = float64(n.Uint64) + } else { + f, err := strconv.ParseFloat(text, 64) + if err == nil { + n.IsFloat = true + n.Float64 = f + // If a floating-point extraction succeeded, extract the int if needed. + if !n.IsInt && float64(int64(f)) == f { + n.IsInt = true + n.Int64 = int64(f) + } + if !n.IsUint && float64(uint64(f)) == f { + n.IsUint = true + n.Uint64 = uint64(f) + } + } + } + if !n.IsInt && !n.IsUint && !n.IsFloat { + return nil, fmt.Errorf("illegal number syntax: %q", text) + } + return n, nil +} + +// simplifyComplex pulls out any other types that are represented by the complex number. +// These all require that the imaginary part be zero. +func (n *NumberNode) simplifyComplex() { + n.IsFloat = imag(n.Complex128) == 0 + if n.IsFloat { + n.Float64 = real(n.Complex128) + n.IsInt = float64(int64(n.Float64)) == n.Float64 + if n.IsInt { + n.Int64 = int64(n.Float64) + } + n.IsUint = float64(uint64(n.Float64)) == n.Float64 + if n.IsUint { + n.Uint64 = uint64(n.Float64) + } + } +} + +func (n *NumberNode) String() string { + return n.Text +} + +func (n *NumberNode) tree() *Tree { + return n.tr +} + +func (n *NumberNode) Copy() Node { + nn := new(NumberNode) + *nn = *n // Easy, fast, correct. 
+ return nn +} + +// StringNode holds a string constant. The value has been "unquoted". +type StringNode struct { + NodeType + Pos + tr *Tree + Quoted string // The original text of the string, with quotes. + Text string // The string, after quote processing. +} + +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +} + +func (s *StringNode) String() string { + return s.Quoted +} + +func (s *StringNode) tree() *Tree { + return s.tr +} + +func (s *StringNode) Copy() Node { + return s.tr.newString(s.Pos, s.Quoted, s.Text) +} + +// endNode represents an {{end}} action. +// It does not appear in the final parse tree. +type endNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} +} + +func (e *endNode) String() string { + return "{{end}}" +} + +func (e *endNode) tree() *Tree { + return e.tr +} + +func (e *endNode) Copy() Node { + return e.tr.newEnd(e.Pos) +} + +// elseNode represents an {{else}} action. Does not appear in the final tree. +type elseNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) +} + +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} +} + +func (e *elseNode) Type() NodeType { + return nodeElse +} + +func (e *elseNode) String() string { + return "{{else}}" +} + +func (e *elseNode) tree() *Tree { + return e.tr +} + +func (e *elseNode) Copy() Node { + return e.tr.newElse(e.Pos, e.Line) +} + +// BranchNode is the common representation of if, range, and with. +type BranchNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline to be evaluated. + List *ListNode // What to execute if the value is non-empty. 
+ ElseList *ListNode // What to execute if the value is empty (nil if absent). +} + +func (b *BranchNode) String() string { + name := "" + switch b.NodeType { + case NodeIf: + name = "if" + case NodeRange: + name = "range" + case NodeWith: + name = "with" + default: + panic("unknown branch type") + } + if b.ElseList != nil { + return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) + } + return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) +} + +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + +// IfNode represents an {{if}} action and its commands. +type IfNode struct { + BranchNode +} + +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (i *IfNode) Copy() Node { + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) +} + +// RangeNode represents a {{range}} action and its commands. +type RangeNode struct { + BranchNode +} + +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (r *RangeNode) Copy() Node { + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) +} + +// WithNode represents a {{with}} action and its commands. 
+type WithNode struct { + BranchNode +} + +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (w *WithNode) Copy() Node { + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) +} + +// TemplateNode represents a {{template}} action. +type TemplateNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Name string // The name of the template (unquoted). + Pipe *PipeNode // The command to evaluate as dot for the template. +} + +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} +} + +func (t *TemplateNode) String() string { + if t.Pipe == nil { + return fmt.Sprintf("{{template %q}}", t.Name) + } + return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) +} + +func (t *TemplateNode) tree() *Tree { + return t.tr +} + +func (t *TemplateNode) Copy() Node { + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) +} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go index 0d77ade..aabf4e7 100644 --- a/vendor/github.com/alecthomas/template/parse/parse.go +++ b/vendor/github.com/alecthomas/template/parse/parse.go @@ -1,700 +1,700 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package parse builds parse trees for templates as defined by text/template -// and html/template. Clients should use those packages to construct templates -// rather than this one, which provides shared internal data structures not -// intended for general use. 
-package parse - -import ( - "bytes" - "fmt" - "runtime" - "strconv" - "strings" -) - -// Tree is the representation of a single parsed template. -type Tree struct { - Name string // name of the template represented by the tree. - ParseName string // name of the top-level template during parsing, for error messages. - Root *ListNode // top-level root of the tree. - text string // text parsed to create the template (or its parent) - // Parsing only; cleared after parse. - funcs []map[string]interface{} - lex *lexer - token [3]item // three-token lookahead for parser. - peekCount int - vars []string // variables defined at the moment. -} - -// Copy returns a copy of the Tree. Any parsing state is discarded. -func (t *Tree) Copy() *Tree { - if t == nil { - return nil - } - return &Tree{ - Name: t.Name, - ParseName: t.ParseName, - Root: t.Root.CopyList(), - text: t.text, - } -} - -// Parse returns a map from template name to parse.Tree, created by parsing the -// templates described in the argument string. The top-level template will be -// given the specified name. If an error is encountered, parsing stops and an -// empty map is returned with the error. -func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { - treeSet = make(map[string]*Tree) - t := New(name) - t.text = text - _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) - return -} - -// next returns the next token. -func (t *Tree) next() item { - if t.peekCount > 0 { - t.peekCount-- - } else { - t.token[0] = t.lex.nextItem() - } - return t.token[t.peekCount] -} - -// backup backs the input stream up one token. -func (t *Tree) backup() { - t.peekCount++ -} - -// backup2 backs the input stream up two tokens. -// The zeroth token is already there. -func (t *Tree) backup2(t1 item) { - t.token[1] = t1 - t.peekCount = 2 -} - -// backup3 backs the input stream up three tokens -// The zeroth token is already there. 
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. - t.token[1] = t1 - t.token[2] = t2 - t.peekCount = 3 -} - -// peek returns but does not consume the next token. -func (t *Tree) peek() item { - if t.peekCount > 0 { - return t.token[t.peekCount-1] - } - t.peekCount = 1 - t.token[0] = t.lex.nextItem() - return t.token[0] -} - -// nextNonSpace returns the next non-space token. -func (t *Tree) nextNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - return token -} - -// peekNonSpace returns but does not consume the next non-space token. -func (t *Tree) peekNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - t.backup() - return token -} - -// Parsing. - -// New allocates a new parse tree with the given name. -func New(name string, funcs ...map[string]interface{}) *Tree { - return &Tree{ - Name: name, - funcs: funcs, - } -} - -// ErrorContext returns a textual representation of the location of the node in the input text. -// The receiver is only used when the node does not have a pointer to the tree inside, -// which can occur in old code. -func (t *Tree) ErrorContext(n Node) (location, context string) { - pos := int(n.Position()) - tree := n.tree() - if tree == nil { - tree = t - } - text := tree.text[:pos] - byteNum := strings.LastIndex(text, "\n") - if byteNum == -1 { - byteNum = pos // On first line. - } else { - byteNum++ // After the newline. - byteNum = pos - byteNum - } - lineNum := 1 + strings.Count(text, "\n") - context = n.String() - if len(context) > 20 { - context = fmt.Sprintf("%.20s...", context) - } - return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context -} - -// errorf formats the error and terminates processing. 
-func (t *Tree) errorf(format string, args ...interface{}) { - t.Root = nil - format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -// error terminates processing. -func (t *Tree) error(err error) { - t.errorf("%s", err) -} - -// expect consumes the next token and guarantees it has the required type. -func (t *Tree) expect(expected itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected { - t.unexpected(token, context) - } - return token -} - -// expectOneOf consumes the next token and guarantees it has one of the required types. -func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected1 && token.typ != expected2 { - t.unexpected(token, context) - } - return token -} - -// unexpected complains about the token and terminates processing. -func (t *Tree) unexpected(token item, context string) { - t.errorf("unexpected %s in %s", token, context) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (t *Tree) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - if t != nil { - t.stopParse() - } - *errp = e.(error) - } - return -} - -// startParse initializes the parser, using the lexer. -func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { - t.Root = nil - t.lex = lex - t.vars = []string{"$"} - t.funcs = funcs -} - -// stopParse terminates parsing. -func (t *Tree) stopParse() { - t.lex = nil - t.vars = nil - t.funcs = nil -} - -// Parse parses the template definition string to construct a representation of -// the template for execution. If either action delimiter string is empty, the -// default ("{{" or "}}") is used. Embedded template definitions are added to -// the treeSet map. 
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { - defer t.recover(&err) - t.ParseName = t.Name - t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) - t.text = text - t.parse(treeSet) - t.add(treeSet) - t.stopParse() - return t, nil -} - -// add adds tree to the treeSet. -func (t *Tree) add(treeSet map[string]*Tree) { - tree := treeSet[t.Name] - if tree == nil || IsEmptyTree(tree.Root) { - treeSet[t.Name] = t - return - } - if !IsEmptyTree(t.Root) { - t.errorf("template: multiple definition of template %q", t.Name) - } -} - -// IsEmptyTree reports whether this tree (node) is empty of everything but space. -func IsEmptyTree(n Node) bool { - switch n := n.(type) { - case nil: - return true - case *ActionNode: - case *IfNode: - case *ListNode: - for _, node := range n.Nodes { - if !IsEmptyTree(node) { - return false - } - } - return true - case *RangeNode: - case *TemplateNode: - case *TextNode: - return len(bytes.TrimSpace(n.Text)) == 0 - case *WithNode: - default: - panic("unknown node: " + n.String()) - } - return false -} - -// parse is the top-level parser for a template, essentially the same -// as itemList except it also parses {{define}} actions. -// It runs to EOF. -func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { - t.Root = t.newList(t.peek().pos) - for t.peek().typ != itemEOF { - if t.peek().typ == itemLeftDelim { - delim := t.next() - if t.nextNonSpace().typ == itemDefine { - newT := New("definition") // name will be updated once we know it. - newT.text = t.text - newT.ParseName = t.ParseName - newT.startParse(t.funcs, t.lex) - newT.parseDefinition(treeSet) - continue - } - t.backup2(delim) - } - n := t.textOrAction() - if n.Type() == nodeEnd { - t.errorf("unexpected %s", n) - } - t.Root.append(n) - } - return nil -} - -// parseDefinition parses a {{define}} ... {{end}} template definition and -// installs the definition in the treeSet map. 
The "define" keyword has already -// been scanned. -func (t *Tree) parseDefinition(treeSet map[string]*Tree) { - const context = "define clause" - name := t.expectOneOf(itemString, itemRawString, context) - var err error - t.Name, err = strconv.Unquote(name.val) - if err != nil { - t.error(err) - } - t.expect(itemRightDelim, context) - var end Node - t.Root, end = t.itemList() - if end.Type() != nodeEnd { - t.errorf("unexpected %s in %s", end, context) - } - t.add(treeSet) - t.stopParse() -} - -// itemList: -// textOrAction* -// Terminates at {{end}} or {{else}}, returned separately. -func (t *Tree) itemList() (list *ListNode, next Node) { - list = t.newList(t.peekNonSpace().pos) - for t.peekNonSpace().typ != itemEOF { - n := t.textOrAction() - switch n.Type() { - case nodeEnd, nodeElse: - return list, n - } - list.append(n) - } - t.errorf("unexpected EOF") - return -} - -// textOrAction: -// text | action -func (t *Tree) textOrAction() Node { - switch token := t.nextNonSpace(); token.typ { - case itemElideNewline: - return t.elideNewline() - case itemText: - return t.newText(token.pos, token.val) - case itemLeftDelim: - return t.action() - default: - t.unexpected(token, "input") - } - return nil -} - -// elideNewline: -// Remove newlines trailing rightDelim if \\ is present. -func (t *Tree) elideNewline() Node { - token := t.peek() - if token.typ != itemText { - t.unexpected(token, "input") - return nil - } - - t.next() - stripped := strings.TrimLeft(token.val, "\n\r") - diff := len(token.val) - len(stripped) - if diff > 0 { - // This is a bit nasty. We mutate the token in-place to remove - // preceding newlines. - token.pos += Pos(diff) - token.val = stripped - } - return t.newText(token.pos, token.val) -} - -// Action: -// control -// command ("|" command)* -// Left delim is past. Now get actions. -// First word could be a keyword such as range. 
-func (t *Tree) action() (n Node) { - switch token := t.nextNonSpace(); token.typ { - case itemElse: - return t.elseControl() - case itemEnd: - return t.endControl() - case itemIf: - return t.ifControl() - case itemRange: - return t.rangeControl() - case itemTemplate: - return t.templateControl() - case itemWith: - return t.withControl() - } - t.backup() - // Do not pop variables; they persist until "end". - return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) -} - -// Pipeline: -// declarations? command ('|' command)* -func (t *Tree) pipeline(context string) (pipe *PipeNode) { - var decl []*VariableNode - pos := t.peekNonSpace().pos - // Are there declarations? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. 
- tokenAfterVariable := t.peek() - if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - decl = append(decl, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemChar && next.val == "," { - if context == "range" && len(decl) < 2 { - continue - } - t.errorf("too many declarations in %s", context) - } - } else if tokenAfterVariable.typ == itemSpace { - t.backup3(v, tokenAfterVariable) - } else { - t.backup2(v) - } - } - break - } - pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) - for { - switch token := t.nextNonSpace(); token.typ { - case itemRightDelim, itemRightParen: - if len(pipe.Cmds) == 0 { - t.errorf("missing value for %s", context) - } - if token.typ == itemRightParen { - t.backup() - } - return - case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, - itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: - t.backup() - pipe.append(t.command()) - default: - t.unexpected(token, context) - } - } -} - -func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { - defer t.popVars(len(t.vars)) - line = t.lex.lineNumber() - pipe = t.pipeline(context) - var next Node - list, next = t.itemList() - switch next.Type() { - case nodeEnd: //done - case nodeElse: - if allowElseIf { - // Special case for "else if". If the "else" is followed immediately by an "if", - // the elseControl will have left the "if" token pending. Treat - // {{if a}}_{{else if b}}_{{end}} - // as - // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. - // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} - // is assumed. This technique works even for long if-else-if chains. - // TODO: Should we allow else-if in with and range? - if t.peek().typ == itemIf { - t.next() // Consume the "if" token. 
- elseList = t.newList(next.Position()) - elseList.append(t.ifControl()) - // Do not consume the next item - only one {{end}} required. - break - } - } - elseList, next = t.itemList() - if next.Type() != nodeEnd { - t.errorf("expected end; found %s", next) - } - } - return pipe.Position(), line, pipe, list, elseList -} - -// If: -// {{if pipeline}} itemList {{end}} -// {{if pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) ifControl() Node { - return t.newIf(t.parseControl(true, "if")) -} - -// Range: -// {{range pipeline}} itemList {{end}} -// {{range pipeline}} itemList {{else}} itemList {{end}} -// Range keyword is past. -func (t *Tree) rangeControl() Node { - return t.newRange(t.parseControl(false, "range")) -} - -// With: -// {{with pipeline}} itemList {{end}} -// {{with pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) withControl() Node { - return t.newWith(t.parseControl(false, "with")) -} - -// End: -// {{end}} -// End keyword is past. -func (t *Tree) endControl() Node { - return t.newEnd(t.expect(itemRightDelim, "end").pos) -} - -// Else: -// {{else}} -// Else keyword is past. -func (t *Tree) elseControl() Node { - // Special case for "else if". - peek := t.peekNonSpace() - if peek.typ == itemIf { - // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". - return t.newElse(peek.pos, t.lex.lineNumber()) - } - return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) -} - -// Template: -// {{template stringValue pipeline}} -// Template keyword is past. The name must be something that can evaluate -// to a string. 
-func (t *Tree) templateControl() Node { - var name string - token := t.nextNonSpace() - switch token.typ { - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - name = s - default: - t.unexpected(token, "template invocation") - } - var pipe *PipeNode - if t.nextNonSpace().typ != itemRightDelim { - t.backup() - // Do not pop variables; they persist until "end". - pipe = t.pipeline("template") - } - return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) -} - -// command: -// operand (space operand)* -// space-separated arguments up to a pipeline character or right delimiter. -// we consume the pipe character but leave the right delim to terminate the action. -func (t *Tree) command() *CommandNode { - cmd := t.newCommand(t.peekNonSpace().pos) - for { - t.peekNonSpace() // skip leading spaces. - operand := t.operand() - if operand != nil { - cmd.append(operand) - } - switch token := t.next(); token.typ { - case itemSpace: - continue - case itemError: - t.errorf("%s", token.val) - case itemRightDelim, itemRightParen: - t.backup() - case itemPipe: - default: - t.errorf("unexpected %s in operand; missing space?", token) - } - break - } - if len(cmd.Args) == 0 { - t.errorf("empty command") - } - return cmd -} - -// operand: -// term .Field* -// An operand is a space-separated component of a command, -// a term possibly followed by field accesses. -// A nil return means the next item is not an operand. -func (t *Tree) operand() Node { - node := t.term() - if node == nil { - return nil - } - if t.peek().typ == itemField { - chain := t.newChain(t.peek().pos, node) - for t.peek().typ == itemField { - chain.Add(t.next().val) - } - // Compatibility with original API: If the term is of type NodeField - // or NodeVariable, just put more fields on the original. - // Otherwise, keep the Chain node. - // TODO: Switch to Chains always when we can. 
- switch node.Type() { - case NodeField: - node = t.newField(chain.Position(), chain.String()) - case NodeVariable: - node = t.newVariable(chain.Position(), chain.String()) - default: - node = chain - } - } - return node -} - -// term: -// literal (number, string, nil, boolean) -// function (identifier) -// . -// .Field -// $ -// '(' pipeline ')' -// A term is a simple "expression". -// A nil return means the next item is not a term. -func (t *Tree) term() Node { - switch token := t.nextNonSpace(); token.typ { - case itemError: - t.errorf("%s", token.val) - case itemIdentifier: - if !t.hasFunction(token.val) { - t.errorf("function %q not defined", token.val) - } - return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) - case itemDot: - return t.newDot(token.pos) - case itemNil: - return t.newNil(token.pos) - case itemVariable: - return t.useVar(token.pos, token.val) - case itemField: - return t.newField(token.pos, token.val) - case itemBool: - return t.newBool(token.pos, token.val == "true") - case itemCharConstant, itemComplex, itemNumber: - number, err := t.newNumber(token.pos, token.val, token.typ) - if err != nil { - t.error(err) - } - return number - case itemLeftParen: - pipe := t.pipeline("parenthesized pipeline") - if token := t.next(); token.typ != itemRightParen { - t.errorf("unclosed right paren: unexpected %s", token) - } - return pipe - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - return t.newString(token.pos, token.val, s) - } - t.backup() - return nil -} - -// hasFunction reports if a function name exists in the Tree's maps. -func (t *Tree) hasFunction(name string) bool { - for _, funcMap := range t.funcs { - if funcMap == nil { - continue - } - if funcMap[name] != nil { - return true - } - } - return false -} - -// popVars trims the variable list to the specified length -func (t *Tree) popVars(n int) { - t.vars = t.vars[:n] -} - -// useVar returns a node for a variable reference. 
It errors if the -// variable is not defined. -func (t *Tree) useVar(pos Pos, name string) Node { - v := t.newVariable(pos, name) - for _, varName := range t.vars { - if varName == v.Ident[0] { - return v - } - } - t.errorf("undefined variable %q", v.Ident[0]) - return nil -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package parse builds parse trees for templates as defined by text/template +// and html/template. Clients should use those packages to construct templates +// rather than this one, which provides shared internal data structures not +// intended for general use. +package parse + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "strings" +) + +// Tree is the representation of a single parsed template. +type Tree struct { + Name string // name of the template represented by the tree. + ParseName string // name of the top-level template during parsing, for error messages. + Root *ListNode // top-level root of the tree. + text string // text parsed to create the template (or its parent) + // Parsing only; cleared after parse. + funcs []map[string]interface{} + lex *lexer + token [3]item // three-token lookahead for parser. + peekCount int + vars []string // variables defined at the moment. +} + +// Copy returns a copy of the Tree. Any parsing state is discarded. +func (t *Tree) Copy() *Tree { + if t == nil { + return nil + } + return &Tree{ + Name: t.Name, + ParseName: t.ParseName, + Root: t.Root.CopyList(), + text: t.text, + } +} + +// Parse returns a map from template name to parse.Tree, created by parsing the +// templates described in the argument string. The top-level template will be +// given the specified name. If an error is encountered, parsing stops and an +// empty map is returned with the error. 
+func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { + treeSet = make(map[string]*Tree) + t := New(name) + t.text = text + _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) + return +} + +// next returns the next token. +func (t *Tree) next() item { + if t.peekCount > 0 { + t.peekCount-- + } else { + t.token[0] = t.lex.nextItem() + } + return t.token[t.peekCount] +} + +// backup backs the input stream up one token. +func (t *Tree) backup() { + t.peekCount++ +} + +// backup2 backs the input stream up two tokens. +// The zeroth token is already there. +func (t *Tree) backup2(t1 item) { + t.token[1] = t1 + t.peekCount = 2 +} + +// backup3 backs the input stream up three tokens +// The zeroth token is already there. +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. + t.token[1] = t1 + t.token[2] = t2 + t.peekCount = 3 +} + +// peek returns but does not consume the next token. +func (t *Tree) peek() item { + if t.peekCount > 0 { + return t.token[t.peekCount-1] + } + t.peekCount = 1 + t.token[0] = t.lex.nextItem() + return t.token[0] +} + +// nextNonSpace returns the next non-space token. +func (t *Tree) nextNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + return token +} + +// peekNonSpace returns but does not consume the next non-space token. +func (t *Tree) peekNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + t.backup() + return token +} + +// Parsing. + +// New allocates a new parse tree with the given name. +func New(name string, funcs ...map[string]interface{}) *Tree { + return &Tree{ + Name: name, + funcs: funcs, + } +} + +// ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. 
+func (t *Tree) ErrorContext(n Node) (location, context string) { + pos := int(n.Position()) + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] + byteNum := strings.LastIndex(text, "\n") + if byteNum == -1 { + byteNum = pos // On first line. + } else { + byteNum++ // After the newline. + byteNum = pos - byteNum + } + lineNum := 1 + strings.Count(text, "\n") + context = n.String() + if len(context) > 20 { + context = fmt.Sprintf("%.20s...", context) + } + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context +} + +// errorf formats the error and terminates processing. +func (t *Tree) errorf(format string, args ...interface{}) { + t.Root = nil + format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (t *Tree) error(err error) { + t.errorf("%s", err) +} + +// expect consumes the next token and guarantees it has the required type. +func (t *Tree) expect(expected itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected { + t.unexpected(token, context) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected1 && token.typ != expected2 { + t.unexpected(token, context) + } + return token +} + +// unexpected complains about the token and terminates processing. +func (t *Tree) unexpected(token item, context string) { + t.errorf("unexpected %s in %s", token, context) +} + +// recover is the handler that turns panics into returns from the top level of Parse. 
+func (t *Tree) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + if t != nil { + t.stopParse() + } + *errp = e.(error) + } + return +} + +// startParse initializes the parser, using the lexer. +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { + t.Root = nil + t.lex = lex + t.vars = []string{"$"} + t.funcs = funcs +} + +// stopParse terminates parsing. +func (t *Tree) stopParse() { + t.lex = nil + t.vars = nil + t.funcs = nil +} + +// Parse parses the template definition string to construct a representation of +// the template for execution. If either action delimiter string is empty, the +// default ("{{" or "}}") is used. Embedded template definitions are added to +// the treeSet map. +func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { + defer t.recover(&err) + t.ParseName = t.Name + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) + t.text = text + t.parse(treeSet) + t.add(treeSet) + t.stopParse() + return t, nil +} + +// add adds tree to the treeSet. +func (t *Tree) add(treeSet map[string]*Tree) { + tree := treeSet[t.Name] + if tree == nil || IsEmptyTree(tree.Root) { + treeSet[t.Name] = t + return + } + if !IsEmptyTree(t.Root) { + t.errorf("template: multiple definition of template %q", t.Name) + } +} + +// IsEmptyTree reports whether this tree (node) is empty of everything but space. 
+func IsEmptyTree(n Node) bool { + switch n := n.(type) { + case nil: + return true + case *ActionNode: + case *IfNode: + case *ListNode: + for _, node := range n.Nodes { + if !IsEmptyTree(node) { + return false + } + } + return true + case *RangeNode: + case *TemplateNode: + case *TextNode: + return len(bytes.TrimSpace(n.Text)) == 0 + case *WithNode: + default: + panic("unknown node: " + n.String()) + } + return false +} + +// parse is the top-level parser for a template, essentially the same +// as itemList except it also parses {{define}} actions. +// It runs to EOF. +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { + t.Root = t.newList(t.peek().pos) + for t.peek().typ != itemEOF { + if t.peek().typ == itemLeftDelim { + delim := t.next() + if t.nextNonSpace().typ == itemDefine { + newT := New("definition") // name will be updated once we know it. + newT.text = t.text + newT.ParseName = t.ParseName + newT.startParse(t.funcs, t.lex) + newT.parseDefinition(treeSet) + continue + } + t.backup2(delim) + } + n := t.textOrAction() + if n.Type() == nodeEnd { + t.errorf("unexpected %s", n) + } + t.Root.append(n) + } + return nil +} + +// parseDefinition parses a {{define}} ... {{end}} template definition and +// installs the definition in the treeSet map. The "define" keyword has already +// been scanned. +func (t *Tree) parseDefinition(treeSet map[string]*Tree) { + const context = "define clause" + name := t.expectOneOf(itemString, itemRawString, context) + var err error + t.Name, err = strconv.Unquote(name.val) + if err != nil { + t.error(err) + } + t.expect(itemRightDelim, context) + var end Node + t.Root, end = t.itemList() + if end.Type() != nodeEnd { + t.errorf("unexpected %s in %s", end, context) + } + t.add(treeSet) + t.stopParse() +} + +// itemList: +// textOrAction* +// Terminates at {{end}} or {{else}}, returned separately. 
+func (t *Tree) itemList() (list *ListNode, next Node) { + list = t.newList(t.peekNonSpace().pos) + for t.peekNonSpace().typ != itemEOF { + n := t.textOrAction() + switch n.Type() { + case nodeEnd, nodeElse: + return list, n + } + list.append(n) + } + t.errorf("unexpected EOF") + return +} + +// textOrAction: +// text | action +func (t *Tree) textOrAction() Node { + switch token := t.nextNonSpace(); token.typ { + case itemElideNewline: + return t.elideNewline() + case itemText: + return t.newText(token.pos, token.val) + case itemLeftDelim: + return t.action() + default: + t.unexpected(token, "input") + } + return nil +} + +// elideNewline: +// Remove newlines trailing rightDelim if \\ is present. +func (t *Tree) elideNewline() Node { + token := t.peek() + if token.typ != itemText { + t.unexpected(token, "input") + return nil + } + + t.next() + stripped := strings.TrimLeft(token.val, "\n\r") + diff := len(token.val) - len(stripped) + if diff > 0 { + // This is a bit nasty. We mutate the token in-place to remove + // preceding newlines. + token.pos += Pos(diff) + token.val = stripped + } + return t.newText(token.pos, token.val) +} + +// Action: +// control +// command ("|" command)* +// Left delim is past. Now get actions. +// First word could be a keyword such as range. +func (t *Tree) action() (n Node) { + switch token := t.nextNonSpace(); token.typ { + case itemElse: + return t.elseControl() + case itemEnd: + return t.endControl() + case itemIf: + return t.ifControl() + case itemRange: + return t.rangeControl() + case itemTemplate: + return t.templateControl() + case itemWith: + return t.withControl() + } + t.backup() + // Do not pop variables; they persist until "end". + return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) +} + +// Pipeline: +// declarations? command ('|' command)* +func (t *Tree) pipeline(context string) (pipe *PipeNode) { + var decl []*VariableNode + pos := t.peekNonSpace().pos + // Are there declarations? 
+ for { + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. + tokenAfterVariable := t.peek() + if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { + t.nextNonSpace() + variable := t.newVariable(v.pos, v.val) + decl = append(decl, variable) + t.vars = append(t.vars, v.val) + if next.typ == itemChar && next.val == "," { + if context == "range" && len(decl) < 2 { + continue + } + t.errorf("too many declarations in %s", context) + } + } else if tokenAfterVariable.typ == itemSpace { + t.backup3(v, tokenAfterVariable) + } else { + t.backup2(v) + } + } + break + } + pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) + for { + switch token := t.nextNonSpace(); token.typ { + case itemRightDelim, itemRightParen: + if len(pipe.Cmds) == 0 { + t.errorf("missing value for %s", context) + } + if token.typ == itemRightParen { + t.backup() + } + return + case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, + itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: + t.backup() + pipe.append(t.command()) + default: + t.unexpected(token, context) + } + } +} + +func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { + defer t.popVars(len(t.vars)) + line = t.lex.lineNumber() + pipe = t.pipeline(context) + var next Node + list, next = t.itemList() + switch next.Type() { + case nodeEnd: //done + case nodeElse: + if allowElseIf { + // Special case for "else if". If the "else" is followed immediately by an "if", + // the elseControl will have left the "if" token pending. 
Treat + // {{if a}}_{{else if b}}_{{end}} + // as + // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. + // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} + // is assumed. This technique works even for long if-else-if chains. + // TODO: Should we allow else-if in with and range? + if t.peek().typ == itemIf { + t.next() // Consume the "if" token. + elseList = t.newList(next.Position()) + elseList.append(t.ifControl()) + // Do not consume the next item - only one {{end}} required. + break + } + } + elseList, next = t.itemList() + if next.Type() != nodeEnd { + t.errorf("expected end; found %s", next) + } + } + return pipe.Position(), line, pipe, list, elseList +} + +// If: +// {{if pipeline}} itemList {{end}} +// {{if pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) ifControl() Node { + return t.newIf(t.parseControl(true, "if")) +} + +// Range: +// {{range pipeline}} itemList {{end}} +// {{range pipeline}} itemList {{else}} itemList {{end}} +// Range keyword is past. +func (t *Tree) rangeControl() Node { + return t.newRange(t.parseControl(false, "range")) +} + +// With: +// {{with pipeline}} itemList {{end}} +// {{with pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) withControl() Node { + return t.newWith(t.parseControl(false, "with")) +} + +// End: +// {{end}} +// End keyword is past. +func (t *Tree) endControl() Node { + return t.newEnd(t.expect(itemRightDelim, "end").pos) +} + +// Else: +// {{else}} +// Else keyword is past. +func (t *Tree) elseControl() Node { + // Special case for "else if". + peek := t.peekNonSpace() + if peek.typ == itemIf { + // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". + return t.newElse(peek.pos, t.lex.lineNumber()) + } + return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) +} + +// Template: +// {{template stringValue pipeline}} +// Template keyword is past. 
The name must be something that can evaluate +// to a string. +func (t *Tree) templateControl() Node { + var name string + token := t.nextNonSpace() + switch token.typ { + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + name = s + default: + t.unexpected(token, "template invocation") + } + var pipe *PipeNode + if t.nextNonSpace().typ != itemRightDelim { + t.backup() + // Do not pop variables; they persist until "end". + pipe = t.pipeline("template") + } + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) +} + +// command: +// operand (space operand)* +// space-separated arguments up to a pipeline character or right delimiter. +// we consume the pipe character but leave the right delim to terminate the action. +func (t *Tree) command() *CommandNode { + cmd := t.newCommand(t.peekNonSpace().pos) + for { + t.peekNonSpace() // skip leading spaces. + operand := t.operand() + if operand != nil { + cmd.append(operand) + } + switch token := t.next(); token.typ { + case itemSpace: + continue + case itemError: + t.errorf("%s", token.val) + case itemRightDelim, itemRightParen: + t.backup() + case itemPipe: + default: + t.errorf("unexpected %s in operand; missing space?", token) + } + break + } + if len(cmd.Args) == 0 { + t.errorf("empty command") + } + return cmd +} + +// operand: +// term .Field* +// An operand is a space-separated component of a command, +// a term possibly followed by field accesses. +// A nil return means the next item is not an operand. +func (t *Tree) operand() Node { + node := t.term() + if node == nil { + return nil + } + if t.peek().typ == itemField { + chain := t.newChain(t.peek().pos, node) + for t.peek().typ == itemField { + chain.Add(t.next().val) + } + // Compatibility with original API: If the term is of type NodeField + // or NodeVariable, just put more fields on the original. + // Otherwise, keep the Chain node. + // TODO: Switch to Chains always when we can. 
+ switch node.Type() { + case NodeField: + node = t.newField(chain.Position(), chain.String()) + case NodeVariable: + node = t.newVariable(chain.Position(), chain.String()) + default: + node = chain + } + } + return node +} + +// term: +// literal (number, string, nil, boolean) +// function (identifier) +// . +// .Field +// $ +// '(' pipeline ')' +// A term is a simple "expression". +// A nil return means the next item is not a term. +func (t *Tree) term() Node { + switch token := t.nextNonSpace(); token.typ { + case itemError: + t.errorf("%s", token.val) + case itemIdentifier: + if !t.hasFunction(token.val) { + t.errorf("function %q not defined", token.val) + } + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) + case itemDot: + return t.newDot(token.pos) + case itemNil: + return t.newNil(token.pos) + case itemVariable: + return t.useVar(token.pos, token.val) + case itemField: + return t.newField(token.pos, token.val) + case itemBool: + return t.newBool(token.pos, token.val == "true") + case itemCharConstant, itemComplex, itemNumber: + number, err := t.newNumber(token.pos, token.val, token.typ) + if err != nil { + t.error(err) + } + return number + case itemLeftParen: + pipe := t.pipeline("parenthesized pipeline") + if token := t.next(); token.typ != itemRightParen { + t.errorf("unclosed right paren: unexpected %s", token) + } + return pipe + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + return t.newString(token.pos, token.val, s) + } + t.backup() + return nil +} + +// hasFunction reports if a function name exists in the Tree's maps. +func (t *Tree) hasFunction(name string) bool { + for _, funcMap := range t.funcs { + if funcMap == nil { + continue + } + if funcMap[name] != nil { + return true + } + } + return false +} + +// popVars trims the variable list to the specified length +func (t *Tree) popVars(n int) { + t.vars = t.vars[:n] +} + +// useVar returns a node for a variable reference. 
It errors if the +// variable is not defined. +func (t *Tree) useVar(pos Pos, name string) Node { + v := t.newVariable(pos, name) + for _, varName := range t.vars { + if varName == v.Ident[0] { + return v + } + } + t.errorf("undefined variable %q", v.Ident[0]) + return nil +} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go index 447ed2a..8a658f0 100644 --- a/vendor/github.com/alecthomas/template/template.go +++ b/vendor/github.com/alecthomas/template/template.go @@ -1,218 +1,218 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "fmt" - "reflect" - - "github.com/alecthomas/template/parse" -) - -// common holds the information shared by related templates. -type common struct { - tmpl map[string]*Template - // We use two maps, one for parsing and one for execution. - // This separation makes the API cleaner since it doesn't - // expose reflection to the client. - parseFuncs FuncMap - execFuncs map[string]reflect.Value -} - -// Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated -// as unexported by all other clients. -type Template struct { - name string - *parse.Tree - *common - leftDelim string - rightDelim string -} - -// New allocates a new template with the given name. -func New(name string) *Template { - return &Template{ - name: name, - } -} - -// Name returns the name of the template. -func (t *Template) Name() string { - return t.name -} - -// New allocates a new template associated with the given one and with the same -// delimiters. The association, which is transitive, allows one template to -// invoke another with a {{template}} action. 
-func (t *Template) New(name string) *Template { - t.init() - return &Template{ - name: name, - common: t.common, - leftDelim: t.leftDelim, - rightDelim: t.rightDelim, - } -} - -func (t *Template) init() { - if t.common == nil { - t.common = new(common) - t.tmpl = make(map[string]*Template) - t.parseFuncs = make(FuncMap) - t.execFuncs = make(map[string]reflect.Value) - } -} - -// Clone returns a duplicate of the template, including all associated -// templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add -// templates to the copy but not to the original. Clone can be used to prepare -// common templates and use them with variant definitions for other templates -// by adding the variants after the clone is made. -func (t *Template) Clone() (*Template, error) { - nt := t.copy(nil) - nt.init() - nt.tmpl[t.name] = nt - for k, v := range t.tmpl { - if k == t.name { // Already installed. - continue - } - // The associated templates share nt's common structure. - tmpl := v.copy(nt.common) - nt.tmpl[k] = tmpl - } - for k, v := range t.parseFuncs { - nt.parseFuncs[k] = v - } - for k, v := range t.execFuncs { - nt.execFuncs[k] = v - } - return nt, nil -} - -// copy returns a shallow copy of t, with common set to the argument. -func (t *Template) copy(c *common) *Template { - nt := New(t.name) - nt.Tree = t.Tree - nt.common = c - nt.leftDelim = t.leftDelim - nt.rightDelim = t.rightDelim - return nt -} - -// AddParseTree creates a new template with the name and parse tree -// and associates it with t. -func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { - if t.common != nil && t.tmpl[name] != nil { - return nil, fmt.Errorf("template: redefinition of template %q", name) - } - nt := t.New(name) - nt.Tree = tree - t.tmpl[name] = nt - return nt, nil -} - -// Templates returns a slice of the templates associated with t, including t -// itself. 
-func (t *Template) Templates() []*Template { - if t.common == nil { - return nil - } - // Return a slice so we don't expose the map. - m := make([]*Template, 0, len(t.tmpl)) - for _, v := range t.tmpl { - m = append(m, v) - } - return m -} - -// Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template -// definitions will inherit the settings. An empty delimiter stands for the -// corresponding default: {{ or }}. -// The return value is the template, so calls can be chained. -func (t *Template) Delims(left, right string) *Template { - t.leftDelim = left - t.rightDelim = right - return t -} - -// Funcs adds the elements of the argument map to the template's function map. -// It panics if a value in the map is not a function with appropriate return -// type. However, it is legal to overwrite elements of the map. The return -// value is the template, so calls can be chained. -func (t *Template) Funcs(funcMap FuncMap) *Template { - t.init() - addValueFuncs(t.execFuncs, funcMap) - addFuncs(t.parseFuncs, funcMap) - return t -} - -// Lookup returns the template with the given name that is associated with t, -// or nil if there is no such template. -func (t *Template) Lookup(name string) *Template { - if t.common == nil { - return nil - } - return t.tmpl[name] -} - -// Parse parses a string into a template. Nested template definitions will be -// associated with the top-level template t. Parse may be called multiple times -// to parse definitions of templates to associate with t. It is an error if a -// resulting template is non-empty (contains content other than template -// definitions) and would replace a non-empty template with the same name. -// (In multiple calls to Parse with the same receiver template, only one call -// can contain text other than space, comments, and template definitions.) 
-func (t *Template) Parse(text string) (*Template, error) { - t.init() - trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins) - if err != nil { - return nil, err - } - // Add the newly parsed trees, including the one for t, into our common structure. - for name, tree := range trees { - // If the name we parsed is the name of this template, overwrite this template. - // The associate method checks it's not a redefinition. - tmpl := t - if name != t.name { - tmpl = t.New(name) - } - // Even if t == tmpl, we need to install it in the common.tmpl map. - if replace, err := t.associate(tmpl, tree); err != nil { - return nil, err - } else if replace { - tmpl.Tree = tree - } - tmpl.leftDelim = t.leftDelim - tmpl.rightDelim = t.rightDelim - } - return t, nil -} - -// associate installs the new template into the group of templates associated -// with t. It is an error to reuse a name except to overwrite an empty -// template. The two are already known to share the common structure. -// The boolean return value reports wither to store this tree as t.Tree. -func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) { - if new.common != t.common { - panic("internal error: associate not common") - } - name := new.name - if old := t.tmpl[name]; old != nil { - oldIsEmpty := parse.IsEmptyTree(old.Root) - newIsEmpty := parse.IsEmptyTree(tree.Root) - if newIsEmpty { - // Whether old is empty or not, new is empty; no reason to replace old. - return false, nil - } - if !oldIsEmpty { - return false, fmt.Errorf("template: redefinition of template %q", name) - } - } - t.tmpl[name] = new - return true, nil -} +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "fmt" + "reflect" + + "github.com/alecthomas/template/parse" +) + +// common holds the information shared by related templates. 
+type common struct { + tmpl map[string]*Template + // We use two maps, one for parsing and one for execution. + // This separation makes the API cleaner since it doesn't + // expose reflection to the client. + parseFuncs FuncMap + execFuncs map[string]reflect.Value +} + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + name string + *parse.Tree + *common + leftDelim string + rightDelim string +} + +// New allocates a new template with the given name. +func New(name string) *Template { + return &Template{ + name: name, + } +} + +// Name returns the name of the template. +func (t *Template) Name() string { + return t.name +} + +// New allocates a new template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. +func (t *Template) New(name string) *Template { + t.init() + return &Template{ + name: name, + common: t.common, + leftDelim: t.leftDelim, + rightDelim: t.rightDelim, + } +} + +func (t *Template) init() { + if t.common == nil { + t.common = new(common) + t.tmpl = make(map[string]*Template) + t.parseFuncs = make(FuncMap) + t.execFuncs = make(map[string]reflect.Value) + } +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt := t.copy(nil) + nt.init() + nt.tmpl[t.name] = nt + for k, v := range t.tmpl { + if k == t.name { // Already installed. 
+ continue + } + // The associated templates share nt's common structure. + tmpl := v.copy(nt.common) + nt.tmpl[k] = tmpl + } + for k, v := range t.parseFuncs { + nt.parseFuncs[k] = v + } + for k, v := range t.execFuncs { + nt.execFuncs[k] = v + } + return nt, nil +} + +// copy returns a shallow copy of t, with common set to the argument. +func (t *Template) copy(c *common) *Template { + nt := New(t.name) + nt.Tree = t.Tree + nt.common = c + nt.leftDelim = t.leftDelim + nt.rightDelim = t.rightDelim + return nt +} + +// AddParseTree creates a new template with the name and parse tree +// and associates it with t. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + if t.common != nil && t.tmpl[name] != nil { + return nil, fmt.Errorf("template: redefinition of template %q", name) + } + nt := t.New(name) + nt.Tree = tree + t.tmpl[name] = nt + return nt, nil +} + +// Templates returns a slice of the templates associated with t, including t +// itself. +func (t *Template) Templates() []*Template { + if t.common == nil { + return nil + } + // Return a slice so we don't expose the map. + m := make([]*Template, 0, len(t.tmpl)) + for _, v := range t.tmpl { + m = append(m, v) + } + return m +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.leftDelim = left + t.rightDelim = right + return t +} + +// Funcs adds the elements of the argument map to the template's function map. +// It panics if a value in the map is not a function with appropriate return +// type. However, it is legal to overwrite elements of the map. The return +// value is the template, so calls can be chained. 
+func (t *Template) Funcs(funcMap FuncMap) *Template { + t.init() + addValueFuncs(t.execFuncs, funcMap) + addFuncs(t.parseFuncs, funcMap) + return t +} + +// Lookup returns the template with the given name that is associated with t, +// or nil if there is no such template. +func (t *Template) Lookup(name string) *Template { + if t.common == nil { + return nil + } + return t.tmpl[name] +} + +// Parse parses a string into a template. Nested template definitions will be +// associated with the top-level template t. Parse may be called multiple times +// to parse definitions of templates to associate with t. It is an error if a +// resulting template is non-empty (contains content other than template +// definitions) and would replace a non-empty template with the same name. +// (In multiple calls to Parse with the same receiver template, only one call +// can contain text other than space, comments, and template definitions.) +func (t *Template) Parse(text string) (*Template, error) { + t.init() + trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins) + if err != nil { + return nil, err + } + // Add the newly parsed trees, including the one for t, into our common structure. + for name, tree := range trees { + // If the name we parsed is the name of this template, overwrite this template. + // The associate method checks it's not a redefinition. + tmpl := t + if name != t.name { + tmpl = t.New(name) + } + // Even if t == tmpl, we need to install it in the common.tmpl map. + if replace, err := t.associate(tmpl, tree); err != nil { + return nil, err + } else if replace { + tmpl.Tree = tree + } + tmpl.leftDelim = t.leftDelim + tmpl.rightDelim = t.rightDelim + } + return t, nil +} + +// associate installs the new template into the group of templates associated +// with t. It is an error to reuse a name except to overwrite an empty +// template. The two are already known to share the common structure. 
+// The boolean return value reports wither to store this tree as t.Tree. +func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) { + if new.common != t.common { + panic("internal error: associate not common") + } + name := new.name + if old := t.tmpl[name]; old != nil { + oldIsEmpty := parse.IsEmptyTree(old.Root) + newIsEmpty := parse.IsEmptyTree(tree.Root) + if newIsEmpty { + // Whether old is empty or not, new is empty; no reason to replace old. + return false, nil + } + if !oldIsEmpty { + return false, fmt.Errorf("template: redefinition of template %q", name) + } + } + t.tmpl[name] = new + return true, nil +} diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING index 2993ec0..38ea62a 100644 --- a/vendor/github.com/alecthomas/units/COPYING +++ b/vendor/github.com/alecthomas/units/COPYING @@ -1,19 +1,19 @@ -Copyright (C) 2014 Alec Thomas - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md index bee884e..c597591 100644 --- a/vendor/github.com/alecthomas/units/README.md +++ b/vendor/github.com/alecthomas/units/README.md @@ -1,11 +1,11 @@ -# Units - Helpful unit multipliers and functions for Go - -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. - -It allows for code like this: - -```go -n, err := ParseBase2Bytes("1KB") -// n == 1024 -n = units.Mebibyte * 512 -``` +# Units - Helpful unit multipliers and functions for Go + +The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. 
+ +It allows for code like this: + +```go +n, err := ParseBase2Bytes("1KB") +// n == 1024 +n = units.Mebibyte * 512 +``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go index eaadeb8..be2b2fd 100644 --- a/vendor/github.com/alecthomas/units/bytes.go +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -1,83 +1,83 @@ -package units - -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, -// etc.). -type Base2Bytes int64 - -// Base-2 byte units. -const ( - Kibibyte Base2Bytes = 1024 - KiB = Kibibyte - Mebibyte = Kibibyte * 1024 - MiB = Mebibyte - Gibibyte = Mebibyte * 1024 - GiB = Gibibyte - Tebibyte = Gibibyte * 1024 - TiB = Tebibyte - Pebibyte = Tebibyte * 1024 - PiB = Pebibyte - Exbibyte = Pebibyte * 1024 - EiB = Exbibyte -) - -var ( - bytesUnitMap = MakeUnitMap("iB", "B", 1024) - oldBytesUnitMap = MakeUnitMap("B", "B", 1024) -) - -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB -// and KiB are both 1024. -func ParseBase2Bytes(s string) (Base2Bytes, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, oldBytesUnitMap) - } - return Base2Bytes(n), err -} - -func (b Base2Bytes) String() string { - return ToString(int64(b), 1024, "iB", "B") -} - -var ( - metricBytesUnitMap = MakeUnitMap("B", "B", 1000) -) - -// MetricBytes are SI byte units (1000 bytes in a kilobyte). -type MetricBytes SI - -// SI base-10 byte units. -const ( - Kilobyte MetricBytes = 1000 - KB = Kilobyte - Megabyte = Kilobyte * 1000 - MB = Megabyte - Gigabyte = Megabyte * 1000 - GB = Gigabyte - Terabyte = Gigabyte * 1000 - TB = Terabyte - Petabyte = Terabyte * 1000 - PB = Petabyte - Exabyte = Petabyte * 1000 - EB = Exabyte -) - -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. 
-func ParseMetricBytes(s string) (MetricBytes, error) { - n, err := ParseUnit(s, metricBytesUnitMap) - return MetricBytes(n), err -} - -func (m MetricBytes) String() string { - return ToString(int64(m), 1000, "B", "B") -} - -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, -// respectively. That is, KiB represents 1024 and KB represents 1000. -func ParseStrictBytes(s string) (int64, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, metricBytesUnitMap) - } - return int64(n), err -} +package units + +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, +// etc.). +type Base2Bytes int64 + +// Base-2 byte units. +const ( + Kibibyte Base2Bytes = 1024 + KiB = Kibibyte + Mebibyte = Kibibyte * 1024 + MiB = Mebibyte + Gibibyte = Mebibyte * 1024 + GiB = Gibibyte + Tebibyte = Gibibyte * 1024 + TiB = Tebibyte + Pebibyte = Tebibyte * 1024 + PiB = Pebibyte + Exbibyte = Pebibyte * 1024 + EiB = Exbibyte +) + +var ( + bytesUnitMap = MakeUnitMap("iB", "B", 1024) + oldBytesUnitMap = MakeUnitMap("B", "B", 1024) +) + +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB +// and KiB are both 1024. +func ParseBase2Bytes(s string) (Base2Bytes, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, oldBytesUnitMap) + } + return Base2Bytes(n), err +} + +func (b Base2Bytes) String() string { + return ToString(int64(b), 1024, "iB", "B") +} + +var ( + metricBytesUnitMap = MakeUnitMap("B", "B", 1000) +) + +// MetricBytes are SI byte units (1000 bytes in a kilobyte). +type MetricBytes SI + +// SI base-10 byte units. 
+const ( + Kilobyte MetricBytes = 1000 + KB = Kilobyte + Megabyte = Kilobyte * 1000 + MB = Megabyte + Gigabyte = Megabyte * 1000 + GB = Gigabyte + Terabyte = Gigabyte * 1000 + TB = Terabyte + Petabyte = Terabyte * 1000 + PB = Petabyte + Exabyte = Petabyte * 1000 + EB = Exabyte +) + +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. +func ParseMetricBytes(s string) (MetricBytes, error) { + n, err := ParseUnit(s, metricBytesUnitMap) + return MetricBytes(n), err +} + +func (m MetricBytes) String() string { + return ToString(int64(m), 1000, "B", "B") +} + +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, +// respectively. That is, KiB represents 1024 and KB represents 1000. +func ParseStrictBytes(s string) (int64, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, metricBytesUnitMap) + } + return int64(n), err +} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go index 156ae38..81255ff 100644 --- a/vendor/github.com/alecthomas/units/doc.go +++ b/vendor/github.com/alecthomas/units/doc.go @@ -1,13 +1,13 @@ -// Package units provides helpful unit multipliers and functions for Go. -// -// The goal of this package is to have functionality similar to the time [1] package. -// -// -// [1] http://golang.org/pkg/time/ -// -// It allows for code like this: -// -// n, err := ParseBase2Bytes("1KB") -// // n == 1024 -// n = units.Mebibyte * 512 -package units +// Package units provides helpful unit multipliers and functions for Go. +// +// The goal of this package is to have functionality similar to the time [1] package. 
+// +// +// [1] http://golang.org/pkg/time/ +// +// It allows for code like this: +// +// n, err := ParseBase2Bytes("1KB") +// // n == 1024 +// n = units.Mebibyte * 512 +package units diff --git a/vendor/github.com/alecthomas/units/go.mod b/vendor/github.com/alecthomas/units/go.mod index f572173..c8d3c0b 100644 --- a/vendor/github.com/alecthomas/units/go.mod +++ b/vendor/github.com/alecthomas/units/go.mod @@ -1 +1 @@ -module github.com/alecthomas/units +module github.com/alecthomas/units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go index 8234a9d..afda49a 100644 --- a/vendor/github.com/alecthomas/units/si.go +++ b/vendor/github.com/alecthomas/units/si.go @@ -1,26 +1,26 @@ -package units - -// SI units. -type SI int64 - -// SI unit multiples. -const ( - Kilo SI = 1000 - Mega = Kilo * 1000 - Giga = Mega * 1000 - Tera = Giga * 1000 - Peta = Tera * 1000 - Exa = Peta * 1000 -) - -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { - return map[string]float64{ - shortSuffix: 1, - "K" + suffix: float64(scale), - "M" + suffix: float64(scale * scale), - "G" + suffix: float64(scale * scale * scale), - "T" + suffix: float64(scale * scale * scale * scale), - "P" + suffix: float64(scale * scale * scale * scale * scale), - "E" + suffix: float64(scale * scale * scale * scale * scale * scale), - } -} +package units + +// SI units. +type SI int64 + +// SI unit multiples. 
+const ( + Kilo SI = 1000 + Mega = Kilo * 1000 + Giga = Mega * 1000 + Tera = Giga * 1000 + Peta = Tera * 1000 + Exa = Peta * 1000 +) + +func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { + return map[string]float64{ + shortSuffix: 1, + "K" + suffix: float64(scale), + "M" + suffix: float64(scale * scale), + "G" + suffix: float64(scale * scale * scale), + "T" + suffix: float64(scale * scale * scale * scale), + "P" + suffix: float64(scale * scale * scale * scale * scale), + "E" + suffix: float64(scale * scale * scale * scale * scale * scale), + } +} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go index 6527e92..5ff0644 100644 --- a/vendor/github.com/alecthomas/units/util.go +++ b/vendor/github.com/alecthomas/units/util.go @@ -1,138 +1,138 @@ -package units - -import ( - "errors" - "fmt" - "strings" -) - -var ( - siUnits = []string{"", "K", "M", "G", "T", "P", "E"} -) - -func ToString(n int64, scale int64, suffix, baseSuffix string) string { - mn := len(siUnits) - out := make([]string, mn) - for i, m := range siUnits { - if n%scale != 0 || i == 0 && n == 0 { - s := suffix - if i == 0 { - s = baseSuffix - } - out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) - } - n /= scale - if n == 0 { - break - } - } - return strings.Join(out, "") -} - -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed - -// leadingInt consumes the leading [0-9]* from s. 
-func leadingInt(s string) (x int64, rem string, err error) { - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - break - } - if x >= (1<<63-10)/10 { - // overflow - return 0, "", errLeadingInt - } - x = x*10 + int64(c) - '0' - } - return x, s[i:], nil -} - -func ParseUnit(s string, unitMap map[string]float64) (int64, error) { - // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ - orig := s - f := float64(0) - neg := false - - // Consume [-+]? - if s != "" { - c := s[0] - if c == '-' || c == '+' { - neg = c == '-' - s = s[1:] - } - } - // Special case: if all that is left is "0", this is zero. - if s == "0" { - return 0, nil - } - if s == "" { - return 0, errors.New("units: invalid " + orig) - } - for s != "" { - g := float64(0) // this element of the sequence - - var x int64 - var err error - - // The next character must be [0-9.] - if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { - return 0, errors.New("units: invalid " + orig) - } - // Consume [0-9]* - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - g = float64(x) - pre := pl != len(s) // whether we consumed anything before a period - - // Consume (\.[0-9]*)? - post := false - if s != "" && s[0] == '.' { - s = s[1:] - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - scale := 1.0 - for n := pl - len(s); n > 0; n-- { - scale *= 10 - } - g += float64(x) / scale - post = pl != len(s) - } - if !pre && !post { - // no digits (e.g. ".s" or "-.s") - return 0, errors.New("units: invalid " + orig) - } - - // Consume unit. - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c == '.' 
|| ('0' <= c && c <= '9') { - break - } - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, errors.New("units: unknown unit " + u + " in " + orig) - } - - f += g * unit - } - - if neg { - f = -f - } - if f < float64(-1<<63) || f > float64(1<<63-1) { - return 0, errors.New("units: overflow parsing unit") - } - return int64(f), nil -} +package units + +import ( + "errors" + "fmt" + "strings" +) + +var ( + siUnits = []string{"", "K", "M", "G", "T", "P", "E"} +) + +func ToString(n int64, scale int64, suffix, baseSuffix string) string { + mn := len(siUnits) + out := make([]string, mn) + for i, m := range siUnits { + if n%scale != 0 || i == 0 && n == 0 { + s := suffix + if i == 0 { + s = baseSuffix + } + out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) + } + n /= scale + if n == 0 { + break + } + } + return strings.Join(out, "") +} + +// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 +var errLeadingInt = errors.New("units: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. +func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x >= (1<<63-10)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + } + return x, s[i:], nil +} + +func ParseUnit(s string, unitMap map[string]float64) (int64, error) { + // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ + orig := s + f := float64(0) + neg := false + + // Consume [-+]? + if s != "" { + c := s[0] + if c == '-' || c == '+' { + neg = c == '-' + s = s[1:] + } + } + // Special case: if all that is left is "0", this is zero. + if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("units: invalid " + orig) + } + for s != "" { + g := float64(0) // this element of the sequence + + var x int64 + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' 
|| ('0' <= s[0] && s[0] <= '9')) { + return 0, errors.New("units: invalid " + orig) + } + // Consume [0-9]* + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + g = float64(x) + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' { + s = s[1:] + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + scale := 1.0 + for n := pl - len(s); n > 0; n-- { + scale *= 10 + } + g += float64(x) / scale + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. ".s" or "-.s") + return 0, errors.New("units: invalid " + orig) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || ('0' <= c && c <= '9') { + break + } + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("units: unknown unit " + u + " in " + orig) + } + + f += g * unit + } + + if neg { + f = -f + } + if f < float64(-1<<63) || f > float64(1<<63-1) { + return 0, errors.New("units: overflow parsing unit") + } + return int64(f), nil +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE index 339177b..f1f6712 100644 --- a/vendor/github.com/beorn7/perks/LICENSE +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -1,20 +1,20 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial 
portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt index 1602287..dbbe5d7 100644 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -1,2388 +1,2388 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 
-13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 
-2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 
-4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 
-2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 
+4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 
+3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 
+9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index d7d14f8..9405c42 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ 
b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -1,316 +1,316 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. 
- targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. 
This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). 
+// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. 
+func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. 
+ var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. 
+ copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE index 9d83342..1abea74 100644 --- a/vendor/github.com/go-kit/kit/LICENSE +++ b/vendor/github.com/go-kit/kit/LICENSE @@ -1,22 +1,22 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- +The MIT License (MIT) + +Copyright (c) 2015 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md index a201a3d..8edd4e7 100644 --- a/vendor/github.com/go-kit/kit/log/README.md +++ b/vendor/github.com/go-kit/kit/log/README.md @@ -1,151 +1,151 @@ -# package log - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. 
Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. 
- -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) -logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Levels - -Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level). - -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. - -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like is the product of much iteration -and evaluation. For more details on the evolution of the Logger interface, -see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), -a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see -[#63](https://github.com/go-kit/kit/issues/63), -[#76](https://github.com/go-kit/kit/pull/76), -[#131](https://github.com/go-kit/kit/issues/131), -[#157](https://github.com/go-kit/kit/pull/157), -[#164](https://github.com/go-kit/kit/issues/164), and -[#252](https://github.com/go-kit/kit/pull/252) -to review historical conversations about package log and the Logger interface. - -Value-add packages and suggestions, -like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), -are of course welcome. 
Good proposals should - -- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. - -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log +# package log + +`package log` provides a minimal interface for structured logging in services. +It may be wrapped to encode conventions, enforce type-safety, provide leveled +logging, and so on. It can be used for both typical application log events, +and log-structured data streams. + +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are +_data_, and warrant some level of schematic rigor. Using a stricter, +key/value-oriented message format for our logs, containing contextual and +semantic information, makes it much easier to get insight into the +operational activity of the systems we build. Consequently, `package log` is +of the strong belief that "[the benefits of structured logging outweigh the +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier +than you'd expect. 
+ +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" answer=42 +``` + +### Contextual Loggers + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.With(logger, "component", "worker")).Run() + NewSlacker(log.With(logger, "component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, you need to pipe all of your logging through the +stdlib log package, you can redirect Go kit logger to the stdlib logger. 
+ +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Levels + +Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level). + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like is the product of much iteration +and evaluation. For more details on the evolution of the Logger interface, +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), +a talk by [Chris Hines](https://github.com/ChrisHines). +Also, please see +[#63](https://github.com/go-kit/kit/issues/63), +[#76](https://github.com/go-kit/kit/pull/76), +[#131](https://github.com/go-kit/kit/issues/131), +[#157](https://github.com/go-kit/kit/pull/157), +[#164](https://github.com/go-kit/kit/issues/164), and +[#252](https://github.com/go-kit/kit/pull/252) +to review historical conversations about package log and the Logger interface. + +Value-add packages and suggestions, +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), +are of course welcome. 
Good proposals should + +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. + +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go index 918c0af..0faf274 100644 --- a/vendor/github.com/go-kit/kit/log/doc.go +++ b/vendor/github.com/go-kit/kit/log/doc.go @@ -1,116 +1,116 @@ -// Package log provides a structured logger. -// -// Structured logging produces logs easily consumed later by humans or -// machines. Humans might be interested in debugging errors, or tracing -// specific requests. Machines might be interested in counting interesting -// events, or aggregating information for off-line processing. In both cases, -// it is important that the log messages are structured and actionable. -// Package log is designed to encourage both of these best practices. -// -// Basic Usage -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. -// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... 
-// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With and WithPrefix add context to -// a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. (See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. 
-// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. -// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. 
-// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. +// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. 
+// +// Contextual Loggers +// +// A contextual logger stores keyvals that it includes in all log events. +// Building appropriate contextual loggers reduces repetition and aids +// consistency in the resulting log output. With and WithPrefix add context to +// a logger. We can use With to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.With(logger, "taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... +// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. Passing the contextual logger to taskHelper +// enables each log event created by taskHelper to include the task.ID even +// though taskHelper does not have access to that value. Using contextual +// loggers this way simplifies producing log output that enables tracing the +// life cycle of individual tasks. (See the Contextual example for the full +// code of the above snippet.) +// +// Dynamic Contextual Values +// +// A Valuer function stored in a contextual logger generates a new value each +// time an event is logged. The Valuer example demonstrates how this feature +// works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. A common logger initialization sequence that ensures all log +// entries contain a timestamp and source location looks like this: +// +// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) +// +// Concurrent Safety +// +// Applications with multiple goroutines want each log event written to the +// same logger to remain separate from other log events. 
Package log provides +// two simple solutions for concurrent safe logging. +// +// NewSyncWriter wraps an io.Writer and serializes each call to its Write +// method. Using a SyncWriter has the benefit that the smallest practical +// portion of the logging logic is performed within a mutex, but it requires +// the formatting Logger to make only one call to Write per log event. +// +// NewSyncLogger wraps any Logger and serializes each call to its Log method. +// Using a SyncLogger has the benefit that it guarantees each log event is +// handled atomically within the wrapped logger, but it typically serializes +// both the formatting and output logic. Use a SyncLogger if the formatting +// logger may perform multiple writes per log event. +// +// Error Handling +// +// This package relies on the practice of wrapping or decorating loggers with +// other loggers to provide composable pieces of functionality. It also means +// that Logger.Log must return an error because some +// implementations—especially those that output log data to an io.Writer—may +// encounter errors that cannot be handled locally. This in turn means that +// Loggers that wrap other loggers should return errors from the wrapped +// logger up the stack. +// +// Fortunately, the decorator pattern also provides a way to avoid the +// necessity to check for errors every time an application calls Logger.Log. +// An application required to panic whenever its Logger encounters +// an error could initialize its logger as follows. 
+// +// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger := log.LoggerFunc(func(keyvals ...interface{}) error { +// if err := fmtlogger.Log(keyvals...); err != nil { +// panic(err) +// } +// return nil +// }) +package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go index 66094b4..74581c0 100644 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ b/vendor/github.com/go-kit/kit/log/json_logger.go @@ -1,89 +1,89 @@ -package log - -import ( - "encoding" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type jsonLogger struct { - io.Writer -} - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. -func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - return json.NewEncoder(l.Writer).Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. 
- switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - panic(panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - panic(panicVal) - } - } - }() - s = err.Error() - return -} +package log + +import ( + "encoding" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type jsonLogger struct { + io.Writer +} + +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. +func NewJSONLogger(w io.Writer) Logger { + return &jsonLogger{w} +} + +func (l *jsonLogger) Log(keyvals ...interface{}) error { + n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd + m := make(map[string]interface{}, n) + for i := 0; i < len(keyvals); i += 2 { + k := keyvals[i] + var v interface{} = ErrMissingValue + if i+1 < len(keyvals) { + v = keyvals[i+1] + } + merge(m, k, v) + } + return json.NewEncoder(l.Writer).Encode(m) +} + +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + // We want json.Marshaler and encoding.TextMarshaller to take priority over + // err.Error() and v.String(). 
But json.Marshall (called later) does that by + // default so we force a no-op if it's one of those 2 case. + switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + panic(panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go index 505d307..ee623b7 100644 --- a/vendor/github.com/go-kit/kit/log/level/doc.go +++ b/vendor/github.com/go-kit/kit/log/level/doc.go @@ -1,22 +1,22 @@ -// Package level implements leveled logging on top of Go kit's log package. To -// use the level package, create a logger as per normal in your func main, and -// wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. -// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. 
-package level +// Package level implements leveled logging on top of Go kit's log package. To +// use the level package, create a logger as per normal in your func main, and +// wrap it with level.NewFilter. +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.AllowInfo()) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error +// helper methods to emit leveled log events. +// +// logger.Log("foo", "bar") // as normal, no level +// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) +// if value > 100 { +// level.Error(logger).Log("value", value) +// } +// +// NewFilter allows precise control over what happens when a log event is +// emitted without a level key, or if a squelched level is used. Check the +// Option functions for details. +package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go index fceafc4..cc0fa29 100644 --- a/vendor/github.com/go-kit/kit/log/level/level.go +++ b/vendor/github.com/go-kit/kit/log/level/level.go @@ -1,205 +1,205 @@ -package level - -import "github.com/go-kit/kit/log" - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), ErrorValue()) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), WarnValue()) -} - -// Info returns a logger that includes a Key/InfoValue pair. -func Info(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), InfoValue()) -} - -// Debug returns a logger that includes a Key/DebugValue pair. -func Debug(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), DebugValue()) -} - -// NewFilter wraps next and implements level filtering. 
See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - l := &logger{ - next: next, - } - for _, option := range options { - option(l) - } - return l -} - -type logger struct { - next log.Logger - allowed level - squelchNoLevel bool - errNotAllowed error - errNoLevel error -} - -func (l *logger) Log(keyvals ...interface{}) error { - var hasLevel, levelAllowed bool - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(*levelValue); ok { - hasLevel = true - levelAllowed = l.allowed&v.level != 0 - break - } - } - if !hasLevel && l.squelchNoLevel { - return l.errNoLevel - } - if hasLevel && !levelAllowed { - return l.errNotAllowed - } - return l.next.Log(keyvals...) -} - -// Option sets a parameter for the leveled logger. -type Option func(*logger) - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return AllowDebug() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return allowed(levelError | levelWarn | levelInfo | levelDebug) -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return allowed(levelError | levelWarn | levelInfo) -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return allowed(levelError | levelWarn) -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return allowed(levelError) -} - -// AllowNone allows no leveled log events to pass. 
-func AllowNone() Option { - return allowed(0) -} - -func allowed(allowed level) Option { - return func(l *logger) { l.allowed = allowed } -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return func(l *logger) { l.errNotAllowed = err } -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return func(l *logger) { l.squelchNoLevel = squelch } -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return func(l *logger) { l.errNoLevel = err } -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. -func NewInjector(next log.Logger, level Value) log.Logger { - return &injector{ - next: next, - level: level, - } -} - -type injector struct { - next log.Logger - level interface{} -} - -func (l *injector) Log(keyvals ...interface{}) error { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(*levelValue); ok { - return l.next.Log(keyvals...) - } - } - kvs := make([]interface{}, len(keyvals)+2) - kvs[0], kvs[1] = key, l.level - copy(kvs[2:], keyvals) - return l.next.Log(kvs...) -} - -// Value is the interface that each of the canonical level values implement. 
-// It contains unexported methods that prevent types from other packages from -// implementing it and guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value interface { - String() string - levelVal() -} - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return key } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return errorValue } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return warnValue } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return infoValue } - -// DebugValue returns the unique value added to log events by Warn. -func DebugValue() Value { return debugValue } - -var ( - // key is of type interface{} so that it allocates once during package - // initialization and avoids allocating every time the value is added to a - // []interface{} later. - key interface{} = "level" - - errorValue = &levelValue{level: levelError, name: "error"} - warnValue = &levelValue{level: levelWarn, name: "warn"} - infoValue = &levelValue{level: levelInfo, name: "info"} - debugValue = &levelValue{level: levelDebug, name: "debug"} -) - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelWarn - levelError -) - -type levelValue struct { - name string - level -} - -func (v *levelValue) String() string { return v.name } -func (v *levelValue) levelVal() {} +package level + +import "github.com/go-kit/kit/log" + +// Error returns a logger that includes a Key/ErrorValue pair. +func Error(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), ErrorValue()) +} + +// Warn returns a logger that includes a Key/WarnValue pair. 
+func Warn(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), WarnValue()) +} + +// Info returns a logger that includes a Key/InfoValue pair. +func Info(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), InfoValue()) +} + +// Debug returns a logger that includes a Key/DebugValue pair. +func Debug(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), DebugValue()) +} + +// NewFilter wraps next and implements level filtering. See the commentary on +// the Option functions for a detailed description of how to configure levels. +// If no options are provided, all leveled log events created with Debug, +// Info, Warn or Error helper methods are squelched and non-leveled log +// events are passed to next unmodified. +func NewFilter(next log.Logger, options ...Option) log.Logger { + l := &logger{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type logger struct { + next log.Logger + allowed level + squelchNoLevel bool + errNotAllowed error + errNoLevel error +} + +func (l *logger) Log(keyvals ...interface{}) error { + var hasLevel, levelAllowed bool + for i := 1; i < len(keyvals); i += 2 { + if v, ok := keyvals[i].(*levelValue); ok { + hasLevel = true + levelAllowed = l.allowed&v.level != 0 + break + } + } + if !hasLevel && l.squelchNoLevel { + return l.errNoLevel + } + if hasLevel && !levelAllowed { + return l.errNotAllowed + } + return l.next.Log(keyvals...) +} + +// Option sets a parameter for the leveled logger. +type Option func(*logger) + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. 
+func AllowInfo() Option { + return allowed(levelError | levelWarn | levelInfo) +} + +// AllowWarn allows error and warn level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *logger) { l.allowed = allowed } +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *logger) { l.errNotAllowed = err } +} + +// SquelchNoLevel instructs Log to squelch log events with no level, so that +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set +// to true and a log event is squelched in this way, the error value +// configured with ErrNoLevel is returned to the caller. +func SquelchNoLevel(squelch bool) Option { + return func(l *logger) { l.squelchNoLevel = squelch } +} + +// ErrNoLevel sets the error to return from Log when it squelches a log event +// with no level. By default, ErrNoLevel is nil; in this case the log event is +// squelched with no error. +func ErrNoLevel(err error) Option { + return func(l *logger) { l.errNoLevel = err } +} + +// NewInjector wraps next and returns a logger that adds a Key/level pair to +// the beginning of log events that don't already contain a level. In effect, +// this gives a default level to logs without a level. 
+func NewInjector(next log.Logger, level Value) log.Logger { + return &injector{ + next: next, + level: level, + } +} + +type injector struct { + next log.Logger + level interface{} +} + +func (l *injector) Log(keyvals ...interface{}) error { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(*levelValue); ok { + return l.next.Log(keyvals...) + } + } + kvs := make([]interface{}, len(keyvals)+2) + kvs[0], kvs[1] = key, l.level + copy(kvs[2:], keyvals) + return l.next.Log(kvs...) +} + +// Value is the interface that each of the canonical level values implement. +// It contains unexported methods that prevent types from other packages from +// implementing it and guaranteeing that NewFilter can distinguish the levels +// defined in this package from all other values. +type Value interface { + String() string + levelVal() +} + +// Key returns the unique key added to log events by the loggers in this +// package. +func Key() interface{} { return key } + +// ErrorValue returns the unique value added to log events by Error. +func ErrorValue() Value { return errorValue } + +// WarnValue returns the unique value added to log events by Warn. +func WarnValue() Value { return warnValue } + +// InfoValue returns the unique value added to log events by Info. +func InfoValue() Value { return infoValue } + +// DebugValue returns the unique value added to log events by Warn. +func DebugValue() Value { return debugValue } + +var ( + // key is of type interface{} so that it allocates once during package + // initialization and avoids allocating every time the value is added to a + // []interface{} later. 
+ key interface{} = "level" + + errorValue = &levelValue{level: levelError, name: "error"} + warnValue = &levelValue{level: levelWarn, name: "warn"} + infoValue = &levelValue{level: levelInfo, name: "info"} + debugValue = &levelValue{level: levelDebug, name: "debug"} +) + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelWarn + levelError +) + +type levelValue struct { + name string + level +} + +func (v *levelValue) String() string { return v.name } +func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go index 66a9e2f..ffd433b 100644 --- a/vendor/github.com/go-kit/kit/log/log.go +++ b/vendor/github.com/go-kit/kit/log/log.go @@ -1,135 +1,135 @@ -package log - -import "errors" - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = errors.New("(MISSING)") - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With or -// WithPrefix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func With(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - kvs := append(l.keyvals, keyvals...) 
- if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With or WithPrefix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) - return &context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// context is the Logger implementation returned by With and WithPrefix. It -// wraps a Logger and holds keyvals that it includes in all log events. Its -// Log method calls bindValues to generate values for each Valuer in the -// context keyvals. -// -// A context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. 
This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Two implementation details provide the needed stack depth consistency. -// -// 1. newContext avoids introducing an additional layer when asked to -// wrap another context. -// 2. With and WithPrefix avoid introducing an additional layer by -// returning a newly constructed context with a merged keyvals rather -// than simply wrapping the existing context. -type context struct { - logger Logger - keyvals []interface{} - hasValuer bool -} - -func newContext(logger Logger) *context { - if c, ok := logger.(*context); ok { - return c - } - return &context{logger: logger} -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:len(l.keyvals)]) - } - return l.logger.Log(kvs...) -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) -} +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. 
Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies or retains any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Log. If logger is also a contextual logger created by With or +// WithPrefix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func With(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// WithPrefix returns a new contextual logger with keyvals prepended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With or WithPrefix, keyvals is prepended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. 
+func WithPrefix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) + return &context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// context is the Logger implementation returned by With and WithPrefix. It +// wraps a Logger and holds keyvals that it includes in all log events. Its +// Log method calls bindValues to generate values for each Valuer in the +// context keyvals. +// +// A context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Two implementation details provide the needed stack depth consistency. +// +// 1. newContext avoids introducing an additional layer when asked to +// wrap another context. +// 2. With and WithPrefix avoid introducing an additional layer by +// returning a newly constructed context with a merged keyvals rather +// than simply wrapping the existing context. 
+type context struct { + logger Logger + keyvals []interface{} + hasValuer bool +} + +func newContext(logger Logger) *context { + if c, ok := logger.(*context); ok { + return c + } + return &context{logger: logger} +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:len(l.keyvals)]) + } + return l.logger.Log(kvs...) +} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) 
+} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go index a003052..dbf0e9d 100644 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go @@ -1,62 +1,62 @@ -package log - -import ( - "bytes" - "io" - "sync" - - "github.com/go-logfmt/logfmt" -) - -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. 
- if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. + if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go index 1047d62..d26bf41 100644 --- a/vendor/github.com/go-kit/kit/log/nop_logger.go +++ b/vendor/github.com/go-kit/kit/log/nop_logger.go @@ -1,8 +1,8 @@ -package log - -type nopLogger struct{} - -// NewNopLogger returns a logger that doesn't do anything. 
-func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go index ff96b5d..314d12a 100644 --- a/vendor/github.com/go-kit/kit/log/stdlib.go +++ b/vendor/github.com/go-kit/kit/log/stdlib.go @@ -1,116 +1,116 @@ -package log - -import ( - "io" - "log" - "regexp" - "strings" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. -func (w StdlibWriter) Write(p []byte) (int, error) { - log.Print(strings.TrimSpace(string(p))) - return len(p), nil -} - -// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib -// logger's SetOutput. It will extract date/timestamps, filenames, and -// messages, and place them under relevant keys. -type StdlibAdapter struct { - Logger - timestampKey string - fileKey string - messageKey string -} - -// StdlibAdapterOption sets a parameter for the StdlibAdapter. -type StdlibAdapterOption func(*StdlibAdapter) - -// TimestampKey sets the key for the timestamp field. By default, it's "ts". -func TimestampKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.timestampKey = key } -} - -// FileKey sets the key for the file and line field. By default, it's "caller". 
-func FileKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.fileKey = key } -} - -// MessageKey sets the key for the actual log message. By default, it's "msg". -func MessageKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.messageKey = key } -} - -// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed -// logger. It's designed to be passed to log.SetOutput. -func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - a := StdlibAdapter{ - Logger: logger, - timestampKey: "ts", - fileKey: "caller", - messageKey: "msg", - } - for _, option := range options { - option(&a) - } - return a -} - -func (a StdlibAdapter) Write(p []byte) (int, error) { - result := subexps(p) - keyvals := []interface{}{} - var timestamp string - if date, ok := result["date"]; ok && date != "" { - timestamp = date - } - if time, ok := result["time"]; ok && time != "" { - if timestamp != "" { - timestamp += " " - } - timestamp += time - } - if timestamp != "" { - keyvals = append(keyvals, a.timestampKey, timestamp) - } - if file, ok := result["file"]; ok && file != "" { - keyvals = append(keyvals, a.fileKey, file) - } - if msg, ok := result["msg"]; ok { - keyvals = append(keyvals, a.messageKey, msg) - } - if err := a.Logger.Log(keyvals...); err != nil { - return 0, err - } - return len(p), nil -} - -const ( - logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` - logRegexpTime = `(?P