structured agent logging

Brad Rydzewski 2017-08-03 15:36:22 -04:00
parent 03bdc42778
commit 9a5c2d5481
21 changed files with 2839 additions and 25 deletions


@@ -80,7 +80,7 @@ pipeline:
     image: plugins/docker
     repo: drone/drone
     secrets: [ docker_username, docker_password ]
-    tag: [ 0.8, 0.8.0, 0.8.0-rc.3 ]
+    tag: [ 0.8, 0.8.0 ]
     when:
       event: tag
@@ -89,7 +89,7 @@ pipeline:
     repo: drone/agent
     dockerfile: Dockerfile.agent
     secrets: [ docker_username, docker_password ]
-    tag: [ 0.8, 0.8.0, 0.8.0-rc.3 ]
+    tag: [ 0.8, 0.8.0 ]
     when:
       event: tag


@@ -5,7 +5,6 @@ import (
     "encoding/json"
     "io"
     "io/ioutil"
-    "log"
     "os"
     "strconv"
     "sync"
@@ -21,6 +20,8 @@ import (
     "github.com/cncd/pipeline/pipeline/multipart"
     "github.com/cncd/pipeline/pipeline/rpc"
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
     "github.com/tevino/abool"
     "github.com/urfave/cli"
     oldcontext "golang.org/x/net/context"
@@ -38,6 +39,12 @@ func loop(c *cli.Context) error {
         hostname, _ = os.Hostname()
     }
+    if c.BoolT("debug") {
+        zerolog.SetGlobalLevel(zerolog.DebugLevel)
+    } else {
+        zerolog.SetGlobalLevel(zerolog.WarnLevel)
+    }
     // TODO pass version information to grpc server
     // TODO authenticate to grpc server
@@ -81,7 +88,7 @@ func loop(c *cli.Context) error {
                 return
             }
             if err := run(ctx, client, filter); err != nil {
-                log.Printf("build runner encountered error: exiting: %s", err)
+                log.Error().Err(err).Msg("pipeline done with error")
                 return
             }
         }
@@ -98,7 +105,8 @@ const (
 )
 func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
-    log.Println("pipeline: request next execution")
+    log.Debug().
+        Msg("request next execution")
     meta, _ := metadata.FromOutgoingContext(ctx)
     ctxmeta := metadata.NewOutgoingContext(context.Background(), meta)
@@ -111,11 +119,23 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
     if work == nil {
         return nil
     }
-    log.Printf("pipeline: received next execution: %s", work.ID)
+    logger := log.With().
+        Str("repo", extractRepositoryName(work.Config)).
+        Str("build", extractBuildNumber(work.Config)).
+        Str("id", work.ID).
+        Logger()
+    logger.Debug().
+        Msg("received execution")
     // new docker engine
     engine, err := docker.NewEnv()
     if err != nil {
+        logger.Error().
+            Err(err).
+            Msg("cannot create docker client")
         return err
     }
@@ -129,12 +149,19 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
     cancelled := abool.New()
     go func() {
+        logger.Debug().
+            Msg("listen for cancel signal")
         if werr := client.Wait(ctx, work.ID); werr != nil {
             cancelled.SetTo(true)
-            log.Printf("pipeline: cancel signal received: %s: %s", work.ID, werr)
+            logger.Warn().
+                Err(werr).
+                Msg("cancel signal received")
             cancel()
         } else {
-            log.Printf("pipeline: cancel channel closed: %s", work.ID)
+            logger.Debug().
+                Msg("stop listening for cancel signal")
         }
     }()
@@ -142,10 +169,14 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
     for {
         select {
         case <-ctx.Done():
-            log.Printf("pipeline: cancel ping loop: %s", work.ID)
+            logger.Debug().
+                Msg("pipeline done")
             return
         case <-time.After(time.Minute):
-            log.Printf("pipeline: ping queue: %s", work.ID)
+            logger.Debug().
+                Msg("pipeline lease renewed")
             client.Extend(ctx, work.ID)
         }
     }
@@ -153,13 +184,22 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
     state := rpc.State{}
     state.Started = time.Now().Unix()
     err = client.Init(ctxmeta, work.ID, state)
     if err != nil {
-        log.Printf("pipeline: error signaling pipeline init: %s: %s", work.ID, err)
+        logger.Error().
+            Err(err).
+            Msg("pipeline initialization failed")
     }
     var uploads sync.WaitGroup
     defaultLogger := pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
+        loglogger := logger.With().
+            Str("image", proc.Image).
+            Str("stage", proc.Alias).
+            Logger()
         part, rerr := rc.NextPart()
         if rerr != nil {
             return rerr
@@ -173,10 +213,14 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
             }
         }
+        loglogger.Debug().Msg("log stream opened")
         limitedPart := io.LimitReader(part, maxLogsUpload)
         logstream := rpc.NewLineWriter(client, work.ID, proc.Alias, secrets...)
         io.Copy(logstream, limitedPart)
+        loglogger.Debug().Msg("log stream copied")
         file := &rpc.File{}
         file.Mime = "application/json+logs"
         file.Proc = proc.Alias
@@ -185,14 +229,22 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
         file.Size = len(file.Data)
         file.Time = time.Now().Unix()
+        loglogger.Debug().
+            Msg("log stream uploading")
         if serr := client.Upload(ctxmeta, work.ID, file); serr != nil {
-            log.Printf("pipeline: cannot upload logs: %s: %s: %s", work.ID, file.Mime, serr)
-        } else {
-            log.Printf("pipeline: finish uploading logs: %s: step %s: %s", file.Mime, work.ID, proc.Alias)
+            loglogger.Error().
+                Err(serr).
+                Msg("log stream upload error")
         }
+        loglogger.Debug().
+            Msg("log stream upload complete")
         defer func() {
-            log.Printf("pipeline: finish uploading logs: %s: step %s", work.ID, proc.Alias)
+            loglogger.Debug().
+                Msg("log stream closed")
             uploads.Done()
         }()
@@ -215,15 +267,34 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
             file.Meta[key] = value[0]
         }
+        loglogger.Debug().
+            Str("file", file.Name).
+            Str("mime", file.Mime).
+            Msg("file stream uploading")
         if serr := client.Upload(ctxmeta, work.ID, file); serr != nil {
-            log.Printf("pipeline: cannot upload artifact: %s: %s: %s", work.ID, file.Mime, serr)
-        } else {
-            log.Printf("pipeline: finish uploading artifact: %s: step %s: %s", file.Mime, work.ID, proc.Alias)
+            loglogger.Error().
+                Err(serr).
+                Str("file", file.Name).
+                Str("mime", file.Mime).
+                Msg("file stream upload error")
         }
+        loglogger.Debug().
+            Str("file", file.Name).
+            Str("mime", file.Mime).
+            Msg("file stream upload complete")
         return nil
     })
     defaultTracer := pipeline.TraceFunc(func(state *pipeline.State) error {
+        proclogger := logger.With().
+            Str("image", state.Pipeline.Step.Image).
+            Str("stage", state.Pipeline.Step.Alias).
+            Int("exit_code", state.Process.ExitCode).
+            Bool("exited", state.Process.Exited).
+            Logger()
         procState := rpc.State{
             Proc: state.Pipeline.Step.Alias,
             Exited: state.Process.Exited,
@@ -232,9 +303,17 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
             Finished: time.Now().Unix(),
         }
         defer func() {
+            proclogger.Debug().
+                Msg("update step status")
             if uerr := client.Update(ctxmeta, work.ID, procState); uerr != nil {
-                log.Printf("Pipeine: error updating pipeline step status: %s: %s: %s", work.ID, procState.Proc, uerr)
+                proclogger.Debug().
+                    Err(uerr).
+                    Msg("update step status error")
             }
+            proclogger.Debug().
+                Msg("update step status complete")
         }()
         if state.Process.Exited {
             return nil
@@ -287,17 +366,31 @@ func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
         }
     }
-    log.Printf("pipeline: execution complete: %s", work.ID)
+    logger.Debug().
+        Str("error", state.Error).
+        Int("exit_code", state.ExitCode).
+        Msg("pipeline complete")
+    logger.Debug().
+        Msg("uploading logs")
     uploads.Wait()
-    log.Printf("pipeline: logging complete: %s", work.ID)
+    logger.Debug().
+        Msg("uploading logs complete")
+    logger.Debug().
+        Str("error", state.Error).
+        Int("exit_code", state.ExitCode).
+        Msg("updating pipeline status")
     err = client.Done(ctxmeta, work.ID, state)
     if err != nil {
-        log.Printf("Pipeine: error signaling pipeline done: %s: %s", work.ID, err)
+        logger.Error().Err(err).
+            Msg("updating pipeline status failed")
     } else {
-        log.Printf("pipeline: done: %s", work.ID)
+        logger.Debug().
+            Msg("updating pipeline status complete")
     }
     return nil
@@ -318,3 +411,14 @@ func (c *credentials) GetRequestMetadata(oldcontext.Context, ...string) (map[str
 func (c *credentials) RequireTransportSecurity() bool {
     return false
 }
+// extract repository name from the configuration
+func extractRepositoryName(config *backend.Config) string {
+    return config.Stages[0].Steps[0].Environment["DRONE_REPO_NAME"] + "/" +
+        config.Stages[0].Steps[0].Environment["DRONE_REPO_NAME"]
+}
+// extract build number from the configuration
+func extractBuildNumber(config *backend.Config) string {
+    return config.Stages[0].Steps[0].Environment["DRONE_BUILD_NUMBER"]
+}


@@ -34,7 +34,7 @@ func main() {
             Name: "password",
             Usage: "drone auth password",
         },
-        cli.BoolFlag{
+        cli.BoolTFlag{
             EnvVar: "DRONE_DEBUG",
             Name: "debug",
             Usage: "start the agent in debug mode",

21
vendor/github.com/rs/zerolog/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Olivier Poitrey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

315
vendor/github.com/rs/zerolog/README.md generated vendored Normal file

@ -0,0 +1,315 @@
# Zero Allocation JSON Logger
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)
The zerolog package provides a fast and simple logger dedicated to JSON output.
Zerolog's API is designed to provide both a great developer experience and stunning [performance](#performance). Its unique chaining API allows zerolog to write JSON log events by avoiding allocations and reflection.
Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog takes the concept further with a simpler API and even better performance.
To keep the code base and the API simple, zerolog focuses on JSON logging only. As [suggested on reddit](https://www.reddit.com/r/golang/comments/6c9k7n/zerolog_is_now_faster_than_zap/), you may use tools like [humanlog](https://github.com/aybabtme/humanlog) to pretty print JSON on the console during development.
## Features
* Level logging
* Sampling
* Contextual fields
* `context.Context` integration
* `net/http` helpers
## Usage
```go
import "github.com/rs/zerolog/log"
```
### A global logger can be used for simple logging
```go
log.Info().Msg("hello world")
// Output: {"level":"info","time":1494567715,"message":"hello world"}
```
NOTE: To import the global logger, import the `log` subpackage `github.com/rs/zerolog/log`.
```go
log.Fatal().
Err(err).
Str("service", service).
Msgf("Cannot start %s", service)
// Output: {"level":"fatal","time":1494567715,"message":"Cannot start myservice","error":"some error","service":"myservice"}
// Exit 1
```
NOTE: Using `Msgf` generates one allocation even when the logger is disabled.
### Fields can be added to log messages
```go
log.Info().
Str("foo", "bar").
Int("n", 123).
Msg("hello world")
// Output: {"level":"info","time":1494567715,"foo":"bar","n":123,"message":"hello world"}
```
### Create logger instance to manage different outputs
```go
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
logger.Info().Str("foo", "bar").Msg("hello world")
// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"}
```
### Sub-loggers let you chain loggers with additional context
```go
sublogger := log.With().
Str("component": "foo").
Logger()
sublogger.Info().Msg("hello world")
// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"}
```
### Level logging
```go
zerolog.SetGlobalLevel(zerolog.InfoLevel)
log.Debug().Msg("filtered out message")
log.Info().Msg("routed message")
if e := log.Debug(); e.Enabled() {
// Compute log output only if enabled.
value := compute()
e.Str("foo": value).Msg("some debug message")
}
// Output: {"level":"info","time":1494567715,"message":"routed message"}
```
### Sub dictionary
```go
log.Info().
Str("foo", "bar").
Dict("dict", zerolog.Dict().
Str("bar", "baz").
Int("n", 1)
).Msg("hello world")
// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"}
```
### Customize automatic field names
```go
zerolog.TimestampFieldName = "t"
zerolog.LevelFieldName = "l"
zerolog.MessageFieldName = "m"
log.Info().Msg("hello world")
// Output: {"l":"info","t":1494567715,"m":"hello world"}
```
### Log with no level nor message
```go
log.Log().Str("foo","bar").Msg("")
// Output: {"time":1494567715,"foo":"bar"}
```
### Add contextual fields to the global logger
```go
log.Logger = log.With().Str("foo", "bar").Logger()
```
### Log Sampling
```go
sampled := log.Sample(10)
sampled.Info().Msg("will be logged every 10 messages")
// Output: {"time":1494567715,"sample":10,"message":"will be logged every 10 messages"}
```
### Pass a sub-logger by context
```go
ctx := log.With().Str("component", "module").Logger().WithContext(ctx)
log.Ctx(ctx).Info().Msg("hello world")
// Output: {"component":"module","level":"info","message":"hello world"}
```
### Set as standard logger output
```go
log := zerolog.New(os.Stdout).With().
Str("foo", "bar").
Logger()
stdlog.SetFlags(0)
stdlog.SetOutput(log)
stdlog.Print("hello world")
// Output: {"foo":"bar","message":"hello world"}
```
### Integration with `net/http`
The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
In this example we use [alice](https://github.com/justinas/alice) to install the logger for better readability.
```go
log := zerolog.New(os.Stdout).With().
Timestamp().
Str("role", "my-service").
Str("host", host).
Logger()
c := alice.New()
// Install the logger handler with default output on the console
c = c.Append(hlog.NewHandler(log))
// Install some provided extra handlers to set some of the request's context fields.
// Thanks to these handlers, all our logs will come with pre-populated fields.
c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
hlog.FromRequest(r).Info().
Str("method", r.Method).
Str("url", r.URL.String()).
Int("status", status).
Int("size", size).
Dur("duration", duration).
Msg("")
}))
c = c.Append(hlog.RemoteAddrHandler("ip"))
c = c.Append(hlog.UserAgentHandler("user_agent"))
c = c.Append(hlog.RefererHandler("referer"))
c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))
// Here is your final handler
h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Get the logger from the request's context. You can safely assume it
// will be always there: if the handler is removed, hlog.FromRequest
// will return a no-op logger.
hlog.FromRequest(r).Info().
Str("user", "current user").
Str("status", "ok").
Msg("Something happened")
// Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
}))
http.Handle("/", h)
if err := http.ListenAndServe(":8080", nil); err != nil {
log.Fatal().Err(err).Msg("Startup failed")
}
```
## Global Settings
Some settings can be changed and will be applied to all loggers:
* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Set this to `zerolog.Disabled` to disable logging altogether (quiet mode).
* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
* `zerolog.LevelFieldName`: Can be set to customize level field name.
* `zerolog.MessageFieldName`: Can be set to customize message field name.
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
* `zerolog.SampleFieldName`: Can be set to customize the field name added when sampling is enabled.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set to an empty string, times are formatted as UNIX timestamps.
* `zerolog.DurationFieldUnit`: Sets the unit of the fields added by `Dur` (default: `time.Millisecond`).
* `zerolog.DurationFieldInteger`: If set to true, `Dur` fields are formatted as integers instead of floats.
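A quick sketch of how these globals might be set at program start; the variable and constant names come from the list above, while the chosen values and the surrounding `main` are only illustrative:
```go
package main

import (
    "time"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    // Rename the standard fields and switch to integer UNIX timestamps.
    zerolog.TimestampFieldName = "ts"
    zerolog.LevelFieldName = "lvl"
    zerolog.MessageFieldName = "msg"
    zerolog.TimeFieldFormat = "" // empty string means UNIX timestamp

    // Render Dur fields as integer seconds instead of float milliseconds.
    zerolog.DurationFieldUnit = time.Second
    zerolog.DurationFieldInteger = true

    // Raise the minimum level for every logger in the process.
    zerolog.SetGlobalLevel(zerolog.InfoLevel)

    log.Info().Dur("elapsed", 2*time.Second).Msg("configured")
    // Output (approximately): {"lvl":"info","ts":1494567715,"elapsed":2,"msg":"configured"}
}
```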
## Field Types
### Standard Types
* `Str`
* `Bool`
* `Int`, `Int8`, `Int16`, `Int32`, `Int64`
* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64`
* `Float32`, `Float64`
### Advanced Fields
* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name.
* `Timestamp`: Inserts a timestamp field with the `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`.
* `Time`: Adds a field with the time formatted with the `zerolog.TimeFieldFormat`.
* `Dur`: Adds a field with a `time.Duration`.
* `Dict`: Adds a sub-key/value as a field of the event.
* `Interface`: Uses reflection to marshal the type.
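A short illustrative combination of the field types above; the error, peer address, and timings are made-up values for the sketch:
```go
package main

import (
    "errors"
    "time"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    started := time.Now().Add(-250 * time.Millisecond) // hypothetical start time
    err := errors.New("connection reset")              // hypothetical error

    log.Warn().
        Err(err). // stored under zerolog.ErrorFieldName ("error")
        Str("peer", "10.0.0.7").
        Int("attempt", 3).
        Time("started", started).            // formatted with zerolog.TimeFieldFormat
        Dur("elapsed", time.Since(started)). // scaled by zerolog.DurationFieldUnit
        Dict("conn", zerolog.Dict().
            Str("proto", "tcp").
            Int("port", 5432),
        ).
        Interface("tags", []string{"db", "retry"}). // falls back to reflection
        Msg("retrying connection")
}
```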
## Performance
All operations are allocation free (those numbers *include* JSON encoding):
```
BenchmarkLogEmpty-8 100000000 19.1 ns/op 0 B/op 0 allocs/op
BenchmarkDisabled-8 500000000 4.07 ns/op 0 B/op 0 allocs/op
BenchmarkInfo-8 30000000 42.5 ns/op 0 B/op 0 allocs/op
BenchmarkContextFields-8 30000000 44.9 ns/op 0 B/op 0 allocs/op
BenchmarkLogFields-8 10000000 184 ns/op 0 B/op 0 allocs/op
```
Using Uber's zap [comparison benchmark](https://github.com/uber-go/zap#performance):
Log a message and 10 fields:
| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |
Log a message with a logger that already has 10 fields of context:
| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |
Log a static string, without any context or `printf`-style templating:
| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |

169
vendor/github.com/rs/zerolog/array.go generated vendored Normal file

@ -0,0 +1,169 @@
package zerolog
import (
"sync"
"time"
"github.com/rs/zerolog/internal/json"
)
var arrayPool = &sync.Pool{
New: func() interface{} {
return &Array{
buf: make([]byte, 0, 500),
}
},
}
type Array struct {
buf []byte
}
// Arr creates an array to be added to an Event or Context.
func Arr() *Array {
a := arrayPool.Get().(*Array)
a.buf = a.buf[:0]
return a
}
func (*Array) MarshalZerologArray(*Array) {
}
func (a *Array) write(dst []byte) []byte {
if len(a.buf) == 0 {
dst = append(dst, `[]`...)
} else {
a.buf[0] = '['
dst = append(append(dst, a.buf...), ']')
}
arrayPool.Put(a)
return dst
}
// Object marshals an object that implements the LogObjectMarshaler
// interface and appends it to the array.
func (a *Array) Object(obj LogObjectMarshaler) *Array {
a.buf = append(a.buf, ',')
e := Dict()
obj.MarshalZerologObject(e)
e.buf = append(e.buf, '}')
a.buf = append(a.buf, e.buf...)
return a
}
// Str append the val as a string to the array.
func (a *Array) Str(val string) *Array {
a.buf = json.AppendString(append(a.buf, ','), val)
return a
}
// Bytes append the val as a string to the array.
func (a *Array) Bytes(val []byte) *Array {
a.buf = json.AppendBytes(append(a.buf, ','), val)
return a
}
// Err append the err as a string to the array.
func (a *Array) Err(err error) *Array {
a.buf = json.AppendError(append(a.buf, ','), err)
return a
}
// Bool append the val as a bool to the array.
func (a *Array) Bool(b bool) *Array {
a.buf = json.AppendBool(append(a.buf, ','), b)
return a
}
// Int append i as a int to the array.
func (a *Array) Int(i int) *Array {
a.buf = json.AppendInt(append(a.buf, ','), i)
return a
}
// Int8 append i as a int8 to the array.
func (a *Array) Int8(i int8) *Array {
a.buf = json.AppendInt8(append(a.buf, ','), i)
return a
}
// Int16 append i as a int16 to the array.
func (a *Array) Int16(i int16) *Array {
a.buf = json.AppendInt16(append(a.buf, ','), i)
return a
}
// Int32 append i as a int32 to the array.
func (a *Array) Int32(i int32) *Array {
a.buf = json.AppendInt32(append(a.buf, ','), i)
return a
}
// Int64 append i as a int64 to the array.
func (a *Array) Int64(i int64) *Array {
a.buf = json.AppendInt64(append(a.buf, ','), i)
return a
}
// Uint append i as a uint to the array.
func (a *Array) Uint(i uint) *Array {
a.buf = json.AppendUint(append(a.buf, ','), i)
return a
}
// Uint8 append i as a uint8 to the array.
func (a *Array) Uint8(i uint8) *Array {
a.buf = json.AppendUint8(append(a.buf, ','), i)
return a
}
// Uint16 append i as a uint16 to the array.
func (a *Array) Uint16(i uint16) *Array {
a.buf = json.AppendUint16(append(a.buf, ','), i)
return a
}
// Uint32 append i as a uint32 to the array.
func (a *Array) Uint32(i uint32) *Array {
a.buf = json.AppendUint32(append(a.buf, ','), i)
return a
}
// Uint64 append i as a uint64 to the array.
func (a *Array) Uint64(i uint64) *Array {
a.buf = json.AppendUint64(append(a.buf, ','), i)
return a
}
// Float32 append f as a float32 to the array.
func (a *Array) Float32(f float32) *Array {
a.buf = json.AppendFloat32(append(a.buf, ','), f)
return a
}
// Float64 append f as a float64 to the array.
func (a *Array) Float64(f float64) *Array {
a.buf = json.AppendFloat64(append(a.buf, ','), f)
return a
}
// Time appends t formatted as a string using zerolog.TimeFieldFormat.
func (a *Array) Time(t time.Time) *Array {
a.buf = json.AppendTime(append(a.buf, ','), t, TimeFieldFormat)
return a
}
// Dur append d to the array.
func (a *Array) Dur(d time.Duration) *Array {
a.buf = json.AppendDuration(append(a.buf, ','), d, DurationFieldUnit, DurationFieldInteger)
return a
}
// Interface append i marshaled using reflection.
func (a *Array) Interface(i interface{}) *Array {
if obj, ok := i.(LogObjectMarshaler); ok {
return a.Object(obj)
}
a.buf = json.AppendInterface(append(a.buf, ','), i)
return a
}

300
vendor/github.com/rs/zerolog/context.go generated vendored Normal file

@ -0,0 +1,300 @@
package zerolog
import (
"io/ioutil"
"time"
"github.com/rs/zerolog/internal/json"
)
// Context configures a new sub-logger with contextual fields.
type Context struct {
l Logger
}
// Logger returns the logger with the context previously set.
func (c Context) Logger() Logger {
return c.l
}
// Fields is a helper function to use a map to set fields using type assertion.
func (c Context) Fields(fields map[string]interface{}) Context {
c.l.context = appendFields(c.l.context, fields)
return c
}
// Dict adds the field key with the dict to the logger context.
func (c Context) Dict(key string, dict *Event) Context {
dict.buf = append(dict.buf, '}')
c.l.context = append(json.AppendKey(c.l.context, key), dict.buf...)
eventPool.Put(dict)
return c
}
// Array adds the field key with an array to the event context.
// Use zerolog.Arr() to create the array or pass a type that
// implements the LogArrayMarshaler interface.
func (c Context) Array(key string, arr LogArrayMarshaler) Context {
c.l.context = json.AppendKey(c.l.context, key)
if arr, ok := arr.(*Array); ok {
c.l.context = arr.write(c.l.context)
return c
}
var a *Array
if aa, ok := arr.(*Array); ok {
a = aa
} else {
a = Arr()
arr.MarshalZerologArray(a)
}
c.l.context = a.write(c.l.context)
return c
}
// Object marshals an object that implements the LogObjectMarshaler interface.
func (c Context) Object(key string, obj LogObjectMarshaler) Context {
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0, true)
e.Object(key, obj)
e.buf[0] = ',' // A new event starts as an object, we want to embed it.
c.l.context = append(c.l.context, e.buf...)
eventPool.Put(e)
return c
}
// Str adds the field key with val as a string to the logger context.
func (c Context) Str(key, val string) Context {
c.l.context = json.AppendString(json.AppendKey(c.l.context, key), val)
return c
}
// Strs adds the field key with vals as a []string to the logger context.
func (c Context) Strs(key string, vals []string) Context {
c.l.context = json.AppendStrings(json.AppendKey(c.l.context, key), vals)
return c
}
// Bytes adds the field key with val as a []byte to the logger context.
func (c Context) Bytes(key string, val []byte) Context {
c.l.context = json.AppendBytes(json.AppendKey(c.l.context, key), val)
return c
}
// AnErr adds the field key with err as a string to the logger context.
func (c Context) AnErr(key string, err error) Context {
if err != nil {
c.l.context = json.AppendError(json.AppendKey(c.l.context, key), err)
}
return c
}
// Errs adds the field key with errs as an array of strings to the logger context.
func (c Context) Errs(key string, errs []error) Context {
c.l.context = json.AppendErrors(json.AppendKey(c.l.context, key), errs)
return c
}
// Err adds the field "error" with err as a string to the logger context.
// To customize the key name, change zerolog.ErrorFieldName.
func (c Context) Err(err error) Context {
if err != nil {
c.l.context = json.AppendError(json.AppendKey(c.l.context, ErrorFieldName), err)
}
return c
}
// Bool adds the field key with val as a bool to the logger context.
func (c Context) Bool(key string, b bool) Context {
c.l.context = json.AppendBool(json.AppendKey(c.l.context, key), b)
return c
}
// Bools adds the field key with val as a []bool to the logger context.
func (c Context) Bools(key string, b []bool) Context {
c.l.context = json.AppendBools(json.AppendKey(c.l.context, key), b)
return c
}
// Int adds the field key with i as a int to the logger context.
func (c Context) Int(key string, i int) Context {
c.l.context = json.AppendInt(json.AppendKey(c.l.context, key), i)
return c
}
// Ints adds the field key with i as a []int to the logger context.
func (c Context) Ints(key string, i []int) Context {
c.l.context = json.AppendInts(json.AppendKey(c.l.context, key), i)
return c
}
// Int8 adds the field key with i as a int8 to the logger context.
func (c Context) Int8(key string, i int8) Context {
c.l.context = json.AppendInt8(json.AppendKey(c.l.context, key), i)
return c
}
// Ints8 adds the field key with i as a []int8 to the logger context.
func (c Context) Ints8(key string, i []int8) Context {
c.l.context = json.AppendInts8(json.AppendKey(c.l.context, key), i)
return c
}
// Int16 adds the field key with i as a int16 to the logger context.
func (c Context) Int16(key string, i int16) Context {
c.l.context = json.AppendInt16(json.AppendKey(c.l.context, key), i)
return c
}
// Ints16 adds the field key with i as a []int16 to the logger context.
func (c Context) Ints16(key string, i []int16) Context {
c.l.context = json.AppendInts16(json.AppendKey(c.l.context, key), i)
return c
}
// Int32 adds the field key with i as a int32 to the logger context.
func (c Context) Int32(key string, i int32) Context {
c.l.context = json.AppendInt32(json.AppendKey(c.l.context, key), i)
return c
}
// Ints32 adds the field key with i as a []int32 to the logger context.
func (c Context) Ints32(key string, i []int32) Context {
c.l.context = json.AppendInts32(json.AppendKey(c.l.context, key), i)
return c
}
// Int64 adds the field key with i as a int64 to the logger context.
func (c Context) Int64(key string, i int64) Context {
c.l.context = json.AppendInt64(json.AppendKey(c.l.context, key), i)
return c
}
// Ints64 adds the field key with i as a []int64 to the logger context.
func (c Context) Ints64(key string, i []int64) Context {
c.l.context = json.AppendInts64(json.AppendKey(c.l.context, key), i)
return c
}
// Uint adds the field key with i as a uint to the logger context.
func (c Context) Uint(key string, i uint) Context {
c.l.context = json.AppendUint(json.AppendKey(c.l.context, key), i)
return c
}
// Uints adds the field key with i as a []uint to the logger context.
func (c Context) Uints(key string, i []uint) Context {
c.l.context = json.AppendUints(json.AppendKey(c.l.context, key), i)
return c
}
// Uint8 adds the field key with i as a uint8 to the logger context.
func (c Context) Uint8(key string, i uint8) Context {
c.l.context = json.AppendUint8(json.AppendKey(c.l.context, key), i)
return c
}
// Uints8 adds the field key with i as a []uint8 to the logger context.
func (c Context) Uints8(key string, i []uint8) Context {
c.l.context = json.AppendUints8(json.AppendKey(c.l.context, key), i)
return c
}
// Uint16 adds the field key with i as a uint16 to the logger context.
func (c Context) Uint16(key string, i uint16) Context {
c.l.context = json.AppendUint16(json.AppendKey(c.l.context, key), i)
return c
}
// Uints16 adds the field key with i as a []uint16 to the logger context.
func (c Context) Uints16(key string, i []uint16) Context {
c.l.context = json.AppendUints16(json.AppendKey(c.l.context, key), i)
return c
}
// Uint32 adds the field key with i as a uint32 to the logger context.
func (c Context) Uint32(key string, i uint32) Context {
c.l.context = json.AppendUint32(json.AppendKey(c.l.context, key), i)
return c
}
// Uints32 adds the field key with i as a []uint32 to the logger context.
func (c Context) Uints32(key string, i []uint32) Context {
c.l.context = json.AppendUints32(json.AppendKey(c.l.context, key), i)
return c
}
// Uint64 adds the field key with i as a uint64 to the logger context.
func (c Context) Uint64(key string, i uint64) Context {
c.l.context = json.AppendUint64(json.AppendKey(c.l.context, key), i)
return c
}
// Uints64 adds the field key with i as a []uint64 to the logger context.
func (c Context) Uints64(key string, i []uint64) Context {
c.l.context = json.AppendUints64(json.AppendKey(c.l.context, key), i)
return c
}
// Float32 adds the field key with f as a float32 to the logger context.
func (c Context) Float32(key string, f float32) Context {
c.l.context = json.AppendFloat32(json.AppendKey(c.l.context, key), f)
return c
}
// Floats32 adds the field key with f as a []float32 to the logger context.
func (c Context) Floats32(key string, f []float32) Context {
c.l.context = json.AppendFloats32(json.AppendKey(c.l.context, key), f)
return c
}
// Float64 adds the field key with f as a float64 to the logger context.
func (c Context) Float64(key string, f float64) Context {
c.l.context = json.AppendFloat64(json.AppendKey(c.l.context, key), f)
return c
}
// Floats64 adds the field key with f as a []float64 to the logger context.
func (c Context) Floats64(key string, f []float64) Context {
c.l.context = json.AppendFloats64(json.AppendKey(c.l.context, key), f)
return c
}
// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
// To customize the key name, change zerolog.TimestampFieldName.
func (c Context) Timestamp() Context {
if len(c.l.context) > 0 {
c.l.context[0] = 1
} else {
c.l.context = append(c.l.context, 1)
}
return c
}
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Time(key string, t time.Time) Context {
c.l.context = json.AppendTime(json.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
}
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Times(key string, t []time.Time) Context {
c.l.context = json.AppendTimes(json.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
}
// Dur adds the fields key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context {
c.l.context = json.AppendDuration(json.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
return c
}
// Durs adds the fields key with d divided by unit and stored as a float.
func (c Context) Durs(key string, d []time.Duration) Context {
c.l.context = json.AppendDurations(json.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
return c
}
// Interface adds the field key with obj marshaled using reflection.
func (c Context) Interface(key string, i interface{}) Context {
c.l.context = json.AppendInterface(json.AppendKey(c.l.context, key), i)
return c
}

29
vendor/github.com/rs/zerolog/ctx.go generated vendored Normal file

@ -0,0 +1,29 @@
package zerolog
import (
"context"
"io/ioutil"
)
var disabledLogger = New(ioutil.Discard).Level(Disabled)
type ctxKey struct{}
// WithContext returns a copy of ctx with l associated.
func (l Logger) WithContext(ctx context.Context) context.Context {
if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
// Update existing pointer.
*lp = l
return ctx
}
return context.WithValue(ctx, ctxKey{}, &l)
}
// Ctx returns the Logger associated with the ctx. If no logger
// is associated, a disabled logger is returned.
func Ctx(ctx context.Context) Logger {
if l, ok := ctx.Value(ctxKey{}).(*Logger); ok {
return *l
}
return disabledLogger
}

555
vendor/github.com/rs/zerolog/event.go generated vendored Normal file

@ -0,0 +1,555 @@
package zerolog
import (
"fmt"
"io/ioutil"
"os"
"sync"
"time"
"github.com/rs/zerolog/internal/json"
)
var eventPool = &sync.Pool{
New: func() interface{} {
return &Event{
buf: make([]byte, 0, 500),
}
},
}
// Event represents a log event. It is instantiated by one of the level methods of
// Logger and finalized by the Msg or Msgf method.
type Event struct {
buf []byte
w LevelWriter
level Level
enabled bool
done func(msg string)
}
// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface
// to be implemented by types used with Event/Context's Object methods.
type LogObjectMarshaler interface {
MarshalZerologObject(e *Event)
}
// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface
// to be implemented by types used with Event/Context's Array methods.
type LogArrayMarshaler interface {
MarshalZerologArray(a *Array)
}
func newEvent(w LevelWriter, level Level, enabled bool) *Event {
if !enabled {
return &Event{}
}
e := eventPool.Get().(*Event)
e.buf = e.buf[:1]
e.buf[0] = '{'
e.w = w
e.level = level
e.enabled = true
return e
}
func (e *Event) write() (err error) {
if !e.enabled {
return nil
}
e.buf = append(e.buf, '}', '\n')
_, err = e.w.WriteLevel(e.level, e.buf)
eventPool.Put(e)
return
}
// Enabled returns false if the *Event is going to be filtered out by
// log level or sampling.
func (e *Event) Enabled() bool {
return e.enabled
}
// Msg sends the *Event with msg added as the message field if not empty.
//
// NOTICE: once this method is called, the *Event should be disposed.
// Calling Msg twice can have unexpected results.
func (e *Event) Msg(msg string) {
if !e.enabled {
return
}
if msg != "" {
e.buf = json.AppendString(json.AppendKey(e.buf, MessageFieldName), msg)
}
if e.done != nil {
defer e.done(msg)
}
if err := e.write(); err != nil {
fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v", err)
}
}
// Msgf sends the event with formatted msg added as the message field if not empty.
//
// NOTICE: once this method is called, the *Event should be disposed.
// Calling Msg twice can have unexpected results.
func (e *Event) Msgf(format string, v ...interface{}) {
if !e.enabled {
return
}
msg := fmt.Sprintf(format, v...)
if msg != "" {
e.buf = json.AppendString(json.AppendKey(e.buf, MessageFieldName), msg)
}
if e.done != nil {
defer e.done(msg)
}
if err := e.write(); err != nil {
fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v", err)
}
}
// Fields is a helper function to use a map to set fields using type assertion.
func (e *Event) Fields(fields map[string]interface{}) *Event {
if !e.enabled {
return e
}
e.buf = appendFields(e.buf, fields)
return e
}
// Dict adds the field key with a dict to the event context.
// Use zerolog.Dict() to create the dictionary.
func (e *Event) Dict(key string, dict *Event) *Event {
if !e.enabled {
return e
}
e.buf = append(append(json.AppendKey(e.buf, key), dict.buf...), '}')
eventPool.Put(dict)
return e
}
// Dict creates an Event to be used with the *Event.Dict method.
// Call usual field methods like Str, Int etc to add fields to this
// event and give it as argument the *Event.Dict method.
func Dict() *Event {
return newEvent(levelWriterAdapter{ioutil.Discard}, 0, true)
}
// Array adds the field key with an array to the event context.
// Use zerolog.Arr() to create the array or pass a type that
// implements the LogArrayMarshaler interface.
func (e *Event) Array(key string, arr LogArrayMarshaler) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendKey(e.buf, key)
var a *Array
if aa, ok := arr.(*Array); ok {
a = aa
} else {
a = Arr()
arr.MarshalZerologArray(a)
}
e.buf = a.write(e.buf)
return e
}
func (e *Event) appendObject(obj LogObjectMarshaler) {
pos := len(e.buf)
obj.MarshalZerologObject(e)
if pos < len(e.buf) {
// As MarshalZerologObject will use event API, the first field will be
// preceded by a comma. If at least one field has been added (buf grew),
// we replace this comma with the opening brace.
e.buf[pos] = '{'
} else {
e.buf = append(e.buf, '{')
}
e.buf = append(e.buf, '}')
}
// Object marshals an object that implements the LogObjectMarshaler interface.
func (e *Event) Object(key string, obj LogObjectMarshaler) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendKey(e.buf, key)
e.appendObject(obj)
return e
}
// Str adds the field key with val as a string to the *Event context.
func (e *Event) Str(key, val string) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendString(json.AppendKey(e.buf, key), val)
return e
}
// Strs adds the field key with vals as a []string to the *Event context.
func (e *Event) Strs(key string, vals []string) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendStrings(json.AppendKey(e.buf, key), vals)
return e
}
// Bytes adds the field key with val as a string to the *Event context.
//
// Runes outside of normal ASCII ranges will be hex-encoded in the resulting
// JSON.
func (e *Event) Bytes(key string, val []byte) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendBytes(json.AppendKey(e.buf, key), val)
return e
}
// AnErr adds the field key with err as a string to the *Event context.
// If err is nil, no field is added.
func (e *Event) AnErr(key string, err error) *Event {
if !e.enabled {
return e
}
if err != nil {
e.buf = json.AppendError(json.AppendKey(e.buf, key), err)
}
return e
}
// Errs adds the field key with errs as an array of strings to the *Event context.
// If err is nil, no field is added.
func (e *Event) Errs(key string, errs []error) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendErrors(json.AppendKey(e.buf, key), errs)
return e
}
// Err adds the field "error" with err as a string to the *Event context.
// If err is nil, no field is added.
// To customize the key name, change zerolog.ErrorFieldName.
func (e *Event) Err(err error) *Event {
if !e.enabled {
return e
}
if err != nil {
e.buf = json.AppendError(json.AppendKey(e.buf, ErrorFieldName), err)
}
return e
}
// Bool adds the field key with val as a bool to the *Event context.
func (e *Event) Bool(key string, b bool) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendBool(json.AppendKey(e.buf, key), b)
return e
}
// Bools adds the field key with val as a []bool to the *Event context.
func (e *Event) Bools(key string, b []bool) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendBools(json.AppendKey(e.buf, key), b)
return e
}
// Int adds the field key with i as a int to the *Event context.
func (e *Event) Int(key string, i int) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInt(json.AppendKey(e.buf, key), i)
return e
}
// Ints adds the field key with i as a []int to the *Event context.
func (e *Event) Ints(key string, i []int) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInts(json.AppendKey(e.buf, key), i)
return e
}
// Int8 adds the field key with i as a int8 to the *Event context.
func (e *Event) Int8(key string, i int8) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInt8(json.AppendKey(e.buf, key), i)
return e
}
// Ints8 adds the field key with i as a []int8 to the *Event context.
func (e *Event) Ints8(key string, i []int8) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInts8(json.AppendKey(e.buf, key), i)
return e
}
// Int16 adds the field key with i as a int16 to the *Event context.
func (e *Event) Int16(key string, i int16) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInt16(json.AppendKey(e.buf, key), i)
return e
}
// Ints16 adds the field key with i as a []int16 to the *Event context.
func (e *Event) Ints16(key string, i []int16) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInts16(json.AppendKey(e.buf, key), i)
return e
}
// Int32 adds the field key with i as a int32 to the *Event context.
func (e *Event) Int32(key string, i int32) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInt32(json.AppendKey(e.buf, key), i)
return e
}
// Ints32 adds the field key with i as a []int32 to the *Event context.
func (e *Event) Ints32(key string, i []int32) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInts32(json.AppendKey(e.buf, key), i)
return e
}
// Int64 adds the field key with i as a int64 to the *Event context.
func (e *Event) Int64(key string, i int64) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInt64(json.AppendKey(e.buf, key), i)
return e
}
// Ints64 adds the field key with i as a []int64 to the *Event context.
func (e *Event) Ints64(key string, i []int64) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendInts64(json.AppendKey(e.buf, key), i)
return e
}
// Uint adds the field key with i as a uint to the *Event context.
func (e *Event) Uint(key string, i uint) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUint(json.AppendKey(e.buf, key), i)
return e
}
// Uints adds the field key with i as a []uint to the *Event context.
func (e *Event) Uints(key string, i []uint) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUints(json.AppendKey(e.buf, key), i)
return e
}
// Uint8 adds the field key with i as a uint8 to the *Event context.
func (e *Event) Uint8(key string, i uint8) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUint8(json.AppendKey(e.buf, key), i)
return e
}
// Uints8 adds the field key with i as a []uint8 to the *Event context.
func (e *Event) Uints8(key string, i []uint8) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUints8(json.AppendKey(e.buf, key), i)
return e
}
// Uint16 adds the field key with i as a uint16 to the *Event context.
func (e *Event) Uint16(key string, i uint16) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUint16(json.AppendKey(e.buf, key), i)
return e
}
// Uints16 adds the field key with i as a []uint16 to the *Event context.
func (e *Event) Uints16(key string, i []uint16) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUints16(json.AppendKey(e.buf, key), i)
return e
}
// Uint32 adds the field key with i as a uint32 to the *Event context.
func (e *Event) Uint32(key string, i uint32) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUint32(json.AppendKey(e.buf, key), i)
return e
}
// Uints32 adds the field key with i as a []uint32 to the *Event context.
func (e *Event) Uints32(key string, i []uint32) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUints32(json.AppendKey(e.buf, key), i)
return e
}
// Uint64 adds the field key with i as a uint64 to the *Event context.
func (e *Event) Uint64(key string, i uint64) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUint64(json.AppendKey(e.buf, key), i)
return e
}
// Uints64 adds the field key with i as a []uint64 to the *Event context.
func (e *Event) Uints64(key string, i []uint64) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendUints64(json.AppendKey(e.buf, key), i)
return e
}
// Float32 adds the field key with f as a float32 to the *Event context.
func (e *Event) Float32(key string, f float32) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendFloat32(json.AppendKey(e.buf, key), f)
return e
}
// Floats32 adds the field key with f as a []float32 to the *Event context.
func (e *Event) Floats32(key string, f []float32) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendFloats32(json.AppendKey(e.buf, key), f)
return e
}
// Float64 adds the field key with f as a float64 to the *Event context.
func (e *Event) Float64(key string, f float64) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendFloat64(json.AppendKey(e.buf, key), f)
return e
}
// Floats64 adds the field key with f as a []float64 to the *Event context.
func (e *Event) Floats64(key string, f []float64) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendFloats64(json.AppendKey(e.buf, key), f)
return e
}
// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key.
// To customize the key name, change zerolog.TimestampFieldName.
func (e *Event) Timestamp() *Event {
if !e.enabled {
return e
}
e.buf = json.AppendTime(json.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat)
return e
}
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (e *Event) Time(key string, t time.Time) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendTime(json.AppendKey(e.buf, key), t, TimeFieldFormat)
return e
}
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (e *Event) Times(key string, t []time.Time) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendTimes(json.AppendKey(e.buf, key), t, TimeFieldFormat)
return e
}
// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit.
// If zerolog.DurationFieldInteger is true, durations are rendered as integer
// instead of float.
func (e *Event) Dur(key string, d time.Duration) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendDuration(json.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
return e
}
// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit.
// If zerolog.DurationFieldInteger is true, durations are rendered as integer
// instead of float.
func (e *Event) Durs(key string, d []time.Duration) *Event {
if !e.enabled {
return e
}
e.buf = json.AppendDurations(json.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
return e
}
// TimeDiff adds the field key with positive duration between time t and start.
// If time t is not greater than start, duration will be 0.
// Duration format follows the same principle as Dur().
func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
if !e.enabled {
return e
}
var d time.Duration
if t.After(start) {
d = t.Sub(start)
}
e.buf = json.AppendDuration(json.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
return e
}
// Interface adds the field key with i marshaled using reflection.
func (e *Event) Interface(key string, i interface{}) *Event {
if !e.enabled {
return e
}
if obj, ok := i.(LogObjectMarshaler); ok {
return e.Object(key, obj)
}
e.buf = json.AppendInterface(json.AppendKey(e.buf, key), i)
return e
}

96
vendor/github.com/rs/zerolog/fields.go generated vendored Normal file

@ -0,0 +1,96 @@
package zerolog
import (
"sort"
"time"
"github.com/rs/zerolog/internal/json"
)
func appendFields(dst []byte, fields map[string]interface{}) []byte {
keys := make([]string, 0, len(fields))
for key := range fields {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
dst = json.AppendKey(dst, key)
switch val := fields[key].(type) {
case string:
dst = json.AppendString(dst, val)
case []byte:
dst = json.AppendBytes(dst, val)
case error:
dst = json.AppendError(dst, val)
case []error:
dst = json.AppendErrors(dst, val)
case bool:
dst = json.AppendBool(dst, val)
case int:
dst = json.AppendInt(dst, val)
case int8:
dst = json.AppendInt8(dst, val)
case int16:
dst = json.AppendInt16(dst, val)
case int32:
dst = json.AppendInt32(dst, val)
case int64:
dst = json.AppendInt64(dst, val)
case uint:
dst = json.AppendUint(dst, val)
case uint8:
dst = json.AppendUint8(dst, val)
case uint16:
dst = json.AppendUint16(dst, val)
case uint32:
dst = json.AppendUint32(dst, val)
case uint64:
dst = json.AppendUint64(dst, val)
case float32:
dst = json.AppendFloat32(dst, val)
case float64:
dst = json.AppendFloat64(dst, val)
case time.Time:
dst = json.AppendTime(dst, val, TimeFieldFormat)
case time.Duration:
dst = json.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
case []string:
dst = json.AppendStrings(dst, val)
case []bool:
dst = json.AppendBools(dst, val)
case []int:
dst = json.AppendInts(dst, val)
case []int8:
dst = json.AppendInts8(dst, val)
case []int16:
dst = json.AppendInts16(dst, val)
case []int32:
dst = json.AppendInts32(dst, val)
case []int64:
dst = json.AppendInts64(dst, val)
case []uint:
dst = json.AppendUints(dst, val)
// case []uint8:
// dst = appendUints8(dst, val)
case []uint16:
dst = json.AppendUints16(dst, val)
case []uint32:
dst = json.AppendUints32(dst, val)
case []uint64:
dst = json.AppendUints64(dst, val)
case []float32:
dst = json.AppendFloats32(dst, val)
case []float64:
dst = json.AppendFloats64(dst, val)
case []time.Time:
dst = json.AppendTimes(dst, val, TimeFieldFormat)
case []time.Duration:
dst = json.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
case nil:
dst = append(dst, "null"...)
default:
dst = json.AppendInterface(dst, val)
}
}
return dst
}

67
vendor/github.com/rs/zerolog/globals.go generated vendored Normal file

@ -0,0 +1,67 @@
package zerolog
import "time"
import "sync/atomic"
var (
// TimestampFieldName is the field name used for the timestamp field.
TimestampFieldName = "time"
// LevelFieldName is the field name used for the level field.
LevelFieldName = "level"
// MessageFieldName is the field name used for the message field.
MessageFieldName = "message"
// ErrorFieldName is the field name used for error fields.
ErrorFieldName = "error"
// SampleFieldName is the name of the field used to report sampling.
SampleFieldName = "sample"
// TimeFieldFormat defines the time format of the Time field type.
// If set to an empty string, the time is formatted as a UNIX timestamp
// as an integer.
TimeFieldFormat = time.RFC3339
// TimestampFunc defines the function called to generate a timestamp.
TimestampFunc = time.Now
// DurationFieldUnit defines the unit for time.Duration type fields added
// using the Dur method.
DurationFieldUnit = time.Millisecond
// DurationFieldInteger renders Dur fields as integer instead of float if
// set to true.
DurationFieldInteger = false
)
var (
gLevel = new(uint32)
disableSampling = new(uint32)
)
// SetGlobalLevel sets the global override for log level. If this
// value is raised, all Loggers will use at least this value.
//
// To globally disable logs, set GlobalLevel to Disabled.
func SetGlobalLevel(l Level) {
atomic.StoreUint32(gLevel, uint32(l))
}
func globalLevel() Level {
return Level(atomic.LoadUint32(gLevel))
}
// DisableSampling will disable sampling in all Loggers if true.
func DisableSampling(v bool) {
var i uint32
if v {
i = 1
}
atomic.StoreUint32(disableSampling, i)
}
func samplingDisabled() bool {
return atomic.LoadUint32(disableSampling) == 1
}

39
vendor/github.com/rs/zerolog/internal/json/base.go generated vendored Normal file

@ -0,0 +1,39 @@
package json
func AppendKey(dst []byte, key string) []byte {
if len(dst) > 1 {
dst = append(dst, ',')
}
dst = AppendString(dst, key)
return append(dst, ':')
}
func AppendError(dst []byte, err error) []byte {
if err == nil {
return append(dst, `null`...)
}
return AppendString(dst, err.Error())
}
func AppendErrors(dst []byte, errs []error) []byte {
if len(errs) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
if errs[0] != nil {
dst = AppendString(dst, errs[0].Error())
} else {
dst = append(dst, "null"...)
}
if len(errs) > 1 {
for _, err := range errs[1:] {
if err == nil {
dst = append(dst, ",null"...)
continue
}
dst = AppendString(append(dst, ','), err.Error())
}
}
dst = append(dst, ']')
return dst
}

180
vendor/github.com/rs/zerolog/internal/json/string.go generated vendored Normal file

@ -0,0 +1,180 @@
package json
import "unicode/utf8"
const hex = "0123456789abcdef"
func AppendStrings(dst []byte, vals []string) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = AppendString(dst, vals[0])
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = AppendString(append(dst, ','), val)
}
}
dst = append(dst, ']')
return dst
}
// AppendString encodes the input string to json and appends
// the encoded string to the input byte slice.
//
// The operation loops through each byte in the string looking
// for characters that need json or utf8 encoding. If the string
// does not need encoding, then the string is appended in its
// entirety to the byte slice.
// If we encounter a byte that does need encoding, we switch to
// a byte-by-byte read-encode-append operation.
func AppendString(dst []byte, s string) []byte {
// Start with a double quote.
dst = append(dst, '"')
// Loop through each character in the string.
for i := 0; i < len(s); i++ {
// Check if the character needs encoding. Control characters, slashes,
// and the double quote need json encoding. Bytes above the ascii
// boundary need utf8 encoding.
if s[i] < 0x20 || s[i] > 0x7e || s[i] == '\\' || s[i] == '"' {
// We encountered a character that needs to be encoded. Switch
// to complex version of the algorithm.
dst = appendStringComplex(dst, s, i)
return append(dst, '"')
}
}
// The string needs no encoding and is therefore appended directly
// to the byte slice.
dst = append(dst, s...)
// End with a double quote
return append(dst, '"')
}
// appendStringComplex is used by appendString to take over an in
// progress JSON string encoding that encountered a character that needs
// to be encoded.
func appendStringComplex(dst []byte, s string, i int) []byte {
start := 0
for i < len(s) {
b := s[i]
if b >= utf8.RuneSelf {
r, size := utf8.DecodeRuneInString(s[i:])
if r == utf8.RuneError && size == 1 {
// In case of error, first append any previous simple characters to
// the byte slice and append a replacement character code
// in place of the invalid sequence.
if start < i {
dst = append(dst, s[start:i]...)
}
dst = append(dst, `\ufffd`...)
i += size
start = i
continue
}
i += size
continue
}
if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
i++
continue
}
// We encountered a character that needs to be encoded.
// Let's append the previous simple characters to the byte slice
// and switch our operation to read and encode the remainder
// characters byte-by-byte.
if start < i {
dst = append(dst, s[start:i]...)
}
switch b {
case '"', '\\':
dst = append(dst, '\\', b)
case '\b':
dst = append(dst, '\\', 'b')
case '\f':
dst = append(dst, '\\', 'f')
case '\n':
dst = append(dst, '\\', 'n')
case '\r':
dst = append(dst, '\\', 'r')
case '\t':
dst = append(dst, '\\', 't')
default:
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
}
i++
start = i
}
if start < len(s) {
dst = append(dst, s[start:]...)
}
return dst
}
// AppendBytes is a mirror of appendString with []byte arg
func AppendBytes(dst, s []byte) []byte {
dst = append(dst, '"')
for i := 0; i < len(s); i++ {
if s[i] < 0x20 || s[i] > 0x7e || s[i] == '\\' || s[i] == '"' {
dst = appendBytesComplex(dst, s, i)
return append(dst, '"')
}
}
dst = append(dst, s...)
return append(dst, '"')
}
// appendBytesComplex is a mirror of the appendStringComplex
// with []byte arg
func appendBytesComplex(dst, s []byte, i int) []byte {
start := 0
for i < len(s) {
b := s[i]
if b >= utf8.RuneSelf {
r, size := utf8.DecodeRune(s[i:])
if r == utf8.RuneError && size == 1 {
if start < i {
dst = append(dst, s[start:i]...)
}
dst = append(dst, `\ufffd`...)
i += size
start = i
continue
}
i += size
continue
}
if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
i++
continue
}
// We encountered a character that needs to be encoded.
// Let's append the previous simple characters to the byte slice
// and switch our operation to read and encode the remainder
// characters byte-by-byte.
if start < i {
dst = append(dst, s[start:i]...)
}
switch b {
case '"', '\\':
dst = append(dst, '\\', b)
case '\b':
dst = append(dst, '\\', 'b')
case '\f':
dst = append(dst, '\\', 'f')
case '\n':
dst = append(dst, '\\', 'n')
case '\r':
dst = append(dst, '\\', 'r')
case '\t':
dst = append(dst, '\\', 't')
default:
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
}
i++
start = i
}
if start < len(s) {
dst = append(dst, s[start:]...)
}
return dst
}
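
A hypothetical in-package test sketch for the fast and slow paths of AppendString above (the test name and inputs are illustrative):

package json

import "testing"

func TestAppendStringSketch(t *testing.T) {
	// Plain ASCII takes the fast path and is copied verbatim between quotes.
	if got := string(AppendString(nil, "plain")); got != `"plain"` {
		t.Fatalf("got %s", got)
	}
	// Quotes, backslashes, control bytes and invalid UTF-8 switch to the
	// byte-by-byte escaping path.
	if got := string(AppendString(nil, "say \"hi\"\n")); got != `"say \"hi\"\n"` {
		t.Fatalf("got %s", got)
	}
}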

68
vendor/github.com/rs/zerolog/internal/json/time.go generated vendored Normal file
View file

@@ -0,0 +1,68 @@
package json
import (
"strconv"
"time"
)
func AppendTime(dst []byte, t time.Time, format string) []byte {
if format == "" {
return AppendInt64(dst, t.Unix())
}
return append(t.AppendFormat(append(dst, '"'), format), '"')
}
func AppendTimes(dst []byte, vals []time.Time, format string) []byte {
if format == "" {
return appendUnixTimes(dst, vals)
}
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"')
if len(vals) > 1 {
for _, t := range vals[1:] {
dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"')
}
}
dst = append(dst, ']')
return dst
}
func appendUnixTimes(dst []byte, vals []time.Time) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, vals[0].Unix(), 10)
if len(vals) > 1 {
for _, t := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
if useInt {
return strconv.AppendInt(dst, int64(d/unit), 10)
}
return AppendFloat64(dst, float64(d)/float64(unit))
}
func AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = AppendDuration(dst, vals[0], unit, useInt)
if len(vals) > 1 {
for _, d := range vals[1:] {
dst = AppendDuration(append(dst, ','), d, unit, useInt)
}
}
dst = append(dst, ']')
return dst
}
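
A hypothetical in-package test sketch for AppendDuration above, mirroring the DurationFieldUnit / DurationFieldInteger settings defined in globals.go (the test name and values are illustrative):

package json

import (
	"testing"
	"time"
)

func TestAppendDurationSketch(t *testing.T) {
	d := 1500 * time.Millisecond
	// Float rendering in the given unit.
	if got := string(AppendDuration(nil, d, time.Millisecond, false)); got != "1500" {
		t.Fatalf("got %s", got)
	}
	// Integer rendering truncates toward zero in the given unit.
	if got := string(AppendDuration(nil, d, time.Second, true)); got != "1" {
		t.Fatalf("got %s", got)
	}
}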

278
vendor/github.com/rs/zerolog/internal/json/types.go generated vendored Normal file
View file

@@ -0,0 +1,278 @@
package json
import (
"encoding/json"
"fmt"
"math"
"strconv"
)
func AppendBool(dst []byte, val bool) []byte {
return strconv.AppendBool(dst, val)
}
func AppendBools(dst []byte, vals []bool) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendBool(dst, vals[0])
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendBool(append(dst, ','), val)
}
}
dst = append(dst, ']')
return dst
}
func AppendInt(dst []byte, val int) []byte {
return strconv.AppendInt(dst, int64(val), 10)
}
func AppendInts(dst []byte, vals []int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendInt8(dst []byte, val int8) []byte {
return strconv.AppendInt(dst, int64(val), 10)
}
func AppendInts8(dst []byte, vals []int8) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendInt16(dst []byte, val int16) []byte {
return strconv.AppendInt(dst, int64(val), 10)
}
func AppendInts16(dst []byte, vals []int16) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendInt32(dst []byte, val int32) []byte {
return strconv.AppendInt(dst, int64(val), 10)
}
func AppendInts32(dst []byte, vals []int32) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, int64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendInt64(dst []byte, val int64) []byte {
return strconv.AppendInt(dst, val, 10)
}
func AppendInts64(dst []byte, vals []int64) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, vals[0], 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), val, 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendUint(dst []byte, val uint) []byte {
return strconv.AppendUint(dst, uint64(val), 10)
}
func AppendUints(dst []byte, vals []uint) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendUint8(dst []byte, val uint8) []byte {
return strconv.AppendUint(dst, uint64(val), 10)
}
func AppendUints8(dst []byte, vals []uint8) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendUint16(dst []byte, val uint16) []byte {
return strconv.AppendUint(dst, uint64(val), 10)
}
func AppendUints16(dst []byte, vals []uint16) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendUint32(dst []byte, val uint32) []byte {
return strconv.AppendUint(dst, uint64(val), 10)
}
func AppendUints32(dst []byte, vals []uint32) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendUint64(dst []byte, val uint64) []byte {
return strconv.AppendUint(dst, uint64(val), 10)
}
func AppendUints64(dst []byte, vals []uint64) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendUint(dst, vals[0], 10)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = strconv.AppendUint(append(dst, ','), val, 10)
}
}
dst = append(dst, ']')
return dst
}
func AppendFloat(dst []byte, val float64, bitSize int) []byte {
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
// with an error, but a logging library wants the data to get through, so we
// make a tradeoff and store those values as strings.
switch {
case math.IsNaN(val):
return append(dst, `"NaN"`...)
case math.IsInf(val, 1):
return append(dst, `"+Inf"`...)
case math.IsInf(val, -1):
return append(dst, `"-Inf"`...)
}
return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
}
func AppendFloat32(dst []byte, val float32) []byte {
return AppendFloat(dst, float64(val), 32)
}
func AppendFloats32(dst []byte, vals []float32) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = AppendFloat(dst, float64(vals[0]), 32)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = AppendFloat(append(dst, ','), float64(val), 32)
}
}
dst = append(dst, ']')
return dst
}
func AppendFloat64(dst []byte, val float64) []byte {
return AppendFloat(dst, val, 64)
}
func AppendFloats64(dst []byte, vals []float64) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = AppendFloat(dst, vals[0], 64)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = AppendFloat(append(dst, ','), val, 64)
}
}
dst = append(dst, ']')
return dst
}
func AppendInterface(dst []byte, i interface{}) []byte {
marshaled, err := json.Marshal(i)
if err != nil {
return AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
}
return append(dst, marshaled...)
}
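
A hypothetical in-package test sketch for the non-finite float handling described above (the test name and values are illustrative):

package json

import (
	"math"
	"testing"
)

func TestAppendFloatSketch(t *testing.T) {
	// NaN and the infinities are not valid JSON numbers, so they are written
	// as quoted strings instead of failing the whole log event.
	if got := string(AppendFloat64(nil, math.NaN())); got != `"NaN"` {
		t.Fatalf("got %s", got)
	}
	if got := string(AppendFloats64(nil, []float64{1.5, math.Inf(-1)})); got != `[1.5,"-Inf"]` {
		t.Fatalf("got %s", got)
	}
}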

338
vendor/github.com/rs/zerolog/log.go generated vendored Normal file
View file

@@ -0,0 +1,338 @@
// Package zerolog provides a lightweight logging library dedicated to JSON logging.
//
// A global Logger can be used for simple logging:
//
// import "github.com/rs/zerolog/log"
//
// log.Info().Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world"}
//
// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log".
//
// Fields can be added to log messages:
//
// log.Info().Str("foo", "bar").Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
//
// Create logger instance to manage different outputs:
//
// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
// logger.Info().
// Str("foo", "bar").
// Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
//
// Sub-loggers let you chain loggers with additional context:
//
// sublogger := log.With().Str("component", "foo").Logger()
// sublogger.Info().Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
//
// Level logging
//
// zerolog.SetGlobalLevel(zerolog.InfoLevel)
//
// log.Debug().Msg("filtered out message")
// log.Info().Msg("routed message")
//
// if e := log.Debug(); e.Enabled() {
// // Compute log output only if enabled.
// value := compute()
// e.Str("foo", value).Msg("some debug message")
// }
// // Output: {"level":"info","time":1494567715,"message":"routed message"}
//
// Customize automatic field names:
//
// zerolog.TimestampFieldName = "t"
// zerolog.LevelFieldName = "p"
// zerolog.MessageFieldName = "m"
//
// log.Info().Msg("hello world")
// // Output: {"t":1494567715,"p":"info","m":"hello world"}
//
// Log with no level and message:
//
// log.Log().Str("foo","bar").Msg("")
// // Output: {"time":1494567715,"foo":"bar"}
//
// Add contextual fields to global Logger:
//
// log.Logger = log.With().Str("foo", "bar").Logger()
//
// Sample logs:
//
// sampled := log.Sample(10)
// sampled.Info().Msg("will be logged every 10 messages")
//
package zerolog
import (
"io"
"io/ioutil"
"os"
"strconv"
"sync/atomic"
"github.com/rs/zerolog/internal/json"
)
// Level defines log levels.
type Level uint8
const (
// DebugLevel defines debug log level.
DebugLevel Level = iota
// InfoLevel defines info log level.
InfoLevel
// WarnLevel defines warn log level.
WarnLevel
// ErrorLevel defines error log level.
ErrorLevel
// FatalLevel defines fatal log level.
FatalLevel
// PanicLevel defines panic log level.
PanicLevel
// Disabled disables the logger.
Disabled
)
func (l Level) String() string {
switch l {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warn"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return ""
}
const (
// Often samples log every 10 events.
Often = 10
// Sometimes samples log every 100 events.
Sometimes = 100
// Rarely samples log every 1000 events.
Rarely = 1000
)
var disabledEvent = newEvent(levelWriterAdapter{ioutil.Discard}, 0, false)
// A Logger represents an active logging object that generates lines
// of JSON output to an io.Writer. Each logging operation makes a single
// call to the Writer's Write method. There is no guarantee of access
// serialization to the Writer. If your Writer is not thread safe,
// you may consider using a sync wrapper.
type Logger struct {
w LevelWriter
level Level
sample uint32
counter *uint32
context []byte
}
// New creates a root logger with the given output writer. If the output writer implements
// the LevelWriter interface, the WriteLevel method will be called instead of the Write
// one.
//
// Each logging operation makes a single call to the Writer's Write method. There is no
// guarantee of access serialization to the Writer. If your Writer is not thread safe,
// you may consider using a sync wrapper.
func New(w io.Writer) Logger {
if w == nil {
w = ioutil.Discard
}
lw, ok := w.(LevelWriter)
if !ok {
lw = levelWriterAdapter{w}
}
return Logger{w: lw}
}
// Nop returns a disabled logger for which all operations are no-ops.
func Nop() Logger {
return New(nil).Level(Disabled)
}
// With creates a child logger with the field added to its context.
func (l Logger) With() Context {
context := l.context
l.context = make([]byte, 0, 500)
if context != nil {
l.context = append(l.context, context...)
} else {
// first byte of context is presence of timestamp or not
l.context = append(l.context, 0)
}
return Context{l}
}
// Level creates a child logger with the minimum accepted level set to level.
func (l Logger) Level(lvl Level) Logger {
return Logger{
w: l.w,
level: lvl,
sample: l.sample,
counter: l.counter,
context: l.context,
}
}
// Sample returns a logger that lets only one message out of every `every` messages pass through.
func (l Logger) Sample(every int) Logger {
if every == 0 {
// Create a child with no sampling.
return Logger{
w: l.w,
level: l.level,
context: l.context,
}
}
return Logger{
w: l.w,
level: l.level,
sample: uint32(every),
counter: new(uint32),
context: l.context,
}
}
// Debug starts a new message with debug level.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Debug() *Event {
return l.newEvent(DebugLevel, true, nil)
}
// Info starts a new message with info level.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Info() *Event {
return l.newEvent(InfoLevel, true, nil)
}
// Warn starts a new message with warn level.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Warn() *Event {
return l.newEvent(WarnLevel, true, nil)
}
// Error starts a new message with error level.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Error() *Event {
return l.newEvent(ErrorLevel, true, nil)
}
// Fatal starts a new message with fatal level. The os.Exit(1) function
// is called by the Msg method.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Fatal() *Event {
return l.newEvent(FatalLevel, true, func(msg string) { os.Exit(1) })
}
// Panic starts a new message with panic level. The message is also sent
// to the panic function.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Panic() *Event {
return l.newEvent(PanicLevel, true, func(msg string) { panic(msg) })
}
// WithLevel starts a new message with level.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) WithLevel(level Level) *Event {
switch level {
case DebugLevel:
return l.Debug()
case InfoLevel:
return l.Info()
case WarnLevel:
return l.Warn()
case ErrorLevel:
return l.Error()
case FatalLevel:
return l.Fatal()
case PanicLevel:
return l.Panic()
case Disabled:
return disabledEvent
default:
panic("zerolog: WithLevel(): invalid level: " + strconv.Itoa(int(level)))
}
}
// Log starts a new message with no level. Setting GlobalLevel to Disabled
// will still disable events produced by this method.
//
// You must call Msg on the returned event in order to send the event.
func (l Logger) Log() *Event {
// We use panic level with addLevelField=false to make Log passthrough all
// levels except Disabled.
return l.newEvent(PanicLevel, false, nil)
}
// Write implements the io.Writer interface. This is useful to set as a writer
// for the standard library log.
func (l Logger) Write(p []byte) (n int, err error) {
n = len(p)
if n > 0 && p[n-1] == '\n' {
// Trim the trailing newline added by stdlog.
p = p[0 : n-1]
}
l.Log().Msg(string(p))
return
}
func (l Logger) newEvent(level Level, addLevelField bool, done func(string)) *Event {
enabled := l.should(level)
if !enabled {
return disabledEvent
}
lvl := InfoLevel
if addLevelField {
lvl = level
}
e := newEvent(l.w, lvl, enabled)
e.done = done
if l.context != nil && len(l.context) > 0 && l.context[0] > 0 {
// first byte of context is ts flag
e.buf = json.AppendTime(json.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat)
}
if addLevelField {
e.Str(LevelFieldName, level.String())
}
if l.sample > 0 && SampleFieldName != "" {
e.Uint32(SampleFieldName, l.sample)
}
if l.context != nil && len(l.context) > 1 {
if len(e.buf) > 1 {
e.buf = append(e.buf, ',')
}
e.buf = append(e.buf, l.context[1:]...)
}
return e
}
// should returns true if the log event should be logged.
func (l Logger) should(lvl Level) bool {
if lvl < l.level || lvl < globalLevel() {
return false
}
if l.sample > 0 && l.counter != nil && !samplingDisabled() {
c := atomic.AddUint32(l.counter, 1)
return c%l.sample == 0
}
return true
}
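
Since Logger implements io.Writer (see Write above), output from the standard library logger can be funneled into zerolog as level-less JSON events. A minimal sketch, not part of this commit:

package main

import (
	stdlog "log"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Each stdlog line becomes one JSON event; Logger.Write trims the
	// trailing newline that stdlog appends.
	stdlog.SetFlags(0)
	stdlog.SetOutput(logger)
	stdlog.Print("hello from the standard library logger")
}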

85
vendor/github.com/rs/zerolog/log/log.go generated vendored Normal file
View file

@@ -0,0 +1,85 @@
// Package log provides a global logger for zerolog.
package log
import (
"context"
"os"
"github.com/rs/zerolog"
)
// Logger is the global logger.
var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger()
// With creates a child logger with the field added to its context.
func With() zerolog.Context {
return Logger.With()
}
// Level creates a child logger with the minimum accepted level set to level.
func Level(level zerolog.Level) zerolog.Logger {
return Logger.Level(level)
}
// Sample returns a logger that lets only one message out of every `every` messages pass through.
func Sample(every int) zerolog.Logger {
return Logger.Sample(every)
}
// Debug starts a new message with debug level.
//
// You must call Msg on the returned event in order to send the event.
func Debug() *zerolog.Event {
return Logger.Debug()
}
// Info starts a new message with info level.
//
// You must call Msg on the returned event in order to send the event.
func Info() *zerolog.Event {
return Logger.Info()
}
// Warn starts a new message with warn level.
//
// You must call Msg on the returned event in order to send the event.
func Warn() *zerolog.Event {
return Logger.Warn()
}
// Error starts a new message with error level.
//
// You must call Msg on the returned event in order to send the event.
func Error() *zerolog.Event {
return Logger.Error()
}
// Fatal starts a new message with fatal level. The os.Exit(1) function
// is called by the Msg method.
//
// You must call Msg on the returned event in order to send the event.
func Fatal() *zerolog.Event {
return Logger.Fatal()
}
// Panic starts a new message with panic level. The message is also sent
// to the panic function.
//
// You must call Msg on the returned event in order to send the event.
func Panic() *zerolog.Event {
return Logger.Panic()
}
// Log starts a new message with no level. Setting zerolog.GlobalLevel to
// zerolog.Disabled will still disable events produced by this method.
//
// You must call Msg on the returned event in order to send the event.
func Log() *zerolog.Event {
return Logger.Log()
}
// Ctx returns the Logger associated with the ctx. If no logger
// is associated, a disabled logger is returned.
func Ctx(ctx context.Context) zerolog.Logger {
return zerolog.Ctx(ctx)
}
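
A minimal sketch (illustrative, not part of this commit) of reconfiguring the global Logger through the helpers above:

package main

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// Replace the package-level Logger with a child whose minimum accepted
	// level is Warn; Debug and Info events from the helpers are then dropped.
	log.Logger = log.Level(zerolog.WarnLevel)

	log.Info().Msg("dropped")
	log.Warn().Msg("logged")
}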

52
vendor/github.com/rs/zerolog/syslog.go generated vendored Normal file
View file

@@ -0,0 +1,52 @@
// +build !windows
package zerolog
import "io"
// SyslogWriter is an interface matching a syslog.Writer struct.
type SyslogWriter interface {
io.Writer
Debug(m string) error
Info(m string) error
Warning(m string) error
Err(m string) error
Emerg(m string) error
Crit(m string) error
}
type syslogWriter struct {
w SyslogWriter
}
// SyslogLevelWriter wraps a SyslogWriter and calls the right syslog level
// method matching the zerolog level.
func SyslogLevelWriter(w SyslogWriter) LevelWriter {
return syslogWriter{w}
}
func (sw syslogWriter) Write(p []byte) (n int, err error) {
return sw.w.Write(p)
}
// WriteLevel implements LevelWriter interface.
func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
switch level {
case DebugLevel:
err = sw.w.Debug(string(p))
case InfoLevel:
err = sw.w.Info(string(p))
case WarnLevel:
err = sw.w.Warning(string(p))
case ErrorLevel:
err = sw.w.Err(string(p))
case FatalLevel:
err = sw.w.Emerg(string(p))
case PanicLevel:
err = sw.w.Crit(string(p))
default:
panic("invalid level")
}
n = len(p)
return
}
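
The standard library's *syslog.Writer satisfies the SyslogWriter interface above, so it can be wired in directly (POSIX only, matching the !windows build tag). A sketch with a hypothetical facility and tag:

package main

import (
	"log/syslog"

	"github.com/rs/zerolog"
)

func main() {
	// LOG_DAEMON and the "drone-agent" tag are illustrative choices.
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "drone-agent")
	if err != nil {
		panic(err)
	}
	// WriteLevel maps each zerolog level onto the matching syslog severity.
	logger := zerolog.New(zerolog.SyslogLevelWriter(w))
	logger.Warn().Msg("delivered to syslog at warning severity")
}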

100
vendor/github.com/rs/zerolog/writer.go generated vendored Normal file
View file

@@ -0,0 +1,100 @@
package zerolog
import (
"io"
"sync"
)
// LevelWriter defines an interface that a writer may implement in order
// to receive level information along with the payload.
type LevelWriter interface {
io.Writer
WriteLevel(level Level, p []byte) (n int, err error)
}
type levelWriterAdapter struct {
io.Writer
}
func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
return lw.Write(p)
}
type syncWriter struct {
mu sync.Mutex
lw LevelWriter
}
// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
// This syncer can be used when the writer's Write method is not thread safe.
// Note that the os.File Write operation uses the write() syscall, which is
// supposed to be thread-safe on POSIX systems, so there is no need to use
// this with os.File on such systems, as zerolog guarantees to issue a single
// Write call per log event.
func SyncWriter(w io.Writer) io.Writer {
if lw, ok := w.(LevelWriter); ok {
return &syncWriter{lw: lw}
}
return &syncWriter{lw: levelWriterAdapter{w}}
}
// Write implements the io.Writer interface.
func (s *syncWriter) Write(p []byte) (n int, err error) {
s.mu.Lock()
defer s.mu.Unlock()
return s.lw.Write(p)
}
// WriteLevel implements the LevelWriter interface.
func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
s.mu.Lock()
defer s.mu.Unlock()
return s.lw.WriteLevel(l, p)
}
type multiLevelWriter struct {
writers []LevelWriter
}
func (t multiLevelWriter) Write(p []byte) (n int, err error) {
for _, w := range t.writers {
n, err = w.Write(p)
if err != nil {
return
}
if n != len(p) {
err = io.ErrShortWrite
return
}
}
return len(p), nil
}
func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
for _, w := range t.writers {
n, err = w.WriteLevel(l, p)
if err != nil {
return
}
if n != len(p) {
err = io.ErrShortWrite
return
}
}
return len(p), nil
}
// MultiLevelWriter creates a writer that duplicates its writes to all the
// provided writers, similar to the Unix tee(1) command. If some writers
// implement LevelWriter, their WriteLevel method will be used instead of Write.
func MultiLevelWriter(writers ...io.Writer) LevelWriter {
lwriters := make([]LevelWriter, 0, len(writers))
for _, w := range writers {
if lw, ok := w.(LevelWriter); ok {
lwriters = append(lwriters, lw)
} else {
lwriters = append(lwriters, levelWriterAdapter{w})
}
}
return multiLevelWriter{lwriters}
}
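
A minimal sketch (not part of this commit) of the tee-style writer above; the file name is illustrative:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	f, err := os.Create("agent.log") // hypothetical destination
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Every event is duplicated to stderr and to the file; a short write to
	// either destination aborts with io.ErrShortWrite, as implemented above.
	w := zerolog.MultiLevelWriter(os.Stderr, f)
	logger := zerolog.New(w).With().Timestamp().Logger()
	logger.Info().Msg("written to both destinations")
}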

18
vendor/vendor.json vendored
View file

@@ -748,6 +748,24 @@
"revision": "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad", "revision": "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad",
"revisionTime": "2017-04-24T20:45:52Z" "revisionTime": "2017-04-24T20:45:52Z"
}, },
{
"checksumSHA1": "fGBeb3o1grSXGUNAjwptkBWfch0=",
"path": "github.com/rs/zerolog",
"revision": "89ff8dbc5f047ae9957523b07e627891079f7967",
"revisionTime": "2017-07-27T06:42:12Z"
},
{
"checksumSHA1": "AREhk6LKIp2I/4Njd756bqU6JSQ=",
"path": "github.com/rs/zerolog/internal/json",
"revision": "89ff8dbc5f047ae9957523b07e627891079f7967",
"revisionTime": "2017-07-27T06:42:12Z"
},
{
"checksumSHA1": "kolarHDX6fkauW+1KWx1SFqSF2o=",
"path": "github.com/rs/zerolog/log",
"revision": "89ff8dbc5f047ae9957523b07e627891079f7967",
"revisionTime": "2017-07-27T06:42:12Z"
},
{ {
"path": "github.com/russross/meddler", "path": "github.com/russross/meddler",
"revision": "308c3d0e5e45f543a2eb6c787cbfe0db3880e220", "revision": "308c3d0e5e45f543a2eb6c787cbfe0db3880e220",

View file

@@ -10,7 +10,7 @@ var (
// VersionPatch is for backwards-compatible bug fixes // VersionPatch is for backwards-compatible bug fixes
VersionPatch int64 = 0 VersionPatch int64 = 0
// VersionPre indicates prerelease // VersionPre indicates prerelease
VersionPre string = "rc.3" VersionPre string
// VersionDev indicates development branch. Releases will be empty string. // VersionDev indicates development branch. Releases will be empty string.
VersionDev string VersionDev string
) )