[chore]: Bump github.com/minio/minio-go/v7 from 7.0.53 to 7.0.55 (#1844)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] authored on 2023-05-29 13:47:11 +01:00; committed by GitHub
parent 46d4ec0f05
commit 9ed96bc570
31 changed files with 4137 additions and 3218 deletions

go.mod (12 lines changed)

@@ -9,6 +9,7 @@ require (
 	codeberg.org/gruf/go-debug v1.3.0
 	codeberg.org/gruf/go-errors/v2 v2.2.0
 	codeberg.org/gruf/go-fastcopy v1.1.2
+	codeberg.org/gruf/go-iotools v0.0.0-20221224124424-3386841cb225
 	codeberg.org/gruf/go-kv v1.6.1
 	codeberg.org/gruf/go-logger/v2 v2.2.1
 	codeberg.org/gruf/go-mutexes v1.1.5
@@ -35,7 +36,7 @@ require (
 	github.com/jackc/pgx/v5 v5.3.1
 	github.com/microcosm-cc/bluemonday v1.0.23
 	github.com/miekg/dns v1.1.54
-	github.com/minio/minio-go/v7 v7.0.53
+	github.com/minio/minio-go/v7 v7.0.55
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
 	github.com/spf13/cobra v1.7.0
@@ -77,7 +78,6 @@ require (
 	codeberg.org/gruf/go-fastpath v1.0.3 // indirect
 	codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
 	codeberg.org/gruf/go-hashenc v1.0.2 // indirect
-	codeberg.org/gruf/go-iotools v0.0.0-20221224124424-3386841cb225 // indirect
 	codeberg.org/gruf/go-mangler v1.2.3 // indirect
 	codeberg.org/gruf/go-maps v1.0.3 // indirect
 	codeberg.org/gruf/go-pools v1.1.0 // indirect
@@ -128,13 +128,13 @@ require (
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
-	github.com/klauspost/compress v1.16.3 // indirect
+	github.com/klauspost/compress v1.16.5 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mattn/go-isatty v0.0.18 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/opencontainers/runtime-spec v1.0.2 // indirect
@@ -143,8 +143,8 @@ require (
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
-	github.com/rs/xid v1.4.0 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
+	github.com/rs/xid v1.5.0 // indirect
+	github.com/sirupsen/logrus v1.9.2 // indirect
 	github.com/spf13/afero v1.9.3 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect

go.sum (21 lines changed)

@@ -409,10 +409,9 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
-github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
 github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
@@ -452,10 +451,10 @@ github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
 github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.53 h1:qtPyQ+b0Cc1ums3LsnVMAYULPNdAGz8qdX8R2zl9XMU=
-github.com/minio/minio-go/v7 v7.0.53/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU=
+github.com/minio/minio-go/v7 v7.0.55 h1:ZXqUO/8cgfHzI+08h/zGuTTFpISSA32BZmBE3FCLJas=
+github.com/minio/minio-go/v7 v7.0.55/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -501,8 +500,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
-github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
 github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -513,8 +512,8 @@ github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNX
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
+github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=

[file path not shown]

@@ -288,10 +288,35 @@ func (z *Reader) Read(p []byte) (n int, err error) {
 	return n, nil
 }
 
-// Support the io.WriteTo interface for io.Copy and friends.
+type crcer interface {
+	io.Writer
+	Sum32() uint32
+	Reset()
+}
+
+type crcUpdater struct {
+	z *Reader
+}
+
+func (c *crcUpdater) Write(p []byte) (int, error) {
+	c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p)
+	return len(p), nil
+}
+
+func (c *crcUpdater) Sum32() uint32 {
+	return c.z.digest
+}
+
+func (c *crcUpdater) Reset() {
+	c.z.digest = 0
+}
+
+// WriteTo support the io.WriteTo interface for io.Copy and friends.
 func (z *Reader) WriteTo(w io.Writer) (int64, error) {
 	total := int64(0)
-	crcWriter := crc32.NewIEEE()
+	crcWriter := crcer(crc32.NewIEEE())
+	if z.digest != 0 {
+		crcWriter = &crcUpdater{z: z}
+	}
 	for {
 		if z.err != nil {
 			if z.err == io.EOF {
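The hunk above changes how the vendored gzip reader tracks its CRC when a caller mixes Read with io.Copy (which prefers WriteTo when the reader provides one): if some data was already consumed via Read (z.digest != 0), the remaining bytes are folded into the existing digest through crcUpdater instead of starting a fresh CRC. A minimal sketch of the calling pattern this protects; it uses the stdlib-compatible gzip API, and all names and data are illustrative:

package main

import (
	"bytes"
	"compress/gzip" // github.com/klauspost/compress/gzip is a drop-in replacement with the WriteTo fast path
	"fmt"
	"io"
)

func main() {
	// Build a small gzip stream in memory.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello, checksummed world"))
	zw.Close()

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}

	// Consume a few bytes with Read first, which starts updating the running CRC...
	head := make([]byte, 5)
	io.ReadFull(zr, head)

	// ...then let io.Copy drain the rest; it uses WriteTo when the reader provides one.
	var rest bytes.Buffer
	if _, err := io.Copy(&rest, zr); err != nil {
		panic(err) // a CRC restarted from zero here would surface as a checksum error
	}
	fmt.Println(string(head) + rest.String())
}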

[file path not shown] (diff suppressed because it is too large)

[file path not shown] (diff suppressed because it is too large)

[file path not shown] (diff suppressed because it is too large)

vendor/github.com/klauspost/compress/s2/reader.go (1055 lines, generated, vendored, new file; diff suppressed because it is too large)

vendor/github.com/klauspost/compress/s2/writer.go (1020 lines, generated, vendored, new file; diff suppressed because it is too large)

[file path not shown]

@@ -24,6 +24,7 @@ import (
 	"io"
 	"net/http"
 	"net/url"
+	"time"
 
 	"github.com/minio/minio-go/v7/pkg/lifecycle"
 	"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -102,29 +103,36 @@ func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) e
 // GetBucketLifecycle fetch bucket lifecycle configuration
 func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
+	lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName)
+	return lc, err
+}
+
+// GetBucketLifecycleWithInfo fetch bucket lifecycle configuration along with when it was last updated
+func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, err
+		return nil, time.Time{}, err
 	}
 
-	bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName)
+	bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName)
 	if err != nil {
-		return nil, err
+		return nil, time.Time{}, err
 	}
 
 	config := lifecycle.NewConfiguration()
 	if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
-		return nil, err
+		return nil, time.Time{}, err
 	}
-	return config, nil
+	return config, updatedAt, nil
 }
 
 // Request server for current bucket lifecycle.
-func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) {
+func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) {
 	// Get resources properly escaped and lined up before
 	// using them in http request.
 	urlValues := make(url.Values)
 	urlValues.Set("lifecycle", "")
+	urlValues.Set("withUpdatedAt", "true")
 
 	// Execute GET on bucket to get lifecycle.
 	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
@@ -134,14 +142,28 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b
 	defer closeResponse(resp)
 	if err != nil {
-		return nil, err
+		return nil, time.Time{}, err
 	}
 
 	if resp != nil {
 		if resp.StatusCode != http.StatusOK {
-			return nil, httpRespToErrorResponse(resp, bucketName, "")
+			return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "")
 		}
 	}
 
-	return io.ReadAll(resp.Body)
+	lcBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, time.Time{}, err
+	}
+
+	const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt"
+
+	var updatedAt time.Time
+	if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" {
+		updatedAt, err = time.Parse(iso8601DateFormat, timeStr)
+		if err != nil {
+			return nil, time.Time{}, err
+		}
+	}
+
+	return lcBytes, updatedAt, nil
 }
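v7.0.55 adds GetBucketLifecycleWithInfo, which also reports when the lifecycle configuration was last updated; GetBucketLifecycle is now a thin wrapper around it. A minimal usage sketch, with placeholder endpoint, credentials, and bucket name:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials; substitute real values.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	cfg, updatedAt, err := client.GetBucketLifecycleWithInfo(context.Background(), "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lifecycle has %d rules, last updated %s\n", len(cfg.Rules), updatedAt)
}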

[file path not shown]

@@ -27,11 +27,12 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
 )
 
-// PutObjectFanOutRequest this is the request structure sent
-// to the server to fan-out the stream to multiple objects.
-type PutObjectFanOutRequest struct {
+// PutObjectFanOutEntry is per object entry fan-out metadata
+type PutObjectFanOutEntry struct {
 	Key          string            `json:"key"`
 	UserMetadata map[string]string `json:"metadata,omitempty"`
 	UserTags     map[string]string `json:"tags,omitempty"`
@@ -44,9 +45,17 @@ type PutObjectFanOutRequest struct {
 	RetainUntilDate *time.Time `json:"retainUntil,omitempty"`
 }
 
+// PutObjectFanOutRequest this is the request structure sent
+// to the server to fan-out the stream to multiple objects.
+type PutObjectFanOutRequest struct {
+	Entries  []PutObjectFanOutEntry
+	Checksum Checksum
+	SSE      encrypt.ServerSide
+}
+
 // PutObjectFanOutResponse this is the response structure sent
 // by the server upon success or failure for each object
-// fan-out keys. Additionally this response carries ETag,
+// fan-out keys. Additionally, this response carries ETag,
 // VersionID and LastModified for each object fan-out.
 type PutObjectFanOutResponse struct {
 	Key string `json:"key"`
@@ -60,8 +69,8 @@ type PutObjectFanOutResponse struct {
 // stream multiple objects are written, defined via a list of PutObjectFanOutRequests. Each entry
 // in PutObjectFanOutRequest carries an object keyname and its relevant metadata if any. `Key` is
 // mandatory, rest of the other options in PutObjectFanOutRequest are optional.
-func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, body io.Reader, fanOutReq ...PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
-	if len(fanOutReq) == 0 {
+func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
+	if len(fanOutReq.Entries) == 0 {
 		return nil, errInvalidArgument("fan out requests cannot be empty")
 	}
@@ -72,6 +81,12 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, body io.Rea
 	// Expires in 15 minutes.
 	policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
 
+	// Set encryption headers if any.
+	policy.SetEncryption(fanOutReq.SSE)
+
+	// Set checksum headers if any.
+	policy.SetChecksum(fanOutReq.Checksum)
+
 	url, formData, err := c.PresignedPostPolicy(ctx, policy)
 	if err != nil {
 		return nil, err
@@ -87,7 +102,7 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, body io.Rea
 	var b strings.Builder
 	enc := json.NewEncoder(&b)
-	for _, req := range fanOutReq {
+	for _, req := range fanOutReq.Entries {
 		if req.Key == "" {
 			w.Close()
 			return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty")
@@ -120,7 +135,7 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, body io.Rea
 			return
 		}
 
-		if _, err = io.Copy(mw, body); err != nil {
+		if _, err = io.Copy(mw, fanOutData); err != nil {
 			return
 		}
 	}()
@@ -136,7 +151,7 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, body io.Rea
 	}
 
 	dec := json.NewDecoder(resp.Body)
-	fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq))
+	fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries))
 	for dec.More() {
 		var m PutObjectFanOutResponse
 		if err = dec.Decode(&m); err != nil {
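PutObjectFanOut's signature changed in this release: the variadic list of requests is replaced by a single PutObjectFanOutRequest whose Entries carry the per-object keys and metadata, with optional Checksum and SSE settings applied to the presigned POST policy. A minimal sketch of the new shape, reusing a *minio.Client named client like the one constructed in the lifecycle example above; bucket and keys are placeholders:

entries := []minio.PutObjectFanOutEntry{
	{Key: "copies/a.txt"},
	{Key: "copies/b.txt", UserTags: map[string]string{"origin": "fanout"}},
}

fanOutResp, err := client.PutObjectFanOut(
	context.Background(),
	"my-bucket",
	strings.NewReader("the same payload is written to every key"),
	minio.PutObjectFanOutRequest{Entries: entries},
)
if err != nil {
	log.Fatal(err)
}
for _, r := range fanOutResp {
	fmt.Println("fanned out to", r.Key)
}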

[file path not shown]

@@ -193,7 +193,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 			}
 
 			sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
-			var trailer = make(http.Header, 1)
+			trailer := make(http.Header, 1)
 			if withChecksum {
 				crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
 				trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
@@ -203,7 +203,8 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 			}
 
 			// Proceed to upload the part.
-			p := uploadPartParams{bucketName: bucketName,
+			p := uploadPartParams{
+				bucketName: bucketName,
 				objectName: objectName,
 				uploadID:   uploadID,
 				reader:     sectionReader,
@@ -244,7 +245,6 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 			return UploadInfo{}, ctx.Err()
 		case uploadRes := <-uploadedPartsCh:
 			if uploadRes.Error != nil {
-
 				return UploadInfo{}, uploadRes.Error
 			}
@@ -452,7 +452,8 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 // putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
 // This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
 func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+	reader io.Reader, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	// Input validation.
 	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
@@ -741,6 +742,17 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 	// Set headers.
 	customHeader := opts.Header()
 
+	// Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
+	addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
+	if addCrc {
+		// If user has added checksums, don't add them ourselves.
+		for k := range opts.UserMetadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
+				addCrc = false
+			}
+		}
+	}
+
 	// Populate request metadata.
 	reqMetadata := requestMetadata{
 		bucketName:       bucketName,
@@ -751,6 +763,7 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 		contentMD5Base64: md5Base64,
 		contentSHA256Hex: sha256Hex,
 		streamSha256:     !opts.DisableContentSha256,
+		addCrc:           addCrc,
 	}
 	if opts.Internal.SourceVersionID != "" {
 		if opts.Internal.SourceVersionID != nullVersionID {
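The addCrc logic above means the client now appends an x-amz-checksum-crc32c trailer automatically when trailing headers are enabled, no MD5 was requested, the endpoint is not Google, and SHA-256 payload hashing is disabled or the connection is TLS; user-supplied x-amz-checksum-* metadata suppresses it. A brief caller-side sketch (a fragment, not a full program; client construction and names are placeholder assumptions):

// Assumes a client created with trailing headers enabled, for example:
//   client, _ := minio.New("play.min.io", &minio.Options{
//       Creds:           credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
//       Secure:          true,
//       TrailingHeaders: true, // needed for the automatic CRC32C trailer
//   })
payload := []byte("object body")

info, err := client.PutObject(context.Background(), "my-bucket", "my-object",
	bytes.NewReader(payload), int64(len(payload)),
	minio.PutObjectOptions{DisableContentSha256: true})
if err != nil {
	log.Fatal(err)
}
fmt.Println("server-recorded CRC32C:", info.ChecksumCRC32C)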

[file path not shown]

@@ -124,7 +124,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.53"
+	libraryVersion = "v7.0.55"
 )
 
 // User Agent should always following the below style.

vendor/github.com/minio/minio-go/v7/checksum.go (210 lines, generated, vendored, new file)

@@ -0,0 +1,210 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2023 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"hash"
"hash/crc32"
"io"
"math/bits"
)
// ChecksumType contains information about the checksum type.
type ChecksumType uint32
const (
// ChecksumSHA256 indicates a SHA256 checksum.
ChecksumSHA256 ChecksumType = 1 << iota
// ChecksumSHA1 indicates a SHA-1 checksum.
ChecksumSHA1
// ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
ChecksumCRC32
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
ChecksumCRC32C
// Keep after all valid checksums
checksumLast
// checksumMask is a mask for valid checksum types.
checksumMask = checksumLast - 1
// ChecksumNone indicates no checksum.
ChecksumNone ChecksumType = 0
amzChecksumAlgo = "x-amz-checksum-algorithm"
amzChecksumCRC32 = "x-amz-checksum-crc32"
amzChecksumCRC32C = "x-amz-checksum-crc32c"
amzChecksumSHA1 = "x-amz-checksum-sha1"
amzChecksumSHA256 = "x-amz-checksum-sha256"
)
// Is returns if c is all of t.
func (c ChecksumType) Is(t ChecksumType) bool {
return c&t == t
}
// Key returns the header key.
// returns empty string if invalid or none.
func (c ChecksumType) Key() string {
switch c & checksumMask {
case ChecksumCRC32:
return amzChecksumCRC32
case ChecksumCRC32C:
return amzChecksumCRC32C
case ChecksumSHA1:
return amzChecksumSHA1
case ChecksumSHA256:
return amzChecksumSHA256
}
return ""
}
// RawByteLen returns the size of the un-encoded checksum.
func (c ChecksumType) RawByteLen() int {
switch c & checksumMask {
case ChecksumCRC32, ChecksumCRC32C:
return 4
case ChecksumSHA1:
return sha1.Size
case ChecksumSHA256:
return sha256.Size
}
return 0
}
// Hasher returns a hasher corresponding to the checksum type.
// Returns nil if no checksum.
func (c ChecksumType) Hasher() hash.Hash {
switch c & checksumMask {
case ChecksumCRC32:
return crc32.NewIEEE()
case ChecksumCRC32C:
return crc32.New(crc32.MakeTable(crc32.Castagnoli))
case ChecksumSHA1:
return sha1.New()
case ChecksumSHA256:
return sha256.New()
}
return nil
}
// IsSet returns whether the type is valid and known.
func (c ChecksumType) IsSet() bool {
return bits.OnesCount32(uint32(c)) == 1
}
// String returns the type as a string.
// CRC32, CRC32C, SHA1, and SHA256 for valid values.
// Empty string for unset and "<invalid>" if not valid.
func (c ChecksumType) String() string {
switch c & checksumMask {
case ChecksumCRC32:
return "CRC32"
case ChecksumCRC32C:
return "CRC32C"
case ChecksumSHA1:
return "SHA1"
case ChecksumSHA256:
return "SHA256"
case ChecksumNone:
return ""
}
return "<invalid>"
}
// ChecksumReader reads all of r and returns a checksum of type c.
// Returns any error that may have occurred while reading.
func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) {
h := c.Hasher()
if h == nil {
return Checksum{}, nil
}
_, err := io.Copy(h, r)
if err != nil {
return Checksum{}, err
}
return NewChecksum(c, h.Sum(nil)), nil
}
// ChecksumBytes returns a checksum of the content b with type c.
func (c ChecksumType) ChecksumBytes(b []byte) Checksum {
h := c.Hasher()
if h == nil {
return Checksum{}
}
n, err := h.Write(b)
if err != nil || n != len(b) {
// Shouldn't happen with these checksummers.
return Checksum{}
}
return NewChecksum(c, h.Sum(nil))
}
// Checksum is a type and encoded value.
type Checksum struct {
Type ChecksumType
r []byte
}
// NewChecksum sets the checksum to the value of b,
// which is the raw hash output.
// If the length of c does not match t.RawByteLen,
// a checksum with ChecksumNone is returned.
func NewChecksum(t ChecksumType, b []byte) Checksum {
if t.IsSet() && len(b) == t.RawByteLen() {
return Checksum{Type: t, r: b}
}
return Checksum{}
}
// NewChecksumString sets the checksum to the value of s,
// which is the base 64 encoded raw hash output.
// If the length of c does not match t.RawByteLen, it is not added.
func NewChecksumString(t ChecksumType, s string) Checksum {
b, _ := base64.StdEncoding.DecodeString(s)
if t.IsSet() && len(b) == t.RawByteLen() {
return Checksum{Type: t, r: b}
}
return Checksum{}
}
// IsSet returns whether the checksum is valid and known.
func (c Checksum) IsSet() bool {
return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen()
}
// Encoded returns the encoded value.
// Returns the empty string if not set or valid.
func (c Checksum) Encoded() string {
if !c.IsSet() {
return ""
}
return base64.StdEncoding.EncodeToString(c.r)
}
// Raw returns the raw checksum value if set.
func (c Checksum) Raw() []byte {
if !c.IsSet() {
return nil
}
return c.r
}
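checksum.go is new in this release and backs the trailing-checksum support shown above: ChecksumType selects the algorithm and knows its header key, raw length, and hasher, while Checksum pairs a type with a raw digest and can render the base64 form used in x-amz-checksum-* headers. A small self-contained sketch of the exported API, with placeholder data:

package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7"
)

func main() {
	payload := []byte("hello, checksums")

	// Hash a byte slice directly.
	cs := minio.ChecksumCRC32C.ChecksumBytes(payload)
	fmt.Println(cs.Type.Key(), "=", cs.Encoded()) // x-amz-checksum-crc32c = <base64 digest>

	// Or hash from any io.Reader.
	cs2, err := minio.ChecksumSHA256.ChecksumReader(strings.NewReader("hello, checksums"))
	if err != nil {
		panic(err)
	}
	fmt.Println(cs2.Type.String(), "raw length:", len(cs2.Raw())) // SHA256 raw length: 32
}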

[file path not shown]

@@ -2312,7 +2312,7 @@ func testPutMultipartObjectWithChecksums() {
 	cmpChecksum := func(got, want string) {
 		if want != got {
-			//logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+			// logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
 			fmt.Printf("want %s, got %s\n", want, got)
 			return
 		}
@@ -2387,6 +2387,369 @@
 	successLogger(testName, function, args, startTime).Info()
 }
// Test PutObject with trailing checksums.
func testTrailingChecksums() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
function := "PutObject(bucketName, objectName, reader,size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
"opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
}
if !isFullMode() {
ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
return
}
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
Secure: mustParseBool(os.Getenv(enableHTTPS)),
TrailingHeaders: true,
})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
if err != nil {
logError(testName, function, args, startTime, "", "Make bucket failed", err)
return
}
hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
r := bytes.NewReader(b)
tmp := make([]byte, partSize)
parts := 0
var all []byte
for {
n, err := io.ReadFull(r, tmp)
if err != nil && err != io.ErrUnexpectedEOF {
logError(testName, function, args, startTime, "", "Calc crc failed", err)
}
if n == 0 {
break
}
parts++
hasher.Reset()
hasher.Write(tmp[:n])
all = append(all, hasher.Sum(nil)...)
if err != nil {
break
}
}
hasher.Reset()
hasher.Write(all)
return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
}
defer cleanupBucket(bucketName, c)
tests := []struct {
header string
hasher hash.Hash
// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
PO minio.PutObjectOptions
}{
// Currently there is no way to override the checksum type.
{header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
DisableContentSha256: true,
DisableMultipart: false,
UserMetadata: nil,
PartSize: 5 << 20,
},
},
{header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
DisableContentSha256: true,
DisableMultipart: false,
UserMetadata: nil,
PartSize: 6_645_654, // Rather arbitrary size
},
},
{header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
DisableContentSha256: false,
DisableMultipart: false,
UserMetadata: nil,
PartSize: 5 << 20,
},
},
{header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
DisableContentSha256: false,
DisableMultipart: false,
UserMetadata: nil,
PartSize: 6_645_654, // Rather arbitrary size
},
},
}
for _, test := range tests {
bufSize := dataFileMap["datafile-11-MB"]
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
cmpChecksum := func(got, want string) {
if want != got {
logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got))
return
}
}
reader := getDataReader("datafile-11-MB")
b, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "Read failed", err)
return
}
reader.Close()
h := test.hasher
h.Reset()
test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
// Set correct CRC.
c.TraceOn(os.Stdout)
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
c.TraceOff()
cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
// Read the data back
gopts := minio.GetObjectOptions{Checksum: true}
gopts.PartNumber = 2
// We cannot use StatObject, since it ignores partnumber.
r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
if err != nil {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
io.Copy(io.Discard, r)
st, err := r.Stat()
if err != nil {
logError(testName, function, args, startTime, "", "Stat failed", err)
return
}
// Test part 2 checksum...
h.Reset()
p2 := b[test.PO.PartSize:]
if len(p2) > int(test.PO.PartSize) {
p2 = p2[:test.PO.PartSize]
}
h.Write(p2)
got := base64.StdEncoding.EncodeToString(h.Sum(nil))
if test.ChecksumSHA256 != "" {
cmpChecksum(st.ChecksumSHA256, got)
}
if test.ChecksumSHA1 != "" {
cmpChecksum(st.ChecksumSHA1, got)
}
if test.ChecksumCRC32 != "" {
cmpChecksum(st.ChecksumCRC32, got)
}
if test.ChecksumCRC32C != "" {
cmpChecksum(st.ChecksumCRC32C, got)
}
delete(args, "metadata")
}
}
// Test PutObject with custom checksums.
func testPutObjectWithAutomaticChecksums() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
function := "PutObject(bucketName, objectName, reader,size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
"opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
}
if !isFullMode() {
ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
return
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
Secure: mustParseBool(os.Getenv(enableHTTPS)),
TrailingHeaders: true,
})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
// Set user agent.
c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
if err != nil {
logError(testName, function, args, startTime, "", "Make bucket failed", err)
return
}
defer cleanupBucket(bucketName, c)
tests := []struct {
header string
hasher hash.Hash
// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
}{
// Built-in will only add crc32c, when no MD5 nor SHA256.
{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
}
// Enable tracing, write to stderr.
c.TraceOn(os.Stderr)
defer c.TraceOff()
for i, test := range tests {
bufSize := dataFileMap["datafile-10-kB"]
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
cmpChecksum := func(got, want string) {
if want != got {
logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
return
}
}
meta := map[string]string{}
reader := getDataReader("datafile-10-kB")
b, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "Read failed", err)
return
}
h := test.hasher
h.Reset()
h.Write(b)
meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
args["metadata"] = meta
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
DisableMultipart: true,
UserMetadata: nil,
DisableContentSha256: true,
SendContentMd5: false,
})
if err == nil {
if i == 0 && resp.ChecksumCRC32C == "" {
ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info()
return
}
} else {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
// Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent.
// When/if we add a checksum control to PutObjectOptions this will make more sense.
resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
DisableMultipart: true,
UserMetadata: nil,
DisableContentSha256: false,
SendContentMd5: false,
})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
// The checksum will not be enabled on HTTP, since it uses SHA256 blocks.
if mustParseBool(os.Getenv(enableHTTPS)) {
cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
}
// Set SHA256 header manually
sh256 := sha256.Sum256(b)
meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])}
args["metadata"] = meta
resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
DisableMultipart: true,
UserMetadata: meta,
DisableContentSha256: true,
SendContentMd5: false,
})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
delete(args, "metadata")
}
successLogger(testName, function, args, startTime).Info()
}
 // Test PutObject using a large data to trigger multipart readat
 func testPutObjectWithMetadata() {
 	// initialize logging params
@@ -12576,6 +12939,8 @@ func main() {
 		testRemoveObjectWithVersioning()
 		testRemoveObjectsWithVersioning()
 		testObjectTaggingWithVersioning()
+		testTrailingChecksums()
+		testPutObjectWithAutomaticChecksums()
 
 		// SSE-C tests will only work over TLS connection.
 		if tls {

[file path not shown]

@@ -289,7 +289,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
 		req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
 	}
 
-	req.TransferEncoding = []string{"aws-chunked"}
+	req.Header.Set("Content-Encoding", "aws-chunked")
 	req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
 }

[file path not shown]

@@ -1,6 +1,6 @@
 /*
  * MinIO Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 MinIO, Inc.
+ * Copyright 2015-2023 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,8 +20,11 @@ package minio
 import (
 	"encoding/base64"
 	"fmt"
+	"net/http"
 	"strings"
 	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
 )
 
 // expirationDateFormat date format for expiration key in json policy.
@@ -258,6 +261,26 @@ func (p *PostPolicy) SetUserMetadata(key string, value string) error {
 	return nil
 }
 
+// SetChecksum sets the checksum of the request.
+func (p *PostPolicy) SetChecksum(c Checksum) {
+	if c.IsSet() {
+		p.formData[amzChecksumAlgo] = c.Type.String()
+		p.formData[c.Type.Key()] = c.Encoded()
+	}
+}
+
+// SetEncryption - sets encryption headers for POST API
+func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
+	if sse == nil {
+		return
+	}
+	h := http.Header{}
+	sse.Marshal(h)
+	for k, v := range h {
+		p.formData[k] = v[0]
+	}
+}
+
 // SetUserData - Set user data as a key/value couple.
 // Can be retrieved through a HEAD request or an event.
 func (p *PostPolicy) SetUserData(key string, value string) error {
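PostPolicy gains SetChecksum and SetEncryption, which is what lets PutObjectFanOut (and any presigned-POST caller) attach checksum and server-side-encryption form fields. A brief sketch, a fragment that assumes the client from the earlier examples and placeholder bucket, key, and payload:

payload := []byte("csv,data,here")

policy := minio.NewPostPolicy()
policy.SetBucket("my-bucket")
policy.SetKey("uploads/report.csv")
policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))

// New in this release: attach a checksum and SSE-S3 encryption to the form data.
policy.SetChecksum(minio.ChecksumSHA256.ChecksumBytes(payload))
policy.SetEncryption(encrypt.NewSSE())

url, formData, err := client.PresignedPostPolicy(context.Background(), policy)
if err != nil {
	log.Fatal(err)
}
fmt.Println("POST to", url, "with", len(formData), "form fields")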

[file path not shown]

@@ -23,6 +23,11 @@ import (
 	"github.com/klauspost/cpuid/v2"
 )
 
+var (
+	hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4)
+	hasAvx512   = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
+)
+
 func hasArmSha2() bool {
 	if cpuid.CPU.Has(cpuid.SHA2) {
 		return true
@@ -42,5 +47,4 @@ func hasArmSha2() bool {
 		return false
 	}
 	return bytes.Contains(cpuInfo, []byte(sha256Feature))
-
 }

[file path not shown]

@@ -19,10 +19,8 @@ package sha256
 import (
 	"crypto/sha256"
 	"encoding/binary"
+	"errors"
 	"hash"
-	"runtime"
-
-	"github.com/klauspost/cpuid/v2"
 )
 
 // Size - The size of a SHA256 checksum in bytes.
@@ -68,42 +66,34 @@ func (d *digest) Reset() {
 type blockfuncType int
 
 const (
-	blockfuncGeneric blockfuncType = iota
-	blockfuncSha     blockfuncType = iota
-	blockfuncArm     blockfuncType = iota
-	blockfuncForceGeneric = -1
+	blockfuncStdlib blockfuncType = iota
+	blockfuncIntelSha
+	blockfuncArmSha2
 )
 
 var blockfunc blockfuncType
 
 func init() {
-	blockfunc = blockfuncGeneric
 	switch {
-	case hasSHAExtensions():
-		blockfunc = blockfuncSha
+	case hasIntelSha:
+		blockfunc = blockfuncIntelSha
 	case hasArmSha2():
-		blockfunc = blockfuncArm
-	default:
-		blockfunc = blockfuncGeneric
+		blockfunc = blockfuncArmSha2
 	}
 }
 
-var avx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
-
-// hasSHAExtensions return whether the cpu supports SHA extensions.
-func hasSHAExtensions() bool {
-	return cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4) && runtime.GOARCH == "amd64"
-}
-
 // New returns a new hash.Hash computing the SHA256 checksum.
 func New() hash.Hash {
-	if blockfunc != blockfuncGeneric {
-		d := new(digest)
-		d.Reset()
-		return d
+	if blockfunc == blockfuncStdlib {
+		// Fallback to the standard golang implementation
+		// if no features were found.
+		return sha256.New()
 	}
-	// Fallback to the standard golang implementation
-	// if no features were found.
-	return sha256.New()
+
+	d := new(digest)
+	d.Reset()
+	return d
 }
 
 // Sum256 - single caller sha256 helper
@@ -272,11 +262,11 @@ func (d *digest) checkSum() (digest [Size]byte) {
 }
 
 func block(dig *digest, p []byte) {
-	if blockfunc == blockfuncSha {
-		blockShaGo(dig, p)
-	} else if blockfunc == blockfuncArm {
-		blockArmGo(dig, p)
-	} else if blockfunc == blockfuncGeneric {
+	if blockfunc == blockfuncIntelSha {
+		blockIntelShaGo(dig, p)
+	} else if blockfunc == blockfuncArmSha2 {
+		blockArmSha2Go(dig, p)
+	} else {
 		blockGeneric(dig, p)
 	}
 }
@@ -397,3 +387,82 @@ var _K = []uint32{
 	0xbef9a3f7,
 	0xc67178f2,
 }
const (
magic256 = "sha\x03"
marshaledSize = len(magic256) + 8*4 + chunk + 8
)
func (d *digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic256...)
b = appendUint32(b, d.h[0])
b = appendUint32(b, d.h[1])
b = appendUint32(b, d.h[2])
b = appendUint32(b, d.h[3])
b = appendUint32(b, d.h[4])
b = appendUint32(b, d.h[5])
b = appendUint32(b, d.h[6])
b = appendUint32(b, d.h[7])
b = append(b, d.x[:d.nx]...)
b = b[:len(b)+len(d.x)-d.nx] // already zero
b = appendUint64(b, d.len)
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
return errors.New("crypto/sha256: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/sha256: invalid hash state size")
}
b = b[len(magic256):]
b, d.h[0] = consumeUint32(b)
b, d.h[1] = consumeUint32(b)
b, d.h[2] = consumeUint32(b)
b, d.h[3] = consumeUint32(b)
b, d.h[4] = consumeUint32(b)
b, d.h[5] = consumeUint32(b)
b, d.h[6] = consumeUint32(b)
b, d.h[7] = consumeUint32(b)
b = b[copy(d.x[:], b):]
b, d.len = consumeUint64(b)
d.nx = int(d.len % chunk)
return nil
}
func appendUint32(b []byte, v uint32) []byte {
return append(b,
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func appendUint64(b []byte, v uint64) []byte {
return append(b,
byte(v>>56),
byte(v>>48),
byte(v>>40),
byte(v>>32),
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func consumeUint64(b []byte) ([]byte, uint64) {
_ = b[7]
x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
return b[8:], x
}
func consumeUint32(b []byte) ([]byte, uint32) {
_ = b[3]
x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
return b[4:], x
}
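The additions above give sha256-simd's digest MarshalBinary/UnmarshalBinary methods mirroring crypto/sha256, so a partially written hash can be checkpointed and resumed. A small sketch under the assumption that the real (non-vendored) import path is used; both the SIMD-backed and the stdlib-fallback hashers satisfy the encoding interfaces:

package main

import (
	"encoding"
	"fmt"

	sha256 "github.com/minio/sha256-simd"
)

func main() {
	h := sha256.New()
	h.Write([]byte("first half "))

	// Checkpoint the running hash state.
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Resume into a fresh hasher and finish the message.
	h2 := sha256.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("second half"))

	fmt.Printf("%x\n", h2.Sum(nil)) // same digest as hashing "first half second half" in one pass
}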

[file path not shown]

@@ -1,4 +1,5 @@
-//+build !noasm,!appengine,gc
+//go:build !noasm && !appengine && gc
+// +build !noasm,!appengine,gc
 
 /*
  * Minio Cloud Storage, (C) 2017 Minio, Inc.

[file path not shown]

@@ -1,4 +1,4 @@
-//+build !noasm,!appengine
+//+build !noasm,!appengine,gc
 
 TEXT ·sha256X16Avx512(SB), 7, $0
 	MOVQ digests+0(FP), DI

[file path not shown]

@@ -1,6 +0,0 @@
-//+build !noasm,!appengine,gc
-
-package sha256
-
-//go:noescape
-func blockSha(h *[8]uint32, message []uint8)

[file path not shown]

@@ -1,4 +1,5 @@
-//+build !noasm,!appengine,gc
+//go:build !noasm && !appengine && gc
+// +build !noasm,!appengine,gc
 
 /*
  * Minio Cloud Storage, (C) 2016 Minio, Inc.
@@ -18,10 +19,13 @@
 
 package sha256
 
-func blockArmGo(dig *digest, p []byte) {
-	panic("blockArmGo called unexpectedly")
+func blockArmSha2Go(dig *digest, p []byte) {
+	panic("blockArmSha2Go called unexpectedly")
 }
 
-func blockShaGo(dig *digest, p []byte) {
-	blockSha(&dig.h, p)
+//go:noescape
+func blockIntelSha(h *[8]uint32, message []uint8)
+
+func blockIntelShaGo(dig *digest, p []byte) {
+	blockIntelSha(&dig.h, p)
 }

[file path not shown]

@@ -1,4 +1,4 @@
-//+build !noasm,!appengine
+//+build !noasm,!appengine,gc
 
 // SHA intrinsic version of SHA256
@@ -106,7 +106,7 @@ GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16
 // X13 saved hash state // CDGH
 // X15 data shuffle mask (constant)
-TEXT ·blockSha(SB), NOSPLIT, $0-32
+TEXT ·blockIntelSha(SB), NOSPLIT, $0-32
 	MOVQ h+0(FP), DX
 	MOVQ message_base+8(FP), SI
 	MOVQ message_len+16(FP), DI

[file path not shown]

@@ -1,4 +1,5 @@
-//+build !noasm,!appengine,gc
+//go:build !noasm && !appengine && gc
+// +build !noasm,!appengine,gc
 
 /*
  * Minio Cloud Storage, (C) 2016 Minio, Inc.
@@ -18,18 +19,18 @@
 
 package sha256
 
-func blockShaGo(dig *digest, p []byte) {
-	panic("blockShaGoc called unexpectedly")
+func blockIntelShaGo(dig *digest, p []byte) {
+	panic("blockIntelShaGo called unexpectedly")
 }
 
 //go:noescape
-func blockArm(h []uint32, message []uint8)
+func blockArmSha2(h []uint32, message []uint8)
 
-func blockArmGo(dig *digest, p []byte) {
+func blockArmSha2Go(dig *digest, p []byte) {
 	h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
 
-	blockArm(h[:], p[:])
+	blockArmSha2(h[:], p[:])
 
 	dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4],
 		h[5], h[6], h[7]

[file path not shown]

@@ -1,4 +1,4 @@
-//+build !noasm,!appengine
+//+build !noasm,!appengine,gc
 
 // ARM64 version of SHA256
@@ -25,7 +25,7 @@
 // their Plan9 equivalents
 //
-TEXT ·blockArm(SB), 7, $0
+TEXT ·blockArmSha2(SB), 7, $0
 	MOVD h+0(FP), R0
 	MOVD message+24(FP), R1
 	MOVD message_len+32(FP), R2 // length of message

[file path not shown]

@@ -1,4 +1,5 @@
-//+build appengine noasm !amd64,!arm64 !gc
+//go:build appengine || noasm || (!amd64 && !arm64) || !gc
+// +build appengine noasm !amd64,!arm64 !gc
 
 /*
  * Minio Cloud Storage, (C) 2019 Minio, Inc.
@@ -18,11 +19,11 @@
 
 package sha256
 
-func blockShaGo(dig *digest, p []byte) {
-	panic("blockShaGo called unexpectedly")
+func blockIntelShaGo(dig *digest, p []byte) {
+	panic("blockIntelShaGo called unexpectedly")
 }
 
-func blockArmGo(dig *digest, p []byte) {
-	panic("blockArmGo called unexpectedly")
+func blockArmSha2Go(dig *digest, p []byte) {
+	panic("blockArmSha2Go called unexpectedly")
 }

vendor/github.com/rs/xid/.golangci.yml (5 lines, generated, vendored, new file)

@@ -0,0 +1,5 @@
run:
tests: false
output:
sort-results: true

vendor/github.com/rs/xid/README.md (3 lines changed, generated, vendored)

@@ -70,6 +70,9 @@ References:
 - Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid
 - Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid
 - Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid
+- PostgreSQL port by [Rasmus Holm](https://github.com/crholm): https://github.com/modfin/pg-xid
+- Swift port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/swift-xid
+- C++ port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/libxid
 
 ## Install

vendor/github.com/rs/xid/id.go (41 lines changed, generated, vendored)

@@ -43,7 +43,7 @@ package xid
 import (
 	"bytes"
-	"crypto/md5"
+	"crypto/sha256"
 	"crypto/rand"
 	"database/sql/driver"
 	"encoding/binary"
@@ -72,13 +72,11 @@
 )
 
 var (
-	// objectIDCounter is atomically incremented when generating a new ObjectId
-	// using NewObjectId() function. It's used as a counter part of an id.
-	// This id is initialized with a random value.
+	// objectIDCounter is atomically incremented when generating a new ObjectId. It's
+	// used as the counter part of an id. This id is initialized with a random value.
 	objectIDCounter = randInt()
 
-	// machineId stores machine id generated once and used in subsequent calls
-	// to NewObjectId function.
+	// machineID is generated once and used in subsequent calls to the New* functions.
 	machineID = readMachineID()
 
 	// pid stores the current process id
@@ -107,9 +105,9 @@ func init() {
 	}
 }
 
-// readMachineId generates machine id and puts it into the machineId global
-// variable. If this function fails to get the hostname, it will cause
-// a runtime error.
+// readMachineID generates a machine ID, derived from a platform-specific machine ID
+// value, or else the machine's hostname, or else a randomly-generated number.
+// It panics if all of these methods fail.
 func readMachineID() []byte {
 	id := make([]byte, 3)
 	hid, err := readPlatformMachineID()
@@ -117,7 +115,7 @@ func readMachineID() []byte {
 		hid, err = os.Hostname()
 	}
 	if err == nil && len(hid) != 0 {
-		hw := md5.New()
+		hw := sha256.New()
 		hw.Write([]byte(hid))
 		copy(id, hw.Sum(nil))
 	} else {
@@ -148,7 +146,7 @@ func NewWithTime(t time.Time) ID {
 	var id ID
 	// Timestamp, 4 bytes, big endian
 	binary.BigEndian.PutUint32(id[:], uint32(t.Unix()))
-	// Machine, first 3 bytes of md5(hostname)
+	// Machine ID, 3 bytes
 	id[4] = machineID[0]
 	id[5] = machineID[1]
 	id[6] = machineID[2]
@@ -239,6 +237,7 @@ func (id *ID) UnmarshalText(text []byte) error {
 		}
 	}
 	if !decode(id, text) {
+		*id = nilID
 		return ErrInvalidID
 	}
 	return nil
@@ -264,6 +263,10 @@ func decode(id *ID, src []byte) bool {
 	_ = id[11]
 
 	id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4
+	// check the last byte
+	if encoding[(id[11]<<4)&0x1F] != src[19] {
+		return false
+	}
 	id[10] = dec[src[16]]<<3 | dec[src[17]]>>2
 	id[9] = dec[src[14]]<<5 | dec[src[15]]
 	id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3
@@ -275,16 +278,7 @@ func decode(id *ID, src []byte) bool {
 	id[2] = dec[src[3]]<<4 | dec[src[4]]>>1
 	id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4
 	id[0] = dec[src[0]]<<3 | dec[src[1]]>>2
-
-	// Validate that there are no discarer bits (padding) in src that would
-	// cause the string-encoded id not to equal src.
-	var check [4]byte
-	check[3] = encoding[(id[11]<<4)&0x1F]
-	check[2] = encoding[(id[11]>>1)&0x1F]
-	check[1] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
-	check[0] = encoding[id[10]>>3]
-	return bytes.Equal([]byte(src[16:20]), check[:])
+	return true
 }
 
 // Time returns the timestamp part of the id.
@@ -344,6 +338,11 @@ func (id ID) IsNil() bool {
 	return id == nilID
 }
 
+// Alias of IsNil
+func (id ID) IsZero() bool {
+	return id.IsNil()
+}
+
 // NilID returns a zero value for `xid.ID`.
 func NilID() ID {
 	return nilID
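xid v1.5.0 hardens decoding (the trailing padding bits are now validated early, and a failed UnmarshalText resets the target to the nil ID) and adds IsZero as an alias for IsNil. A small self-contained sketch of the affected behaviour, with made-up invalid input:

package main

import (
	"fmt"

	"github.com/rs/xid"
)

func main() {
	id := xid.New()
	fmt.Println("new id:", id.String(), "zero?", id.IsZero()) // zero? false

	// Strings whose characters or padding bits don't round-trip are rejected.
	if _, err := xid.FromString("invalid-not-an-xid-!"); err != nil {
		fmt.Println("decode failed:", err)
	}

	var parsed xid.ID
	if err := parsed.UnmarshalText([]byte("also not valid!!!!!!")); err != nil {
		// On failure the target is now reset to the nil ID rather than left half-written.
		fmt.Println("parsed is zero:", parsed.IsZero()) // true
	}
}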

[file path not shown]

@@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...).
 This does not mean Logrus is dead. Logrus will continue to be maintained for
 security, (backwards compatible) bug fixes, and performance (where we are
 limited by the interface).
 
 I believe Logrus' biggest contribution is to have played a part in today's
 widespread use of structured logging in Golang. There doesn't seem to be a
@@ -43,7 +43,7 @@ plain text):
 With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
 or Splunk:
 
-```json
+```text
 {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
 ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
@@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr
 ```
 
 Note that this does add measurable overhead - the cost will depend on the version of Go, but is
 between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
 environment via benchmarks:
 ```
 go test -bench=.*CallerTracing
 ```
@@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel)
 It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
 environment if your application has that.
 
+Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
+
 #### Entries
 
 Besides the fields added with `WithField` or `WithFields` some fields are

vendor/modules.txt (12 lines changed, vendored)

@@ -355,7 +355,7 @@ github.com/json-iterator/go
 # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
 ## explicit
 github.com/kballard/go-shellquote
-# github.com/klauspost/compress v1.16.3
+# github.com/klauspost/compress v1.16.5
 ## explicit; go 1.18
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/gzip
@@ -384,7 +384,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.53
+# github.com/minio/minio-go/v7 v7.0.55
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
@@ -397,8 +397,8 @@ github.com/minio/minio-go/v7/pkg/set
 github.com/minio/minio-go/v7/pkg/signer
 github.com/minio/minio-go/v7/pkg/sse
 github.com/minio/minio-go/v7/pkg/tags
-# github.com/minio/sha256-simd v1.0.0
-## explicit; go 1.13
+# github.com/minio/sha256-simd v1.0.1
+## explicit; go 1.17
 github.com/minio/sha256-simd
 # github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
@@ -434,10 +434,10 @@ github.com/quasoft/memstore
 # github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec
 ## explicit; go 1.12
 github.com/remyoudompheng/bigfft
-# github.com/rs/xid v1.4.0
+# github.com/rs/xid v1.5.0
 ## explicit; go 1.12
 github.com/rs/xid
-# github.com/sirupsen/logrus v1.9.0
+# github.com/sirupsen/logrus v1.9.2
 ## explicit; go 1.13
 github.com/sirupsen/logrus
 # github.com/spf13/afero v1.9.3