From c0bddd272f4e667c4f473f038aa3770b8c43d1cf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Sep 2023 10:16:41 +0100
Subject: [PATCH] [chore]: Bump github.com/minio/minio-go/v7 from 7.0.62 to 7.0.63 (#2180)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 .../minio-go/v7/api-putobject-snowball.go     |  10 +
 vendor/github.com/minio/minio-go/v7/api.go    |   2 +-
 .../minio-go/v7/pkg/credentials/iam_aws.go    |   8 +-
 .../v7/pkg/replication/replication.go         | 194 ++++++++++++------
 .../minio/minio-go/v7/pkg/s3utils/utils.go    |  69 +++----
 vendor/modules.txt                            |   2 +-
 8 files changed, 183 insertions(+), 108 deletions(-)

diff --git a/go.mod b/go.mod
index 1d40ed668..d3aa0d1cd 100644
--- a/go.mod
+++ b/go.mod
@@ -36,7 +36,7 @@ require (
 	github.com/jackc/pgx/v5 v5.4.3
 	github.com/microcosm-cc/bluemonday v1.0.25
 	github.com/miekg/dns v1.1.55
-	github.com/minio/minio-go/v7 v7.0.62
+	github.com/minio/minio-go/v7 v7.0.63
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
 	github.com/spf13/cobra v1.7.0
diff --git a/go.sum b/go.sum
index ca238d3ac..22f3237b8 100644
--- a/go.sum
+++ b/go.sum
@@ -451,8 +451,8 @@ github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
 github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.62 h1:qNYsFZHEzl+NfH8UxW4jpmlKav1qUAgfY30YNRneVhc=
-github.com/minio/minio-go/v7 v7.0.62/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
+github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ=
+github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
index 849471e33..983ed6744 100644
--- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -48,6 +48,10 @@ type SnowballOptions struct {
 	// Compression will typically reduce memory and network usage,
 	// Compression can safely be enabled with MinIO hosts.
 	Compress bool
+
+	// SkipErrs if enabled will skip any errors while reading the
+	// object content while creating the snowball archive
+	SkipErrs bool
 }
 
 // SnowballObject contains information about a single object to be added to the snowball.
@@ -184,10 +188,16 @@ objectLoop:
 		n, err := io.Copy(t, obj.Content)
 		if err != nil {
 			closeObj()
+			if opts.SkipErrs {
+				continue
+			}
 			return err
 		}
 		if n != obj.Size {
 			closeObj()
+			if opts.SkipErrs {
+				continue
+			}
 			return io.ErrUnexpectedEOF
 		}
 		closeObj()
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 41cf9a695..e8e324a9c 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -127,7 +127,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.62"
+	libraryVersion = "v7.0.63"
 )
 
 // User Agent should always following the below style.
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
index 8dd621004..0c9536deb 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -291,7 +291,13 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
 	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
 	token, err := fetchIMDSToken(client, endpoint)
 	if err != nil {
-		return ec2RoleCredRespBody{}, err
+		// Return only errors for valid situations, if the IMDSv2 is not enabled
+		// we will not be able to get the token, in such a situation we have
+		// to rely on IMDSv1 behavior as a fallback, this check ensures that.
+		// Refer https://github.com/minio/minio-go/issues/1866
+		if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
+			return ec2RoleCredRespBody{}, err
+		}
 	}
 
 	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
index 6776a0ba8..0abbf6efc 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -689,10 +689,19 @@ func (e ExistingObjectReplication) Validate() error {
 // TargetMetrics represents inline replication metrics
 // such as pending, failed and completed bytes in total for a bucket remote target
 type TargetMetrics struct {
-	// Pending size in bytes
-	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
+	// Completed count
+	ReplicatedCount uint64 `json:"replicationCount,omitempty"`
 	// Completed size in bytes
 	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
+	// Bandwidth limit in bytes/sec for this target
+	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
+	// Current bandwidth used in bytes/sec for this target
+	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
+	// errors seen in replication in last minute, hour and total
+	Failed TimedErrStats `json:"failed,omitempty"`
+	// Deprecated fields
+	// Pending size in bytes
+	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
 	// Total Replica size in bytes
 	ReplicaSize uint64 `json:"replicaSize,omitempty"`
 	// Failed size in bytes
@@ -701,37 +710,62 @@ type TargetMetrics struct {
 	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
 	// Total number of failed operations including metadata updates
 	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
-	// Bandwidth limit in bytes/sec for this target
-	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
-	// Current bandwidth used in bytes/sec for this target
-	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
-	// Completed count
-	ReplicatedCount uint64 `json:"replicationCount,omitempty"`
-	// transfer rate for large uploads
-	XferRateLrg XferStats `json:"largeTransferRate"`
-	// transfer rate for small uploads
-	XferRateSml XferStats `json:"smallTransferRate"`
 }
 
 // Metrics represents inline replication metrics for a bucket.
 type Metrics struct {
 	Stats map[string]TargetMetrics
-	// Total Pending size in bytes across targets
-	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
 	// Completed size in bytes across targets
 	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
 	// Total Replica size in bytes across targets
 	ReplicaSize uint64 `json:"replicaSize,omitempty"`
+	// Total Replica counts
+	ReplicaCount int64 `json:"replicaCount,omitempty"`
+	// Total Replicated count
+	ReplicatedCount int64 `json:"replicationCount,omitempty"`
+	// errors seen in replication in last minute, hour and total
+	Errors TimedErrStats `json:"failed,omitempty"`
+	// Total number of entries that are queued for replication
+	QStats InQueueMetric `json:"queued"`
+	// Deprecated fields
+	// Total Pending size in bytes across targets
+	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
 	// Failed size in bytes across targets
 	FailedSize uint64 `json:"failedReplicationSize,omitempty"`
 	// Total number of pending operations including metadata updates across targets
 	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
 	// Total number of failed operations including metadata updates across targets
 	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
-	// Total Replica counts
-	ReplicaCount int64 `json:"replicaCount,omitempty"`
-	// Total Replicated count
-	ReplicatedCount int64 `json:"replicationCount,omitempty"`
+}
+
+// RStat - has count and bytes for replication metrics
+type RStat struct {
+	Count float64 `json:"count"`
+	Bytes int64   `json:"bytes"`
+}
+
+// Add two RStat
+func (r RStat) Add(r1 RStat) RStat {
+	return RStat{
+		Count: r.Count + r1.Count,
+		Bytes: r.Bytes + r1.Bytes,
+	}
+}
+
+// TimedErrStats holds error stats for a time period
+type TimedErrStats struct {
+	LastMinute RStat `json:"lastMinute"`
+	LastHour   RStat `json:"lastHour"`
+	Totals     RStat `json:"totals"`
+}
+
+// Add two TimedErrStats
+func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
+	return TimedErrStats{
+		LastMinute: te.LastMinute.Add(o.LastMinute),
+		LastHour:   te.LastHour.Add(o.LastHour),
+		Totals:     te.Totals.Add(o.Totals),
+	}
 }
 
 // ResyncTargetsInfo provides replication target information to resync replicated data.
@@ -767,10 +801,30 @@ type XferStats struct {
 	CurrRate float64 `json:"currRate"`
 }
 
-// InQueueStats holds stats for objects in replication queue
-type InQueueStats struct {
-	Count int32 `json:"count"`
-	Bytes int64 `json:"bytes"`
+// Merge two XferStats
+func (x *XferStats) Merge(x1 XferStats) {
+	x.AvgRate += x1.AvgRate
+	x.PeakRate += x1.PeakRate
+	x.CurrRate += x1.CurrRate
+}
+
+// QStat holds count and bytes for objects in replication queue
+type QStat struct {
+	Count float64 `json:"count"`
+	Bytes float64 `json:"bytes"`
+}
+
+// Add 2 QStat entries
+func (q *QStat) Add(q1 QStat) {
+	q.Count += q1.Count
+	q.Bytes += q1.Bytes
+}
+
+// InQueueMetric holds stats for objects in replication queue
+type InQueueMetric struct {
+	Curr QStat `json:"curr" msg:"cq"`
+	Avg  QStat `json:"avg" msg:"aq"`
+	Max  QStat `json:"peak" msg:"pq"`
 }
 
 // MetricName name of replication metric
@@ -785,16 +839,34 @@ const (
 	Total MetricName = "Total"
 )
 
+// WorkerStat has stats on number of replication workers
+type WorkerStat struct {
+	Curr int32   `json:"curr"`
+	Avg  float32 `json:"avg"`
+	Max  int32   `json:"max"`
+}
+
+// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
+// and number of entries that failed replication after 3 retries
+type ReplMRFStats struct {
+	LastFailedCount uint64 `json:"failedCount_last5min"`
+	// Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
+	TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
+	// Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
+	TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
+}
+
 // ReplQNodeStats holds stats for a node in replication queue
 type ReplQNodeStats struct {
-	NodeName      string `json:"nodeName"`
-	Uptime        int64  `json:"uptime"`
-	ActiveWorkers int32  `json:"activeWorkers"`
+	NodeName string     `json:"nodeName"`
+	Uptime   int64      `json:"uptime"`
+	Workers  WorkerStat `json:"activeWorkers"`
 
-	XferStats    map[MetricName]XferStats            `json:"xferStats"`
-	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
+	XferStats    map[MetricName]XferStats            `json:"transferSummary"`
+	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
 
-	QStats map[MetricName]InQueueStats `json:"qStats"`
+	QStats   InQueueMetric `json:"queueStats"`
+	MRFStats ReplMRFStats  `json:"mrfStats"`
 }
 
 // ReplQueueStats holds stats for replication queue across nodes
@@ -803,33 +875,54 @@ type ReplQueueStats struct {
 }
 
 // Workers returns number of workers across all nodes
-func (q ReplQueueStats) Workers() int64 {
-	var workers int64
+func (q ReplQueueStats) Workers() (tot WorkerStat) {
 	for _, node := range q.Nodes {
-		workers += int64(node.ActiveWorkers)
+		tot.Avg += node.Workers.Avg
+		tot.Curr += node.Workers.Curr
+		if tot.Max < node.Workers.Max {
+			tot.Max = node.Workers.Max
+		}
 	}
-	return workers
+	if len(q.Nodes) > 0 {
+		tot.Avg /= float32(len(q.Nodes))
+		tot.Curr /= int32(len(q.Nodes))
+	}
+	return tot
+}
+
+// qStatSummary returns cluster level stats for objects in replication queue
+func (q ReplQueueStats) qStatSummary() InQueueMetric {
+	m := InQueueMetric{}
+	for _, v := range q.Nodes {
+		m.Avg.Add(v.QStats.Avg)
+		m.Curr.Add(v.QStats.Curr)
+		if m.Max.Count < v.QStats.Max.Count {
+			m.Max.Add(v.QStats.Max)
+		}
+	}
+	return m
 }
 
 // ReplQStats holds stats for objects in replication queue
 type ReplQStats struct {
-	Uptime  int64 `json:"uptime"`
-	Workers int64 `json:"workers"`
+	Uptime  int64      `json:"uptime"`
+	Workers WorkerStat `json:"workers"`
 
 	XferStats    map[MetricName]XferStats            `json:"xferStats"`
 	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
 
-	QStats map[MetricName]InQueueStats `json:"qStats"`
+	QStats   InQueueMetric `json:"qStats"`
+	MRFStats ReplMRFStats  `json:"mrfStats"`
 }
 
 // QStats returns cluster level stats for objects in replication queue
 func (q ReplQueueStats) QStats() (r ReplQStats) {
-	r.QStats = make(map[MetricName]InQueueStats)
+	r.QStats = q.qStatSummary()
 	r.XferStats = make(map[MetricName]XferStats)
 	r.TgtXferStats = make(map[string]map[MetricName]XferStats)
+	r.Workers = q.Workers()
 
 	for _, node := range q.Nodes {
-		r.Workers += int64(node.ActiveWorkers)
 		for arn := range node.TgtXferStats {
 			xmap, ok := node.TgtXferStats[arn]
 			if !ok {
@@ -859,39 +952,20 @@ func (q ReplQueueStats) QStats() (r ReplQStats) {
 			st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
 			r.XferStats[k] = st
 		}
-		for k, v := range node.QStats {
-			st, ok := r.QStats[k]
-			if !ok {
-				st = InQueueStats{}
-			}
-			st.Count += v.Count
-			st.Bytes += v.Bytes
-			r.QStats[k] = st
-		}
+		r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
+		r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
+		r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
 		r.Uptime += node.Uptime
 	}
 	if len(q.Nodes) > 0 {
-		for k := range r.XferStats {
-			st := r.XferStats[k]
-			st.AvgRate /= float64(len(q.Nodes))
-			st.CurrRate /= float64(len(q.Nodes))
-			r.XferStats[k] = st
-		}
-		for arn := range r.TgtXferStats {
-			for m, v := range r.TgtXferStats[arn] {
-				v.AvgRate /= float64(len(q.Nodes))
-				v.CurrRate /= float64(len(q.Nodes))
-				r.TgtXferStats[arn][m] = v
-			}
-		}
 		r.Uptime /= int64(len(q.Nodes)) // average uptime
 	}
-
 	return
 }
 
 // MetricsV2 represents replication metrics for a bucket.
 type MetricsV2 struct {
+	Uptime       int64          `json:"uptime"`
 	CurrentStats Metrics        `json:"currStats"`
 	QueueStats   ReplQueueStats `json:"queueStats"`
 }
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
index 51a04b06d..056e78a67 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -121,49 +121,54 @@ func GetRegionFromURL(endpointURL url.URL) string {
 	if endpointURL.Host == "s3-external-1.amazonaws.com" {
 		return ""
 	}
-	if IsAmazonGovCloudEndpoint(endpointURL) {
-		return "us-gov-west-1"
-	}
+
 	// if elb's are used we cannot calculate which region it may be, just return empty.
 	if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
 		return ""
 	}
-	parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+
+	// We check for FIPS dualstack matching first to avoid the non-greedy
+	// regex for FIPS non-dualstack matching a dualstack URL
+	parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
 	if len(parts) > 1 {
 		return parts[1]
 	}
-	if IsAmazonFIPSUSEastWestEndpoint(endpointURL) {
-		// We check for FIPS dualstack matching first to avoid the non-greedy
-		// regex for FIPS non-dualstack matching a dualstack URL
-		parts = amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
-		if len(parts) > 1 {
-			return parts[1]
-		}
-		parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
-		if len(parts) > 1 {
-			return parts[1]
-		}
+
+	parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
 	}
+
+	parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+
 	parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
 	if len(parts) > 1 {
 		return parts[1]
 	}
+
 	parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
 	if len(parts) > 1 {
 		return parts[1]
 	}
+
 	parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
 	if len(parts) > 1 {
 		return parts[1]
 	}
+
 	parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
 	if len(parts) > 1 {
 		return parts[1]
 	}
+
 	parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
 	if len(parts) > 1 {
 		return parts[1]
 	}
+
 	return ""
 }
@@ -186,45 +191,25 @@ func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
 		return false
 	}
 	return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+		endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
 		IsAmazonFIPSGovCloudEndpoint(endpointURL))
 }
 
-// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
-// See https://aws.amazon.com/compliance/fips.
+// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
 func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
 	if endpointURL == sentinelURL {
 		return false
 	}
-	return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" ||
-		endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" ||
-		endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com"
-}
-
-// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint.
-// See https://aws.amazon.com/compliance/fips.
-func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool {
-	if endpointURL == sentinelURL {
-		return false
-	}
-	switch endpointURL.Host {
-	case "s3-fips.us-east-2.amazonaws.com":
-	case "s3-fips.dualstack.us-west-1.amazonaws.com":
-	case "s3-fips.dualstack.us-west-2.amazonaws.com":
-	case "s3-fips.dualstack.us-east-2.amazonaws.com":
-	case "s3-fips.dualstack.us-east-1.amazonaws.com":
-	case "s3-fips.us-west-1.amazonaws.com":
-	case "s3-fips.us-west-2.amazonaws.com":
-	case "s3-fips.us-east-1.amazonaws.com":
-	default:
-		return false
-	}
-	return true
+	return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
 }
 
 // IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
 // See https://aws.amazon.com/compliance/fips.
 func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
-	return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL)
+	if endpointURL == sentinelURL {
+		return false
+	}
+	return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
 }
 
 // IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 15202b7f6..6844c2102 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -390,7 +390,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.62
+# github.com/minio/minio-go/v7 v7.0.63
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
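
Usage note (not part of the vendored patch above): the most visible API addition in v7.0.63 is the SnowballOptions.SkipErrs field, which lets a snowball upload skip objects whose content cannot be read (or is shorter than declared) instead of aborting the whole archive. Below is a minimal sketch of how a caller might opt into it; the endpoint, bucket name and credentials are placeholders, and it assumes the standard minio.New client constructor and PutObjectsSnowball entry point from minio-go v7.

package main

import (
	"bytes"
	"context"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Feed a couple of in-memory objects to the snowball uploader.
	objects := make(chan minio.SnowballObject, 2)
	go func() {
		defer close(objects)
		for _, name := range []string{"a.txt", "b.txt"} {
			body := []byte("content of " + name)
			objects <- minio.SnowballObject{
				Key:     name,
				Size:    int64(len(body)),
				ModTime: time.Now(),
				Content: bytes.NewReader(body),
			}
		}
	}()

	opts := minio.SnowballOptions{
		Compress: true,
		// New in v7.0.63: skip objects whose content cannot be read
		// (or whose size does not match) instead of failing the batch.
		SkipErrs: true,
	}
	if err := client.PutObjectsSnowball(context.Background(), "my-bucket", opts, objects); err != nil {
		log.Fatal(err)
	}
}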