[chore]: Bump github.com/minio/minio-go/v7 from 7.0.58 to 7.0.59 (#1941)

Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.58 to 7.0.59.
- [Release notes](https://github.com/minio/minio-go/releases)
- [Commits](https://github.com/minio/minio-go/compare/v7.0.58...v7.0.59)

---
updated-dependencies:
- dependency-name: github.com/minio/minio-go/v7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] 2023-07-04 11:22:40 +02:00 committed by GitHub
parent 2a40c81f10
commit 1218f97e01
10 changed files with 207 additions and 18 deletions

go.mod (2 lines changed)

@@ -36,7 +36,7 @@ require (
 	github.com/jackc/pgx/v5 v5.4.1
 	github.com/microcosm-cc/bluemonday v1.0.24
 	github.com/miekg/dns v1.1.55
-	github.com/minio/minio-go/v7 v7.0.58
+	github.com/minio/minio-go/v7 v7.0.59
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
 	github.com/spf13/cobra v1.7.0

go.sum (4 lines changed)

@@ -450,8 +450,8 @@ github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
 github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.58 h1:B9/8Az8Om/2kX8Ys2ai2PZbBTokRE5W6P5OaqnAs6po=
-github.com/minio/minio-go/v7 v7.0.58/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
+github.com/minio/minio-go/v7 v7.0.59 h1:lxIXwsTIcQkYoEG25rUJbzpmSB/oWeVDmxFo/uWUUsw=
+github.com/minio/minio-go/v7 v7.0.59/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=

(vendored minio-go file, name not shown in this view)

@@ -289,3 +289,67 @@ func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketNam
 	}
 	return rinfo, nil
 }
+
+// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
+func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return s, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("replication-metrics", "2")
+
+	// Execute GET on bucket to get replication metrics.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return s, err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return s, httpRespToErrorResponse(resp, bucketName, "")
+	}
+
+	respBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return s, err
+	}
+
+	if err := json.Unmarshal(respBytes, &s); err != nil {
+		return s, err
+	}
+	return s, nil
+}
+
+// CheckBucketReplication validates if replication is set up properly for a bucket
+func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("replication-check", "")
+
+	// Execute GET on bucket to get replication config.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return httpRespToErrorResponse(resp, bucketName, "")
+	}
+	return nil
+}
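Both methods above are exported on the minio-go Client, so callers of the bumped module can use them directly. A minimal usage sketch follows; the endpoint minio.example.com, the credentials, and the bucket name my-bucket are placeholders, and it assumes a reachable MinIO deployment with replication configured on that bucket:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials; replace with real values.
	client, err := minio.New("minio.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Validate that replication is set up properly for the bucket.
	if err := client.CheckBucketReplication(ctx, "my-bucket"); err != nil {
		log.Fatalf("replication check failed: %v", err)
	}

	// Fetch the v2 replication metrics added in this release.
	metrics, err := client.GetBucketReplicationMetricsV2(ctx, "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pending:", metrics.CurrentStats.PendingCount)
	fmt.Println("failed: ", metrics.CurrentStats.FailedCount)
	fmt.Println("workers:", metrics.QueueStats.Workers())
}

CheckBucketReplication appears to surface a non-200 response as an error, while GetBucketReplicationMetricsV2 returns the richer MetricsV2 payload defined later in this diff.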

(vendored minio-go file, name not shown in this view)

@@ -222,6 +222,9 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
 	if dstOpts.Internal.ReplicationRequest {
 		headers.Set(minIOBucketReplicationRequest, "true")
 	}
+	if dstOpts.Internal.ReplicationValidityCheck {
+		headers.Set(minIOBucketReplicationCheck, "true")
+	}
 	if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
 		headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
 	}

(vendored minio-go file, name not shown in this view)

@@ -56,14 +56,15 @@ func (r ReplicationStatus) Empty() bool {
 // AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
 // implementation on MinIO server
 type AdvancedPutOptions struct {
-	SourceVersionID    string
-	SourceETag         string
-	ReplicationStatus  ReplicationStatus
-	SourceMTime        time.Time
-	ReplicationRequest bool
-	RetentionTimestamp time.Time
-	TaggingTimestamp   time.Time
-	LegalholdTimestamp time.Time
+	SourceVersionID          string
+	SourceETag               string
+	ReplicationStatus        ReplicationStatus
+	SourceMTime              time.Time
+	ReplicationRequest       bool
+	RetentionTimestamp       time.Time
+	TaggingTimestamp         time.Time
+	LegalholdTimestamp       time.Time
+	ReplicationValidityCheck bool
 }
 
 // PutObjectOptions represents options specified by user for PutObject call

@@ -188,6 +189,9 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 	if opts.Internal.ReplicationRequest {
 		header.Set(minIOBucketReplicationRequest, "true")
 	}
+	if opts.Internal.ReplicationValidityCheck {
+		header.Set(minIOBucketReplicationCheck, "true")
+	}
 	if !opts.Internal.LegalholdTimestamp.IsZero() {
 		header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
 	}
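AdvancedPutOptions is commented as internal plumbing for MinIO's replication, so ReplicationValidityCheck is not a general-purpose option; still, the exported Header() method shown above makes it easy to see how the flag maps onto the new X-Minio-Source-Replication-Check header. A small, purely illustrative sketch:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	// AdvancedPutOptions is intended for MinIO's own replication machinery;
	// this only illustrates how the new flag is translated into a header.
	opts := minio.PutObjectOptions{
		Internal: minio.AdvancedPutOptions{
			ReplicationValidityCheck: true,
		},
	}

	h := opts.Header()
	// Prints "true": the flag sets X-Minio-Source-Replication-Check.
	fmt.Println(h.Get("X-Minio-Source-Replication-Check"))
}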

(vendored minio-go file, name not shown in this view)

@@ -112,10 +112,11 @@ func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
 // AdvancedRemoveOptions intended for internal use by replication
 type AdvancedRemoveOptions struct {
-	ReplicationDeleteMarker bool
-	ReplicationStatus       ReplicationStatus
-	ReplicationMTime        time.Time
-	ReplicationRequest      bool
+	ReplicationDeleteMarker  bool
+	ReplicationStatus        ReplicationStatus
+	ReplicationMTime         time.Time
+	ReplicationRequest       bool
+	ReplicationValidityCheck bool // check permissions
 }
 
 // RemoveObjectOptions represents options specified by user for RemoveObject call

@@ -168,6 +169,9 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string
 	if opts.Internal.ReplicationRequest {
 		headers.Set(minIOBucketReplicationRequest, "true")
 	}
+	if opts.Internal.ReplicationValidityCheck {
+		headers.Set(minIOBucketReplicationCheck, "true")
+	}
 	if opts.ForceDelete {
 		headers.Set(minIOForceDelete, "true")
 	}

(vendored minio-go file, name not shown in this view)

@@ -124,7 +124,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.58"
+	libraryVersion = "v7.0.59"
 )
 
 // User Agent should always following the below style.

(vendored minio-go file, name not shown in this view)

@@ -94,6 +94,8 @@
 	minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
 	minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
 	minIOBucketReplicationRequest      = "X-Minio-Source-Replication-Request"
+	minIOBucketReplicationCheck        = "X-Minio-Source-Replication-Check"
+
 	// Header indicates last tag update time on source
 	minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
 	// Header indicates last retention update time on source

(vendored minio-go file, name not shown in this view)

@@ -20,6 +20,7 @@
 	"bytes"
 	"encoding/xml"
 	"fmt"
+	"math"
 	"strconv"
 	"strings"
 	"time"

@@ -704,6 +705,8 @@ type TargetMetrics struct {
 	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
 	// Current bandwidth used in bytes/sec for this target
 	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
+	// Completed count
+	ReplicatedCount uint64 `json:"replicationCount"`
 }
 
 // Metrics represents inline replication metrics for a bucket.

@@ -721,6 +724,10 @@ type Metrics struct {
 	PendingCount uint64 `json:"pendingReplicationCount"`
 	// Total number of failed operations including metadata updates across targets
 	FailedCount uint64 `json:"failedReplicationCount"`
+	// Total Replica counts
+	ReplicaCount int64 `json:"replicaCount,omitempty"`
+	// Total Replicated count
+	ReplicatedCount int64 `json:"replicationCount,omitempty"`
 }
 
 // ResyncTargetsInfo provides replication target information to resync replicated data.

@@ -742,9 +749,114 @@ type ResyncTarget struct {
 	FailedSize int64 `json:"failedReplicationSize,omitempty"`
 	// Total number of failed operations
 	FailedCount int64 `json:"failedReplicationCount,omitempty"`
-	// Total number of failed operations
+	// Total number of completed operations
 	ReplicatedCount int64 `json:"replicationCount,omitempty"`
 	// Last bucket/object replicated.
 	Bucket string `json:"bucket,omitempty"`
 	Object string `json:"object,omitempty"`
 }
+
+// XferStats holds transfer rate info for uploads/sec
+type XferStats struct {
+	AvgRate  float64 `json:"avgRate"`
+	PeakRate float64 `json:"peakRate"`
+	CurrRate float64 `json:"currRate"`
+}
+
+// InQueueStats holds stats for objects in replication queue
+type InQueueStats struct {
+	Count int32 `json:"count"`
+	Bytes int64 `json:"bytes"`
+}
+
+// MetricName name of replication metric
+type MetricName string
+
+const (
+	// Large is a metric name for large objects >=128MiB
+	Large MetricName = "Large"
+	// Small is a metric name for objects <128MiB size
+	Small MetricName = "Small"
+	// Total is a metric name for total objects
+	Total MetricName = "Total"
+)
+
+// ReplQNodeStats holds stats for a node in replication queue
+type ReplQNodeStats struct {
+	NodeName      string                      `json:"nodeName"`
+	Uptime        int64                       `json:"uptime"`
+	ActiveWorkers int32                       `json:"activeWorkers"`
+	XferStats     map[MetricName]XferStats    `json:"xferStats"`
+	QStats        map[MetricName]InQueueStats `json:"qStats"`
+}
+
+// ReplQueueStats holds stats for replication queue across nodes
+type ReplQueueStats struct {
+	Nodes []ReplQNodeStats `json:"nodes"`
+}
+
+// Workers returns number of workers across all nodes
+func (q ReplQueueStats) Workers() int64 {
+	var workers int64
+	for _, node := range q.Nodes {
+		workers += int64(node.ActiveWorkers)
+	}
+	return workers
+}
+
+// ReplQStats holds stats for objects in replication queue
+type ReplQStats struct {
+	Uptime    int64                       `json:"uptime"`
+	Workers   int64                       `json:"workers"`
+	XferStats map[MetricName]XferStats    `json:"xferStats"`
+	QStats    map[MetricName]InQueueStats `json:"qStats"`
+}
+
+// QStats returns cluster level stats for objects in replication queue
+func (q ReplQueueStats) QStats() (r ReplQStats) {
+	r.QStats = make(map[MetricName]InQueueStats)
+	r.XferStats = make(map[MetricName]XferStats)
+
+	for _, node := range q.Nodes {
+		r.Workers += int64(node.ActiveWorkers)
+		for k, v := range node.XferStats {
+			st, ok := r.XferStats[k]
+			if !ok {
+				st = XferStats{}
+			}
+			st.AvgRate += v.AvgRate
+			st.CurrRate += v.CurrRate
+			st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
+			r.XferStats[k] = st
+		}
+		for k, v := range node.QStats {
+			st, ok := r.QStats[k]
+			if !ok {
+				st = InQueueStats{}
+			}
+			st.Count += v.Count
+			st.Bytes += v.Bytes
+			r.QStats[k] = st
+		}
+		r.Uptime += node.Uptime
+	}
+	if len(q.Nodes) > 0 {
+		for k := range r.XferStats {
+			st := r.XferStats[k]
+			st.AvgRate /= float64(len(q.Nodes))
+			st.CurrRate /= float64(len(q.Nodes))
+			r.XferStats[k] = st
+		}
+		r.Uptime /= int64(len(q.Nodes)) // average uptime
+	}
+	return
+}
+
+// MetricsV2 represents replication metrics for a bucket.
+type MetricsV2 struct {
+	History      Metrics        `json:"history"`
+	CurrentStats Metrics        `json:"currStats"`
+	QueueStats   ReplQueueStats `json:"queueStats"`
+}
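QStats() folds the per-node figures into cluster totals: worker counts, queued object counts, bytes and rates are summed; PeakRate takes the per-metric maximum; AvgRate, CurrRate and Uptime are then divided by the node count, so they end up as per-node averages. A self-contained sketch of that behaviour using the new replication package; the node names and all numbers are made up for illustration:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// Two hypothetical nodes with invented queue and transfer figures.
	q := replication.ReplQueueStats{
		Nodes: []replication.ReplQNodeStats{
			{
				NodeName:      "node-1",
				Uptime:        3600,
				ActiveWorkers: 4,
				XferStats: map[replication.MetricName]replication.XferStats{
					replication.Total: {AvgRate: 100, PeakRate: 250, CurrRate: 120},
				},
				QStats: map[replication.MetricName]replication.InQueueStats{
					replication.Total: {Count: 10, Bytes: 1 << 20},
				},
			},
			{
				NodeName:      "node-2",
				Uptime:        1800,
				ActiveWorkers: 2,
				XferStats: map[replication.MetricName]replication.XferStats{
					replication.Total: {AvgRate: 60, PeakRate: 300, CurrRate: 80},
				},
				QStats: map[replication.MetricName]replication.InQueueStats{
					replication.Total: {Count: 5, Bytes: 512 << 10},
				},
			},
		},
	}

	fmt.Println("workers:", q.Workers()) // 6

	agg := q.QStats()
	// Counts and bytes are summed; Avg/Curr rates are averaged; PeakRate is the max.
	fmt.Println("queued objects:", agg.QStats[replication.Total].Count)       // 15
	fmt.Println("avg rate:", agg.XferStats[replication.Total].AvgRate)        // 80
	fmt.Println("peak rate:", agg.XferStats[replication.Total].PeakRate)      // 300
	fmt.Println("uptime (avg):", agg.Uptime)                                  // 2700
}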

vendor/modules.txt (vendored, 2 lines changed)

@@ -386,7 +386,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.58
+# github.com/minio/minio-go/v7 v7.0.59
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials