From 8e88ee8d9c3740a76139566837199831dabdf997 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Mar 2024 10:51:13 +0000 Subject: [PATCH] [chore]: Bump github.com/minio/minio-go/v7 from 7.0.67 to 7.0.69 (#2748) --- go.mod | 4 +- go.sum | 8 +- .../klauspost/compress/flate/deflate.go | 2 +- .../compress/internal/race/norace.go | 13 + .../klauspost/compress/internal/race/race.go | 26 ++ .../klauspost/compress/s2/decode.go | 6 + .../klauspost/compress/s2/encode_amd64.go | 14 + .../klauspost/compress/s2/reader.go | 31 +- vendor/github.com/klauspost/compress/s2/s2.go | 4 + .../klauspost/compress/s2/writer.go | 14 +- vendor/github.com/minio/minio-go/v7/CREDITS | 110 ------ .../minio/minio-go/v7/api-compose-object.go | 2 +- .../minio/minio-go/v7/api-put-object.go | 4 +- vendor/github.com/minio/minio-go/v7/api.go | 23 +- .../minio/minio-go/v7/functional_tests.go | 349 ++++++++---------- .../v7/pkg/credentials/assume_role.go | 1 + .../v7/pkg/credentials/credentials.go | 11 +- .../pkg/credentials/file_aws_credentials.go | 1 + .../minio-go/v7/pkg/credentials/iam_aws.go | 23 ++ .../v7/pkg/credentials/sts_client_grants.go | 1 + .../v7/pkg/credentials/sts_custom_identity.go | 1 + .../v7/pkg/credentials/sts_ldap_identity.go | 1 + .../v7/pkg/credentials/sts_tls_identity.go | 1 + .../v7/pkg/credentials/sts_web_identity.go | 1 + vendor/github.com/minio/minio-go/v7/retry.go | 1 + .../minio/minio-go/v7/s3-endpoints.go | 183 +++++++-- vendor/github.com/minio/minio-go/v7/utils.go | 15 + vendor/modules.txt | 7 +- 28 files changed, 487 insertions(+), 370 deletions(-) create mode 100644 vendor/github.com/klauspost/compress/internal/race/norace.go create mode 100644 vendor/github.com/klauspost/compress/internal/race/race.go diff --git a/go.mod b/go.mod index 4195d9697..1dbd65162 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/jackc/pgx/v5 v5.5.5 github.com/microcosm-cc/bluemonday v1.0.26 github.com/miekg/dns v1.1.58 - github.com/minio/minio-go/v7 v7.0.67 + github.com/minio/minio-go/v7 v7.0.69 github.com/mitchellh/mapstructure v1.5.0 github.com/oklog/ulid v1.3.1 github.com/prometheus/client_golang v1.18.0 @@ -155,7 +155,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect diff --git a/go.sum b/go.sum index 9453ef7a4..f7678788d 100644 --- a/go.sum +++ b/go.sum @@ -449,8 +449,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod 
h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= @@ -495,8 +495,8 @@ github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= -github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= +github.com/minio/minio-go/v7 v7.0.69 h1:l8AnsQFyY1xiwa/DaQskY4NXSLA2yrGsW5iD9nRPVS0= +github.com/minio/minio-go/v7 v7.0.69/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index de912e187..66d1657d2 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { // Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 { + if d.level <= 0 && d.level > -MinCustomWindowSize { return } if d.fast != nil { diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go new file mode 100644 index 000000000..affbbbb59 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/norace.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package race + +func ReadSlice[T any](s []T) { +} + +func WriteSlice[T any](s []T) { +} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go new file mode 100644 index 000000000..f5e240dcd --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/race.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build race + +package race + +import ( + "runtime" + "unsafe" +) + +func ReadSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} + +func WriteSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go index 6c7feafcc..264ffd0a9 100644 --- a/vendor/github.com/klauspost/compress/s2/decode.go +++ b/vendor/github.com/klauspost/compress/s2/decode.go @@ -10,6 +10,8 @@ "errors" "fmt" "strconv" + + "github.com/klauspost/compress/internal/race" ) var ( @@ -63,6 +65,10 @@ func Decode(dst, src []byte) ([]byte, error) { } else { dst = make([]byte, dLen) } + + race.WriteSlice(dst) + race.ReadSlice(src[s:]) + if s2Decode(dst, src[s:]) != 0 { return nil, ErrCorrupt } diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index ebc332ad5..4f45206a4 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -3,6 +3,8 @@ package s2 +import "github.com/klauspost/compress/internal/race" + const hasAmd64Asm = true // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It @@ -14,6 +16,9 @@ // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -50,6 +55,9 @@ func encodeBlock(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetter(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -86,6 +94,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -121,6 +132,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go index 2f01a3987..8372d752f 100644 --- a/vendor/github.com/klauspost/compress/s2/reader.go +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -104,12 +104,14 @@ func ReaderIgnoreStreamIdentifier() ReaderOption { // For each chunk with the ID, the callback is called with the content. // Any returned non-nil error will abort decompression. // Only one callback per ID is supported, latest sent will be used. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. 
func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { return func(r *Reader) error { if id < 0x80 || id > 0xfd { return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") } - r.skippableCB[id] = fn + r.skippableCB[id-0x80] = fn return nil } } @@ -128,7 +130,7 @@ type Reader struct { err error decoded []byte buf []byte - skippableCB [0x80]func(r io.Reader) error + skippableCB [0xff - 0x80]func(r io.Reader) error blockStart int64 // Uncompressed offset at start of current. index *Index @@ -201,7 +203,7 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { // The supplied slice does not need to be the size of the read. func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { if id < 0x80 { - r.err = fmt.Errorf("interbal error: skippable id < 0x80") + r.err = fmt.Errorf("internal error: skippable id < 0x80") return false } if fn := r.skippableCB[id-0x80]; fn != nil { @@ -450,6 +452,12 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e for toWrite := range queue { entry := <-toWrite reUse <- toWrite + if hasErr() || entry == nil { + if entry != nil { + writtenBlocks <- entry + } + continue + } if hasErr() { writtenBlocks <- entry continue @@ -469,13 +477,13 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e } }() - // Reader defer func() { - close(queue) if r.err != nil { - err = r.err setErr(r.err) + } else if err != nil { + setErr(err) } + close(queue) wg.Wait() if err == nil { err = aErr @@ -483,6 +491,7 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e written = aWritten }() + // Reader for !hasErr() { if !r.readFull(r.buf[:4], true) { if r.err == io.EOF { @@ -551,11 +560,13 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e if err != nil { writtenBlocks <- decoded setErr(err) + entry <- nil return } if !r.ignoreCRC && crc(decoded) != checksum { writtenBlocks <- decoded setErr(ErrCRC) + entry <- nil return } entry <- decoded @@ -1048,15 +1059,17 @@ func (r *Reader) ReadByte() (byte, error) { } // SkippableCB will register a callback for chunks with the specified ID. -// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive). +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). // For each chunk with the ID, the callback is called with the content. // Any returned non-nil error will abort decompression. // Only one callback per ID is supported, latest sent will be used. // Sending a nil function will disable previous callbacks. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. 
func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { - if id < 0x80 || id > chunkTypePadding { + if id < 0x80 || id >= chunkTypePadding { return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") } - r.skippableCB[id] = fn + r.skippableCB[id-0x80] = fn return nil } diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go index dae3f731f..72bcb4945 100644 --- a/vendor/github.com/klauspost/compress/s2/s2.go +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -37,6 +37,8 @@ import ( "bytes" "hash/crc32" + + "github.com/klauspost/compress/internal/race" ) /* @@ -112,6 +114,8 @@ // crc implements the checksum specified in section 3 of // https://github.com/google/snappy/blob/master/framing_format.txt func crc(b []byte) uint32 { + race.ReadSlice(b) + c := crc32.Update(0, crcTable, b) return c>>15 | c<<17 + 0xa282ead8 } diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 089cd36d8..bba66a876 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -13,6 +13,8 @@ "io" "runtime" "sync" + + "github.com/klauspost/compress/internal/race" ) const ( @@ -271,7 +273,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { return fmt.Errorf("skippable block excessed maximum size") } var header [4]byte - chunkLen := 4 + len(data) + chunkLen := len(data) header[0] = id header[1] = uint8(chunkLen >> 0) header[2] = uint8(chunkLen >> 8) @@ -282,7 +284,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { if err = w.err(err); err != nil { return err } - if n != len(data) { + if n != len(b) { return w.err(io.ErrShortWrite) } w.written += int64(n) @@ -303,9 +305,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { if err := write(header[:]); err != nil { return err } - if err := write(data); err != nil { - return err - } + return write(data) } // Create output... @@ -385,6 +385,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { buf = buf[len(uncompressed):] // Get an output buffer. obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + race.WriteSlice(obuf) + output := make(chan result) // Queue output now, so we keep order. w.output <- output @@ -393,6 +395,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { } w.uncompWritten += int64(len(uncompressed)) go func() { + race.ReadSlice(uncompressed) + checksum := crc(uncompressed) // Set to uncompressed. diff --git a/vendor/github.com/minio/minio-go/v7/CREDITS b/vendor/github.com/minio/minio-go/v7/CREDITS index d20923181..dce3d4c9a 100644 --- a/vendor/github.com/minio/minio-go/v7/CREDITS +++ b/vendor/github.com/minio/minio-go/v7/CREDITS @@ -1365,60 +1365,6 @@ THE SOFTWARE. 
================================================================ -github.com/sirupsen/logrus -https://github.com/sirupsen/logrus ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -================================================================ - -github.com/stretchr/testify -https://github.com/stretchr/testify ----------------------------------------------------------------- -MIT License - -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - golang.org/x/crypto https://golang.org/x/crypto ---------------------------------------------------------------- @@ -1748,59 +1694,3 @@ third-party archives. ================================================================ -gopkg.in/yaml.v3 -https://gopkg.in/yaml.v3 ----------------------------------------------------------------- - -This project is covered by two different licenses: MIT and Apache. 
- -#### MIT License #### - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original MIT license, with the additional -copyright staring in 2011 when the project was ported over: - - apic.go emitterc.go parserc.go readerc.go scannerc.go - writerc.go yamlh.go yamlprivateh.go - -Copyright (c) 2006-2010 Kirill Simonov -Copyright (c) 2006-2011 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### Apache License ### - -All the remaining project files are covered by the Apache license: - -Copyright (c) 2011-2019 Canonical Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -================================================================ - diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index e64a24458..8c12c355c 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -119,7 +119,7 @@ func (opts CopyDestOptions) Marshal(header http.Header) { if opts.ReplaceMetadata { header.Set("x-amz-metadata-directive", replaceDirective) for k, v := range filterCustomMeta(opts.UserMetadata) { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { header.Set(k, v) } else { header.Set("x-amz-meta-"+k, v) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index bbd8924e2..4dec6040d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -212,7 +212,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { } for k, v := range opts.UserMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { header.Set(k, v) } else { header.Set("x-amz-meta-"+k, v) @@ -230,7 +230,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { // validate() checks if the UserMetadata map has standard headers or and raises an error if so. func (opts PutObjectOptions) validate() (err error) { for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { return errInvalidArgument(k + " unsupported user defined metadata name") } if !httpguts.ValidHeaderFieldValue(v) { diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 1c3cb83cd..930e082ab 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -1,6 +1,6 @@ /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2023 MinIO, Inc. + * Copyright 2015-2024 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -80,6 +80,8 @@ type Client struct { // S3 specific accelerated endpoint. s3AccelerateEndpoint string + // S3 dual-stack endpoints are enabled by default. + s3DualstackEnabled bool // Region endpoint region string @@ -127,7 +129,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.67" + libraryVersion = "v7.0.69" ) // User Agent should always following the below style. @@ -158,9 +160,12 @@ func New(endpoint string, opts *Options) (*Client, error) { if err != nil { return nil, err } - // If Amazon S3 set to signature v4. if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { + // If Amazon S3 set to signature v4. clnt.overrideSignerType = credentials.SignatureV4 + // Amazon S3 endpoints are resolved into dual-stack endpoints by default + // for backwards compatibility. 
+ clnt.s3DualstackEnabled = true } return clnt, nil @@ -330,6 +335,16 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { } } +// SetS3EnableDualstack turns s3 dual-stack endpoints on or off for all requests. +// The feature is only specific to S3 and is on by default. To read more about +// Amazon S3 dual-stack endpoints visit - +// https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html +func (c *Client) SetS3EnableDualstack(enabled bool) { + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + c.s3DualstackEnabled = enabled + } +} + // Hash materials provides relevant initialized hash algo writers // based on the expected signature type. // @@ -926,7 +941,7 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) + host = getS3Endpoint(bucketLocation, c.s3DualstackEnabled) } } } diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index a6a436c23..de17cdc6f 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -31,6 +31,7 @@ "hash" "hash/crc32" "io" + "log/slog" "math/rand" "mime/multipart" "net/http" @@ -48,9 +49,7 @@ "github.com/dustin/go-humanize" "github.com/google/uuid" - jsoniter "github.com/json-iterator/go" "github.com/minio/sha256-simd" - log "github.com/sirupsen/logrus" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -91,28 +90,6 @@ func createHTTPTransport() (transport *http.Transport) { return } -type mintJSONFormatter struct{} - -func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { - data := make(log.Fields, len(entry.Data)) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - json := jsoniter.ConfigCompatibleWithStandardLibrary - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} - var readFull = func(r io.Reader, buf []byte) (n int, err error) { // ReadFull reads exactly len(buf) bytes from r into buf. 
// It returns the number of bytes copied and an error if @@ -148,23 +125,28 @@ func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { return } -func cleanEmptyEntries(fields log.Fields) log.Fields { - cleanFields := log.Fields{} - for k, v := range fields { - if v != "" { - cleanFields[k] = v - } - } - return cleanFields -} - -// log successful test runs -func successLogger(testName, function string, args map[string]interface{}, startTime time.Time) *log.Entry { +func baseLogger(testName, function string, args map[string]interface{}, startTime time.Time) *slog.Logger { // calculate the test case duration duration := time.Since(startTime) // log with the fields as per mint - fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"} - return log.WithFields(cleanEmptyEntries(fields)) + l := slog.With( + "name", "minio-go: "+testName, + "duration", duration.Nanoseconds()/1000000, + ) + if function != "" { + l = l.With("function", function) + } + if len(args) > 0 { + l = l.With("args", args) + } + return l +} + +// log successful test runs +func logSuccess(testName, function string, args map[string]interface{}, startTime time.Time) { + baseLogger(testName, function, args, startTime). + With("status", "PASS"). + Info("") } // As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented, @@ -174,44 +156,37 @@ func logError(testName, function string, args map[string]interface{}, startTime // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in // addition to NotImplemented error returned from server if isErrNotImplemented(err) { - ignoredLog(testName, function, args, startTime, message).Info() - } else if isRunOnFail() { - failureLog(testName, function, args, startTime, alert, message, err).Error() + logIgnored(testName, function, args, startTime, message) } else { - failureLog(testName, function, args, startTime, alert, message, err).Fatal() + logFailure(testName, function, args, startTime, alert, message, err) + if !isRunOnFail() { + panic(err) + } } } // log failed test runs -func failureLog(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - var fields log.Fields - // log with the fields as per mint +func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { + l := baseLogger(testName, function, args, startTime).With( + "status", "FAIL", + "alert", alert, + "message", message, + ) + if err != nil { - fields = log.Fields{ - "name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err, - } - } else { - fields = log.Fields{ - "name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, - } + l = l.With("error", err) } - return log.WithFields(cleanEmptyEntries(fields)) + + l.Error("") } // log not applicable test runs -func ignoredLog(testName, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - // log 
with the fields as per mint - fields := log.Fields{ - "name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented", - } - return log.WithFields(cleanEmptyEntries(fields)) +func logIgnored(testName, function string, args map[string]interface{}, startTime time.Time, alert string) { + baseLogger(testName, function, args, startTime). + With( + "status", "NA", + "alert", strings.Split(alert, " ")[0]+" is NotImplemented", + ).Info("") } // Delete objects in given bucket, recursively @@ -244,11 +219,7 @@ func cleanupBucket(bucketName string, c *minio.Client) error { } } // objects are already deleted, clear the buckets now - err := c.RemoveBucket(context.Background(), bucketName) - if err != nil { - return err - } - return err + return c.RemoveBucket(context.Background(), bucketName) } func cleanupVersionedBucket(bucketName string, c *minio.Client) error { @@ -281,9 +252,8 @@ func cleanupVersionedBucket(bucketName string, c *minio.Client) error { err := c.RemoveBucket(context.Background(), bucketName) if err != nil { for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { - log.Println("found", obj.Key, obj.VersionID) + slog.Info("found object", "key", obj.Key, "version", obj.VersionID) } - return err } return err } @@ -480,7 +450,7 @@ function := "MakeBucket(bucketName, region)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testMetadataSizeLimit() { @@ -545,7 +515,7 @@ function := "PutObject(bucketName, objectName, reader, objectSize, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests various bucket supported formats. 
@@ -613,7 +583,7 @@ function := "MakeBucket(bucketName, region)" logError(testName, function, args, startTime, "", "CleanupBucket failed", err) return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test PutObject using a large data to trigger multipart readat @@ -713,7 +683,7 @@ function := "PutObject(bucketName, objectName, reader, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testListObjectVersions() { @@ -837,7 +807,7 @@ function := "ListObjectVersions(bucketName, prefix, recursive)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testStatObjectWithVersioning() { @@ -955,7 +925,7 @@ function := "StatObject" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testGetObjectWithVersioning() { @@ -1095,7 +1065,7 @@ function := "GetObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testPutObjectWithVersioning() { @@ -1243,7 +1213,7 @@ function := "GetObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testCopyObjectWithVersioning() { @@ -1381,7 +1351,7 @@ function := "CopyObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testConcurrentCopyObjectWithVersioning() { @@ -1542,7 +1512,7 @@ function := "CopyObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testComposeObjectWithVersioning() { @@ -1683,7 +1653,7 @@ function := "ComposeObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testRemoveObjectWithVersioning() { @@ -1796,7 +1766,7 @@ function := "DeleteObject()" defer cleanupBucket(bucketName, c) - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testRemoveObjectsWithVersioning() { @@ -1892,7 +1862,7 @@ function := "DeleteObjects()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testObjectTaggingWithVersioning() { @@ -2050,7 +2020,7 @@ function := "{Get,Set,Remove}ObjectTagging()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test PutObject with custom checksums. 
@@ -2066,7 +2036,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -2153,7 +2123,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" }) if err == nil { if i == 0 && resp.ChecksumCRC32 == "" { - ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info() + logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend") return } logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -2245,7 +2215,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" delete(args, "metadata") } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test PutObject with custom checksums. @@ -2261,7 +2231,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -2418,7 +2388,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" delete(args, "metadata") } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test PutObject with trailing checksums. @@ -2434,7 +2404,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -2645,7 +2615,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -2734,7 +2704,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" }) if err == nil { if i == 0 && resp.ChecksumCRC32C == "" { - ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info() + logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend") return } } else { @@ -2787,7 +2757,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" delete(args, "metadata") } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testGetObjectAttributes() { @@ -2801,7 +2771,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -2994,7 +2964,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testGetObjectAttributesSSECEncryption() { @@ -3008,7 +2978,7 @@ function 
:= "GetObjectAttributes(ctx, bucketName, objectName, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -3079,7 +3049,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testGetObjectAttributesErrorCases() { @@ -3093,7 +3063,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -3193,7 +3163,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } type objectAttributesNewObject struct { @@ -3355,7 +3325,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" } if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") return } @@ -3450,7 +3420,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testPutObjectWithContentLanguage() { @@ -3519,7 +3489,7 @@ function := "PutObject(bucketName, objectName, reader, size, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test put object with streaming signature. @@ -3597,7 +3567,7 @@ function := "PutObject(bucketName, objectName, reader,size,opts)" } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object seeker from the end, using whence set to '2'. @@ -3720,7 +3690,7 @@ function := "GetObject(bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object reader to not throw error on being closed twice. 
@@ -3809,7 +3779,7 @@ function := "GetObject(bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test RemoveObjects request where context cancels after timeout @@ -3906,7 +3876,7 @@ function := "RemoveObjects(ctx, bucketName, objectsCh)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test removing multiple objects with Remove API @@ -3990,7 +3960,7 @@ function := "RemoveObjects(bucketName, objectsCh)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test removing multiple objects and check for results @@ -4123,7 +4093,7 @@ function := "RemoveObjects(bucketName, objectsCh)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests FPutObject of a big file to trigger multipart @@ -4228,7 +4198,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests FPutObject with null contentType (default = application/octet-stream) @@ -4400,7 +4370,7 @@ function = "StatObject(bucketName, objectName, opts)" } os.Remove(fName + ".gtar") - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests FPutObject request when context cancels after timeout @@ -4502,7 +4472,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests FPutObject request when context cancels after timeout @@ -4605,7 +4575,7 @@ function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test validates putObject with context to see if request cancellation is honored. @@ -4680,7 +4650,7 @@ function := "PutObject(ctx, bucketName, objectName, fileName, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests get object with s3zip extensions. @@ -4807,7 +4777,7 @@ function := "GetObject(bucketName, objectName)" if len(listed) == 0 { // Assume we are running against non-minio. args["SKIPPED"] = true - ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info() + logIgnored(testName, function, args, startTime, "s3zip does not appear to be present") return } @@ -4864,7 +4834,7 @@ function := "GetObject(bucketName, objectName)" logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests get object ReaderSeeker interface methods. @@ -5034,7 +5004,7 @@ function := "GetObject(bucketName, objectName)" cmpData(r, testCase.start, testCase.end) } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests get object ReaderAt interface methods. 
@@ -5212,7 +5182,7 @@ function := "GetObject(bucketName, objectName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Reproduces issue https://github.com/minio/minio-go/issues/1137 @@ -5330,7 +5300,7 @@ function := "GetObject(bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test Presigned Post Policy @@ -5554,7 +5524,7 @@ function := "PresignedPostPolicy(policy)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests copy object @@ -5749,7 +5719,7 @@ function := "CopyObject(dst, src)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests SSE-C get object ReaderSeeker interface methods. @@ -5932,7 +5902,7 @@ function := "GetObject(bucketName, objectName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests SSE-S3 get object ReaderSeeker interface methods. @@ -6113,7 +6083,7 @@ function := "GetObject(bucketName, objectName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests SSE-C get object ReaderAt interface methods. @@ -6297,7 +6267,7 @@ function := "GetObject(bucketName, objectName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests SSE-S3 get object ReaderAt interface methods. @@ -6479,7 +6449,7 @@ function := "GetObject(bucketName, objectName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // testSSECEncryptionPutGet tests encryption with customer provided encryption keys @@ -6583,11 +6553,11 @@ function := "PutEncryptedObject(bucketName, objectName, reader, sse)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // TestEncryptionFPut tests encryption with customer specified encryption keys @@ -6712,7 +6682,7 @@ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, os.Remove(fileName) } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // testSSES3EncryptionPutGet tests SSE-S3 encryption @@ -6814,11 +6784,11 @@ function := "PutEncryptedObject(bucketName, objectName, reader, sse)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // TestSSES3EncryptionFPut tests server side encryption @@ -6942,7 +6912,7 @@ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, os.Remove(fileName) } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testBucketNotification() { @@ -6959,7 +6929,7 @@ function := "SetBucketNotification(bucketName)" os.Getenv("NOTIFY_REGION") == "" || os.Getenv("NOTIFY_ACCOUNTID") == "" || os.Getenv("NOTIFY_RESOURCE") == "" { - ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() + 
logIgnored(testName, function, args, startTime, "Skipped notification test as it is not configured") return } @@ -7046,7 +7016,7 @@ function := "SetBucketNotification(bucketName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests comprehensive list of all methods. @@ -7736,7 +7706,7 @@ functionAll += ", " + function os.Remove(fileName) os.Remove(fileName + "-f") - successLogger(testName, functionAll, args, startTime).Info() + logSuccess(testName, functionAll, args, startTime) } // Test for validating GetObject Reader* methods functioning when the @@ -7828,7 +7798,7 @@ function := "GetObject(bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test validates putObject to upload a file seeked at a given offset. @@ -7950,7 +7920,7 @@ function := "PutObject(bucketName, objectName, fileToUpload, contentType)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests bucket re-create errors. @@ -8010,7 +7980,7 @@ function := "MakeBucket(bucketName, region)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object reader to not throw error on being closed twice. @@ -8099,7 +8069,7 @@ function := "MakeBucket(bucketName, region)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests FPutObject hidden contentType setting @@ -8262,7 +8232,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)" } os.Remove(fileName + ".gtar") - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests various bucket supported formats. @@ -8328,7 +8298,7 @@ function := "MakeBucket(bucketName, region)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests get object ReaderSeeker interface methods. @@ -8483,7 +8453,7 @@ function := "GetObject(bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests get object ReaderAt interface methods. 
@@ -8645,7 +8615,7 @@ function := "GetObject(bucketName, objectName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Tests copy object @@ -8792,7 +8762,7 @@ function := "CopyObject(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testComposeObjectErrorCasesWrapper(c *minio.Client) { @@ -8864,7 +8834,7 @@ function := "ComposeObject(destination, sourceList)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test expected error cases @@ -8962,7 +8932,7 @@ function := "ComposeObject(destination, sourceList)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test concatenating multiple 10K objects V2 @@ -9089,7 +9059,7 @@ function = "CopyObject(dst, src)" } delete(args, "objectName") - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { @@ -9245,7 +9215,7 @@ function := "CopyObject(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test encrypted copy object @@ -9583,7 +9553,7 @@ function := "CopyObject(destination, source)" logError(testName, function, args, startTime, "", "GetObject failed", err) return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testSSECMultipartEncryptedToSSECCopyObjectPart() { @@ -9778,7 +9748,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -9956,7 +9926,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -10133,7 +10103,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -10313,7 +10283,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -10488,7 +10458,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -10659,7 +10629,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. 
} @@ -10832,7 +10802,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -11008,7 +10978,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -11180,7 +11150,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -11355,7 +11325,7 @@ function := "CopyObjectPart(destination, source)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) // Do not need to remove destBucketName its same as bucketName. } @@ -11534,7 +11504,7 @@ function = "ComposeObject(destination, sources)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testUserMetadataCopyingV2() { @@ -11645,7 +11615,7 @@ function := "testStorageClassMetadataPutObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testStorageClassInvalidMetadataPutObject() { @@ -11688,7 +11658,7 @@ function := "testStorageClassInvalidMetadataPutObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } func testStorageClassMetadataCopyObject() { @@ -11809,7 +11779,7 @@ function := "testStorageClassMetadataCopyObject()" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test put object with size -1 byte object. @@ -11885,7 +11855,7 @@ function := "PutObject(bucketName, objectName, reader, size, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test put objects of unknown size. @@ -11976,7 +11946,7 @@ function := "PutObject(bucketName, objectName, reader,size,opts)" } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test put object with 0 byte object. 
@@ -12046,7 +12016,7 @@ function := "PutObject(bucketName, objectName, reader, size, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test expected error cases @@ -12550,7 +12520,7 @@ functionAll += ", " + function os.Remove(fileName) os.Remove(fileName + "-f") - successLogger(testName, functionAll, args, startTime).Info() + logSuccess(testName, functionAll, args, startTime) } // Test get object with GetObject with context @@ -12652,7 +12622,7 @@ function := "GetObject(ctx, bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object with FGetObject with a user provided context @@ -12740,7 +12710,7 @@ function := "FGetObject(ctx, bucketName, objectName, fileName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object with GetObject with a user provided context @@ -12852,7 +12822,7 @@ function := "GetObject(ctx, bucketName, objectName, fileName)" } } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object ACLs with GetObjectACL with custom provided context @@ -12953,7 +12923,7 @@ function := "GetObjectACL(ctx, bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) return } @@ -13029,7 +12999,7 @@ function := "GetObjectACL(ctx, bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test validates putObject with context to see if request cancellation is honored for V2. 
@@ -13103,7 +13073,7 @@ function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object with GetObject with custom context @@ -13203,7 +13173,7 @@ function := "GetObject(ctx, bucketName, objectName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test get object with FGetObject with custom context @@ -13293,7 +13263,7 @@ function := "FGetObject(ctx, bucketName, objectName,fileName)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test list object v1 and V2 @@ -13379,7 +13349,7 @@ function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" } if objInfo.StorageClass != testObjects[objCursor].storageClass { // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() + logIgnored(testName, function, args, startTime, "ListObjects doesn't return expected storage class") } objCursor++ } @@ -13394,7 +13364,7 @@ function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true}) testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true}) - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Test deleting multiple objects with object retention set in Governance mode @@ -13530,7 +13500,7 @@ function := "RemoveObjects(bucketName, objectsCh, opts)" return } - successLogger(testName, function, args, startTime).Info() + logSuccess(testName, function, args, startTime) } // Convert string to bool and always return false if any error @@ -13543,14 +13513,19 @@ func mustParseBool(str string) bool { } func main() { - // Output to stdout instead of the default stderr - log.SetOutput(os.Stdout) - // create custom formatter - mintFormatter := mintJSONFormatter{} - // set custom formatter - log.SetFormatter(&mintFormatter) - // log Info or above -- success cases are Info level, failures are Fatal level - log.SetLevel(log.InfoLevel) + slog.SetDefault(slog.New(slog.NewJSONHandler( + os.Stdout, + &slog.HandlerOptions{ + Level: slog.LevelInfo, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.MessageKey || a.Value.String() == "" { + return slog.Attr{} + } + + return a + }, + }, + ))) tls := mustParseBool(os.Getenv(enableHTTPS)) kms := mustParseBool(os.Getenv(enableKMS)) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index 800c4a294..d245bc07a 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -237,6 +237,7 @@ func (m *STSAssumeRole) Retrieve() (Value, error) { AccessKeyID: a.Result.Credentials.AccessKey, SecretAccessKey: a.Result.Credentials.SecretKey, SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, SignerType: SignatureV4, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index af6104967..68f9b3815 100644 --- 
a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -30,17 +30,20 @@ defaultExpiryWindow = 0.8 ) -// A Value is the AWS credentials value for individual credential fields. +// A Value is the S3 credentials value for individual credential fields. type Value struct { - // AWS Access key ID + // S3 Access key ID AccessKeyID string - // AWS Secret Access Key + // S3 Secret Access Key SecretAccessKey string - // AWS Session Token + // S3 Session Token SessionToken string + // Expiration of this credentials - null means no expiration associated + Expiration time.Time + // Signature Type. SignerType SignatureType } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index 5b073763e..8c5c4eb2d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -129,6 +129,7 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { AccessKeyID: externalProcessCredentials.AccessKeyID, SecretAccessKey: externalProcessCredentials.SecretAccessKey, SessionToken: externalProcessCredentials.SessionToken, + Expiration: externalProcessCredentials.Expiration, SignerType: SignatureV4, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index c5153c4ca..7322948ec 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -61,6 +61,7 @@ type IAM struct { // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html Container struct { AuthorizationToken string + AuthorizationTokenFile string CredentialsFullURI string CredentialsRelativeURI string } @@ -105,6 +106,11 @@ func (m *IAM) Retrieve() (Value, error) { token = m.Container.AuthorizationToken } + tokenFile := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE") + if tokenFile == "" { + tokenFile = m.Container.AuthorizationToken + } + relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") if relativeURI == "" { relativeURI = m.Container.CredentialsRelativeURI @@ -181,6 +187,10 @@ func (m *IAM) Retrieve() (Value, error) { roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) + case tokenFile != "" && fullURI != "": + endpoint = fullURI + roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile) + case fullURI != "": if len(endpoint) == 0 { endpoint = fullURI @@ -209,6 +219,7 @@ func (m *IAM) Retrieve() (Value, error) { AccessKeyID: roleCreds.AccessKeyID, SecretAccessKey: roleCreds.SecretAccessKey, SessionToken: roleCreds.Token, + Expiration: roleCreds.Expiration, SignerType: SignatureV4, }, nil } @@ -304,6 +315,18 @@ func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2Role return respCreds, nil } +func getEKSPodIdentityCredentials(client *http.Client, endpoint string, tokenFile string) (ec2RoleCredRespBody, error) { + if tokenFile != "" { + bytes, err := os.ReadFile(tokenFile) + if err != nil { + return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: failed to read token file:%s", err) + } + token := string(bytes) + return getEcsTaskCredentials(client, endpoint, token) + } + return ec2RoleCredRespBody{}, 
fmt.Errorf("getEKSPodIdentityCredentials: no tokenFile found") +} + func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go index 9e92c1e0f..62bfbb6b0 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -177,6 +177,7 @@ func (m *STSClientGrants) Retrieve() (Value, error) { AccessKeyID: a.Result.Credentials.AccessKey, SecretAccessKey: a.Result.Credentials.SecretKey, SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, SignerType: SignatureV4, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go index e1f9ce4be..75e1a77d3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -113,6 +113,7 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { AccessKeyID: cr.AccessKey, SecretAccessKey: cr.SecretKey, SessionToken: cr.SessionToken, + Expiration: cr.Expiration, SignerType: SignatureV4, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index ec5f3f097..b8df289f2 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -184,6 +184,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { AccessKeyID: cr.AccessKey, SecretAccessKey: cr.SecretKey, SessionToken: cr.SessionToken, + Expiration: cr.Expiration, SignerType: SignatureV4, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index dee0a8cbb..10083502d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -188,6 +188,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { AccessKeyID: response.Result.Credentials.AccessKey, SecretAccessKey: response.Result.Credentials.SecretKey, SessionToken: response.Result.Credentials.SessionToken, + Expiration: response.Result.Credentials.Expiration, SignerType: SignatureDefault, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 2e2af50b4..596d95152 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -195,6 +195,7 @@ func (m *STSWebIdentity) Retrieve() (Value, error) { AccessKeyID: a.Result.Credentials.AccessKey, SecretAccessKey: a.Result.Credentials.SecretKey, SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, SignerType: SignatureV4, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index 1c6105e6a..5ddcad897 100644 --- 
a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -118,6 +118,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) { http.StatusBadGateway: {}, http.StatusServiceUnavailable: {}, http.StatusGatewayTimeout: {}, + 520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected. // Add more HTTP status codes here. } diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index b1de7b62a..068a6bfa1 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -1,6 +1,6 @@ /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. + * Copyright 2015-2024 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,48 +17,155 @@ package minio +type awsS3Endpoint struct { + endpoint string + dualstackEndpoint string +} + // awsS3EndpointMap Amazon S3 endpoint map. -var awsS3EndpointMap = map[string]string{ - "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", - "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", - "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", - "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", - "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", - "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", - "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", - "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", - "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", - "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", - "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", - "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", - "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", - "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", - "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", - "ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com", - "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", - "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", - "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", - "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", - "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", - "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", - "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", - "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", - "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", - "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", - "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", - "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com", - "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com", - "il-central-1": "s3.dualstack.il-central-1.amazonaws.com", +var awsS3EndpointMap = map[string]awsS3Endpoint{ + "us-east-1": { + "s3.us-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + }, + "us-east-2": { + "s3.us-east-2.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + }, + "us-west-2": { + "s3.us-west-2.amazonaws.com", + "s3.dualstack.us-west-2.amazonaws.com", + }, + "us-west-1": { + "s3.us-west-1.amazonaws.com", + "s3.dualstack.us-west-1.amazonaws.com", + }, + "ca-central-1": { + 
"s3.ca-central-1.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + }, + "eu-west-1": { + "s3.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + }, + "eu-west-2": { + "s3.eu-west-2.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + }, + "eu-west-3": { + "s3.eu-west-3.amazonaws.com", + "s3.dualstack.eu-west-3.amazonaws.com", + }, + "eu-central-1": { + "s3.eu-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + }, + "eu-central-2": { + "s3.eu-central-2.amazonaws.com", + "s3.dualstack.eu-central-2.amazonaws.com", + }, + "eu-north-1": { + "s3.eu-north-1.amazonaws.com", + "s3.dualstack.eu-north-1.amazonaws.com", + }, + "eu-south-1": { + "s3.eu-south-1.amazonaws.com", + "s3.dualstack.eu-south-1.amazonaws.com", + }, + "eu-south-2": { + "s3.eu-south-2.amazonaws.com", + "s3.dualstack.eu-south-2.amazonaws.com", + }, + "ap-east-1": { + "s3.ap-east-1.amazonaws.com", + "s3.dualstack.ap-east-1.amazonaws.com", + }, + "ap-south-1": { + "s3.ap-south-1.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + }, + "ap-south-2": { + "s3.ap-south-2.amazonaws.com", + "s3.dualstack.ap-south-2.amazonaws.com", + }, + "ap-southeast-1": { + "s3.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + }, + "ap-southeast-2": { + "s3.ap-southeast-2.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + }, + "ap-southeast-3": { + "s3.ap-southeast-3.amazonaws.com", + "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + "ap-southeast-4": { + "s3.ap-southeast-4.amazonaws.com", + "s3.dualstack.ap-southeast-4.amazonaws.com", + }, + "ap-northeast-1": { + "s3.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": { + "s3.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + "ap-northeast-3": { + "s3.ap-northeast-3.amazonaws.com", + "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + "af-south-1": { + "s3.af-south-1.amazonaws.com", + "s3.dualstack.af-south-1.amazonaws.com", + }, + "me-central-1": { + "s3.me-central-1.amazonaws.com", + "s3.dualstack.me-central-1.amazonaws.com", + }, + "me-south-1": { + "s3.me-south-1.amazonaws.com", + "s3.dualstack.me-south-1.amazonaws.com", + }, + "sa-east-1": { + "s3.sa-east-1.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + }, + "us-gov-west-1": { + "s3.us-gov-west-1.amazonaws.com", + "s3.dualstack.us-gov-west-1.amazonaws.com", + }, + "us-gov-east-1": { + "s3.us-gov-east-1.amazonaws.com", + "s3.dualstack.us-gov-east-1.amazonaws.com", + }, + "cn-north-1": { + "s3.cn-north-1.amazonaws.com.cn", + "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + "cn-northwest-1": { + "s3.cn-northwest-1.amazonaws.com.cn", + "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + "il-central-1": { + "s3.il-central-1.amazonaws.com", + "s3.dualstack.il-central-1.amazonaws.com", + }, } // getS3Endpoint get Amazon S3 endpoint based on the bucket location. -func getS3Endpoint(bucketLocation string) (s3Endpoint string) { +func getS3Endpoint(bucketLocation string, useDualstack bool) (endpoint string) { s3Endpoint, ok := awsS3EndpointMap[bucketLocation] if !ok { - // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. - s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" + // Default to 's3.us-east-1.amazonaws.com' endpoint. 
+ if useDualstack { + return "s3.dualstack.us-east-1.amazonaws.com" + } + return "s3.us-east-1.amazonaws.com" } - return s3Endpoint + if useDualstack { + return s3Endpoint.dualstackEndpoint + } + return s3Endpoint.endpoint } diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 94c19b2a5..d68f14844 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -512,6 +512,21 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") } +var supportedReplicationEncryptionHeaders = map[string]bool{ + "x-minio-replication-server-side-encryption-sealed-key": true, + "x-minio-replication-server-side-encryption-seal-algorithm": true, + "x-minio-replication-server-side-encryption-iv": true, + "x-minio-replication-encrypted-multipart": true, + "x-minio-replication-actual-object-size": true, + // Add more supported headers here. + // Must be lower case. +} + +// isValidReplicationEncryptionHeader returns true if header is one of valid replication encryption headers +func isValidReplicationEncryptionHeader(headerKey string) bool { + return supportedReplicationEncryptionHeaders[strings.ToLower(headerKey)] +} + // supportedQueryValues is a list of query strings that can be passed in when using GetObject. var supportedQueryValues = map[string]bool{ "attributes": true, diff --git a/vendor/modules.txt b/vendor/modules.txt index bc763fd0f..b4326ecdb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -439,10 +439,11 @@ github.com/json-iterator/go # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote -# github.com/klauspost/compress v1.17.4 +# github.com/klauspost/compress v1.17.6 ## explicit; go 1.19 github.com/klauspost/compress/flate github.com/klauspost/compress/gzip +github.com/klauspost/compress/internal/race github.com/klauspost/compress/s2 github.com/klauspost/compress/snappy github.com/klauspost/compress/zlib @@ -483,8 +484,8 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.67 -## explicit; go 1.17 +# github.com/minio/minio-go/v7 v7.0.69 +## explicit; go 1.21 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials github.com/minio/minio-go/v7/pkg/encrypt
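Note: for reference, a standalone sketch of the endpoint selection introduced in s3-endpoints.go above: each region now carries both a plain and a dualstack host, chosen by a useDualstack flag, with us-east-1 as the fallback for unknown regions. The sketch is not part of the patch; the names are local to it rather than the library's exported API, and only one region is included.

package main

import "fmt"

type awsS3Endpoint struct {
	endpoint          string
	dualstackEndpoint string
}

// One entry shown for brevity; the vendored map covers all supported regions.
var endpoints = map[string]awsS3Endpoint{
	"eu-west-1": {"s3.eu-west-1.amazonaws.com", "s3.dualstack.eu-west-1.amazonaws.com"},
}

func getS3Endpoint(region string, useDualstack bool) string {
	e, ok := endpoints[region]
	if !ok {
		// Unknown regions fall back to us-east-1, as in the patch.
		if useDualstack {
			return "s3.dualstack.us-east-1.amazonaws.com"
		}
		return "s3.us-east-1.amazonaws.com"
	}
	if useDualstack {
		return e.dualstackEndpoint
	}
	return e.endpoint
}

func main() {
	fmt.Println(getS3Endpoint("eu-west-1", true))  // s3.dualstack.eu-west-1.amazonaws.com
	fmt.Println(getS3Endpoint("xx-fake-1", false)) // s3.us-east-1.amazonaws.com
}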