Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2024-11-22 19:56:39 +00:00
[chore]: Bump github.com/minio/minio-go/v7 from 7.0.44 to 7.0.47 (#1348)
Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.44 to 7.0.47.
- [Release notes](https://github.com/minio/minio-go/releases)
- [Commits](https://github.com/minio/minio-go/compare/v7.0.44...v7.0.47)

---
updated-dependencies:
- dependency-name: github.com/minio/minio-go/v7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
This commit is contained in:
parent 2a46980082
commit fe3e9ede52

go.mod (2 changes)
@@ -32,7 +32,7 @@ require (
 github.com/jackc/pgx/v4 v4.17.2
 github.com/microcosm-cc/bluemonday v1.0.21
 github.com/miekg/dns v1.1.50
-github.com/minio/minio-go/v7 v7.0.44
+github.com/minio/minio-go/v7 v7.0.47
 github.com/mitchellh/mapstructure v1.5.0
 github.com/oklog/ulid v1.3.1
 github.com/robfig/cron/v3 v3.0.1

go.sum (4 changes)
@@ -418,8 +418,8 @@ github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
 github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.44 h1:9zUJ7iU7ax2P1jOvTp6nVrgzlZq3AZlFm0XfRFDKstM=
-github.com/minio/minio-go/v7 v7.0.44/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
+github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs=
+github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
 github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
 github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=

vendor/github.com/minio/minio-go/v7/Makefile (generated, vendored; 2 changes)
@@ -16,6 +16,8 @@ lint:

 vet:
 @GO111MODULE=on go vet ./...
+@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
+${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"

 test:
 @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...

vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go (generated, vendored; 4 changes)
@@ -21,7 +21,7 @@
 "bytes"
 "context"
 "encoding/xml"
-"io/ioutil"
+"io"
 "net/http"
 "net/url"

@@ -143,5 +143,5 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b
 }
 }

-return ioutil.ReadAll(resp.Body)
+return io.ReadAll(resp.Body)
 }

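Most of the vendored changes in this release are a mechanical migration off the deprecated io/ioutil package onto its Go 1.16+ replacements in io and os; the same substitution repeats through the files below. A minimal sketch of the mapping, using only standard-library calls (not code from this commit):

package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadAll(r)     -> io.ReadAll(r)
	data, _ := io.ReadAll(strings.NewReader("body"))

	// ioutil.Discard        -> io.Discard
	io.Copy(io.Discard, strings.NewReader("drained"))

	// ioutil.NopCloser(r)   -> io.NopCloser(r)
	rc := io.NopCloser(strings.NewReader(string(data)))
	defer rc.Close()

	// ioutil.TempFile(d, p) -> os.CreateTemp(d, p)
	f, _ := os.CreateTemp("", "example-*")
	defer os.Remove(f.Name())
	defer f.Close()
}

Since Go 1.16 the ioutil functions are documented as thin wrappers around these replacements, so the swap is behavior-preserving.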
vendor/github.com/minio/minio-go/v7/api-bucket-policy.go (generated, vendored; 4 changes)
@@ -18,7 +18,7 @@

 import (
 "context"
-"io/ioutil"
+"io"
 "net/http"
 "net/url"
 "strings"
@@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string
 }
 }

-bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
+bucketPolicyBuf, err := io.ReadAll(resp.Body)
 if err != nil {
 return "", err
 }

vendor/github.com/minio/minio-go/v7/api-bucket-replication.go (generated, vendored; 4 changes)
@@ -22,7 +22,7 @@
 "context"
 "encoding/json"
 "encoding/xml"
-"io/ioutil"
+"io"
 "net/http"
 "net/url"
 "time"
@@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str
 if resp.StatusCode != http.StatusOK {
 return s, httpRespToErrorResponse(resp, bucketName, "")
 }
-respBytes, err := ioutil.ReadAll(resp.Body)
+respBytes, err := io.ReadAll(resp.Body)
 if err != nil {
 return s, err
 }

vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go (generated, vendored; 3 changes)
@@ -22,7 +22,6 @@
 "encoding/xml"
 "errors"
 "io"
-"io/ioutil"
 "net/http"
 "net/url"

@@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags
 return nil, httpRespToErrorResponse(resp, bucketName, "")
 }

-defer io.Copy(ioutil.Discard, resp.Body)
+defer io.Copy(io.Discard, resp.Body)
 return tags.ParseBucketXML(resp.Body)
 }

vendor/github.com/minio/minio-go/v7/api-compose-object.go (generated, vendored; 5 changes)
@@ -21,7 +21,6 @@
 "context"
 "fmt"
 "io"
-"io/ioutil"
 "net/http"
 "net/url"
 "strconv"
@@ -516,7 +515,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
 return UploadInfo{}, err
 }
 if dst.Progress != nil {
-io.CopyN(ioutil.Discard, dst.Progress, end-start+1)
+io.CopyN(io.Discard, dst.Progress, end-start+1)
 }
 objParts = append(objParts, complPart)
 partIndex++
@@ -525,7 +524,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..

 // 4. Make final complete-multipart request.
 uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
-completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
+completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
 if err != nil {
 return UploadInfo{}, err
 }

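The substantive fix in api-compose-object.go is that ComposeObject now forwards the destination's server-side-encryption settings to the final complete-multipart-upload request instead of an empty PutObjectOptions. A hedged caller-side sketch of composing into an encrypted destination; the endpoint, credentials, and bucket/object names are placeholders, and encrypt.NewSSE() is used here as the package's SSE-S3 constructor:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{ // placeholder endpoint
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""), // placeholder credentials
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The destination carries the encryption; per this diff, v7.0.47 also
	// passes dst.Encryption through to the final complete-multipart request.
	dst := minio.CopyDestOptions{
		Bucket:     "dst-bucket",
		Object:     "composed-object",
		Encryption: encrypt.NewSSE(), // SSE-S3
	}
	srcs := []minio.CopySrcOptions{
		{Bucket: "src-bucket", Object: "part-1"},
		{Bucket: "src-bucket", Object: "part-2"},
	}

	if _, err := client.ComposeObject(context.Background(), dst, srcs...); err != nil {
		log.Fatal(err)
	}
}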
vendor/github.com/minio/minio-go/v7/api-copy-object.go (generated, vendored; 3 changes)
@@ -20,7 +20,6 @@
 import (
 "context"
 "io"
-"io/ioutil"
 "net/http"
 )

@@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr

 // Update the progress properly after successful copy.
 if dst.Progress != nil {
-io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size))
+io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
 }

 cpObjRes := copyObjectResult{}

vendor/github.com/minio/minio-go/v7/api-error-response.go (generated, vendored; 23 changes)
@@ -22,7 +22,6 @@
 "encoding/xml"
 "fmt"
 "io"
-"io/ioutil"
 "net/http"
 )

@@ -108,7 +107,7 @@ func (e ErrorResponse) Error() string {
 func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
 // read the whole body (up to 1MB)
 const maxBodyLength = 1 << 20
-body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
 if err != nil {
 return nil, err
 }
@@ -253,26 +252,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
 }
 }

-// errInvalidBucketName - Invalid bucket name response.
-func errInvalidBucketName(message string) error {
-return ErrorResponse{
-StatusCode: http.StatusBadRequest,
-Code: "InvalidBucketName",
-Message: message,
-RequestID: "minio",
-}
-}
-
-// errInvalidObjectName - Invalid object name response.
-func errInvalidObjectName(message string) error {
-return ErrorResponse{
-StatusCode: http.StatusNotFound,
-Code: "NoSuchKey",
-Message: message,
-RequestID: "minio",
-}
-}
-
 // errInvalidArgument - Invalid argument response.
 func errInvalidArgument(message string) error {
 return ErrorResponse{

vendor/github.com/minio/minio-go/v7/api-list.go (generated, vendored; 2 changes)
@@ -897,6 +897,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM
 }

 // listObjectParts list all object parts recursively.
+//
+//lint:ignore U1000 Keep this around
 func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
 // Part number marker for the next batch of request.
 var nextPartNumberMarker int

vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go (generated, vendored; 7 changes)
@@ -26,7 +26,6 @@
 "fmt"
 "hash/crc32"
 "io"
-"io/ioutil"
 "net/http"
 "net/url"
 "sort"
@@ -201,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj

 // Sort all completed parts.
 sort.Sort(completedParts(complMultipartUpload.Parts))
-opts = PutObjectOptions{}
+opts = PutObjectOptions{
+ServerSideEncryption: opts.ServerSideEncryption,
+}
 if len(crcBytes) > 0 {
 // Add hash of hashes.
 crc.Reset()
@@ -412,7 +413,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object

 // Read resp.Body into a []bytes to parse for Error response inside the body
 var b []byte
-b, err = ioutil.ReadAll(resp.Body)
+b, err = io.ReadAll(resp.Body)
 if err != nil {
 return UploadInfo{}, err
 }

vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go (generated, vendored; 215 changes)
@@ -28,6 +28,7 @@
 "net/url"
 "sort"
 "strings"
+"sync"

 "github.com/google/uuid"
 "github.com/minio/minio-go/v7/pkg/s3utils"
@@ -44,7 +45,9 @@
 func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
 reader io.Reader, size int64, opts PutObjectOptions,
 ) (info UploadInfo, err error) {
-if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
+if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+} else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
 // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
 info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
 } else {
@@ -266,6 +269,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 // Sort all completed parts.
 sort.Sort(completedParts(complMultipartUpload.Parts))

+opts = PutObjectOptions{
+ServerSideEncryption: opts.ServerSideEncryption,
+}
 if withChecksum {
 // Add hash of hashes.
 crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
@@ -278,7 +284,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 }

-uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
+uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 if err != nil {
 return UploadInfo{}, err
 }
@@ -425,6 +431,211 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 // Sort all completed parts.
 sort.Sort(completedParts(complMultipartUpload.Parts))

+opts = PutObjectOptions{
+ServerSideEncryption: opts.ServerSideEncryption,
+}
+if len(crcBytes) > 0 {
+// Add hash of hashes.
+crc.Reset()
+crc.Write(crcBytes)
+opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+}
+uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+if err != nil {
+return UploadInfo{}, err
+}
+
+uploadInfo.Size = totalUploadedSize
+return uploadInfo, nil
+}
+
+// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
+// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
+func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
+reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+// Input validation.
+if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+return UploadInfo{}, err
+}
+
+if err = s3utils.CheckValidObjectName(objectName); err != nil {
+return UploadInfo{}, err
+}
+
+if !opts.SendContentMd5 {
+if opts.UserMetadata == nil {
+opts.UserMetadata = make(map[string]string, 1)
+}
+opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+}
+
+// Cancel all when an error occurs.
+ctx, cancel := context.WithCancel(ctx)
+defer cancel()
+
+// Calculate the optimal parts info for a given size.
+totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+if err != nil {
+return UploadInfo{}, err
+}
+
+// Initiates a new multipart request
+uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+if err != nil {
+return UploadInfo{}, err
+}
+delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+// Aborts the multipart upload if the function returns
+// any error, since we do not resume we should purge
+// the parts which have been uploaded to relinquish
+// storage space.
+defer func() {
+if err != nil {
+c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+}
+}()
+
+// Create checksums
+// CRC32C is ~50% faster on AMD64 @ 30GB/s
+var crcBytes []byte
+crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+md5Hash := c.md5Hasher()
+defer md5Hash.Close()
+
+// Total data read and written to server. should be equal to 'size' at the end of the call.
+var totalUploadedSize int64
+
+// Initialize parts uploaded map.
+partsInfo := make(map[int]ObjectPart)
+
+// Create a buffer.
+nBuffers := int64(opts.NumThreads)
+bufs := make(chan []byte, nBuffers)
+all := make([]byte, nBuffers*partSize)
+for i := int64(0); i < nBuffers; i++ {
+bufs <- all[i*partSize : i*partSize+partSize]
+}
+
+var wg sync.WaitGroup
+var mu sync.Mutex
+errCh := make(chan error, opts.NumThreads)
+
+reader = newHook(reader, opts.Progress)
+
+// Part number always starts with '1'.
+var partNumber int
+for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+// Proceed to upload the part.
+var buf []byte
+select {
+case buf = <-bufs:
+case err = <-errCh:
+cancel()
+wg.Wait()
+return UploadInfo{}, err
+}
+
+if int64(len(buf)) != partSize {
+return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize)
+}
+
+length, rerr := readFull(reader, buf)
+if rerr == io.EOF && partNumber > 1 {
+// Done
+break
+}
+
+if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
+cancel()
+wg.Wait()
+return UploadInfo{}, rerr
+}
+
+// Calculate md5sum.
+customHeader := make(http.Header)
+if !opts.SendContentMd5 {
+// Add CRC32C instead.
+crc.Reset()
+crc.Write(buf[:length])
+cSum := crc.Sum(nil)
+customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+crcBytes = append(crcBytes, cSum...)
+}
+
+wg.Add(1)
+go func(partNumber int) {
+// Avoid declaring variables in the for loop
+var md5Base64 string
+
+if opts.SendContentMd5 {
+md5Hash.Reset()
+md5Hash.Write(buf[:length])
+md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+}
+
+defer wg.Done()
+p := uploadPartParams{
+bucketName: bucketName,
+objectName: objectName,
+uploadID: uploadID,
+reader: bytes.NewReader(buf[:length]),
+partNumber: partNumber,
+md5Base64: md5Base64,
+size: int64(length),
+sse: opts.ServerSideEncryption,
+streamSha256: !opts.DisableContentSha256,
+customHeader: customHeader,
+}
+objPart, uerr := c.uploadPart(ctx, p)
+if uerr != nil {
+errCh <- uerr
+}
+
+// Save successfully uploaded part metadata.
+mu.Lock()
+partsInfo[partNumber] = objPart
+mu.Unlock()
+
+// Send buffer back so it can be reused.
+bufs <- buf
+}(partNumber)
+
+// Save successfully uploaded size.
+totalUploadedSize += int64(length)
+}
+wg.Wait()
+
+// Collect any error
+select {
+case err = <-errCh:
+return UploadInfo{}, err
+default:
+}
+
+// Complete multipart upload.
+var complMultipartUpload completeMultipartUpload
+
+// Loop over total uploaded parts to save them in
+// Parts array before completing the multipart request.
+for i := 1; i < partNumber; i++ {
+part, ok := partsInfo[i]
+if !ok {
+return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+}
+complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ETag: part.ETag,
+PartNumber: part.PartNumber,
+ChecksumCRC32: part.ChecksumCRC32,
+ChecksumCRC32C: part.ChecksumCRC32C,
+ChecksumSHA1: part.ChecksumSHA1,
+ChecksumSHA256: part.ChecksumSHA256,
+})
+}
+
+// Sort all completed parts.
+sort.Sort(completedParts(complMultipartUpload.Parts))
+
 opts = PutObjectOptions{}
 if len(crcBytes) > 0 {
 // Add hash of hashes.

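The new putObjectMultipartStreamParallel reads parts serially into a fixed pool of part-sized buffers and uploads them concurrently, recycling buffers through a channel so memory stays bounded at roughly NumThreads × PartSize. A standalone sketch of that buffer-pool pattern, with illustrative names and a stubbed-out upload step (not the library's code):

package main

import (
	"fmt"
	"sync"
)

// process stands in for uploading one buffered part.
func process(part int, buf []byte) {
	fmt.Printf("part %d: %d bytes\n", part, len(buf))
}

func main() {
	const (
		nBuffers = 4
		partSize = 1 << 20 // 1 MiB per part, illustrative
		parts    = 10
	)

	// Pre-allocate a fixed pool of buffers and hand them out via a channel,
	// so at most nBuffers parts are in memory and in flight at once.
	bufs := make(chan []byte, nBuffers)
	all := make([]byte, nBuffers*partSize)
	for i := 0; i < nBuffers; i++ {
		bufs <- all[i*partSize : (i+1)*partSize]
	}

	var wg sync.WaitGroup
	for part := 1; part <= parts; part++ {
		buf := <-bufs // blocks until a buffer is free: natural back-pressure
		// (the real code fills buf serially from the input stream here)
		wg.Add(1)
		go func(part int, buf []byte) {
			defer wg.Done()
			process(part, buf)
			bufs <- buf // return the buffer to the pool for reuse
		}(part, buf)
	}
	wg.Wait()
}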
vendor/github.com/minio/minio-go/v7/api-put-object.go (generated, vendored; 10 changes)
@@ -87,7 +87,12 @@ type PutObjectOptions struct {
 SendContentMd5 bool
 DisableContentSha256 bool
 DisableMultipart bool
-Internal AdvancedPutOptions
+
+// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
+// fill them serially and upload them in parallel.
+// This can be used for faster uploads on non-seekable or slow-to-seek input.
+ConcurrentStreamParts bool
+Internal AdvancedPutOptions
 }

 // getNumThreads - gets the number of threads to be used in the multipart
@@ -272,6 +277,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 if opts.DisableMultipart {
 return UploadInfo{}, errors.New("no length provided and multipart disabled")
 }
+if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+}
 return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
 }

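PutObjectOptions gains ConcurrentStreamParts, which together with NumThreads > 1 routes streaming uploads through the new parallel path. A hedged usage sketch, assuming the documented v7 PutObject signature; the endpoint, credentials, and bucket/object names are placeholders, and -1 marks the stream length as unknown:

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{ // placeholder endpoint
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""), // placeholder credentials
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	stream := os.Stdin // any non-seekable reader

	// -1 size: length unknown up front. With ConcurrentStreamParts and
	// NumThreads > 1, parts are buffered serially and uploaded in parallel;
	// expect on the order of PartSize * NumThreads bytes of buffer usage.
	info, err := client.PutObject(context.Background(), "my-bucket", "my-object", stream, -1,
		minio.PutObjectOptions{
			ContentType:           "application/octet-stream",
			ConcurrentStreamParts: true,
			NumThreads:            4,
			PartSize:              16 << 20, // 16 MiB
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %d bytes", info.Size)
}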
vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go (generated, vendored; 3 changes)
@@ -24,7 +24,6 @@
 "context"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "strings"
 "sync"
@@ -107,7 +106,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
 return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
 }
 } else {
-f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
+f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
 if err != nil {
 return err
 }

vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go (generated, vendored; 2 changes)
@@ -316,8 +316,6 @@ type completeMultipartUploadResult struct {
 // CompletePart sub container lists individual part numbers and their
 // md5sum, part of completeMultipartUpload.
 type CompletePart struct {
-XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
 // Part number identifies the part.
 PartNumber int
 ETag string

vendor/github.com/minio/minio-go/v7/api-select.go (generated, vendored; 32 changes)
@@ -41,8 +41,8 @@
 // Constants for file header info.
 const (
 CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
-CSVFileHeaderInfoIgnore = "IGNORE"
-CSVFileHeaderInfoUse = "USE"
+CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
+CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
 )

 // SelectCompressionType - is the parameter for what type of compression is
@@ -52,15 +52,15 @@
 // Constants for compression types under select API.
 const (
 SelectCompressionNONE SelectCompressionType = "NONE"
-SelectCompressionGZIP = "GZIP"
-SelectCompressionBZIP = "BZIP2"
+SelectCompressionGZIP SelectCompressionType = "GZIP"
+SelectCompressionBZIP SelectCompressionType = "BZIP2"

 // Non-standard compression schemes, supported by MinIO hosts:

-SelectCompressionZSTD = "ZSTD" // Zstandard compression.
-SelectCompressionLZ4 = "LZ4" // LZ4 Stream
-SelectCompressionS2 = "S2" // S2 Stream
-SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
+SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
+SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
+SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
+SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
 )

 // CSVQuoteFields - is the parameter for how CSV fields are quoted.
@@ -69,7 +69,7 @@
 // Constants for csv quote styles.
 const (
 CSVQuoteFieldsAlways CSVQuoteFields = "Always"
-CSVQuoteFieldsAsNeeded = "AsNeeded"
+CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
 )

 // QueryExpressionType - is of what syntax the expression is, this should only
@@ -87,7 +87,7 @@
 // Constants for JSONTypes.
 const (
 JSONDocumentType JSONType = "DOCUMENT"
-JSONLinesType = "LINES"
+JSONLinesType JSONType = "LINES"
 )

 // ParquetInputOptions parquet input specific options
@@ -378,8 +378,8 @@ func (o SelectObjectOptions) Header() http.Header {
 // Constants for input data types.
 const (
 SelectObjectTypeCSV SelectObjectType = "CSV"
-SelectObjectTypeJSON = "JSON"
-SelectObjectTypeParquet = "Parquet"
+SelectObjectTypeJSON SelectObjectType = "JSON"
+SelectObjectTypeParquet SelectObjectType = "Parquet"
 )

 // preludeInfo is used for keeping track of necessary information from the
@@ -416,7 +416,7 @@ type StatsMessage struct {

 const (
 errorMsg messageType = "error"
-commonMsg = "event"
+commonMsg messageType = "event"
 )

 // eventType represents the type of event.
@@ -425,9 +425,9 @@ type StatsMessage struct {
 // list of event-types returned by Select API.
 const (
 endEvent eventType = "End"
-recordsEvent = "Records"
-progressEvent = "Progress"
-statsEvent = "Stats"
+recordsEvent eventType = "Records"
+progressEvent eventType = "Progress"
+statsEvent eventType = "Stats"
 )

 // contentType represents content type of event.

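The api-select.go changes are purely about type safety: inside a Go const block, a constant declared with a value but no type (e.g. CSVFileHeaderInfoIgnore = "IGNORE") is an untyped string constant rather than a CSVFileHeaderInfo, so it does not carry the named type; repeating the type on every line, as this diff does, keeps the whole set uniformly typed. A minimal illustration with a stand-in type (not from the library):

package main

import "fmt"

// HeaderInfo stands in for a string-based option type such as CSVFileHeaderInfo.
type HeaderInfo string

const (
	HeaderInfoNone   HeaderInfo = "NONE"
	HeaderInfoIgnore            = "IGNORE" // untyped string constant, NOT HeaderInfo
	HeaderInfoUse    HeaderInfo = "USE"    // explicitly typed, as the fix does
)

func describe(h HeaderInfo) { fmt.Println("header info:", h) }

func main() {
	describe(HeaderInfoNone)
	describe(HeaderInfoIgnore) // still compiles: untyped constants convert implicitly
	describe(HeaderInfoUse)

	// The difference shows up in contexts that require an exact type:
	var s string = HeaderInfoIgnore // OK: an untyped constant assigns to string
	_ = s
	// var s2 string = HeaderInfoUse // would not compile: HeaderInfo is not string
}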
vendor/github.com/minio/minio-go/v7/api.go (generated, vendored; 13 changes)
@@ -25,7 +25,6 @@
 "fmt"
 "hash/crc32"
 "io"
-"io/ioutil"
 "math/rand"
 "net"
 "net/http"
@@ -119,7 +118,7 @@ type Options struct {
 // Global constants.
 const (
 libraryName = "minio-go"
-libraryVersion = "v7.0.44"
+libraryVersion = "v7.0.47"
 )

 // User Agent should always following the below style.
@@ -635,7 +634,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 }

 // Read the body to be saved later.
-errBodyBytes, err := ioutil.ReadAll(res.Body)
+errBodyBytes, err := io.ReadAll(res.Body)
 // res.Body should be closed
 closeResponse(res)
 if err != nil {
@@ -644,14 +643,14 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ

 // Save the body.
 errBodySeeker := bytes.NewReader(errBodyBytes)
-res.Body = ioutil.NopCloser(errBodySeeker)
+res.Body = io.NopCloser(errBodySeeker)

 // For errors verify if its retryable otherwise fail quickly.
 errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

 // Save the body back again.
 errBodySeeker.Seek(0, 0) // Seek back to starting point.
-res.Body = ioutil.NopCloser(errBodySeeker)
+res.Body = io.NopCloser(errBodySeeker)

 // Bucket region if set in error response and the error
 // code dictates invalid region, we can retry the request
@@ -814,7 +813,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 if metadata.contentLength == 0 {
 req.Body = nil
 } else {
-req.Body = ioutil.NopCloser(metadata.contentBody)
+req.Body = io.NopCloser(metadata.contentBody)
 }

 // Set incoming content-length.
@@ -846,7 +845,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 // Additionally, we also look if the initialized client is secure,
 // if yes then we don't need to perform streaming signature.
 req = signer.StreamingSignV4(req, accessKeyID,
-secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
+secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
 default:
 // Set sha256 sum for signature calculation only with signature version '4'.
 shaHeader := unsignedPayload

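Beyond the version-string bump and the ioutil swap, the executeMethod hunks show the retry path buffering an error body and re-wrapping it with io.NopCloser around a seekable reader, so the body can be inspected and then rewound for the next consumer. A self-contained sketch of that buffer-and-rewind pattern, with an illustrative stand-in response (not the library's code):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Stand-in for an HTTP error response whose body we want to inspect
	// and still hand on as a fresh, readable res.Body.
	res := &http.Response{
		StatusCode: 400,
		Body:       io.NopCloser(strings.NewReader(`<Error><Code>InvalidRegion</Code></Error>`)),
	}

	// Read the body once and keep the bytes.
	errBodyBytes, err := io.ReadAll(res.Body)
	if err != nil {
		panic(err)
	}
	res.Body.Close()

	// Wrap the buffered bytes in a seekable reader; io.NopCloser restores
	// the io.ReadCloser shape that http.Response.Body requires.
	errBodySeeker := bytes.NewReader(errBodyBytes)
	res.Body = io.NopCloser(errBodySeeker)

	// First consumer inspects the body...
	peek, _ := io.ReadAll(res.Body)
	fmt.Println("inspected:", string(peek))

	// ...then rewind and re-attach it so the next consumer can read it again.
	errBodySeeker.Seek(0, io.SeekStart)
	res.Body = io.NopCloser(errBodySeeker)

	again, _ := io.ReadAll(res.Body)
	fmt.Println("re-read:", string(again))
}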
vendor/github.com/minio/minio-go/v7/functional_tests.go (generated, vendored; 124 changes)
@@ -31,7 +31,6 @@
 "hash"
 "hash/crc32"
 "io"
-"io/ioutil"
 "math/rand"
 "mime/multipart"
 "net/http"
@@ -346,7 +345,7 @@ func getDataReader(fileName string) io.ReadCloser {
 if _, ok := dataFileCRC32[fileName]; !ok {
 dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
 }
-return ioutil.NopCloser(newRandomReader(size, size))
+return io.NopCloser(newRandomReader(size, size))
 }
 reader, _ := os.Open(getMintDataDirFilePath(fileName))
 if _, ok := dataFileCRC32[fileName]; !ok {
@@ -989,7 +988,7 @@ function := "GetObject()"

 for _, testFile := range testFiles {
 r := getDataReader(testFile)
-buf, err := ioutil.ReadAll(r)
+buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1131,7 +1130,7 @@ function := "GetObject()"
 var errs [n]error
 for i := 0; i < n; i++ {
 r := newRandomReader(int64((1<<20)*i+i), int64(i))
-buf, err := ioutil.ReadAll(r)
+buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1271,7 +1270,7 @@ function := "CopyObject()"
 testFiles := []string{"datafile-1-b", "datafile-10-kB"}
 for _, testFile := range testFiles {
 r := getDataReader(testFile)
-buf, err := ioutil.ReadAll(r)
+buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1304,7 +1303,7 @@ function := "CopyObject()"
 return
 }

-oldestContent, err := ioutil.ReadAll(reader)
+oldestContent, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
 return
@@ -1338,7 +1337,7 @@ function := "CopyObject()"
 }
 defer readerCopy.Close()

-newestContent, err := ioutil.ReadAll(readerCopy)
+newestContent, err := io.ReadAll(readerCopy)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
 return
@@ -1408,7 +1407,7 @@ function := "CopyObject()"
 testFiles := []string{"datafile-10-kB"}
 for _, testFile := range testFiles {
 r := getDataReader(testFile)
-buf, err := ioutil.ReadAll(r)
+buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1441,7 +1440,7 @@ function := "CopyObject()"
 return
 }

-oldestContent, err := ioutil.ReadAll(reader)
+oldestContent, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
 return
@@ -1491,7 +1490,7 @@ function := "CopyObject()"
 }
 defer readerCopy.Close()

-newestContent, err := ioutil.ReadAll(readerCopy)
+newestContent, err := io.ReadAll(readerCopy)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
 return
@@ -1571,7 +1570,7 @@ function := "ComposeObject()"

 for _, testFile := range testFiles {
 r := getDataReader(testFile)
-buf, err := ioutil.ReadAll(r)
+buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1633,7 +1632,7 @@ function := "ComposeObject()"
 }
 defer readerCopy.Close()

-copyContentBytes, err := ioutil.ReadAll(readerCopy)
+copyContentBytes, err := io.ReadAll(readerCopy)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
 return
@@ -1733,12 +1732,39 @@ function := "DeleteObject()"
 logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err)
 return
 }
-err = c.RemoveBucket(context.Background(), bucketName)
+// test delete marker version id is non-null
+_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
 if err != nil {
-logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+logError(testName, function, args, startTime, "", "PutObject failed", err)
 return
 }
+// create delete marker
+err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+if err != nil {
+logError(testName, function, args, startTime, "", "DeleteObject failed", err)
+return
+}
+objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+idx := 0
+for info := range objectsInfo {
+if info.Err != nil {
+logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+return
+}
+if idx == 0 {
+if !info.IsDeleteMarker {
+logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
+return
+}
+if info.VersionID == "" {
+logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
+return
+}
+}
+idx++
+}

+defer cleanupBucket(bucketName, c)

 successLogger(testName, function, args, startTime).Info()
 }
@@ -2461,7 +2487,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -2982,7 +3008,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
 fileName := getMintDataDirFilePath("datafile-129-MB")
 if fileName == "" {
 // Make a temp file with minPartSize bytes of data.
-file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
 if err != nil {
 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 return
@@ -3091,7 +3117,7 @@ function = "MakeBucket(bucketName, location)"
 fName := getMintDataDirFilePath("datafile-129-MB")
 if fName == "" {
 // Make a temp file with minPartSize bytes of data.
-file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
 if err != nil {
 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 return
@@ -3257,7 +3283,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
 fName := getMintDataDirFilePath("datafile-1-MB")
 if fName == "" {
 // Make a temp file with 1 MiB bytes of data.
-file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
+file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
 if err != nil {
 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 return
@@ -3357,7 +3383,7 @@ function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)"
 fName := getMintDataDirFilePath("datafile-1-MB")
 if fName == "" {
 // Make a temp file with 1 MiB bytes of data.
-file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
+file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
 if err != nil {
 logError(testName, function, args, startTime, "", "Temp file creation failed", err)
 return
@@ -3621,7 +3647,7 @@ function := "GetObject(bucketName, objectName)"
 logError(testName, function, args, startTime, "", "file.Open failed", err)
 return
 }
-want, err := ioutil.ReadAll(zfr)
+want, err := io.ReadAll(zfr)
 if err != nil {
 logError(testName, function, args, startTime, "", "fzip file read failed", err)
 return
@@ -3638,7 +3664,7 @@ function := "GetObject(bucketName, objectName)"
 }
 return
 }
-got, err := ioutil.ReadAll(r)
+got, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -3722,7 +3748,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -3885,7 +3911,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -4062,7 +4088,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -4181,7 +4207,7 @@ function := "PresignedPostPolicy(policy)"
 metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
 metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -4245,7 +4271,7 @@ function := "PresignedPostPolicy(policy)"
 filePath := getMintDataDirFilePath("datafile-33-kB")
 if filePath == "" {
 // Make a temp file with 33 KB data.
-file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
 if err != nil {
 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 return
@@ -4588,7 +4614,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -4770,7 +4796,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -4944,7 +4970,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -5127,7 +5153,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 args["objectName"] = objectName

-buf, err := ioutil.ReadAll(reader)
+buf, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -6138,7 +6164,7 @@ functionAll += ", " + function
 return
 }

-newReadBytes, err := ioutil.ReadAll(newReader)
+newReadBytes, err := io.ReadAll(newReader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed", err)
 return
@@ -6269,7 +6295,7 @@ functionAll += ", " + function
 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
 return
 }
-newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+newPresignedBytes, err := io.ReadAll(resp.Body)
 if err != nil {
 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
 return
@@ -6312,7 +6338,7 @@ functionAll += ", " + function
 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
 return
 }
-newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+newPresignedBytes, err = io.ReadAll(resp.Body)
 if err != nil {
 logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
 return
@@ -6372,7 +6398,7 @@ functionAll += ", " + function
 return
 }

-newReadBytes, err = ioutil.ReadAll(newReader)
+newReadBytes, err = io.ReadAll(newReader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
 return
@@ -6428,7 +6454,7 @@ functionAll += ", " + function
 return
 }

-newReadBytes, err = ioutil.ReadAll(newReader)
+newReadBytes, err = io.ReadAll(newReader)
 if err != nil {
 logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
 return
@@ -6652,7 +6678,7 @@ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
 }
 args["fileToUpload"] = fileName
 } else {
-tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
 if err != nil {
 logError(testName, function, args, startTime, "", "TempFile create failed", err)
 return
@@ -6916,7 +6942,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
 defer cleanupBucket(bucketName, c)

 // Make a temp file with 11*1024*1024 bytes of data.
-file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
 if err != nil {
 logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 return
@@ -7145,7 +7171,7 @@ function := "GetObject(bucketName, objectName)"
 objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||||
args["objectName"] = objectName
|
args["objectName"] = objectName
|
||||||
|
|
||||||
buf, err := ioutil.ReadAll(reader)
|
buf, err := io.ReadAll(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -7299,7 +7325,7 @@ function := "GetObject(bucketName, objectName)"
|
||||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||||
args["objectName"] = objectName
|
args["objectName"] = objectName
|
||||||
|
|
||||||
buf, err := ioutil.ReadAll(reader)
|
buf, err := io.ReadAll(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -7837,7 +7863,7 @@ function = "CopyObject(dst, src)"
|
||||||
}
|
}
|
||||||
defer reader.Close()
|
defer reader.Close()
|
||||||
|
|
||||||
decBytes, err := ioutil.ReadAll(reader)
|
decBytes, err := io.ReadAll(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
|
logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -7915,7 +7941,7 @@ function := "CopyObject(destination, source)"
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
decBytes, err := ioutil.ReadAll(reader)
|
decBytes, err := io.ReadAll(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -7955,7 +7981,7 @@ function := "CopyObject(destination, source)"
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
decBytes, err = ioutil.ReadAll(reader)
|
decBytes, err = io.ReadAll(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -7994,7 +8020,7 @@ function := "CopyObject(destination, source)"
|
||||||
}
|
}
|
||||||
defer reader.Close()
|
defer reader.Close()
|
||||||
|
|
||||||
decBytes, err = ioutil.ReadAll(reader)
|
decBytes, err = io.ReadAll(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -11040,7 +11066,7 @@ functionAll += ", " + function
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
newReadBytes, err := ioutil.ReadAll(newReader)
|
newReadBytes, err := io.ReadAll(newReader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -11146,7 +11172,7 @@ functionAll += ", " + function
|
||||||
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
|
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
|
newPresignedBytes, err := io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -11185,7 +11211,7 @@ functionAll += ", " + function
|
||||||
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
|
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
newPresignedBytes, err = ioutil.ReadAll(resp.Body)
|
newPresignedBytes, err = io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||||
return
|
return
|
||||||
|
@ -11239,7 +11265,7 @@ functionAll += ", " + function
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
newReadBytes, err = ioutil.ReadAll(newReader)
|
newReadBytes, err = io.ReadAll(newReader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
|
logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
|
||||||
return
|
return
|
||||||
|
@ -11553,7 +11579,7 @@ function := "GetObject(ctx, bucketName, objectName, fileName)"
|
||||||
}
|
}
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
wantRC := getDataReader("datafile-129-MB")
|
wantRC := getDataReader("datafile-129-MB")
|
||||||
io.CopyN(ioutil.Discard, wantRC, test.start)
|
io.CopyN(io.Discard, wantRC, test.start)
|
||||||
want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
|
want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
|
||||||
opts := minio.GetObjectOptions{}
|
opts := minio.GetObjectOptions{}
|
||||||
opts.SetRange(test.start, test.end)
|
opts.SetRange(test.start, test.end)
|
||||||
|
|
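
The functional-test hunks above all apply the same Go 1.16+ migration away from the deprecated io/ioutil package: ioutil.ReadAll becomes io.ReadAll, ioutil.TempFile becomes os.CreateTemp, and ioutil.Discard becomes io.Discard. The following is a minimal standalone sketch of those substitutions; the temp-file prefix and sample payload are made up for illustration and are not part of the vendored tests.

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"
    )

    func main() {
        // io.ReadAll replaces ioutil.ReadAll with identical behaviour.
        buf, err := io.ReadAll(bytes.NewReader([]byte("hello")))
        if err != nil {
            panic(err)
        }
        fmt.Println(len(buf), "bytes read")

        // os.CreateTemp replaces ioutil.TempFile; same arguments, same *os.File result.
        f, err := os.CreateTemp(os.TempDir(), "minio-go-migration-demo-")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()

        // io.Discard replaces ioutil.Discard as the throwaway writer.
        if _, err := io.CopyN(io.Discard, bytes.NewReader(make([]byte, 1024)), 512); err != nil {
            panic(err)
        }
        fmt.Println("temp file:", f.Name())
    }
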

5 vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go generated vendored

@@ -24,7 +24,6 @@
  "encoding/xml"
  "errors"
  "io"
- "io/ioutil"
  "net/http"
  "net/url"
  "strconv"

@@ -139,7 +138,7 @@ func closeResponse(resp *http.Response) {
  // Without this closing connection would disallow re-using
  // the same connection for future uses.
  // - http://stackoverflow.com/a/17961593/4465767
- io.Copy(ioutil.Discard, resp.Body)
+ io.Copy(io.Discard, resp.Body)
  resp.Body.Close()
  }
  }

@@ -191,7 +190,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
  defer closeResponse(resp)
  if resp.StatusCode != http.StatusOK {
  var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
  if err != nil {
  return AssumeRoleResponse{}, err
  }

3 vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go generated vendored

@@ -22,7 +22,6 @@
  "encoding/xml"
  "fmt"
  "io"
- "io/ioutil"
  )

  // ErrorResponse - Is the typed error returned.

@@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error {
  func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
  // read the whole body (up to 1MB)
  const maxBodyLength = 1 << 20
- body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
  if err != nil {
  return nil, err
  }

9 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go generated vendored

@@ -18,7 +18,6 @@
  package credentials

  import (
- "io/ioutil"
  "os"
  "path/filepath"
  "runtime"

@@ -114,6 +113,7 @@ type hostConfig struct {
  type config struct {
  Version string `json:"version"`
  Hosts map[string]hostConfig `json:"hosts"`
+ Aliases map[string]hostConfig `json:"aliases"`
  }

  // loadAliass loads from the file pointed to by shared credentials filename for alias.

@@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) {
  cfg := &config{}
  json := jsoniter.ConfigCompatibleWithStandardLibrary

- configBytes, err := ioutil.ReadFile(filename)
+ configBytes, err := os.ReadFile(filename)
  if err != nil {
  return hostConfig{}, err
  }
  if err = json.Unmarshal(configBytes, cfg); err != nil {
  return hostConfig{}, err
  }

+ if cfg.Version == "10" {
+ return cfg.Aliases[alias], nil
+ }
+
  return cfg.Hosts[alias], nil
  }
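
The loadAlias change above reads the MinIO client config with os.ReadFile and, for config version "10", looks the alias up under an "aliases" key instead of "hosts". Below is a rough sketch of that version switch using only the standard library; the struct fields and the sample JSON are simplified stand-ins, not the vendored types.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // hostConfig and config mirror the shape used in the hunk above,
    // trimmed down to the fields needed for the demo.
    type hostConfig struct {
        URL string `json:"url"`
    }

    type config struct {
        Version string                `json:"version"`
        Hosts   map[string]hostConfig `json:"hosts"`
        Aliases map[string]hostConfig `json:"aliases"`
    }

    // lookupAlias follows the same rule as the patched loadAlias: version "10"
    // configs keep entries under "aliases", older ones under "hosts".
    func lookupAlias(raw []byte, alias string) (hostConfig, error) {
        cfg := &config{}
        if err := json.Unmarshal(raw, cfg); err != nil {
            return hostConfig{}, err
        }
        if cfg.Version == "10" {
            return cfg.Aliases[alias], nil
        }
        return cfg.Hosts[alias], nil
    }

    func main() {
        v10 := []byte(`{"version":"10","aliases":{"play":{"url":"https://play.min.io"}}}`)
        hc, err := lookupAlias(v10, "play")
        if err != nil {
            panic(err)
        }
        fmt.Println(hc.URL)
    }
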

6 vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go generated vendored

@@ -22,7 +22,7 @@
  "context"
  "errors"
  "fmt"
- "io/ioutil"
+ "io"
  "net"
  "net/http"
  "net/url"

@@ -106,7 +106,7 @@ func (m *IAM) Retrieve() (Value, error) {
  Client: m.Client,
  STSEndpoint: endpoint,
  GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
- token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+ token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
  if err != nil {
  return nil, err
  }

@@ -268,7 +268,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
  return "", err
  }
  defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
  if err != nil {
  return "", err
  }

4 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go generated vendored

@@ -22,7 +22,7 @@
  "encoding/xml"
  "errors"
  "fmt"
- "io/ioutil"
+ "io"
  "net/http"
  "net/url"
  "strings"

@@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
  defer resp.Body.Close()
  if resp.StatusCode != http.StatusOK {
  var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
  if err != nil {
  return AssumeRoleWithClientGrantsResponse{}, err
  }

4 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go generated vendored

@@ -21,7 +21,7 @@
  "bytes"
  "encoding/xml"
  "fmt"
- "io/ioutil"
+ "io"
  "net/http"
  "net/url"
  "strings"

@@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
  defer resp.Body.Close()
  if resp.StatusCode != http.StatusOK {
  var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
  if err != nil {
  return value, err
  }

3 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go generated vendored

@@ -21,7 +21,6 @@
  "encoding/xml"
  "errors"
  "io"
- "io/ioutil"
  "net"
  "net/http"
  "net/url"

@@ -152,7 +151,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
  }
  if resp.StatusCode != http.StatusOK {
  var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
  if err != nil {
  return Value{}, err
  }

4 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go generated vendored

@@ -22,7 +22,7 @@
  "encoding/xml"
  "errors"
  "fmt"
- "io/ioutil"
+ "io"
  "net/http"
  "net/url"
  "strconv"

@@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
  defer resp.Body.Close()
  if resp.StatusCode != http.StatusOK {
  var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
  if err != nil {
  return AssumeRoleWithWebIdentityResponse{}, err
  }

26 vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go generated vendored

@@ -34,19 +34,19 @@
  // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
  const (
  ObjectCreatedAll EventType = "s3:ObjectCreated:*"
- ObjectCreatedPut = "s3:ObjectCreated:Put"
+ ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
- ObjectCreatedPost = "s3:ObjectCreated:Post"
+ ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
- ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+ ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
- ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
- ObjectAccessedGet = "s3:ObjectAccessed:Get"
+ ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
- ObjectAccessedHead = "s3:ObjectAccessed:Head"
+ ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
- ObjectAccessedAll = "s3:ObjectAccessed:*"
+ ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
- ObjectRemovedAll = "s3:ObjectRemoved:*"
+ ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
- ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
- ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
- ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+ ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
- BucketCreatedAll = "s3:BucketCreated:*"
+ BucketCreatedAll EventType = "s3:BucketCreated:*"
- BucketRemovedAll = "s3:BucketRemoved:*"
+ BucketRemovedAll EventType = "s3:BucketRemoved:*"
  )

  // FilterRule - child of S3Key, a tag in the notification xml which
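
The notification.go hunk gives every event constant an explicit EventType instead of leaving all but the first as untyped string constants, so the whole block documents and type-checks uniformly. Below is a small standalone illustration; the EventType definition is re-declared locally so the snippet compiles on its own, and only the constant names and values come from the hunk.

    package main

    import "fmt"

    // EventType mirrors the string-based type used by the vendored package.
    type EventType string

    const (
        ObjectCreatedAll EventType = "s3:ObjectCreated:*"
        ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
        ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
    )

    func main() {
        // Typed constants slot directly into APIs that take EventType values.
        events := []EventType{ObjectCreatedAll, ObjectCreatedPut, ObjectRemovedAll}
        for _, e := range events {
            fmt.Printf("%T %s\n", e, e)
        }

        // Using one as a plain string now needs an explicit conversion,
        // which is the extra type safety the change buys.
        var raw string = string(ObjectCreatedPut)
        fmt.Println(raw)
    }
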

4 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go generated vendored

@@ -700,6 +700,10 @@ type TargetMetrics struct {
  PendingCount uint64 `json:"pendingReplicationCount"`
  // Total number of failed operations including metadata updates
  FailedCount uint64 `json:"failedReplicationCount"`
+ // Bandwidth limit in bytes/sec for this target
+ BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
+ // Current bandwidth used in bytes/sec for this target
+ CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
  }

  // Metrics represents inline replication metrics for a bucket.
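
The TargetMetrics additions expose per-target bandwidth figures; as the hunk shows, the Go field names say bytes/sec while the JSON tag for the limit is "limitInBits". The sketch below only demonstrates how those two new fields unmarshal; the struct is a trimmed local copy and the sample numbers are invented.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // targetMetrics is a local, trimmed-down copy of the struct extended in the
    // hunk above, kept only to show how the two new fields decode from JSON.
    type targetMetrics struct {
        PendingCount                     uint64  `json:"pendingReplicationCount"`
        FailedCount                      uint64  `json:"failedReplicationCount"`
        BandWidthLimitInBytesPerSecond   int64   `json:"limitInBits"`
        CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
    }

    func main() {
        raw := []byte(`{
            "pendingReplicationCount": 3,
            "failedReplicationCount": 0,
            "limitInBits": 104857600,
            "currentBandwidth": 52428800.5
        }`)

        var m targetMetrics
        if err := json.Unmarshal(raw, &m); err != nil {
            panic(err)
        }
        fmt.Printf("pending=%d failed=%d limit=%d current=%.1f\n",
            m.PendingCount, m.FailedCount,
            m.BandWidthLimitInBytesPerSecond, m.CurrentBandwidthInBytesPerSecond)
    }
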

@@ -21,7 +21,6 @@
  "bytes"
  "fmt"
  "io"
- "io/ioutil"
  "net/http"
  "strconv"
  "strings"

@@ -132,7 +131,7 @@ func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64,
  prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)

  if req.Body == nil {
- req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
  }

  stReader := &StreamingUSReader{

40 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go generated vendored

@@ -22,11 +22,12 @@
  "encoding/hex"
  "fmt"
  "io"
- "io/ioutil"
  "net/http"
  "strconv"
  "strings"
  "time"

+ md5simd "github.com/minio/md5-simd"
  )

  // Reference for constants used below -

@@ -91,14 +92,14 @@ func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {

  // buildChunkStringToSign - returns the string to sign given chunk data
  // and previous signature.
- func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+ func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
  stringToSignParts := []string{
  streamingPayloadHdr,
  t.Format(iso8601DateFormat),
  getScope(region, t, ServiceTypeS3),
  previousSig,
  emptySHA256,
- hex.EncodeToString(sum256(chunkData)),
+ chunkChecksum,
  }

  return strings.Join(stringToSignParts, "\n")

@@ -106,13 +107,13 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [

  // buildTrailerChunkStringToSign - returns the string to sign given chunk data
  // and previous signature.
- func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+ func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
  stringToSignParts := []string{
  streamingTrailerHdr,
  t.Format(iso8601DateFormat),
  getScope(region, t, ServiceTypeS3),
  previousSig,
- hex.EncodeToString(sum256(chunkData)),
+ chunkChecksum,
  }

  return strings.Join(stringToSignParts, "\n")

@@ -149,21 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
  }

  // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
- func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+ func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
  previousSignature, secretAccessKey string,
  ) string {
  chunkStringToSign := buildChunkStringToSign(reqTime, region,
- previousSignature, chunkData)
+ previousSignature, chunkCheckSum)
  signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
  return getSignature(signingKey, chunkStringToSign)
  }

  // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
- func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region,
+ func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
  previousSignature, secretAccessKey string,
  ) string {
  chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
- previousSignature, chunkData)
+ previousSignature, chunkChecksum)
  signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
  return getSignature(signingKey, chunkStringToSign)
  }

@@ -203,12 +204,17 @@ type StreamingReader struct {
  totalChunks int
  lastChunkSize int
  trailer http.Header
+ sh256 md5simd.Hasher
  }

  // signChunk - signs a chunk read from s.baseReader of chunkLen size.
  func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
  // Compute chunk signature for next header
- signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf[:chunkLen])
+ chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+
+ signature := buildChunkSignature(chunckChecksum, s.reqTime,
  s.region, s.prevSignature, s.secretAccessKey)

  // For next chunk signature computation

@@ -240,8 +246,11 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) {
  s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
  }

+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf)
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
  // Compute chunk signature
- signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime,
+ signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
  s.region, s.prevSignature, s.secretAccessKey)

  // For next chunk signature computation

@@ -274,13 +283,13 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
  // StreamingSignV4 - provides chunked upload signatureV4 support by
  // implementing io.Reader.
  func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
- region string, dataLen int64, reqTime time.Time,
+ region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
  ) *http.Request {
  // Set headers needed for streaming signature.
  prepareStreamingRequest(req, sessionToken, dataLen, reqTime)

  if req.Body == nil {
- req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
  }

  stReader := &StreamingReader{

@@ -295,6 +304,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
  chunkNum: 1,
  totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
  lastChunkSize: int(dataLen % payloadChunkSize),
+ sh256: sh256,
  }
  if len(req.Trailer) > 0 {
  stReader.trailer = req.Trailer

@@ -385,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {

  // Close - this method makes underlying io.ReadCloser's Close method available.
  func (s *StreamingReader) Close() error {
+ if s.sh256 != nil {
+ s.sh256.Close()
+ s.sh256 = nil
+ }
  return s.baseReadCloser.Close()
  }
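
The streaming-signer hunks change buildChunkSignature and buildTrailerChunkSignature to take a precomputed hex checksum string instead of the raw chunk bytes; the caller now owns the SHA-256 hashing (through an md5simd.Hasher in the vendored code) and the string-to-sign simply embeds the digest. The sketch below illustrates that flow with the standard library's sha256 standing in for md5simd, and with placeholder scope and signing helpers rather than the vendored getScope/getSignature functions.

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "strings"
        "time"
    )

    // hexChecksum plays the role the md5simd.Hasher does in the patched code:
    // hash the chunk once, hand the hex digest to the signer.
    func hexChecksum(chunk []byte) string {
        sum := sha256.Sum256(chunk)
        return hex.EncodeToString(sum[:])
    }

    // buildChunkStringToSign mirrors the patched signature: it receives the
    // checksum as a string rather than the chunk bytes. The header and scope
    // values are simplified placeholders, not the real SigV4 constants.
    func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
        return strings.Join([]string{
            "AWS4-HMAC-SHA256-PAYLOAD",
            t.UTC().Format("20060102T150405Z"),
            t.UTC().Format("20060102") + "/" + region + "/s3/aws4_request",
            previousSig,
            hexChecksum(nil), // empty-payload hash slot, as in the original layout
            chunkChecksum,
        }, "\n")
    }

    // sign is a stand-in for getSignature: HMAC-SHA256 over the string to sign.
    func sign(key []byte, stringToSign string) string {
        mac := hmac.New(sha256.New, key)
        mac.Write([]byte(stringToSign))
        return hex.EncodeToString(mac.Sum(nil))
    }

    func main() {
        chunk := []byte("example chunk payload")
        checksum := hexChecksum(chunk)
        sts := buildChunkStringToSign(time.Now(), "us-east-1", "prev-signature", checksum)
        fmt.Println(sign([]byte("demo-signing-key"), sts))
    }
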

2 vendor/github.com/minio/minio-go/v7/post-policy.go generated vendored

@@ -25,7 +25,7 @@
  )

  // expirationDateFormat date format for expiration key in json policy.
- const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+ const expirationDateFormat = "2006-01-02T15:04:05.000Z"

  // policyCondition explanation:
  // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
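
The post-policy change swaps the fractional-second layout from ".999" to ".000". In Go time formatting, "9" digits drop trailing zeros (and the dot entirely when the fraction is zero), while "0" digits always print, so the expiration written into the POST policy now always carries three millisecond digits. A quick runnable check of the two layouts:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // A timestamp that falls exactly on a second boundary shows the difference.
        t := time.Date(2023, 1, 11, 12, 0, 0, 0, time.UTC)

        // ".999" trims trailing zeros, so the fraction (and its dot) disappear here.
        fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2023-01-11T12:00:00Z

        // ".000" pads to a fixed three digits, which is what the patched constant uses.
        fmt.Println(t.Format("2006-01-02T15:04:05.000Z")) // 2023-01-11T12:00:00.000Z
    }
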

3 vendor/github.com/minio/minio-go/v7/transport.go generated vendored

@@ -23,7 +23,6 @@
  import (
  "crypto/tls"
  "crypto/x509"
- "io/ioutil"
  "net"
  "net/http"
  "os"

@@ -73,7 +72,7 @@ func mustGetSystemCertPool() *x509.CertPool {
  }
  if f := os.Getenv("SSL_CERT_FILE"); f != "" {
  rootCAs := mustGetSystemCertPool()
- data, err := ioutil.ReadFile(f)
+ data, err := os.ReadFile(f)
  if err == nil {
  rootCAs.AppendCertsFromPEM(data)
  }

3 vendor/github.com/minio/minio-go/v7/utils.go generated vendored

@@ -28,7 +28,6 @@
  "fmt"
  "hash"
  "io"
- "io/ioutil"
  "math/rand"
  "net"
  "net/http"

@@ -142,7 +141,7 @@ func closeResponse(resp *http.Response) {
  // Without this closing connection would disallow re-using
  // the same connection for future uses.
  // - http://stackoverflow.com/a/17961593/4465767
- io.Copy(ioutil.Discard, resp.Body)
+ io.Copy(io.Discard, resp.Body)
  resp.Body.Close()
  }
  }

2 vendor/modules.txt vendored

@@ -297,7 +297,7 @@ github.com/miekg/dns
  # github.com/minio/md5-simd v1.1.2
  ## explicit; go 1.14
  github.com/minio/md5-simd
- # github.com/minio/minio-go/v7 v7.0.44
+ # github.com/minio/minio-go/v7 v7.0.47
  ## explicit; go 1.17
  github.com/minio/minio-go/v7
  github.com/minio/minio-go/v7/pkg/credentials