[chore] Bump github.com/minio/minio-go/v7 from 7.0.37 to 7.0.43 (#983)
Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.37 to 7.0.43.
- [Release notes](https://github.com/minio/minio-go/releases)
- [Commits](https://github.com/minio/minio-go/compare/v7.0.37...v7.0.43)

---
updated-dependencies:
- dependency-name: github.com/minio/minio-go/v7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Parent: a5f31e5dd3 · Commit: 459a5c8d96
go.mod | 2

@@ -30,7 +30,7 @@ require (
     github.com/jackc/pgx/v4 v4.17.2
     github.com/microcosm-cc/bluemonday v1.0.20
     github.com/miekg/dns v1.1.50
-    github.com/minio/minio-go/v7 v7.0.37
+    github.com/minio/minio-go/v7 v7.0.43
     github.com/mitchellh/mapstructure v1.5.0
     github.com/oklog/ulid v1.3.1
     github.com/robfig/cron/v3 v3.0.1

go.sum | 4

@@ -457,8 +457,8 @@ github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
 github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8=
-github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
+github.com/minio/minio-go/v7 v7.0.43 h1:14Q4lwblqTdlAmba05oq5xL0VBLHi06zS4yLnIkz6hI=
+github.com/minio/minio-go/v7 v7.0.43/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
 github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
 github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=

vendor/github.com/minio/minio-go/v7/.gitignore (generated, vendored) | 3

@@ -1,4 +1,5 @@
 *~
 *.test
 validator
 golangci-lint
+functional_tests

vendor/github.com/minio/minio-go/v7/.golangci.yml (generated, vendored) | 4

@@ -12,8 +12,7 @@ linters:
     - govet
     - ineffassign
     - gosimple
-    - deadcode
-    - structcheck
+    - unused
    - gocritic
 
 issues:
@@ -25,3 +24,4 @@ issues:
     - "captLocal:"
     - "ifElseChain:"
     - "elseif:"
+    - "should have a package comment"

vendor/github.com/minio/minio-go/v7/Makefile (generated, vendored) | 5

@@ -9,7 +9,7 @@ checks: lint vet test examples functional-test
 
 lint:
     @mkdir -p ${GOPATH}/bin
-    @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
+    @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
     @echo "Running $@ check"
     @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
     @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

@@ -27,7 +27,8 @@ examples:
     @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
 
 functional-test:
-    @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
+    @GO111MODULE=on go build -race functional_tests.go
+    @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
 
 clean:
     @echo "Cleaning up all the generated files"

vendor/github.com/minio/minio-go/v7/README.md (generated, vendored) | 70

@@ -2,7 +2,7 @@
 
 The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
 
-This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
 
 This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
 
@@ -126,53 +126,53 @@ mc ls play/mymusic/
 ## API Reference
 The full API Reference is available here.
 
-* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference)
+* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
 
 ### API Reference : Bucket Operations
-* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
-* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
-* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
-* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
-* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
-* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
+* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
+* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
+* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
+* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
+* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
+* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
 
 ### API Reference : Bucket policy Operations
-* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
-* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
+* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
 
 ### API Reference : Bucket notification Operations
-* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
-* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
-* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
-* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension)
-* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension)
+* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
+* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
+* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
 
 ### API Reference : File Object Operations
-* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
-* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject)
+* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
+* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
 
 ### API Reference : Object Operations
-* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
-* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
-* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
-* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
-* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
-* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
-* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
-* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
-* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
+* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
+* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
+* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
+* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
+* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
+* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
+* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
+* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
 
 
 ### API Reference : Presigned Operations
-* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
-* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
-* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
-* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
+* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
+* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
+* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
 
 ### API Reference : Client custom settings
-* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
-* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn)
-* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff)
+* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
+* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
+* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
 
 ## Full Examples
 
@@ -236,8 +236,8 @@ The full API Reference is available here.
 * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
 
 ## Explore Further
-* [Complete Documentation](https://docs.min.io)
-* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
+* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
 
 ## Contribute
 [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)

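The README hunks above only rewrite documentation links; the quickstart they point to is unchanged. For orientation, a minimal sketch of the uploader flow that quickstart describes, using the public minio-go v7 API (the endpoint, bucket, object, path and credential values here are placeholders, not taken from this commit):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Connect to an S3-compatible endpoint (play.min.io is MinIO's public test server).
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// Upload a local file; FPutObject switches to multipart upload for large files.
	info, err := client.FPutObject(context.Background(), "mymusic", "song.mp3",
		"/tmp/song.mp3", minio.PutObjectOptions{ContentType: "audio/mpeg"})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %s (%d bytes)", info.Key, info.Size)
}
```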
vendor/github.com/minio/minio-go/v7/README_zh_CN.md (generated, vendored) | 70

@@ -14,7 +14,7 @@ MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对
 - Ceph Object Gateway
 - Riak CS
 
-本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。
+本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html)。
 
 本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。
 
@@ -140,52 +140,52 @@ mc ls play/mymusic/
 
 ## API文档
 完整的API文档在这里。
-* [完整API文档](https://docs.min.io/docs/golang-client-api-reference)
+* [完整API文档](https://min.io/docs/minio/linux/developers/go/API.html)
 
 ### API文档 : 操作存储桶
-* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
-* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
-* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
-* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
-* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
-* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
+* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
+* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
+* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
+* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
+* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
+* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
 
 ### API文档 : 存储桶策略
-* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
-* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
+* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
 
 ### API文档 : 存储桶通知
-* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
-* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
-* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
-* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展)
-* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展)
+* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
+* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO 扩展)
+* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO 扩展)
 
 ### API文档 : 操作文件对象
-* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
-* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
+* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
+* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
 
 ### API文档 : 操作对象
-* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
-* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
-* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
-* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
-* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
-* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
-* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
-* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
-* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
+* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
+* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
+* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
+* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
+* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
+* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
+* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
+* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
 
 ### API文档 : Presigned操作
-* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
-* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
-* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
-* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
+* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
+* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
+* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
 
 ### API文档 : 客户端自定义设置
-* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
-* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
-* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
+* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
+* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
+* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
 
 ## 完整示例
 
@@ -253,8 +253,8 @@ mc ls play/mymusic/
 * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
 
 ## 了解更多
-* [完整文档](https://docs.min.io)
-* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference)
+* [完整文档](https://min.io/docs/minio/kubernetes/upstream/index.html)
+* [MinIO Go Client SDK API文档](https://min.io/docs/minio/linux/developers/go/API.html)
 
 ## 贡献
 [贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)

vendor/github.com/minio/minio-go/v7/api-compose-object.go (generated, vendored) | 2

@@ -221,7 +221,7 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
         headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
     }
     if dstOpts.Internal.ReplicationRequest {
-        headers.Set(minIOBucketReplicationRequest, "")
+        headers.Set(minIOBucketReplicationRequest, "true")
     }
     if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
         headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))

vendor/github.com/minio/minio-go/v7/api-datatypes.go (generated, vendored) | 20

@@ -45,19 +45,20 @@ type BucketInfo struct {
 // on the first line is initialize it.
 func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
     *m = StringMap{}
-    type xmlMapEntry struct {
-        XMLName xml.Name
-        Value   string `xml:",chardata"`
+    type Item struct {
+        Key   string
+        Value string
     }
     for {
-        var e xmlMapEntry
+        var e Item
         err := d.Decode(&e)
         if err == io.EOF {
             break
-        } else if err != nil {
+        }
+        if err != nil {
             return err
         }
-        (*m)[e.XMLName.Local] = e.Value
+        (*m)[e.Key] = e.Value
     }
     return nil
 }

@@ -86,6 +87,8 @@ type UploadInfo struct {
     ExpirationRuleID string
 
     // Verified checksum values, if any.
+    // Values are base64 (standard) encoded.
+    // For multipart objects this is a checksum of the checksum of each part.
     ChecksumCRC32  string
     ChecksumCRC32C string
     ChecksumSHA1   string

@@ -118,7 +121,7 @@ type ObjectInfo struct {
     Metadata http.Header `json:"metadata" xml:"-"`
 
     // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
-    UserMetadata StringMap `json:"userMetadata"`
+    UserMetadata StringMap `json:"userMetadata,omitempty"`
 
     // x-amz-tagging values in their k/v values.
     UserTags map[string]string `json:"userTags"`

@@ -146,7 +149,8 @@ type ObjectInfo struct {
     // - FAILED
     // - REPLICA (on the destination)
     ReplicationStatus string `xml:"ReplicationStatus"`
+    // set to true if delete marker has backing object version on target, and eligible to replicate
+    ReplicationReady bool
     // Lifecycle expiry-date and ruleID associated with the expiry
     // not to be confused with `Expires` HTTP header.
     Expiration time.Time

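The StringMap change above swaps element-name keys (the old xmlMapEntry with an XMLName field) for explicit Key/Value child elements. A standalone sketch of how the new decoding shape behaves, assuming a payload made of Item entries with Key and Value children (the wrapper and item element names here are illustrative, not taken from the S3 wire format):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
)

// StringMap mirrors the shape now used in api-datatypes.go: each child element
// carries explicit <Key> and <Value> children instead of encoding the key in
// the element name.
type StringMap map[string]string

func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
	*m = StringMap{}
	type Item struct {
		Key   string
		Value string
	}
	for {
		var e Item
		err := d.Decode(&e)
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		(*m)[e.Key] = e.Value
	}
	return nil
}

func main() {
	// Illustrative payload only; element names are hypothetical.
	payload := `<UserMetadata>
		<Item><Key>color</Key><Value>blue</Value></Item>
		<Item><Key>size</Key><Value>large</Value></Item>
	</UserMetadata>`

	var m StringMap
	if err := xml.Unmarshal([]byte(payload), &m); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(m["color"], m["size"]) // blue large
}
```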
vendor/github.com/minio/minio-go/v7/api-error-response.go (generated, vendored) | 16

@@ -67,14 +67,14 @@ type ErrorResponse struct {
 //
 // For example:
 //
-//   import s3 "github.com/minio/minio-go/v7"
-//   ...
-//   ...
-//   reader, stat, err := s3.GetObject(...)
-//   if err != nil {
-//      resp := s3.ToErrorResponse(err)
-//   }
-//   ...
+//	import s3 "github.com/minio/minio-go/v7"
+//	...
+//	...
+//	reader, stat, err := s3.GetObject(...)
+//	if err != nil {
+//	   resp := s3.ToErrorResponse(err)
+//	}
+//	...
 func ToErrorResponse(err error) ErrorResponse {
     switch err := err.(type) {
     case ErrorResponse:

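The hunk above touches only the indentation of the ToErrorResponse doc comment; the usage it documents is unchanged. As a concrete illustration of that usage, a minimal sketch (endpoint, credentials, bucket and object names are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	obj, err := client.GetObject(context.Background(), "mybucket", "missing.txt", minio.GetObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// GetObject is lazy; S3 errors surface on the first read.
	// ToErrorResponse unwraps them into the parsed S3 error fields.
	if _, err := io.Copy(io.Discard, obj); err != nil {
		resp := minio.ToErrorResponse(err)
		fmt.Println(resp.StatusCode, resp.Code, resp.Message)
	}
}
```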
vendor/github.com/minio/minio-go/v7/api-get-options.go (generated, vendored) | 5

@@ -27,8 +27,9 @@
 
 // AdvancedGetOptions for internal use by MinIO server - not intended for client use.
 type AdvancedGetOptions struct {
     ReplicationDeleteMarker bool
-    ReplicationProxyRequest string
+    IsReplicationReadyForDeleteMarker bool
+    ReplicationProxyRequest           string
 }
 
 // GetObjectOptions are used to specify additional headers or options

vendor/github.com/minio/minio-go/v7/api-list.go (generated, vendored) | 98

@@ -32,11 +32,10 @@
 // This call requires explicit authentication, no anonymous requests are
 // allowed for listing buckets.
 //
-//   api := client.New(....)
-//   for message := range api.ListBuckets(context.Background()) {
-//       fmt.Println(message)
-//   }
-//
+//	api := client.New(....)
+//	for message := range api.ListBuckets(context.Background()) {
+//	    fmt.Println(message)
+//	}
 func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
     // Execute GET on service.
     resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex})

@@ -71,21 +70,28 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
     // Return object owner information by default
     fetchOwner := true
 
+    sendObjectInfo := func(info ObjectInfo) {
+        select {
+        case objectStatCh <- info:
+        case <-ctx.Done():
+        }
+    }
+
     // Validate bucket name.
     if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         defer close(objectStatCh)
-        objectStatCh <- ObjectInfo{
+        sendObjectInfo(ObjectInfo{
             Err: err,
-        }
+        })
         return objectStatCh
     }
 
     // Validate incoming object prefix.
     if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
         defer close(objectStatCh)
-        objectStatCh <- ObjectInfo{
+        sendObjectInfo(ObjectInfo{
             Err: err,
-        }
+        })
         return objectStatCh
     }
 
@@ -99,9 +105,9 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
         result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken,
             fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers)
         if err != nil {
-            objectStatCh <- ObjectInfo{
+            sendObjectInfo(ObjectInfo{
                 Err: err,
-            }
+            })
             return
         }
 
@@ -138,6 +144,14 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
             if !result.IsTruncated {
                 return
             }
+
+            // Add this to catch broken S3 API implementations.
+            if continuationToken == "" {
+                sendObjectInfo(ObjectInfo{
+                    Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL),
+                })
+                return
+            }
         }
     }(objectStatCh)
     return objectStatCh

@@ -263,20 +277,28 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
         // If recursive we do not delimit.
         delimiter = ""
     }
+
+    sendObjectInfo := func(info ObjectInfo) {
+        select {
+        case objectStatCh <- info:
+        case <-ctx.Done():
+        }
+    }
+
     // Validate bucket name.
     if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         defer close(objectStatCh)
-        objectStatCh <- ObjectInfo{
+        sendObjectInfo(ObjectInfo{
             Err: err,
-        }
+        })
         return objectStatCh
     }
     // Validate incoming object prefix.
     if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
         defer close(objectStatCh)
-        objectStatCh <- ObjectInfo{
+        sendObjectInfo(ObjectInfo{
             Err: err,
-        }
+        })
         return objectStatCh
     }
 
@@ -289,9 +311,9 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
             // Get list of objects a maximum of 1000 per request.
             result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers)
             if err != nil {
-                objectStatCh <- ObjectInfo{
+                sendObjectInfo(ObjectInfo{
                     Err: err,
-                }
+                })
                 return
             }
 
@@ -344,21 +366,28 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
         delimiter = ""
     }
 
+    sendObjectInfo := func(info ObjectInfo) {
+        select {
+        case resultCh <- info:
+        case <-ctx.Done():
+        }
+    }
+
     // Validate bucket name.
     if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         defer close(resultCh)
-        resultCh <- ObjectInfo{
+        sendObjectInfo(ObjectInfo{
             Err: err,
-        }
+        })
         return resultCh
     }
 
     // Validate incoming object prefix.
     if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
         defer close(resultCh)
-        resultCh <- ObjectInfo{
+        sendObjectInfo(ObjectInfo{
             Err: err,
-        }
+        })
         return resultCh
     }
 
@@ -375,9 +404,9 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
             // Get list of objects a maximum of 1000 per request.
             result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers)
             if err != nil {
-                resultCh <- ObjectInfo{
+                sendObjectInfo(ObjectInfo{
                     Err: err,
-                }
+                })
                 return
             }
 
@@ -659,11 +688,10 @@ func (o *ListObjectsOptions) Set(key, value string) {
 
 // ListObjects returns objects list after evaluating the passed options.
 //
-//   api := client.New(....)
-//   for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
-//       fmt.Println(object)
-//   }
-//
+//	api := client.New(....)
+//	for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
+//	    fmt.Println(object)
+//	}
 func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
     if opts.WithVersions {
         return c.listObjectVersions(ctx, bucketName, opts)

@@ -694,12 +722,12 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb
 // If you enable recursive as 'true' this function will return back all
 // the multipart objects in a given bucket name.
 //
-//   api := client.New(....)
-//   // Recurively list all objects in 'mytestbucket'
-//   recursive := true
-//   for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
-//       fmt.Println(message)
-//   }
+//	api := client.New(....)
+//	// Recurively list all objects in 'mytestbucket'
+//	recursive := true
+//	for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
+//	    fmt.Println(message)
+//	}
 func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
     return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
 }

@@ -916,7 +944,7 @@ func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName strin
 }
 
 // listObjectPartsQuery (List Parts query)
-//     - lists some or all (up to 1000) parts that have been uploaded
-//       for a specific multipart upload
+//   - lists some or all (up to 1000) parts that have been uploaded
+//     for a specific multipart upload
 //
 // You can use the request parameters as selection criteria to return

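The recurring change in api-list.go above wraps every channel send in a sendObjectInfo helper that also selects on ctx.Done(), so a listing goroutine can no longer block forever when its consumer stops reading and cancels the context. A small standalone sketch of the same pattern (all names here are invented for illustration):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type result struct {
	N   int
	Err error
}

// produce streams results but never blocks indefinitely: every send races the
// context, mirroring the sendObjectInfo helper in the diff above.
func produce(ctx context.Context) <-chan result {
	ch := make(chan result)
	send := func(r result) {
		select {
		case ch <- r:
		case <-ctx.Done(): // consumer went away; drop the value and unblock
		}
	}
	go func() {
		defer close(ch)
		for i := 0; i < 1000; i++ {
			send(result{N: i})
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	for r := range produce(ctx) {
		fmt.Println(r.N)
		if r.N == 2 {
			cancel() // stop consuming; the producer exits via ctx.Done()
			break
		}
	}
	time.Sleep(50 * time.Millisecond) // demo only: give the producer time to finish
}
```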
vendor/github.com/minio/minio-go/v7/api-put-object-common.go (generated, vendored) | 7

@@ -65,10 +65,9 @@ func isReadAt(reader io.Reader) (ok bool) {
 // NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
 // object storage it will have the following parameters as constants.
 //
-//  maxPartsCount - 10000
-//  minPartSize - 16MiB
-//  maxMultipartPutObjectSize - 5TiB
-//
+//	maxPartsCount - 10000
+//	minPartSize - 16MiB
+//	maxMultipartPutObjectSize - 5TiB
 func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
     // object size is '-1' set it to 5TiB.
     var unknownSize bool

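The hunk above only reflows the comment documenting the multipart sizing constants (at most 10000 parts, a 16 MiB minimum part size, a 5 TiB maximum object size). OptimalPartInfo itself is exported, so the arithmetic can be checked directly; a small sketch (the values in the comments are what the documented constants imply, not output copied from a run):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	// An unknown-size stream (-1) is treated as the 5 TiB maximum and split
	// into at most 10000 parts; a configured part size of 0 lets the SDK choose.
	parts, partSize, lastPartSize, err := minio.OptimalPartInfo(-1, 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(parts, partSize, lastPartSize)

	// A 1 GiB upload with an explicit 64 MiB part size:
	// expect 16 parts of 64 MiB, with the last part also 64 MiB.
	parts, partSize, lastPartSize, err = minio.OptimalPartInfo(1<<30, 64<<20)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(parts, partSize, lastPartSize)
}
```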
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go (generated, vendored) | 76

@@ -159,8 +159,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
             crcBytes = append(crcBytes, cSum...)
         }
 
+        p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
         // Proceed to upload the part.
-        objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader)
+        objPart, uerr := c.uploadPart(ctx, p)
         if uerr != nil {
             return UploadInfo{}, uerr
         }

@@ -269,57 +270,73 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object
     return initiateMultipartUploadResult, nil
 }
 
+type uploadPartParams struct {
+    bucketName   string
+    objectName   string
+    uploadID     string
+    reader       io.Reader
+    partNumber   int
+    md5Base64    string
+    sha256Hex    string
+    size         int64
+    sse          encrypt.ServerSide
+    streamSha256 bool
+    customHeader http.Header
+    trailer      http.Header
+}
+
 // uploadPart - Uploads a part in a multipart upload.
-func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName string, uploadID string, reader io.Reader, partNumber int, md5Base64 string, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool, customHeader http.Header) (ObjectPart, error) {
+func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) {
     // Input validation.
-    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(p.bucketName); err != nil {
         return ObjectPart{}, err
     }
-    if err := s3utils.CheckValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(p.objectName); err != nil {
         return ObjectPart{}, err
     }
-    if size > maxPartSize {
-        return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName)
+    if p.size > maxPartSize {
+        return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName)
     }
-    if size <= -1 {
-        return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName)
+    if p.size <= -1 {
+        return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName)
     }
-    if partNumber <= 0 {
+    if p.partNumber <= 0 {
         return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
     }
-    if uploadID == "" {
+    if p.uploadID == "" {
         return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
     }
 
     // Get resources properly escaped and lined up before using them in http request.
     urlValues := make(url.Values)
     // Set part number.
-    urlValues.Set("partNumber", strconv.Itoa(partNumber))
+    urlValues.Set("partNumber", strconv.Itoa(p.partNumber))
     // Set upload id.
-    urlValues.Set("uploadId", uploadID)
+    urlValues.Set("uploadId", p.uploadID)
 
     // Set encryption headers, if any.
-    if customHeader == nil {
-        customHeader = make(http.Header)
+    if p.customHeader == nil {
+        p.customHeader = make(http.Header)
     }
     // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
     // Server-side encryption is supported by the S3 Multipart Upload actions.
     // Unless you are using a customer-provided encryption key, you don't need
     // to specify the encryption parameters in each UploadPart request.
-    if sse != nil && sse.Type() == encrypt.SSEC {
-        sse.Marshal(customHeader)
+    if p.sse != nil && p.sse.Type() == encrypt.SSEC {
+        p.sse.Marshal(p.customHeader)
     }
 
     reqMetadata := requestMetadata{
-        bucketName:       bucketName,
-        objectName:       objectName,
+        bucketName:       p.bucketName,
+        objectName:       p.objectName,
         queryValues:      urlValues,
-        customHeader:     customHeader,
-        contentBody:      reader,
-        contentLength:    size,
-        contentMD5Base64: md5Base64,
-        contentSHA256Hex: sha256Hex,
-        streamSha256:     streamSha256,
+        customHeader:     p.customHeader,
+        contentBody:      p.reader,
+        contentLength:    p.size,
+        contentMD5Base64: p.md5Base64,
+        contentSHA256Hex: p.sha256Hex,
+        streamSha256:     p.streamSha256,
+        trailer:          p.trailer,
     }
 
     // Execute PUT on each part.

@@ -330,7 +347,7 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s
     }
     if resp != nil {
         if resp.StatusCode != http.StatusOK {
-            return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+            return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName)
         }
     }
     // Once successfully uploaded, return completed part.

@@ -341,8 +358,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s
         ChecksumSHA1:   h.Get("x-amz-checksum-sha1"),
         ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
     }
-    objPart.Size = size
-    objPart.PartNumber = partNumber
+    objPart.Size = p.size
+    objPart.PartNumber = p.partNumber
     // Trim off the odd double quotes from ETag in the beginning and end.
     objPart.ETag = trimEtag(h.Get("ETag"))
     return objPart, nil

@@ -431,5 +448,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
         Location:         completeMultipartUploadResult.Location,
         Expiration:       expTime,
         ExpirationRuleID: ruleID,
+
+        ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
+        ChecksumSHA1:   completeMultipartUploadResult.ChecksumSHA1,
+        ChecksumCRC32:  completeMultipartUploadResult.ChecksumCRC32,
+        ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
     }, nil
 }

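The central refactor above collapses uploadPart's twelve positional parameters into an uploadPartParams struct, which is what lets the streaming path pass the new trailer header without touching every caller. A generic sketch of that parameter-struct pattern outside minio-go (all names here are invented for illustration):

```go
package main

import (
	"fmt"
	"net/http"
)

// uploadParams groups what used to be a long positional argument list.
// Zero values act as defaults, so call sites only set what they need,
// and new fields can be added without breaking existing callers.
type uploadParams struct {
	Bucket       string
	Object       string
	PartNumber   int
	Size         int64
	MD5Base64    string
	CustomHeader http.Header
	Trailer      http.Header // added later without changing the signature
}

func uploadPart(p uploadParams) error {
	if p.Bucket == "" || p.Object == "" {
		return fmt.Errorf("bucket and object are required")
	}
	fmt.Printf("uploading part %d of %s/%s (%d bytes)\n", p.PartNumber, p.Bucket, p.Object, p.Size)
	return nil
}

func main() {
	// Named fields read better than uploadPart("photos", "cat.png", 1, 1024, "", nil, nil).
	if err := uploadPart(uploadParams{
		Bucket:     "photos",
		Object:     "cat.png",
		PartNumber: 1,
		Size:       1024,
	}); err != nil {
		fmt.Println(err)
	}
}
```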
94
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
94
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
|
@ -107,11 +107,19 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||||
return UploadInfo{}, err
|
return UploadInfo{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
withChecksum := c.trailingHeaderSupport
|
||||||
|
if withChecksum {
|
||||||
|
if opts.UserMetadata == nil {
|
||||||
|
opts.UserMetadata = make(map[string]string, 1)
|
||||||
|
}
|
||||||
|
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
|
||||||
|
}
|
||||||
// Initiate a new multipart upload.
|
// Initiate a new multipart upload.
|
||||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return UploadInfo{}, err
|
return UploadInfo{}, err
|
||||||
}
|
}
|
||||||
|
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
|
||||||
|
|
||||||
// Aborts the multipart upload in progress, if the
|
// Aborts the multipart upload in progress, if the
|
||||||
// function returns any error, since we do not resume
|
// function returns any error, since we do not resume
|
||||||
|
@ -177,14 +185,33 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||||
// As a special case if partNumber is lastPartNumber, we
|
// As a special case if partNumber is lastPartNumber, we
|
||||||
// calculate the offset based on the last part size.
|
// calculate the offset based on the last part size.
|
||||||
if uploadReq.PartNum == lastPartNumber {
|
if uploadReq.PartNum == lastPartNumber {
|
||||||
readOffset = (size - lastPartSize)
|
readOffset = size - lastPartSize
|
||||||
partSize = lastPartSize
|
partSize = lastPartSize
|
||||||
}
|
}
|
||||||
|
|
||||||
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
|
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
|
||||||
|
var trailer = make(http.Header, 1)
|
||||||
|
if withChecksum {
|
||||||
|
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
|
||||||
|
trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
|
||||||
|
sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
|
||||||
|
trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Proceed to upload the part.
|
// Proceed to upload the part.
|
||||||
objPart, err := c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, "", "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, nil)
|
p := uploadPartParams{bucketName: bucketName,
|
||||||
|
objectName: objectName,
|
||||||
|
uploadID: uploadID,
|
||||||
|
reader: sectionReader,
|
||||||
|
partNumber: uploadReq.PartNum,
|
||||||
|
size: partSize,
|
||||||
|
sse: opts.ServerSideEncryption,
|
||||||
|
streamSha256: !opts.DisableContentSha256,
|
||||||
|
sha256Hex: "",
|
||||||
|
trailer: trailer,
|
||||||
|
}
|
||||||
|
objPart, err := c.uploadPart(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
uploadedPartsCh <- uploadedPartRes{
|
 				uploadedPartsCh <- uploadedPartRes{
 					Error: err,
@@ -221,8 +248,12 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 			// Update the totalUploadedSize.
 			totalUploadedSize += uploadRes.Size
 			complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
 				ETag:       uploadRes.Part.ETag,
 				PartNumber: uploadRes.Part.PartNumber,
+				ChecksumCRC32:  uploadRes.Part.ChecksumCRC32,
+				ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
+				ChecksumSHA1:   uploadRes.Part.ChecksumSHA1,
+				ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
 			})
 		}
 	}
@@ -235,6 +266,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))
 
+	if withChecksum {
+		// Add hash of hashes.
+		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+		for _, part := range complMultipartUpload.Parts {
+			cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C)
+			if err == nil {
+				crc.Write(cs)
+			}
+		}
+		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+	}
+
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
 	if err != nil {
 		return UploadInfo{}, err
@@ -339,8 +382,8 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 		// Update progress reader appropriately to the latest offset
 		// as we read from the source.
 		hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress)
-		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, hooked, partNumber, md5Base64, "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader)
+		p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
+		objPart, uerr := c.uploadPart(ctx, p)
 		if uerr != nil {
 			return UploadInfo{}, uerr
 		}
@@ -419,6 +462,7 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r
 		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
 	}
 
+	var readSeeker io.Seeker
 	if size > 0 {
 		if isReadAt(reader) && !isObject(reader) {
 			seeker, ok := reader.(io.Seeker)
@@ -428,35 +472,49 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r
 				return UploadInfo{}, errInvalidArgument(err.Error())
 			}
 			reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
+			readSeeker = reader.(io.Seeker)
 			}
 		}
 	}
 
 	var md5Base64 string
 	if opts.SendContentMd5 {
-		// Create a buffer.
-		buf := make([]byte, size)
-
-		length, rErr := readFull(reader, buf)
-		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
-			return UploadInfo{}, rErr
-		}
-
 		// Calculate md5sum.
 		hash := c.md5Hasher()
-		hash.Write(buf[:length])
+		if readSeeker != nil {
+			if _, err := io.Copy(hash, reader); err != nil {
+				return UploadInfo{}, err
+			}
+			// Seek back to beginning of io.NewSectionReader's offset.
+			_, err = readSeeker.Seek(0, io.SeekStart)
+			if err != nil {
+				return UploadInfo{}, errInvalidArgument(err.Error())
+			}
+		} else {
+			// Create a buffer.
+			buf := make([]byte, size)
+
+			length, err := readFull(reader, buf)
+			if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
+				return UploadInfo{}, err
+			}
+
+			hash.Write(buf[:length])
+			reader = bytes.NewReader(buf[:length])
+		}
+
 		md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
-		reader = bytes.NewReader(buf[:length])
 		hash.Close()
 	}
 
 	// Update progress reader appropriately to the latest offset as we
 	// read from the source.
-	readSeeker := newHook(reader, opts.Progress)
+	progressReader := newHook(reader, opts.Progress)
 
 	// This function does not calculate sha256 and md5sum for payload.
 	// Execute put object.
-	return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts)
+	return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts)
 }
 
 // putObjectDo - executes the put object http operation.
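The withChecksum branch above aggregates the per-part CRC32C values into a single "hash of hashes" that is attached as X-Amz-Checksum-Crc32c user metadata. A minimal standalone sketch of that aggregation using only the standard library; the part checksums below are hypothetical base64 strings, not values from this commit:

	package main

	import (
		"encoding/base64"
		"fmt"
		"hash/crc32"
	)

	func main() {
		// Hypothetical base64-encoded CRC32C checksums of individual parts.
		partChecksums := []string{"zXqj7Q==", "yXTVFQ=="}

		// CRC32C (Castagnoli) over the concatenated raw part checksums,
		// mirroring what the withChecksum branch does for multipart uploads.
		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
		for _, cs := range partChecksums {
			raw, err := base64.StdEncoding.DecodeString(cs)
			if err != nil {
				continue // skip parts without a usable checksum
			}
			crc.Write(raw)
		}
		fmt.Println("composite checksum:", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
	}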
vendor/github.com/minio/minio-go/v7/api-put-object.go (generated, vendored): 8 changes
@@ -159,7 +159,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 		header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
 	}
 	if opts.Internal.ReplicationRequest {
-		header.Set(minIOBucketReplicationRequest, "")
+		header.Set(minIOBucketReplicationRequest, "true")
 	}
 	if !opts.Internal.LegalholdTimestamp.IsZero() {
 		header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
@@ -269,6 +269,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 	}
 
 	if size < 0 {
+		if opts.DisableMultipart {
+			return UploadInfo{}, errors.New("no length provided and multipart disabled")
+		}
 		return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
 	}
 
@@ -366,7 +369,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
 
 		// Proceed to upload the part.
-		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, "", int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader)
+		p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
+		objPart, uerr := c.uploadPart(ctx, p)
 		if uerr != nil {
 			return UploadInfo{}, uerr
 		}
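With this change an upload of unknown size (size -1) combined with PutObjectOptions.DisableMultipart is rejected up front instead of silently attempting a multipart stream. A hedged sketch of the caller-visible behaviour; the bucket and object names are placeholders:

	package main

	import (
		"context"
		"log"
		"os"

		"github.com/minio/minio-go/v7"
	)

	// putUnknownSize is a sketch: with v7.0.43 an unknown-size upload combined
	// with DisableMultipart is refused before any data is sent.
	func putUnknownSize(client *minio.Client, bucket, object string) error {
		_, err := client.PutObject(context.Background(), bucket, object,
			os.Stdin, -1, // size unknown
			minio.PutObjectOptions{DisableMultipart: true})
		if err != nil {
			// Expected error text: "no length provided and multipart disabled".
			log.Println("upload refused:", err)
		}
		return err
	}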
vendor/github.com/minio/minio-go/v7/api-remove.go (generated, vendored): 6 changes
@@ -82,8 +82,8 @@ func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string,
 
 // RemoveBucket deletes the bucket name.
 //
 // All objects (including all object versions and delete markers).
 // in the bucket must be deleted before successfully attempting this request.
 func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -166,7 +166,7 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string
 		headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
 	}
 	if opts.Internal.ReplicationRequest {
-		headers.Set(minIOBucketReplicationRequest, "")
+		headers.Set(minIOBucketReplicationRequest, "true")
 	}
 	if opts.ForceDelete {
 		headers.Set(minIOForceDelete, "true")
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go (generated, vendored): 8 changes
@@ -323,10 +323,10 @@ type CompletePart struct {
 	ETag string
 
 	// Checksum values
-	ChecksumCRC32  string
-	ChecksumCRC32C string
-	ChecksumSHA1   string
-	ChecksumSHA256 string
+	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
+	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
+	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
+	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
 }
 
 // completeMultipartUpload container for completing multipart upload.
vendor/github.com/minio/minio-go/v7/api-stat.go
generated
vendored
9
vendor/github.com/minio/minio-go/v7/api-stat.go
generated
vendored
|
@ -70,6 +70,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
|
||||||
if opts.Internal.ReplicationDeleteMarker {
|
if opts.Internal.ReplicationDeleteMarker {
|
||||||
headers.Set(minIOBucketReplicationDeleteMarker, "true")
|
headers.Set(minIOBucketReplicationDeleteMarker, "true")
|
||||||
}
|
}
|
||||||
|
if opts.Internal.IsReplicationReadyForDeleteMarker {
|
||||||
|
headers.Set(isMinioTgtReplicationReady, "true")
|
||||||
|
}
|
||||||
|
|
||||||
urlValues := make(url.Values)
|
urlValues := make(url.Values)
|
||||||
if opts.VersionID != "" {
|
if opts.VersionID != "" {
|
||||||
|
@ -90,6 +93,7 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
|
||||||
|
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
|
deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
|
||||||
|
replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true"
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||||
if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
|
if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
|
||||||
errResp := ErrorResponse{
|
errResp := ErrorResponse{
|
||||||
|
@ -105,8 +109,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
|
||||||
}, errResp
|
}, errResp
|
||||||
}
|
}
|
||||||
return ObjectInfo{
|
return ObjectInfo{
|
||||||
VersionID: resp.Header.Get(amzVersionID),
|
VersionID: resp.Header.Get(amzVersionID),
|
||||||
IsDeleteMarker: deleteMarker,
|
IsDeleteMarker: deleteMarker,
|
||||||
|
ReplicationReady: replicationReady, // whether delete marker can be replicated
|
||||||
}, httpRespToErrorResponse(resp, bucketName, objectName)
|
}, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
vendor/github.com/minio/minio-go/v7/api.go (generated, vendored): 54 changes
@@ -20,8 +20,10 @@
 import (
 	"bytes"
 	"context"
+	"encoding/base64"
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
 	"io/ioutil"
 	"math/rand"
@@ -93,6 +95,8 @@ type Client struct {
 	sha256Hasher func() md5simd.Hasher
 
 	healthStatus int32
+
+	trailingHeaderSupport bool
 }
 
 // Options for New method
@@ -103,6 +107,10 @@ type Options struct {
 	Region       string
 	BucketLookup BucketLookupType
 
+	// TrailingHeaders indicates server support of trailing headers.
+	// Only supported for v4 signatures.
+	TrailingHeaders bool
+
 	// Custom hash routines. Leave nil to use standard.
 	CustomMD5    func() md5simd.Hasher
 	CustomSHA256 func() md5simd.Hasher
@@ -111,13 +119,13 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.37"
+	libraryVersion = "v7.0.43"
 )
 
 // User Agent should always following the below style.
 // Please open an issue to discuss any new changes here.
 //
 // MinIO (OS; ARCH) LIB/VER APP/VER
 const (
 	libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
 	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
@@ -246,6 +254,9 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
 	if clnt.sha256Hasher == nil {
 		clnt.sha256Hasher = newSHA256Hasher
 	}
 
+	clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()
+
 	// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
 	// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
 	clnt.lookup = opts.BucketLookup
@@ -312,9 +323,9 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
 // Hash materials provides relevant initialized hash algo writers
 // based on the expected signature type.
 //
 // - For signature v4 request if the connection is insecure compute only sha256.
 // - For signature v4 request if the connection is secure compute only md5.
 // - For anonymous request compute md5.
 func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
 	hashSums = make(map[string][]byte)
 	hashAlgos = make(map[string]md5simd.Hasher)
@@ -419,6 +430,8 @@ type requestMetadata struct {
 	contentMD5Base64 string // carries base64 encoded md5sum
 	contentSHA256Hex string // carries hex encoded sha256sum
 	streamSha256     bool
+	addCrc           bool
+	trailer          http.Header // (http.Request).Trailer. Requires v4 signature.
 }
 
 // dumpHTTP - dump HTTP request and response.
@@ -581,6 +594,17 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 		}
 	}
 
+	if metadata.addCrc {
+		if metadata.trailer == nil {
+			metadata.trailer = make(http.Header, 1)
+		}
+		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+		metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
+			// Update trailer when done.
+			metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
+		})
+		metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+	}
 	// Instantiate a new request.
 	var req *http.Request
 	req, err = c.newRequest(ctx, method, metadata)
@@ -592,6 +616,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 
 		return nil, err
 	}
 
 	// Initiate the request.
 	res, err = c.do(req)
 	if err != nil {
@@ -632,7 +657,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 		// code dictates invalid region, we can retry the request
 		// with the new region.
 		//
-		// Additionally we should only retry if bucketLocation and custom
+		// Additionally, we should only retry if bucketLocation and custom
 		// region is empty.
 		if c.region == "" {
 			switch errResponse.Code {
@@ -814,9 +839,12 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 		// Add signature version '2' authorization header.
 		req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
 	case metadata.streamSha256 && !c.secure:
-		// Streaming signature is used by default for a PUT object request. Additionally we also
-		// look if the initialized client is secure, if yes then we don't need to perform
-		// streaming signature.
+		if len(metadata.trailer) > 0 {
+			req.Trailer = metadata.trailer
+		}
+		// Streaming signature is used by default for a PUT object request.
+		// Additionally, we also look if the initialized client is secure,
+		// if yes then we don't need to perform streaming signature.
 		req = signer.StreamingSignV4(req, accessKeyID,
 			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
 	default:
@@ -824,11 +852,17 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 		shaHeader := unsignedPayload
 		if metadata.contentSHA256Hex != "" {
 			shaHeader = metadata.contentSHA256Hex
+			if len(metadata.trailer) > 0 {
+				// Sanity check, we should not end up here if upstream is sane.
+				return nil, errors.New("internal error: contentSHA256Hex with trailer not supported")
+			}
+		} else if len(metadata.trailer) > 0 {
+			shaHeader = unsignedPayloadTrailer
		}
 		req.Header.Set("X-Amz-Content-Sha256", shaHeader)
 
 		// Add signature version '4' authorization header.
-		req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
+		req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer)
 	}
 
 	// Return request.
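The new Options.TrailingHeaders knob is what a caller flips to let the client send trailing-header checksums with v4 signatures. A hedged construction sketch; the endpoint and credentials below are placeholders, not values from this commit:

	package main

	import (
		"log"

		"github.com/minio/minio-go/v7"
		"github.com/minio/minio-go/v7/pkg/credentials"
	)

	func main() {
		client, err := minio.New("play.min.io", &minio.Options{
			Creds:           credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
			Secure:          true,
			TrailingHeaders: true, // new in v7.0.43; only honoured for v4 signatures
		})
		if err != nil {
			log.Fatalln(err)
		}
		log.Println("endpoint:", client.EndpointURL())
	}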
vendor/github.com/minio/minio-go/v7/constants.go (generated, vendored): 11 changes
@@ -46,6 +46,10 @@
 // we don't want to sign the request payload
 const unsignedPayload = "UNSIGNED-PAYLOAD"
 
+// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload, but have a trailer.
+const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
+
 // Total number of parallel workers used for multipart operation.
 const totalWorkers = 4
 
@@ -96,6 +100,9 @@
 	minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
 	// Header indicates last legalhold update time on source
 	minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
-	minIOForceDelete = "x-minio-force-delete"
+	minIOForceDelete = "x-minio-force-delete"
+	// Header indicates delete marker replication request can be sent by source now.
+	minioTgtReplicationReady = "X-Minio-Replication-Ready"
+	// Header asks if delete marker replication request can be sent by source now.
+	isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready"
 )
vendor/github.com/minio/minio-go/v7/core.go (generated, vendored): 15 changes
@@ -88,8 +88,19 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke
 
 // PutObjectPart - Upload an object part.
 func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
-	streamSha256 := true
-	return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256, nil)
+	p := uploadPartParams{
+		bucketName:   bucket,
+		objectName:   object,
+		uploadID:     uploadID,
+		reader:       data,
+		partNumber:   partID,
+		md5Base64:    md5Base64,
+		sha256Hex:    sha256Hex,
+		size:         size,
+		sse:          sse,
+		streamSha256: true,
+	}
+	return c.uploadPart(ctx, p)
 }
 
 // ListObjectParts - List uploaded parts of an incomplete upload.x
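The uploadPartParams refactor above is internal; the exported Core.PutObjectPart signature is unchanged, so low-level callers keep working. A hedged usage sketch in which the bucket, object, and uploadID are placeholders (the uploadID would come from NewMultipartUpload):

	package main

	import (
		"bytes"
		"context"

		"github.com/minio/minio-go/v7"
	)

	// uploadFirstPart is a sketch: the exported Core.PutObjectPart call site
	// looks exactly as it did before the uploadPartParams refactor.
	func uploadFirstPart(ctx context.Context, core minio.Core, uploadID string, data []byte) (minio.ObjectPart, error) {
		return core.PutObjectPart(ctx, "my-bucket", "my-object", uploadID,
			1,                     // part number
			bytes.NewReader(data), // part body
			int64(len(data)),      // part size
			"", "",                // optional md5Base64 / sha256Hex
			nil,                   // no server-side encryption
		)
	}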
vendor/github.com/minio/minio-go/v7/functional_tests.go (generated, vendored): 28 changes
@@ -2054,10 +2054,10 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 		ChecksumSHA1   string
 		ChecksumSHA256 string
 	}{
-		{header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE(), ChecksumCRC32: "yXTVFQ=="},
-		{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "zXqj7Q=="},
-		{header: "x-amz-checksum-sha1", hasher: sha1.New(), ChecksumSHA1: "SwmAs3F75Sw/sE4dHehkvYtn9UE="},
-		{header: "x-amz-checksum-sha256", hasher: sha256.New(), ChecksumSHA256: "8Tlu9msuw/cpmWNEnQx97axliBjiE6gK1doiY0N9WuA="},
+		{header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()},
+		{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
+		{header: "x-amz-checksum-sha1", hasher: sha1.New()},
+		{header: "x-amz-checksum-sha256", hasher: sha256.New()},
 	}
 
 	for i, test := range tests {
@@ -2113,10 +2113,10 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
 			return
 		}
-		cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
-		cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
-		cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
-		cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
+		cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
 
 		// Read the data back
 		gopts := minio.GetObjectOptions{Checksum: true}
@@ -2132,10 +2132,10 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 			return
 		}
 
-		cmpChecksum(st.ChecksumSHA256, test.ChecksumSHA256)
-		cmpChecksum(st.ChecksumSHA1, test.ChecksumSHA1)
-		cmpChecksum(st.ChecksumCRC32, test.ChecksumCRC32)
-		cmpChecksum(st.ChecksumCRC32C, test.ChecksumCRC32C)
+		cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
 
 		if st.Size != int64(bufSize) {
 			logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
@@ -4216,10 +4216,6 @@ function := "PresignedPostPolicy(policy)"
 		logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
 		return
 	}
-	if err := policy.SetContentTypeStartsWith(""); err == nil {
-		logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err)
-		return
-	}
 	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
 		logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
 		return
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go (generated, vendored): 2 changes
@@ -19,6 +19,7 @@
 import (
 	"bytes"
+	"crypto/sha256"
 	"encoding/hex"
 	"encoding/xml"
 	"errors"
@@ -31,7 +32,6 @@
 	"time"
 
 	"github.com/minio/minio-go/v7/pkg/signer"
-	sha256 "github.com/minio/sha256-simd"
 )
 
 // AssumeRoleResponse contains the result of successful AssumeRole request.
vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go (generated, vendored): 21 changes
@@ -31,18 +31,17 @@
 // will cache that Provider for all calls to IsExpired(), until Retrieve is
 // called again after IsExpired() is true.
 //
 //	creds := credentials.NewChainCredentials(
 //		[]credentials.Provider{
 //			&credentials.EnvAWSS3{},
 //			&credentials.EnvMinio{},
 //		})
-//
-//	// Usage of ChainCredentials.
-//	mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
-//	if err != nil {
-//		log.Fatalln(err)
-//	}
 //
+//	// Usage of ChainCredentials.
+//	mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
 type Chain struct {
 	Providers []Provider
 	curr      Provider
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go (generated, vendored): 9 changes
@@ -65,10 +65,11 @@ type Provider interface {
 // provider's struct.
 //
 // Example:
-//	type IAMCredentialProvider struct {
-//		Expiry
-//		...
-//	}
+//
+//	type IAMCredentialProvider struct {
+//		Expiry
+//		...
+//	}
 type Expiry struct {
 	// The date/time when to expire on
 	expiration time.Time
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
generated
vendored
Normal file
7
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
{
|
||||||
|
"Version": 1,
|
||||||
|
"SessionToken": "token",
|
||||||
|
"AccessKeyId": "accessKey",
|
||||||
|
"SecretAccessKey": "secret",
|
||||||
|
"Expiration": "9999-04-27T16:02:25.000Z"
|
||||||
|
}
|
3
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
generated
vendored
3
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
generated
vendored
|
@ -10,3 +10,6 @@ aws_secret_access_key = secret
|
||||||
[with_colon]
|
[with_colon]
|
||||||
aws_access_key_id: accessKey
|
aws_access_key_id: accessKey
|
||||||
aws_secret_access_key: secret
|
aws_secret_access_key: secret
|
||||||
|
|
||||||
|
[with_process]
|
||||||
|
credential_process = /bin/cat credentials.json
|
||||||
|
|
vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go (generated, vendored): 34 changes
@@ -28,35 +28,33 @@
 //
 // Example of using the environment variable credentials.
 //
 //	creds := NewFromEnv()
 //	// Retrieve the credentials value
 //	credValue, err := creds.Get()
 //	if err != nil {
 //	    // handle error
 //	}
 //
 // Example of forcing credentials to expire and be refreshed on the next Get().
 // This may be helpful to proactively expire credentials and refresh them sooner
 // than they would naturally expire on their own.
 //
 //	creds := NewFromIAM("")
 //	creds.Expire()
 //	credsValue, err := creds.Get()
 //	// New credentials will be retrieved instead of from cache.
 //
-//
-// Custom Provider
+// # Custom Provider
 //
 // Each Provider built into this package also provides a helper method to generate
 // a Credentials pointer setup with the provider. To use a custom Provider just
 // create a type which satisfies the Provider interface and pass it to the
 // NewCredentials method.
 //
 //	type MyProvider struct{}
 //	func (m *MyProvider) Retrieve() (Value, error) {...}
 //	func (m *MyProvider) IsExpired() bool {...}
-//
-//	creds := NewCredentials(&MyProvider{})
-//	credValue, err := creds.Get()
 //
+//	creds := NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
 package credentials
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go (generated, vendored): 48 changes
@@ -18,17 +18,33 @@
 package credentials
 
 import (
+	"encoding/json"
+	"errors"
 	"os"
+	"os/exec"
 	"path/filepath"
+	"strings"
+	"time"
 
 	ini "gopkg.in/ini.v1"
 )
 
+// A externalProcessCredentials stores the output of a credential_process
+type externalProcessCredentials struct {
+	Version         int
+	SessionToken    string
+	AccessKeyID     string `json:"AccessKeyId"`
+	SecretAccessKey string
+	Expiration      time.Time
+}
+
 // A FileAWSCredentials retrieves credentials from the current user's home
 // directory, and keeps track if those credentials are expired.
 //
 // Profile ini file example: $HOME/.aws/credentials
 type FileAWSCredentials struct {
+	Expiry
+
 	// Path to the shared credentials file.
 	//
 	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
@@ -89,6 +105,33 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
 	// Default to empty string if not found.
 	token := iniProfile.Key("aws_session_token")
 
+	// If credential_process is defined, obtain credentials by executing
+	// the external process
+	credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
+	if credentialProcess != "" {
+		args := strings.Fields(credentialProcess)
+		if len(args) <= 1 {
+			return Value{}, errors.New("invalid credential process args")
+		}
+		cmd := exec.Command(args[0], args[1:]...)
+		out, err := cmd.Output()
+		if err != nil {
+			return Value{}, err
+		}
+		var externalProcessCredentials externalProcessCredentials
+		err = json.Unmarshal([]byte(out), &externalProcessCredentials)
+		if err != nil {
+			return Value{}, err
+		}
+		p.retrieved = true
+		p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
+		return Value{
+			AccessKeyID:     externalProcessCredentials.AccessKeyID,
+			SecretAccessKey: externalProcessCredentials.SecretAccessKey,
+			SessionToken:    externalProcessCredentials.SessionToken,
+			SignerType:      SignatureV4,
+		}, nil
+	}
 	p.retrieved = true
 	return Value{
 		AccessKeyID: id.String(),
@@ -98,11 +141,6 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
 	}, nil
 }
 
-// IsExpired returns if the shared credentials have expired.
-func (p *FileAWSCredentials) IsExpired() bool {
-	return !p.retrieved
-}
-
 // loadProfiles loads from the file pointed to by shared credentials filename for profile.
 // The credentials retrieved from the profile will be returned or error. Error will be
 // returned if it fails to read from the file, or the data is invalid.
vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
generated
vendored
Normal file
24
vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
//go:build !fips
|
||||||
|
// +build !fips
|
||||||
|
|
||||||
|
/*
|
||||||
|
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||||
|
* Copyright 2022 MinIO, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package encrypt
|
||||||
|
|
||||||
|
// FIPS is true if 'fips' build tag was specified.
|
||||||
|
const FIPS = false
|
vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go (generated, vendored, new file): 24 changes
@@ -0,0 +1,24 @@
+//go:build fips
+// +build fips
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+// FIPS is true if 'fips' build tag was specified.
+const FIPS = true
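The two build-tagged files give callers a single constant to branch on. A minimal sketch of how application code could consult it; whether the binary was built with -tags fips decides the value:

	package main

	import (
		"fmt"

		"github.com/minio/minio-go/v7/pkg/encrypt"
	)

	func main() {
		if encrypt.FIPS {
			fmt.Println("built with -tags fips: restrict SSE choices accordingly")
		} else {
			fmt.Println("standard build: all SSE types available")
		}
	}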
vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go (generated, vendored): 6 changes
@@ -67,9 +67,9 @@
 
 // Type is the server-side-encryption method. It represents one of
 // the following encryption methods:
 // - SSE-C: server-side-encryption with customer provided keys
 // - KMS: server-side-encryption with managed keys
 // - S3: server-side-encryption using S3 storage encryption
 type Type string
 
 const (
vendor/github.com/minio/minio-go/v7/pkg/notification/info.go (generated, vendored): 6 changes
@@ -22,14 +22,14 @@ type identity struct {
 	PrincipalID string `json:"principalId"`
 }
 
 // event bucket metadata.
 type bucketMeta struct {
 	Name          string   `json:"name"`
 	OwnerIdentity identity `json:"ownerIdentity"`
 	ARN           string   `json:"arn"`
 }
 
 // event object metadata.
 type objectMeta struct {
 	Key  string `json:"key"`
 	Size int64  `json:"size,omitempty"`
@@ -40,7 +40,7 @@ type objectMeta struct {
 	Sequencer string `json:"sequencer"`
 }
 
 // event server specific metadata.
 type eventMeta struct {
 	SchemaVersion   string `json:"s3SchemaVersion"`
 	ConfigurationID string `json:"configurationId"`
vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
generated
vendored
3
vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
generated
vendored
|
@ -29,7 +29,8 @@
|
||||||
type EventType string
|
type EventType string
|
||||||
|
|
||||||
// The role of all event types are described in :
|
// The role of all event types are described in :
|
||||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
|
//
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
|
||||||
const (
|
const (
|
||||||
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
|
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
|
||||||
ObjectCreatedPut = "s3:ObjectCreated:Put"
|
ObjectCreatedPut = "s3:ObjectCreated:Put"
|
||||||
|
|
225
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
generated
vendored
Normal file
225
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
generated
vendored
Normal file
|
@ -0,0 +1,225 @@
|
||||||
|
/*
|
||||||
|
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||||
|
* Copyright 2022 MinIO, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package signer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getUnsignedChunkLength - calculates the length of chunk metadata
|
||||||
|
func getUnsignedChunkLength(chunkDataSize int64) int64 {
|
||||||
|
return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
|
||||||
|
crlfLen +
|
||||||
|
chunkDataSize +
|
||||||
|
crlfLen
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUSStreamLength - calculates the length of the overall stream (data + metadata)
|
||||||
|
func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
|
||||||
|
if dataLen <= 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
chunksCount := int64(dataLen / chunkSize)
|
||||||
|
remainingBytes := int64(dataLen % chunkSize)
|
||||||
|
streamLen := int64(0)
|
||||||
|
streamLen += chunksCount * getUnsignedChunkLength(chunkSize)
|
||||||
|
if remainingBytes > 0 {
|
||||||
|
streamLen += getUnsignedChunkLength(remainingBytes)
|
||||||
|
}
|
||||||
|
streamLen += getUnsignedChunkLength(0)
|
||||||
|
if len(trailers) > 0 {
|
||||||
|
for name, placeholder := range trailers {
|
||||||
|
if len(placeholder) > 0 {
|
||||||
|
streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
streamLen += crlfLen
|
||||||
|
}
|
||||||
|
|
||||||
|
return streamLen
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepareStreamingRequest - prepares a request with appropriate
|
||||||
|
// headers before computing the seed signature.
|
||||||
|
func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
|
||||||
|
req.TransferEncoding = []string{"aws-chunked"}
|
||||||
|
if sessionToken != "" {
|
||||||
|
req.Header.Set("X-Amz-Security-Token", sessionToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
|
||||||
|
// Set content length with streaming signature for each chunk included.
|
||||||
|
req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamingUSReader implements chunked upload signature as a reader on
|
||||||
|
// top of req.Body's ReaderCloser chunk header;data;... repeat
|
||||||
|
type StreamingUSReader struct {
|
||||||
|
contentLen int64 // Content-Length from req header
|
||||||
|
baseReadCloser io.ReadCloser // underlying io.Reader
|
||||||
|
bytesRead int64 // bytes read from underlying io.Reader
|
||||||
|
buf bytes.Buffer // holds signed chunk
|
||||||
|
chunkBuf []byte // holds raw data read from req Body
|
||||||
|
chunkBufLen int // no. of bytes read so far into chunkBuf
|
||||||
|
done bool // done reading the underlying reader to EOF
|
||||||
|
chunkNum int
|
||||||
|
totalChunks int
|
||||||
|
lastChunkSize int
|
||||||
|
trailer http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeChunk - signs a chunk read from s.baseReader of chunkLen size.
|
||||||
|
func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) {
|
||||||
|
s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n")
|
||||||
|
|
||||||
|
// Write chunk data into streaming buffer
|
||||||
|
s.buf.Write(s.chunkBuf[:chunkLen])
|
||||||
|
|
||||||
|
// Write the chunk trailer.
|
||||||
|
if addCrLf {
|
||||||
|
s.buf.Write([]byte("\r\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset chunkBufLen for next chunk read.
|
||||||
|
s.chunkBufLen = 0
|
||||||
|
s.chunkNum++
|
||||||
|
}
|
||||||
|
|
||||||
|
// addSignedTrailer - adds a trailer with the provided headers,
|
||||||
|
// then signs a chunk and adds it to output.
|
||||||
|
func (s *StreamingUSReader) addTrailer(h http.Header) {
|
||||||
|
olen := len(s.chunkBuf)
|
||||||
|
s.chunkBuf = s.chunkBuf[:0]
|
||||||
|
for k, v := range h {
|
||||||
|
s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.buf.Write(s.chunkBuf)
|
||||||
|
s.buf.WriteString("\r\n\r\n")
|
||||||
|
|
||||||
|
// Reset chunkBufLen for next chunk read.
|
||||||
|
s.chunkBuf = s.chunkBuf[:olen]
|
||||||
|
s.chunkBufLen = 0
|
||||||
|
s.chunkNum++
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamingUnsignedV4 - provides chunked upload
|
||||||
|
func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request {
|
||||||
|
// Set headers needed for streaming signature.
|
||||||
|
prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
|
||||||
|
|
||||||
|
if req.Body == nil {
|
||||||
|
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
|
||||||
|
}
|
||||||
|
|
||||||
|
stReader := &StreamingUSReader{
|
||||||
|
baseReadCloser: req.Body,
|
||||||
|
chunkBuf: make([]byte, payloadChunkSize),
|
||||||
|
contentLen: dataLen,
|
||||||
|
chunkNum: 1,
|
||||||
|
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
|
||||||
|
lastChunkSize: int(dataLen % payloadChunkSize),
|
||||||
|
}
|
||||||
|
if len(req.Trailer) > 0 {
|
||||||
|
stReader.trailer = req.Trailer
|
||||||
|
// Remove...
|
||||||
|
req.Trailer = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Body = stReader
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read - this method performs chunk upload signature providing a
|
||||||
|
// io.Reader interface.
|
||||||
|
func (s *StreamingUSReader) Read(buf []byte) (int, error) {
|
||||||
|
switch {
|
||||||
|
// After the last chunk is read from underlying reader, we
|
||||||
|
// never re-fill s.buf.
|
||||||
|
case s.done:
|
||||||
|
|
||||||
|
// s.buf will be (re-)filled with next chunk when has lesser
|
||||||
|
// bytes than asked for.
|
||||||
|
case s.buf.Len() < len(buf):
|
||||||
|
s.chunkBufLen = 0
|
||||||
|
for {
|
||||||
|
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
|
||||||
|
// Usually we validate `err` first, but in this case
|
||||||
|
// we are validating n > 0 for the following reasons.
|
||||||
|
//
|
||||||
|
// 1. n > 0, err is one of io.EOF, nil (near end of stream)
|
||||||
|
// A Reader returning a non-zero number of bytes at the end
|
||||||
|
// of the input stream may return either err == EOF or err == nil
|
||||||
|
//
|
||||||
|
// 2. n == 0, err is io.EOF (actual end of stream)
|
||||||
|
//
|
||||||
|
// Callers should always process the n > 0 bytes returned
|
||||||
|
// before considering the error err.
|
||||||
|
if n1 > 0 {
|
||||||
|
s.chunkBufLen += n1
|
||||||
|
s.bytesRead += int64(n1)
|
||||||
|
|
||||||
|
if s.chunkBufLen == payloadChunkSize ||
|
||||||
|
(s.chunkNum == s.totalChunks-1 &&
|
||||||
|
s.chunkBufLen == s.lastChunkSize) {
|
||||||
|
// Sign the chunk and write it to s.buf.
|
||||||
|
s.writeChunk(s.chunkBufLen, true)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
// No more data left in baseReader - last chunk.
|
||||||
|
// Done reading the last chunk from baseReader.
|
||||||
|
s.done = true
|
||||||
|
|
||||||
|
// bytes read from baseReader different than
|
||||||
|
// content length provided.
|
||||||
|
if s.bytesRead != s.contentLen {
|
||||||
|
return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign the chunk and write it to s.buf.
|
||||||
|
s.writeChunk(0, len(s.trailer) == 0)
|
||||||
|
if len(s.trailer) > 0 {
|
||||||
|
// Trailer must be set now.
|
||||||
|
s.addTrailer(s.trailer)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.buf.Read(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close - this method makes underlying io.ReadCloser's Close method available.
|
||||||
|
func (s *StreamingUSReader) Close() error {
|
||||||
|
return s.baseReadCloser.Close()
|
||||||
|
}
|
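StreamingUnsignedV4 above wraps a request body so it is re-chunked in aws-chunked form with an unsigned payload and the declared trailers appended after the final zero-length chunk. A hedged, standalone sketch of wrapping a request this way; the URL and trailer value are placeholders, and inside the SDK this call is driven by newRequest rather than by application code:

	package main

	import (
		"bytes"
		"log"
		"net/http"
		"time"

		"github.com/minio/minio-go/v7/pkg/signer"
	)

	func main() {
		body := []byte("hello world")
		req, err := http.NewRequest(http.MethodPut, "https://play.min.io/my-bucket/my-object", bytes.NewReader(body))
		if err != nil {
			log.Fatalln(err)
		}

		// Declare a trailer; its value is filled in after the body has streamed.
		req.Trailer = http.Header{}
		req.Trailer.Set("x-amz-checksum-crc32c", "") // placeholder, set once known

		// Re-chunk the body and move the declared trailers after the last chunk.
		req = signer.StreamingUnsignedV4(req, "", int64(len(body)), time.Now().UTC())

		log.Println("content length with chunk framing:", req.ContentLength)
	}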
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go (generated, vendored): 111 changes
@@ -32,13 +32,17 @@
 // Reference for constants used below -
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
 const (
 	streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
-	streamingPayloadHdr    = "AWS4-HMAC-SHA256-PAYLOAD"
-	emptySHA256            = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-	payloadChunkSize       = 64 * 1024
-	chunkSigConstLen       = 17 // ";chunk-signature="
-	signatureStrLen        = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
-	crlfLen                = 2  // CRLF
+	streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
+	streamingPayloadHdr           = "AWS4-HMAC-SHA256-PAYLOAD"
+	streamingTrailerHdr           = "AWS4-HMAC-SHA256-TRAILER"
+	emptySHA256                   = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+	payloadChunkSize              = 64 * 1024
+	chunkSigConstLen              = 17 // ";chunk-signature="
+	signatureStrLen               = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
+	crlfLen                       = 2  // CRLF
+	trailerKVSeparator            = ":"
+	trailerSignature              = "x-amz-trailer-signature"
 )
 
 // Request headers to be ignored while calculating seed signature for
@@ -60,7 +64,7 @@ func getSignedChunkLength(chunkDataSize int64) int64 {
 }
 
 // getStreamLength - calculates the length of the overall stream (data + metadata)
-func getStreamLength(dataLen, chunkSize int64) int64 {
+func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
 	if dataLen <= 0 {
 		return 0
 	}
@@ -73,6 +77,15 @@ func getStreamLength(dataLen, chunkSize int64) int64 {
 		streamLen += getSignedChunkLength(remainingBytes)
 	}
 	streamLen += getSignedChunkLength(0)
+	if len(trailers) > 0 {
+		for name, placeholder := range trailers {
+			if len(placeholder) > 0 {
+				streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1)
+			}
+		}
+		streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen
+	}
+
 	return streamLen
 }
 
@@ -91,18 +104,41 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [
 	return strings.Join(stringToSignParts, "\n")
 }
 
+// buildTrailerChunkStringToSign - returns the string to sign given chunk data
+// and previous signature.
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+	stringToSignParts := []string{
+		streamingTrailerHdr,
+		t.Format(iso8601DateFormat),
+		getScope(region, t, ServiceTypeS3),
+		previousSig,
+		hex.EncodeToString(sum256(chunkData)),
+	}
+
+	return strings.Join(stringToSignParts, "\n")
+}
+
 // prepareStreamingRequest - prepares a request with appropriate
 // headers before computing the seed signature.
 func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
 	// Set x-amz-content-sha256 header.
-	req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+	if len(req.Trailer) == 0 {
+		req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+	} else {
+		req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm)
+		for k := range req.Trailer {
+			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+		}
+		req.TransferEncoding = []string{"aws-chunked"}
+	}
+
 	if sessionToken != "" {
 		req.Header.Set("X-Amz-Security-Token", sessionToken)
 	}
 
 	req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
 	// Set content length with streaming signature for each chunk included.
-	req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
+	req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer)
 	req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
 }
 
@@ -122,6 +158,16 @@ func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
 	return getSignature(signingKey, chunkStringToSign)
 }
 
+// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
+func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region,
+	previousSignature, secretAccessKey string,
+) string {
+	chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
+		previousSignature, chunkData)
+	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
+	return getSignature(signingKey, chunkStringToSign)
+}
+
 // getSeedSignature - returns the seed signature for a given request.
 func (s *StreamingReader) setSeedSignature(req *http.Request) {
 	// Get canonical request
@@ -156,10 +202,11 @@ type StreamingReader struct {
 	chunkNum      int
 	totalChunks   int
 	lastChunkSize int
+	trailer       http.Header
 }
 
 // signChunk - signs a chunk read from s.baseReader of chunkLen size.
|
||||||
func (s *StreamingReader) signChunk(chunkLen int) {
|
func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
|
||||||
// Compute chunk signature for next header
|
// Compute chunk signature for next header
|
||||||
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
|
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
|
||||||
s.region, s.prevSignature, s.secretAccessKey)
|
s.region, s.prevSignature, s.secretAccessKey)
|
||||||
|
@ -175,13 +222,40 @@ func (s *StreamingReader) signChunk(chunkLen int) {
|
||||||
s.buf.Write(s.chunkBuf[:chunkLen])
|
s.buf.Write(s.chunkBuf[:chunkLen])
|
||||||
|
|
||||||
// Write the chunk trailer.
|
// Write the chunk trailer.
|
||||||
s.buf.Write([]byte("\r\n"))
|
if addCrLf {
|
||||||
|
s.buf.Write([]byte("\r\n"))
|
||||||
|
}
|
||||||
|
|
||||||
// Reset chunkBufLen for next chunk read.
|
// Reset chunkBufLen for next chunk read.
|
||||||
s.chunkBufLen = 0
|
s.chunkBufLen = 0
|
||||||
s.chunkNum++
|
s.chunkNum++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// addSignedTrailer - adds a trailer with the provided headers,
|
||||||
|
// then signs a chunk and adds it to output.
|
||||||
|
func (s *StreamingReader) addSignedTrailer(h http.Header) {
|
||||||
|
olen := len(s.chunkBuf)
|
||||||
|
s.chunkBuf = s.chunkBuf[:0]
|
||||||
|
for k, v := range h {
|
||||||
|
s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute chunk signature
|
||||||
|
signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime,
|
||||||
|
s.region, s.prevSignature, s.secretAccessKey)
|
||||||
|
|
||||||
|
// For next chunk signature computation
|
||||||
|
s.prevSignature = signature
|
||||||
|
|
||||||
|
s.buf.Write(s.chunkBuf)
|
||||||
|
s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n")
|
||||||
|
|
||||||
|
// Reset chunkBufLen for next chunk read.
|
||||||
|
s.chunkBuf = s.chunkBuf[:olen]
|
||||||
|
s.chunkBufLen = 0
|
||||||
|
s.chunkNum++
|
||||||
|
}
|
||||||
|
|
||||||
// setStreamingAuthHeader - builds and sets authorization header value
|
// setStreamingAuthHeader - builds and sets authorization header value
|
||||||
// for streaming signature.
|
// for streaming signature.
|
||||||
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
|
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
|
||||||
|
@ -222,6 +296,11 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
|
||||||
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
|
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
|
||||||
lastChunkSize: int(dataLen % payloadChunkSize),
|
lastChunkSize: int(dataLen % payloadChunkSize),
|
||||||
}
|
}
|
||||||
|
if len(req.Trailer) > 0 {
|
||||||
|
stReader.trailer = req.Trailer
|
||||||
|
// Remove...
|
||||||
|
req.Trailer = nil
|
||||||
|
}
|
||||||
|
|
||||||
// Add the request headers required for chunk upload signing.
|
// Add the request headers required for chunk upload signing.
|
||||||
|
|
||||||
|
@ -272,7 +351,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
|
||||||
(s.chunkNum == s.totalChunks-1 &&
|
(s.chunkNum == s.totalChunks-1 &&
|
||||||
s.chunkBufLen == s.lastChunkSize) {
|
s.chunkBufLen == s.lastChunkSize) {
|
||||||
// Sign the chunk and write it to s.buf.
|
// Sign the chunk and write it to s.buf.
|
||||||
s.signChunk(s.chunkBufLen)
|
s.signChunk(s.chunkBufLen, true)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -289,7 +368,11 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sign the chunk and write it to s.buf.
|
// Sign the chunk and write it to s.buf.
|
||||||
s.signChunk(0)
|
s.signChunk(0, len(s.trailer) == 0)
|
||||||
|
if len(s.trailer) > 0 {
|
||||||
|
// Trailer must be set now.
|
||||||
|
s.addSignedTrailer(s.trailer)
|
||||||
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
return 0, err
|
return 0, err
|
||||||
|
|
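A note for anyone tracing the new trailer support in the streaming signer above: after the final zero-length chunk, the trailing headers are emitted as one last signed block. The sketch below is illustrative only and is not part of minio-go (formatSignedTrailer and the placeholder values are invented here); it simply mirrors the byte layout that addSignedTrailer writes and that getStreamLength budgets for.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// formatSignedTrailer is a hypothetical helper (not minio-go API) that mirrors
// the byte layout addSignedTrailer produces after the final zero-length chunk:
// one "lowercase-name:value\n" line per trailer header, then a CRLF, then the
// x-amz-trailer-signature header, then a closing CRLF CRLF.
func formatSignedTrailer(trailer http.Header, signature string) string {
	var b strings.Builder
	for name, values := range trailer {
		if len(values) > 0 {
			b.WriteString(strings.ToLower(name) + ":" + values[0] + "\n")
		}
	}
	b.WriteString("\r\n" + "x-amz-trailer-signature" + ":" + signature + "\r\n\r\n")
	return b.String()
}

func main() {
	trailer := http.Header{}
	trailer.Set("X-Amz-Checksum-Crc32c", "AAAAAA==") // placeholder checksum value
	placeholderSig := strings.Repeat("0", 64)        // a real SigV4 signature is 64 hex characters
	fmt.Printf("%q\n", formatSignedTrailer(trailer, placeholderSig))
}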
27  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go  generated  vendored

@@ -162,11 +162,12 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b
 // From the Amazon docs:
 //
 // StringToSign = HTTP-Verb + "\n" +
-//     Content-Md5 + "\n" +
-//     Content-Type + "\n" +
-//     Expires + "\n" +
-//     CanonicalizedProtocolHeaders +
-//     CanonicalizedResource;
+//
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Expires + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
 func preStringToSignV2(req http.Request, virtualHost bool) string {
 	buf := new(bytes.Buffer)
 	// Write standard headers.
@@ -189,11 +190,12 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
 // From the Amazon docs:
 //
 // StringToSign = HTTP-Verb + "\n" +
-//     Content-Md5 + "\n" +
-//     Content-Type + "\n" +
-//     Date + "\n" +
-//     CanonicalizedProtocolHeaders +
-//     CanonicalizedResource;
+//
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Date + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
 func stringToSignV2(req http.Request, virtualHost bool) string {
 	buf := new(bytes.Buffer)
 	// Write standard headers.
@@ -281,8 +283,9 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
 // From the Amazon docs:
 //
 // CanonicalizedResource = [ "/" + Bucket ] +
-//     <HTTP-Request-URI, from the protocol name up to the query string> +
-//     [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
 func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
 	// Save request URL.
 	requestURL := req.URL
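The reflowed doc comments above spell out the V2 string-to-sign layout. A minimal sketch of that layout follows; buildV2StringToSign is a hypothetical helper for illustration only, and it skips the header and resource canonicalization that preStringToSignV2/stringToSignV2 perform.

package main

import (
	"fmt"
	"strings"
)

// buildV2StringToSign joins the verb, Content-Md5, Content-Type and Date (or
// Expires) with "\n", then appends the canonicalized headers and resource,
// exactly as the doc comment above describes.
func buildV2StringToSign(verb, contentMD5, contentType, date, canonHeaders, canonResource string) string {
	return strings.Join([]string{verb, contentMD5, contentType, date}, "\n") +
		"\n" + canonHeaders + canonResource
}

func main() {
	s := buildV2StringToSign("GET", "", "", "Wed, 28 Oct 2009 22:32:00 GMT",
		"x-amz-date:Wed, 28 Oct 2009 22:32:00 GMT\n", "/mybucket/myobject")
	fmt.Printf("%q\n", s)
}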
41  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go  generated  vendored

@@ -42,7 +42,6 @@
 	ServiceTypeSTS = "sts"
 )
 
-//
 // Excerpts from @lsegal -
 // https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
 //
@@ -57,7 +56,6 @@
 //  * Accept-Encoding
 //    Some S3 servers like Hitachi Content Platform do not honor this header for signature
 //    calculation.
-//
 var v4IgnoredHeaders = map[string]bool{
 	"Accept-Encoding": true,
 	"Authorization":   true,
@@ -177,12 +175,13 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
 // getCanonicalRequest generate a canonical request of style.
 //
 // canonicalRequest =
-//  <HTTPMethod>\n
-//  <CanonicalURI>\n
-//  <CanonicalQueryString>\n
-//  <CanonicalHeaders>\n
-//  <SignedHeaders>\n
-//  <HashedPayload>
+//
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
 func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
 	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
 	canonicalRequest := strings.Join([]string{
@@ -264,11 +263,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
 
 // SignV4STS - signature v4 for STS request.
 func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
-	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS)
+	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
 }
 
 // Internal function called for different service types.
-func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request {
+func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
 	// Signature calculation is not needed for anonymous credentials.
 	if accessKeyID == "" || secretAccessKey == "" {
 		return &req
@@ -285,6 +284,15 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
 		req.Header.Set("X-Amz-Security-Token", sessionToken)
 	}
 
+	if len(trailer) > 0 {
+		for k := range trailer {
+			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+		}
+
+		req.TransferEncoding = []string{"aws-chunked"}
+		req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+	}
+
 	hashedPayload := getHashedPayload(req)
 	if serviceType == ServiceTypeSTS {
 		// Content sha256 header is not sent with the request
@@ -322,11 +330,22 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
 	auth := strings.Join(parts, ", ")
 	req.Header.Set("Authorization", auth)
 
+	if len(trailer) > 0 {
+		// Use custom chunked encoding.
+		req.Trailer = trailer
+		return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
+	}
 	return &req
 }
 
 // SignV4 sign the request before Do(), in accordance with
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
 func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
-	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3)
+	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
+}
+
+// SignV4Trailer sign the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
+	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
 }
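The hunk above shows that signV4, when given a trailer, adds the X-Amz-Trailer headers and hands the body off to StreamingUnsignedV4 for aws-chunked encoding. A rough usage sketch of the new exported entry point follows; the endpoint, credentials, region and checksum value are placeholders, and GoToSocial normally reaches this code through the minio client rather than calling the signer package directly.

package main

import (
	"bytes"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	body := []byte("example body")
	req, err := http.NewRequest(http.MethodPut, "https://s3.example.com/bucket/object", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.ContentLength = int64(len(body))

	// Headers to be sent as trailers after the body; the checksum value is a placeholder.
	trailer := http.Header{}
	trailer.Set("x-amz-checksum-crc32c", "AAAAAA==")

	// Per the diff, the returned request carries the trailer headers and an
	// aws-chunked body wrapper supplied by StreamingUnsignedV4.
	signed := signer.SignV4Trailer(*req, "ACCESSKEY", "SECRETKEY", "", "us-east-1", trailer)
	_ = signed
}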
3  vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go  generated  vendored

@@ -19,10 +19,9 @@
 
 import (
 	"crypto/hmac"
+	"crypto/sha256"
 	"net/http"
 	"strings"
-
-	"github.com/minio/sha256-simd"
 )
 
 // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
101  vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go  generated  vendored

@@ -1,5 +1,6 @@
 /*
- * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020-2022 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,6 +21,8 @@
 	"encoding/xml"
 	"io"
 	"net/url"
+	"regexp"
+	"sort"
 	"strings"
 	"unicode/utf8"
 )
@@ -63,8 +66,17 @@ func (err errTag) Error() string {
 	maxTagCount = 50
 )
 
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+// borrowed from this article and also testing various ASCII characters following regex
+// is supported by AWS S3 for both tags and values.
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
+
 func checkKey(key string) error {
-	if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") {
+	if len(key) == 0 {
+		return errInvalidTagKey
+	}
+
+	if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
 		return errInvalidTagKey
 	}
 
@@ -72,8 +84,10 @@ func checkKey(key string) error {
 }
 
 func checkValue(value string) error {
-	if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") {
-		return errInvalidTagValue
+	if value != "" {
+		if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
+			return errInvalidTagValue
+		}
 	}
 
 	return nil
@@ -136,11 +150,26 @@ type tagSet struct {
 }
 
 func (tags tagSet) String() string {
-	vals := make(url.Values)
-	for key, value := range tags.tagMap {
-		vals.Set(key, value)
+	if len(tags.tagMap) == 0 {
+		return ""
 	}
-	return vals.Encode()
+	var buf strings.Builder
+	keys := make([]string, 0, len(tags.tagMap))
+	for k := range tags.tagMap {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		keyEscaped := url.QueryEscape(k)
+		valueEscaped := url.QueryEscape(tags.tagMap[k])
+		if buf.Len() > 0 {
+			buf.WriteByte('&')
+		}
+		buf.WriteString(keyEscaped)
+		buf.WriteByte('=')
+		buf.WriteString(valueEscaped)
+	}
+	return buf.String()
 }
 
 func (tags *tagSet) remove(key string) {
@@ -175,7 +204,7 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error {
 }
 
 func (tags tagSet) toMap() map[string]string {
-	m := make(map[string]string)
+	m := make(map[string]string, len(tags.tagMap))
 	for key, value := range tags.tagMap {
 		m[key] = value
 	}
@@ -188,6 +217,7 @@ func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 		Tags []Tag `xml:"Tag"`
 	}{}
 
+	tagList.Tags = make([]Tag, 0, len(tags.tagMap))
 	for key, value := range tags.tagMap {
 		tagList.Tags = append(tagList.Tags, Tag{key, value})
 	}
@@ -213,7 +243,7 @@ func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
 		return errTooManyTags
 	}
 
-	m := map[string]string{}
+	m := make(map[string]string, len(tagList.Tags))
 	for _, tag := range tagList.Tags {
 		if _, found := m[tag.Key]; found {
 			return errDuplicateTagKey
@@ -311,14 +341,49 @@ func ParseObjectXML(reader io.Reader) (*Tags, error) {
 	return unmarshalXML(reader, true)
 }
 
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, cut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+	if i := strings.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, "", false
+}
+
+func (tags *tagSet) parseTags(tgs string) (err error) {
+	for tgs != "" {
+		var key string
+		key, tgs, _ = stringsCut(tgs, "&")
+		if key == "" {
+			continue
+		}
+		key, value, _ := stringsCut(key, "=")
+		key, err1 := url.QueryUnescape(key)
+		if err1 != nil {
+			if err == nil {
+				err = err1
+			}
+			continue
+		}
+		value, err1 = url.QueryUnescape(value)
+		if err1 != nil {
+			if err == nil {
+				err = err1
+			}
+			continue
+		}
+		if err = tags.set(key, value, true); err != nil {
+			return err
+		}
+	}
+	return err
+}
+
 // Parse decodes HTTP query formatted string into tags which is limited by isObject.
 // A query formatted string is like "key1=value1&key2=value2".
 func Parse(s string, isObject bool) (*Tags, error) {
-	values, err := url.ParseQuery(s)
-	if err != nil {
-		return nil, err
-	}
-
 	tagging := &Tags{
 		TagSet: &tagSet{
 			tagMap: make(map[string]string),
@@ -326,10 +391,8 @@ func Parse(s string, isObject bool) (*Tags, error) {
 		},
 	}
 
-	for key := range values {
-		if err := tagging.TagSet.set(key, values.Get(key), true); err != nil {
-			return nil, err
-		}
+	if err := tagging.TagSet.parseTags(s); err != nil {
+		return nil, err
 	}
 
 	return tagging, nil
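The tags package now parses the query string itself instead of going through url.ParseQuery, and keys/values are checked against validTagKeyValue. A small usage sketch follows; the tag names and values are examples of mine, and the only API relied on is Parse, whose signature appears in the hunk above.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// With the new parseTags-based parser, keys and values are limited to the
	// characters allowed by validTagKeyValue (letters, digits, space and
	// + - . _ : / @), and an empty value such as "note=" is accepted.
	t, err := tags.Parse("project=gotosocial&env=prod&note=", true)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Printf("%+v\n", t)
}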
15  vendor/github.com/minio/minio-go/v7/post-policy.go  generated  vendored

@@ -32,12 +32,11 @@
 //
 // Example:
 //
 //	policyCondition {
 //	    matchType: "$eq",
 //	    key: "$Content-Type",
 //	    value: "image/png",
 //	}
-//
 type policyCondition struct {
 	matchType string
 	condition string
@@ -172,10 +171,8 @@ func (p *PostPolicy) SetContentType(contentType string) error {
 
 // SetContentTypeStartsWith - Sets what content-type of the object for this policy
 // based upload can start with.
+// If "" is provided it allows all content-types.
 func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
-	if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" {
-		return errInvalidArgument("No content type specified.")
-	}
 	policyCond := policyCondition{
 		matchType: "starts-with",
 		condition: "$Content-Type",
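For context on the SetContentTypeStartsWith change: the empty-string guard was dropped, so "" now means "allow any content type" rather than returning an error. The sketch below assumes the usual minio-go v7 PostPolicy helpers (NewPostPolicy, SetBucket, SetKey, SetExpires); the bucket, key and expiry values are placeholders.

package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go/v7"
)

func main() {
	policy := minio.NewPostPolicy()
	// Bucket, key and expiry values are placeholders.
	_ = policy.SetBucket("my-bucket")
	_ = policy.SetKey("uploads/avatar.png")
	_ = policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// Restrict browser uploads to image content types; after this change,
	// passing "" is also accepted and simply allows any content type.
	if err := policy.SetContentTypeStartsWith("image/"); err != nil {
		fmt.Println("policy error:", err)
		return
	}
	fmt.Println(policy)
}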
40  vendor/github.com/minio/minio-go/v7/utils.go  generated  vendored

@@ -20,6 +20,7 @@
 import (
 	"context"
 	"crypto/md5"
+	fipssha256 "crypto/sha256"
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/xml"
@@ -39,6 +40,7 @@
 	"time"
 
 	md5simd "github.com/minio/md5-simd"
+	"github.com/minio/minio-go/v7/pkg/encrypt"
 	"github.com/minio/minio-go/v7/pkg/s3utils"
 	"github.com/minio/sha256-simd"
 )
@@ -520,6 +522,9 @@ func newMd5Hasher() md5simd.Hasher {
 }
 
 func newSHA256Hasher() md5simd.Hasher {
+	if encrypt.FIPS {
+		return &hashWrapper{Hash: fipssha256.New(), isSHA256: true}
+	}
 	return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
 }
 
@@ -627,3 +632,38 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
 	}
 	return false
 }
+
+// newHashReaderWrapper will hash all reads done through r.
+// When r returns io.EOF the done function will be called with the sum.
+func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper {
+	return &hashReaderWrapper{
+		r:    r,
+		h:    h,
+		done: done,
+	}
+}
+
+type hashReaderWrapper struct {
+	r    io.Reader
+	h    hash.Hash
+	done func(hash []byte)
+}
+
+// Read implements the io.Reader interface.
+func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
+	n, err = h.r.Read(p)
+	if n > 0 {
+		n2, err := h.h.Write(p[:n])
+		if err != nil {
+			return 0, err
+		}
+		if n2 != n {
+			return 0, io.ErrShortWrite
+		}
+	}
+	if err == io.EOF {
+		// Call back
+		h.done(h.h.Sum(nil))
+	}
+	return n, err
+}
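hashReaderWrapper above is unexported, so the sketch below re-creates the same pattern with a local copy purely to show how the io.EOF callback delivers the final digest; wrapReader is a stand-in of mine, not minio-go API.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// wrapReader mirrors the newHashReaderWrapper pattern from the vendored code:
// every read is fed into h, and done receives the final sum once the
// underlying reader returns io.EOF.
type wrapReader struct {
	r    io.Reader
	h    hash.Hash
	done func(sum []byte)
}

func (w *wrapReader) Read(p []byte) (n int, err error) {
	n, err = w.r.Read(p)
	if n > 0 {
		if _, werr := w.h.Write(p[:n]); werr != nil {
			return 0, werr
		}
	}
	if err == io.EOF {
		w.done(w.h.Sum(nil))
	}
	return n, err
}

func main() {
	src := strings.NewReader("hello gotosocial")
	wr := &wrapReader{r: src, h: sha256.New(), done: func(sum []byte) {
		fmt.Println("sha256:", hex.EncodeToString(sum))
	}}
	if _, err := io.Copy(io.Discard, wr); err != nil {
		panic(err)
	}
}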
2  vendor/modules.txt  vendored

@@ -273,7 +273,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.37
+# github.com/minio/minio-go/v7 v7.0.43
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials