Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2024-12-25 11:42:11 +00:00

Merge branch 'main' into go_123

This commit is contained in commit 666b8bc4f2.

go.mod (4 changed lines)
@@ -42,7 +42,7 @@ require (
 	github.com/jackc/pgx/v5 v5.6.0
 	github.com/microcosm-cc/bluemonday v1.0.27
 	github.com/miekg/dns v1.1.62
-	github.com/minio/minio-go/v7 v7.0.75
+	github.com/minio/minio-go/v7 v7.0.76
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/ncruces/go-sqlite3 v0.18.1
 	github.com/oklog/ulid v1.3.1
@@ -187,7 +187,7 @@ require (
 	github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 	github.com/rogpeppe/go-internal v1.12.0 // indirect
-	github.com/rs/xid v1.5.0 // indirect
+	github.com/rs/xid v1.6.0 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/shopspring/decimal v1.3.1 // indirect
go.sum (8 changed lines)

@@ -419,8 +419,8 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
 github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.75 h1:0uLrB6u6teY2Jt+cJUVi9cTvDRuBKWSRzSAcznRkwlE=
-github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
+github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng=
+github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -485,8 +485,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
 github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
@@ -64,8 +64,15 @@ func (a *accountDB) GetAccountsByIDs(ctx context.Context, ids []string) ([]*gtsm
 	accounts, err := a.state.Caches.DB.Account.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Account, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached accounts.
-			accounts := make([]*gtsmodel.Account, 0, len(uncached))
+			accounts := make([]*gtsmodel.Account, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) account IDs.

@@ -147,8 +147,15 @@ func (a *applicationDB) GetAllTokens(ctx context.Context) ([]*gtsmodel.Token, er
 	tokens, err := a.state.Caches.DB.Token.LoadIDs("ID",
 		tokenIDs,
 		func(uncached []string) ([]*gtsmodel.Token, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached tokens.
-			tokens := make([]*gtsmodel.Token, 0, len(uncached))
+			tokens := make([]*gtsmodel.Token, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) token IDs.

@@ -187,8 +187,15 @@ func (c *conversationDB) getConversationsByLastStatusIDs(
 		accountID,
 		conversationLastStatusIDs,
 		func(accountID string, uncached []string) ([]*gtsmodel.Conversation, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached conversations.
-			conversations := make([]*gtsmodel.Conversation, 0, len(uncached))
+			conversations := make([]*gtsmodel.Conversation, 0, count)
 
 			// Perform database query scanning the remaining (uncached) IDs.
 			if err := c.db.NewSelect().
@@ -586,8 +586,15 @@ func (e *emojiDB) GetEmojisByIDs(ctx context.Context, ids []string) ([]*gtsmodel
 	emojis, err := e.state.Caches.DB.Emoji.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Emoji, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached emojis.
-			emojis := make([]*gtsmodel.Emoji, 0, len(uncached))
+			emojis := make([]*gtsmodel.Emoji, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -650,8 +657,15 @@ func (e *emojiDB) GetEmojiCategoriesByIDs(ctx context.Context, ids []string) ([]
 	categories, err := e.state.Caches.DB.EmojiCategory.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.EmojiCategory, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached categories.
-			categories := make([]*gtsmodel.EmojiCategory, 0, len(uncached))
+			categories := make([]*gtsmodel.EmojiCategory, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -353,8 +353,15 @@ func (l *listDB) GetListsByIDs(ctx context.Context, ids []string) ([]*gtsmodel.L
 	lists, err := l.state.Caches.DB.List.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.List, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached lists.
-			lists := make([]*gtsmodel.List, 0, len(uncached))
+			lists := make([]*gtsmodel.List, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -400,8 +407,15 @@ func (l *listDB) GetListEntriesByIDs(ctx context.Context, ids []string) ([]*gtsm
 	entries, err := l.state.Caches.DB.ListEntry.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.ListEntry, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached entries.
-			entries := make([]*gtsmodel.ListEntry, 0, len(uncached))
+			entries := make([]*gtsmodel.ListEntry, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.
@@ -57,8 +57,15 @@ func (m *mediaDB) GetAttachmentsByIDs(ctx context.Context, ids []string) ([]*gts
 	media, err := m.state.Caches.DB.Media.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.MediaAttachment, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached media attachments.
-			media := make([]*gtsmodel.MediaAttachment, 0, len(uncached))
+			media := make([]*gtsmodel.MediaAttachment, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -69,8 +69,15 @@ func (m *mentionDB) GetMentions(ctx context.Context, ids []string) ([]*gtsmodel.
 	mentions, err := m.state.Caches.DB.Mention.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Mention, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached mentions.
-			mentions := make([]*gtsmodel.Mention, 0, len(uncached))
+			mentions := make([]*gtsmodel.Mention, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -107,13 +107,15 @@ func (n *notificationDB) GetNotificationsByIDs(ctx context.Context, ids []string
 	notifs, err := n.state.Caches.DB.Notification.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Notification, error) {
-			// Skip query if everything was cached.
-			if len(uncached) == 0 {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
 				return nil, nil
 			}
 
 			// Preallocate expected length of uncached notifications.
-			notifs := make([]*gtsmodel.Notification, 0, len(uncached))
+			notifs := make([]*gtsmodel.Notification, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -274,8 +274,15 @@ func (p *pollDB) GetPollVotes(ctx context.Context, pollID string) ([]*gtsmodel.P
 	votes, err := p.state.Caches.DB.PollVote.LoadIDs("ID",
 		voteIDs,
 		func(uncached []string) ([]*gtsmodel.PollVote, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached votes.
-			votes := make([]*gtsmodel.PollVote, 0, len(uncached))
+			votes := make([]*gtsmodel.PollVote, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.
@@ -105,8 +105,15 @@ func (r *relationshipDB) GetBlocksByIDs(ctx context.Context, ids []string) ([]*g
 	blocks, err := r.state.Caches.DB.Block.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Block, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached blocks.
-			blocks := make([]*gtsmodel.Block, 0, len(uncached))
+			blocks := make([]*gtsmodel.Block, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -82,8 +82,15 @@ func (r *relationshipDB) GetFollowsByIDs(ctx context.Context, ids []string) ([]*
 	follows, err := r.state.Caches.DB.Follow.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Follow, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached follows.
-			follows := make([]*gtsmodel.Follow, 0, len(uncached))
+			follows := make([]*gtsmodel.Follow, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -81,8 +81,15 @@ func (r *relationshipDB) GetFollowRequestsByIDs(ctx context.Context, ids []strin
 	follows, err := r.state.Caches.DB.FollowRequest.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.FollowRequest, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached followReqs.
-			follows := make([]*gtsmodel.FollowRequest, 0, len(uncached))
+			follows := make([]*gtsmodel.FollowRequest, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.

@@ -87,8 +87,15 @@ func (r *relationshipDB) getMutesByIDs(ctx context.Context, ids []string) ([]*gt
 	mutes, err := r.state.Caches.DB.UserMute.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.UserMute, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached mutes.
-			mutes := make([]*gtsmodel.UserMute, 0, len(uncached))
+			mutes := make([]*gtsmodel.UserMute, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.
@@ -54,8 +54,15 @@ func (s *statusDB) GetStatusesByIDs(ctx context.Context, ids []string) ([]*gtsmo
 	statuses, err := s.state.Caches.DB.Status.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Status, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached statuses.
-			statuses := make([]*gtsmodel.Status, 0, len(uncached))
+			statuses := make([]*gtsmodel.Status, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) status IDs.

@@ -73,8 +73,15 @@ func (s *statusBookmarkDB) GetStatusBookmarksByIDs(ctx context.Context, ids []st
 	bookmarks, err := s.state.Caches.DB.StatusBookmark.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.StatusBookmark, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached bookmarks.
-			bookmarks := make([]*gtsmodel.StatusBookmark, 0, len(uncached))
+			bookmarks := make([]*gtsmodel.StatusBookmark, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) bookmarks.

@@ -133,8 +133,15 @@ func (s *statusFaveDB) GetStatusFaves(ctx context.Context, statusID string) ([]*
 	faves, err := s.state.Caches.DB.StatusFave.LoadIDs("ID",
 		faveIDs,
 		func(uncached []string) ([]*gtsmodel.StatusFave, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached faves.
-			faves := make([]*gtsmodel.StatusFave, 0, len(uncached))
+			faves := make([]*gtsmodel.StatusFave, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) fave IDs.

@@ -79,8 +79,15 @@ func (t *tagDB) GetTags(ctx context.Context, ids []string) ([]*gtsmodel.Tag, err
 	tags, err := t.state.Caches.DB.Tag.LoadIDs("ID",
 		ids,
 		func(uncached []string) ([]*gtsmodel.Tag, error) {
+			// Avoid querying
+			// if none uncached.
+			count := len(uncached)
+			if count == 0 {
+				return nil, nil
+			}
+
 			// Preallocate expected length of uncached tags.
-			tags := make([]*gtsmodel.Tag, 0, len(uncached))
+			tags := make([]*gtsmodel.Tag, 0, count)
 
 			// Perform database query scanning
 			// the remaining (uncached) IDs.
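Every cache-loader hunk above applies the same guard: when the cache already held every requested ID, the load callback now returns immediately instead of running the database query with an empty ID list, and the preallocation reuses the computed count. Below is a minimal, self-contained sketch of that pattern; the Account type and the query callback are illustrative stand-ins, not GoToSocial's actual cache API.

package main

import "fmt"

// Account stands in for any of the cached model types in the hunks above.
type Account struct{ ID string }

// loadUncached mirrors the callback passed to LoadIDs: it is handed only the
// IDs the cache could not serve, and must fetch those from the database.
func loadUncached(uncached []string, query func([]string) ([]*Account, error)) ([]*Account, error) {
	// Avoid querying if none uncached.
	count := len(uncached)
	if count == 0 {
		return nil, nil
	}

	// Preallocate expected length of uncached accounts.
	accounts := make([]*Account, 0, count)

	// Perform the database query for the remaining (uncached) IDs.
	rows, err := query(uncached)
	if err != nil {
		return nil, err
	}
	return append(accounts, rows...), nil
}

func main() {
	// With nothing uncached, the query function is never invoked.
	got, err := loadUncached(nil, func([]string) ([]*Account, error) {
		return nil, fmt.Errorf("unexpected database query")
	})
	fmt.Println(len(got), err) // 0 <nil>
}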
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go (generated, vendored; 10 changed lines)

@@ -24,7 +24,6 @@
 	"encoding/hex"
 	"encoding/xml"
 	"fmt"
-	"hash/crc32"
 	"io"
 	"net/http"
 	"net/url"
@@ -87,7 +86,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
 		}
-		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
 	}
 
 	// Initiate a new multipart upload.
@@ -116,7 +115,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 	// CRC32C is ~50% faster on AMD64 @ 30GB/s
 	var crcBytes []byte
 	customHeader := make(http.Header)
-	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	crc := opts.AutoChecksum.Hasher()
 	for partNumber <= totalPartsCount {
 		length, rErr := readFull(reader, buf)
 		if rErr == io.EOF && partNumber > 1 {
@@ -154,7 +153,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 			crc.Reset()
 			crc.Write(buf[:length])
 			cSum := crc.Sum(nil)
-			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+			customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
 			crcBytes = append(crcBytes, cSum...)
 		}
 
@@ -202,12 +201,13 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 	sort.Sort(completedParts(complMultipartUpload.Parts))
 	opts = PutObjectOptions{
 		ServerSideEncryption: opts.ServerSideEncryption,
+		AutoChecksum:         opts.AutoChecksum,
 	}
 	if len(crcBytes) > 0 {
 		// Add hash of hashes.
 		crc.Reset()
 		crc.Write(crcBytes)
-		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+		opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 	if err != nil {
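The completion hunk above (and its counterparts in the streaming file below) all follow the same shape: each uploaded part's checksum is appended to crcBytes, and on CompleteMultipartUpload the code checksums that concatenation once more, the "hash of hashes", and sends it base64-encoded under the checksum key. Here is a standalone sketch of that aggregation, using hash/crc32 directly where the diff now calls opts.AutoChecksum.Hasher(); the part payloads are hypothetical.

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	// Hypothetical part payloads; in the diff these are the uploaded buffers.
	parts := [][]byte{[]byte("part-1"), []byte("part-2"), []byte("part-3")}

	// Per-part checksums, concatenated in upload order (crcBytes in the diff).
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	var crcBytes []byte
	for _, p := range parts {
		crc.Reset()
		crc.Write(p)
		crcBytes = append(crcBytes, crc.Sum(nil)...)
	}

	// Add hash of hashes: checksum the concatenated part checksums and
	// base64-encode the result for the completion request's metadata.
	crc.Reset()
	crc.Write(crcBytes)
	fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}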
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go (generated, vendored; 39 changed lines)

@@ -22,7 +22,6 @@
 	"context"
 	"encoding/base64"
 	"fmt"
-	"hash/crc32"
 	"io"
 	"net/http"
 	"net/url"
@@ -115,7 +114,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
 		}
-		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
 	}
 	// Initiate a new multipart upload.
 	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
@@ -195,10 +194,10 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 			sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
 			trailer := make(http.Header, 1)
 			if withChecksum {
-				crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-				trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+				crc := opts.AutoChecksum.Hasher()
+				trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
 				sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
-					trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
+					trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash))
 				})
 			}
 
@@ -271,17 +270,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 
 	opts = PutObjectOptions{
 		ServerSideEncryption: opts.ServerSideEncryption,
+		AutoChecksum:         opts.AutoChecksum,
 	}
 	if withChecksum {
 		// Add hash of hashes.
-		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+		crc := opts.AutoChecksum.Hasher()
 		for _, part := range complMultipartUpload.Parts {
-			cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C)
+			cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
 			if err == nil {
 				crc.Write(cs)
 			}
 		}
-		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+		opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
@@ -308,7 +308,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
 		}
-		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
 	}
 
 	// Calculate the optimal parts info for a given size.
@@ -337,7 +337,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 	// CRC32C is ~50% faster on AMD64 @ 30GB/s
 	var crcBytes []byte
 	customHeader := make(http.Header)
-	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	crc := opts.AutoChecksum.Hasher()
 	md5Hash := c.md5Hasher()
 	defer md5Hash.Close()
 
@@ -381,7 +381,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 			crc.Reset()
 			crc.Write(buf[:length])
 			cSum := crc.Sum(nil)
-			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+			customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
 			crcBytes = append(crcBytes, cSum...)
 		}
 
@@ -433,12 +433,13 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 
 	opts = PutObjectOptions{
 		ServerSideEncryption: opts.ServerSideEncryption,
+		AutoChecksum:         opts.AutoChecksum,
 	}
 	if len(crcBytes) > 0 {
 		// Add hash of hashes.
 		crc.Reset()
 		crc.Write(crcBytes)
-		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+		opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 	if err != nil {
@@ -467,7 +468,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
 		}
-		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
 	}
 
 	// Cancel all when an error occurs.
@@ -500,7 +501,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 	// Create checksums
 	// CRC32C is ~50% faster on AMD64 @ 30GB/s
 	var crcBytes []byte
-	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	crc := opts.AutoChecksum.Hasher()
 
 	// Total data read and written to server. should be equal to 'size' at the end of the call.
 	var totalUploadedSize int64
@@ -558,7 +559,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 			crc.Reset()
 			crc.Write(buf[:length])
 			cSum := crc.Sum(nil)
-			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+			customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
 			crcBytes = append(crcBytes, cSum...)
 		}
 
@@ -639,12 +640,13 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 
 	opts = PutObjectOptions{
 		ServerSideEncryption: opts.ServerSideEncryption,
+		AutoChecksum:         opts.AutoChecksum,
 	}
 	if len(crcBytes) > 0 {
 		// Add hash of hashes.
 		crc.Reset()
 		crc.Write(crcBytes)
-		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+		opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 	if err != nil {
@@ -765,7 +767,10 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 		contentMD5Base64: md5Base64,
 		contentSHA256Hex: sha256Hex,
 		streamSha256:     !opts.DisableContentSha256,
-		addCrc:           addCrc,
 	}
+	if addCrc {
+		opts.AutoChecksum.SetDefault(ChecksumCRC32C)
+		reqMetadata.addCrc = &opts.AutoChecksum
+	}
 	if opts.Internal.SourceVersionID != "" {
 		if opts.Internal.SourceVersionID != nullVersionID {
vendor/github.com/minio/minio-go/v7/api-put-object.go (generated, vendored; 16 changed lines)

@@ -23,7 +23,6 @@
 	"encoding/base64"
 	"errors"
 	"fmt"
-	"hash/crc32"
 	"io"
 	"net/http"
 	"sort"
@@ -90,6 +89,11 @@ type PutObjectOptions struct {
 	DisableContentSha256 bool
 	DisableMultipart     bool
 
+	// AutoChecksum is the type of checksum that will be added if no other checksum is added,
+	// like MD5 or SHA256 streaming checksum, and it is feasible for the upload type.
+	// If none is specified CRC32C is used, since it is generally the fastest.
+	AutoChecksum ChecksumType
+
 	// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
 	// fill them serially and upload them in parallel.
 	// This can be used for faster uploads on non-seekable or slow-to-seek input.
@@ -300,6 +304,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 	if size > int64(maxMultipartPutObjectSize) {
 		return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
 	}
+	opts.AutoChecksum.SetDefault(ChecksumCRC32C)
 
 	// NOTE: Streaming signature is not supported by GCS.
 	if s3utils.IsGoogleEndpoint(*c.endpointURL) {
@@ -361,7 +366,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
 		}
-		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
 	}
 
 	// Initiate a new multipart upload.
@@ -390,7 +395,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 	// CRC32C is ~50% faster on AMD64 @ 30GB/s
 	var crcBytes []byte
 	customHeader := make(http.Header)
-	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	crc := opts.AutoChecksum.Hasher()
 
 	for partNumber <= totalPartsCount {
 		length, rerr := readFull(reader, buf)
@@ -413,7 +418,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 			crc.Reset()
 			crc.Write(buf[:length])
 			cSum := crc.Sum(nil)
-			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+			customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
 			crcBytes = append(crcBytes, cSum...)
 		}
 
@@ -466,12 +471,13 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 
 	opts = PutObjectOptions{
 		ServerSideEncryption: opts.ServerSideEncryption,
+		AutoChecksum:         opts.AutoChecksum,
 	}
 	if len(crcBytes) > 0 {
 		// Add hash of hashes.
 		crc.Reset()
 		crc.Write(crcBytes)
-		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+		opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 	if err != nil {
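With the new AutoChecksum field, callers can choose which checksum the patched multipart paths attach instead of the previously hard-coded CRC32C; putObjectCommon's SetDefault(ChecksumCRC32C) keeps the old behaviour when nothing is set. Below is a usage sketch built only from identifiers visible in this diff; the client wiring, bucket, and object names are placeholders.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// uploadWithSHA256 is a sketch only: it asks the upload paths patched above to
// attach SHA256 checksums rather than the CRC32C default.
func uploadWithSHA256(ctx context.Context, client *minio.Client, data []byte) {
	opts := minio.PutObjectOptions{
		// Overrides the default that SetDefault(ChecksumCRC32C) would apply.
		AutoChecksum: minio.ChecksumSHA256,
	}
	info, err := client.PutObject(ctx, "example-bucket", "example-object",
		bytes.NewReader(data), int64(len(data)), opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded; SHA256 checksum:", info.ChecksumSHA256)
}

func main() {
	// Constructing a real client needs an endpoint and credentials (see the
	// vendored api.go); uploadWithSHA256 above only demonstrates the option.
}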
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go (generated, vendored; 16 changed lines)

@@ -340,6 +340,22 @@ type CompletePart struct {
 	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
 }
 
+// Checksum will return the checksum for the given type.
+// Will return the empty string if not set.
+func (c CompletePart) Checksum(t ChecksumType) string {
+	switch {
+	case t.Is(ChecksumCRC32C):
+		return c.ChecksumCRC32C
+	case t.Is(ChecksumCRC32):
+		return c.ChecksumCRC32
+	case t.Is(ChecksumSHA1):
+		return c.ChecksumSHA1
+	case t.Is(ChecksumSHA256):
+		return c.ChecksumSHA256
+	}
+	return ""
+}
+
 // completeMultipartUpload container for completing multipart upload.
 type completeMultipartUpload struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
vendor/github.com/minio/minio-go/v7/api.go (generated, vendored; 13 changed lines)

@@ -23,7 +23,6 @@
 	"encoding/base64"
 	"errors"
 	"fmt"
-	"hash/crc32"
 	"io"
 	"math/rand"
 	"net"
@@ -129,7 +128,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.75"
+	libraryVersion = "v7.0.76"
 )
 
 // User Agent should always following the below style.
@@ -471,7 +470,7 @@ type requestMetadata struct {
 	contentMD5Base64 string // carries base64 encoded md5sum
 	contentSHA256Hex string // carries hex encoded sha256sum
 	streamSha256     bool
-	addCrc           bool
+	addCrc           *ChecksumType
 	trailer          http.Header // (http.Request).Trailer. Requires v4 signature.
 }
 
@@ -616,16 +615,16 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 		}
 	}
 
-	if metadata.addCrc && metadata.contentLength > 0 {
+	if metadata.addCrc != nil && metadata.contentLength > 0 {
 		if metadata.trailer == nil {
 			metadata.trailer = make(http.Header, 1)
 		}
-		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+		crc := metadata.addCrc.Hasher()
 		metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
 			// Update trailer when done.
-			metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
+			metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash))
 		})
-		metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+		metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
 	}
 
 	// Create cancel context to control 'newRetryTimer' go routine.
vendor/github.com/minio/minio-go/v7/checksum.go (generated, vendored; 13 changed lines)

@@ -25,6 +25,7 @@
 	"hash/crc32"
 	"io"
 	"math/bits"
+	"net/http"
 )
 
 // ChecksumType contains information about the checksum type.
@@ -78,6 +79,11 @@ func (c ChecksumType) Key() string {
 	return ""
 }
 
+// KeyCapitalized returns the capitalized key as used in HTTP headers.
+func (c ChecksumType) KeyCapitalized() string {
+	return http.CanonicalHeaderKey(c.Key())
+}
+
 // RawByteLen returns the size of the un-encoded checksum.
 func (c ChecksumType) RawByteLen() int {
 	switch c & checksumMask {
@@ -112,6 +118,13 @@ func (c ChecksumType) IsSet() bool {
 	return bits.OnesCount32(uint32(c)) == 1
 }
 
+// SetDefault will set the checksum if not already set.
+func (c *ChecksumType) SetDefault(t ChecksumType) {
+	if !c.IsSet() {
+		*c = t
+	}
+}
+
 // String returns the type as a string.
 // CRC32, CRC32C, SHA1, and SHA256 for valid values.
 // Empty string for unset and "<invalid>" if not valid.
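SetDefault is what lets the library fall back to CRC32C without clobbering an explicit choice: it only assigns when the receiver is still unset (IsSet reports whether exactly one checksum bit is on). A short sketch of those semantics, using the exported ChecksumType values this file defines:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	// Zero value: no checksum selected yet, so the default applies.
	var cs minio.ChecksumType
	cs.SetDefault(minio.ChecksumCRC32C)
	fmt.Println(cs) // CRC32C

	// An explicit selection is left untouched by SetDefault.
	explicit := minio.ChecksumSHA1
	explicit.SetDefault(minio.ChecksumCRC32C)
	fmt.Println(explicit) // SHA1
}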
158
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
158
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
|
@ -24,7 +24,6 @@
|
||||||
"archive/zip"
|
"archive/zip"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
|
@ -166,7 +165,7 @@ func logError(testName, function string, args map[string]interface{}, startTime
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// log failed test runs
|
// Log failed test runs, do not call this directly, use logError instead, as that correctly stops the test run
|
||||||
func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
|
func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
|
||||||
l := baseLogger(testName, function, args, startTime).With(
|
l := baseLogger(testName, function, args, startTime).With(
|
||||||
"status", "FAIL",
|
"status", "FAIL",
|
||||||
|
@ -2199,22 +2198,15 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||||
|
|
||||||
defer cleanupBucket(bucketName, c)
|
defer cleanupBucket(bucketName, c)
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
header string
|
cs minio.ChecksumType
|
||||||
hasher hash.Hash
|
|
||||||
|
|
||||||
// Checksum values
|
|
||||||
ChecksumCRC32 string
|
|
||||||
ChecksumCRC32C string
|
|
||||||
ChecksumSHA1 string
|
|
||||||
ChecksumSHA256 string
|
|
||||||
}{
|
}{
|
||||||
{header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()},
|
{cs: minio.ChecksumCRC32C},
|
||||||
{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
|
{cs: minio.ChecksumCRC32},
|
||||||
{header: "x-amz-checksum-sha1", hasher: sha1.New()},
|
{cs: minio.ChecksumSHA1},
|
||||||
{header: "x-amz-checksum-sha256", hasher: sha256.New()},
|
{cs: minio.ChecksumSHA256},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, test := range tests {
|
for _, test := range tests {
|
||||||
bufSize := dataFileMap["datafile-10-kB"]
|
bufSize := dataFileMap["datafile-10-kB"]
|
||||||
|
|
||||||
// Save the data
|
// Save the data
|
||||||
|
@ -2235,29 +2227,27 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||||
logError(testName, function, args, startTime, "", "Read failed", err)
|
logError(testName, function, args, startTime, "", "Read failed", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
h := test.hasher
|
h := test.cs.Hasher()
|
||||||
h.Reset()
|
h.Reset()
|
||||||
// Wrong CRC.
|
|
||||||
meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
|
// Test with Wrong CRC.
|
||||||
|
meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||||
args["metadata"] = meta
|
args["metadata"] = meta
|
||||||
args["range"] = "false"
|
args["range"] = "false"
|
||||||
|
args["checksum"] = test.cs.String()
|
||||||
|
|
||||||
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
|
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
|
||||||
DisableMultipart: true,
|
DisableMultipart: true,
|
||||||
UserMetadata: meta,
|
UserMetadata: meta,
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if i == 0 && resp.ChecksumCRC32 == "" {
|
logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err)
|
||||||
logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set correct CRC.
|
// Set correct CRC.
|
||||||
h.Write(b)
|
h.Write(b)
|
||||||
meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
|
meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||||
reader.Close()
|
reader.Close()
|
||||||
|
|
||||||
resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
|
resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
|
||||||
|
@ -2419,17 +2409,12 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||||
}
|
}
|
||||||
defer cleanupBucket(bucketName, c)
|
defer cleanupBucket(bucketName, c)
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
header string
|
cs minio.ChecksumType
|
||||||
hasher hash.Hash
|
|
||||||
|
|
||||||
// Checksum values
|
|
||||||
ChecksumCRC32 string
|
|
||||||
ChecksumCRC32C string
|
|
||||||
ChecksumSHA1 string
|
|
||||||
ChecksumSHA256 string
|
|
||||||
}{
|
}{
|
||||||
// Currently there is no way to override the checksum type.
|
{cs: minio.ChecksumCRC32C},
|
||||||
{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"},
|
{cs: minio.ChecksumCRC32},
|
||||||
|
{cs: minio.ChecksumSHA1},
|
||||||
|
{cs: minio.ChecksumSHA256},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
@ -2438,11 +2423,12 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||||
// Save the data
|
// Save the data
|
||||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||||
args["objectName"] = objectName
|
args["objectName"] = objectName
|
||||||
|
args["checksum"] = test.cs.String()
|
||||||
|
|
||||||
cmpChecksum := func(got, want string) {
|
cmpChecksum := func(got, want string) {
|
||||||
if want != got {
|
if want != got {
|
||||||
// logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
|
logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
|
||||||
fmt.Printf("want %s, got %s\n", want, got)
|
//fmt.Printf("want %s, got %s\n", want, got)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2455,9 +2441,9 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
reader.Close()
|
reader.Close()
|
||||||
h := test.hasher
|
h := test.cs.Hasher()
|
||||||
h.Reset()
|
h.Reset()
|
||||||
test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher)
|
want := hashMultiPart(b, partSize, test.cs.Hasher())
|
||||||
|
|
||||||
// Set correct CRC.
|
// Set correct CRC.
|
||||||
|
|
||||||
|
@ -2466,15 +2452,40 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||||
DisableMultipart: false,
|
DisableMultipart: false,
|
||||||
UserMetadata: nil,
|
UserMetadata: nil,
|
||||||
PartSize: partSize,
|
PartSize: partSize,
|
||||||
|
AutoChecksum: test.cs,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
|
|
||||||
cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
|
switch test.cs {
|
||||||
cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
|
case minio.ChecksumCRC32C:
|
||||||
cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
|
cmpChecksum(resp.ChecksumCRC32C, want)
|
||||||
|
case minio.ChecksumCRC32:
|
||||||
|
cmpChecksum(resp.ChecksumCRC32, want)
|
||||||
|
case minio.ChecksumSHA1:
|
||||||
|
cmpChecksum(resp.ChecksumSHA1, want)
|
||||||
|
case minio.ChecksumSHA256:
|
||||||
|
cmpChecksum(resp.ChecksumSHA256, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
|
||||||
|
if err != nil {
|
||||||
|
logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
want = want[:strings.IndexByte(want, '-')]
|
||||||
|
switch test.cs {
|
||||||
|
case minio.ChecksumCRC32C:
|
||||||
|
cmpChecksum(s.Checksum.ChecksumCRC32C, want)
|
||||||
|
case minio.ChecksumCRC32:
|
||||||
|
cmpChecksum(s.Checksum.ChecksumCRC32, want)
|
||||||
|
case minio.ChecksumSHA1:
|
||||||
|
cmpChecksum(s.Checksum.ChecksumSHA1, want)
|
||||||
|
case minio.ChecksumSHA256:
|
||||||
|
cmpChecksum(s.Checksum.ChecksumSHA256, want)
|
||||||
|
}
|
||||||
|
|
||||||
// Read the data back
|
// Read the data back
|
||||||
gopts := minio.GetObjectOptions{Checksum: true}
|
gopts := minio.GetObjectOptions{Checksum: true}
|
||||||
|
@@ -2496,18 +2507,17 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 		// Test part 2 checksum...
 		h.Reset()
 		h.Write(b[partSize : 2*partSize])
-		got := base64.StdEncoding.EncodeToString(h.Sum(nil))
-		if test.ChecksumSHA256 != "" {
-			cmpChecksum(st.ChecksumSHA256, got)
-		}
-		if test.ChecksumSHA1 != "" {
-			cmpChecksum(st.ChecksumSHA1, got)
-		}
-		if test.ChecksumCRC32 != "" {
-			cmpChecksum(st.ChecksumCRC32, got)
-		}
-		if test.ChecksumCRC32C != "" {
-			cmpChecksum(st.ChecksumCRC32C, got)
-		}
+		want = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+		switch test.cs {
+		case minio.ChecksumCRC32C:
+			cmpChecksum(st.ChecksumCRC32C, want)
+		case minio.ChecksumCRC32:
+			cmpChecksum(st.ChecksumCRC32, want)
+		case minio.ChecksumSHA1:
+			cmpChecksum(st.ChecksumSHA1, want)
+		case minio.ChecksumSHA256:
+			cmpChecksum(st.ChecksumSHA256, want)
+		}
 
 		delete(args, "metadata")
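Taken together, the hunks above move the functional test from per-algorithm expectation fields to a single `minio.ChecksumType` (`test.cs`) that is passed via `PutObjectOptions.AutoChecksum` and verified against both the upload response and `GetObjectAttributes`. A hedged sketch of that client-side pattern outside the test harness; the endpoint, credentials, bucket and object names are placeholders:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	ctx := context.Background()

	c, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	data := bytes.Repeat([]byte("x"), 10<<20) // 10 MiB, so a 5 MiB part size gives two parts
	resp, err := c.PutObject(ctx, "my-bucket", "my-object", bytes.NewReader(data), int64(len(data)),
		minio.PutObjectOptions{
			PartSize:     5 << 20,
			AutoChecksum: minio.ChecksumCRC32C, // let the SDK compute and send the checksum
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upload checksum:", resp.ChecksumCRC32C) // composite value, e.g. "…-2"

	attrs, err := c.GetObjectAttributes(ctx, "my-bucket", "my-object", minio.ObjectAttributesOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("stored checksum:", attrs.Checksum.ChecksumCRC32C)
}
```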
@@ -13500,7 +13510,7 @@ function := "SetBucketCors(bucketName, cors)"
 		Secure: mustParseBool(os.Getenv(enableHTTPS)),
 	})
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
@@ -13516,7 +13526,7 @@ function := "SetBucketCors(bucketName, cors)"
 		bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 		err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
 		if err != nil {
-			logFailure(testName, function, args, startTime, "", "MakeBucket failed", err)
+			logError(testName, function, args, startTime, "", "MakeBucket failed", err)
 			return
 		}
 	}
@@ -13526,7 +13536,7 @@ function := "SetBucketCors(bucketName, cors)"
 	publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}`
 	err = c.SetBucketPolicy(ctx, bucketName, publicPolicy)
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
 		return
 	}
 
@@ -13540,7 +13550,7 @@ function := "SetBucketCors(bucketName, cors)"
 
 	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "PutObject call failed", err)
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
 		return
 	}
 	bucketURL := c.EndpointURL().String() + "/" + bucketName + "/"
@@ -13548,7 +13558,7 @@ function := "SetBucketCors(bucketName, cors)"
 
 	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "DefaultTransport failed", err)
+		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
 		return
 	}
 	httpClient := &http.Client{
@@ -14156,7 +14166,7 @@ function := "SetBucketCors(bucketName, cors)"
 		}
 		err = c.SetBucketCors(ctx, bucketName, corsConfig)
 		if err != nil {
-			logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
+			logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
 			return
 		}
 	}
@@ -14165,7 +14175,7 @@ function := "SetBucketCors(bucketName, cors)"
 		if test.method != "" && test.url != "" {
 			req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil)
 			if err != nil {
-				logFailure(testName, function, args, startTime, "", "HTTP request creation failed", err)
+				logError(testName, function, args, startTime, "", "HTTP request creation failed", err)
 				return
 			}
 			req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion)
@@ -14175,7 +14185,7 @@ function := "SetBucketCors(bucketName, cors)"
 			}
 			resp, err := httpClient.Do(req)
 			if err != nil {
-				logFailure(testName, function, args, startTime, "", "HTTP request failed", err)
+				logError(testName, function, args, startTime, "", "HTTP request failed", err)
 				return
 			}
 			defer resp.Body.Close()
@@ -14183,7 +14193,7 @@ function := "SetBucketCors(bucketName, cors)"
 			// Check returned status code
 			if resp.StatusCode != test.wantStatus {
 				errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode)
-				logFailure(testName, function, args, startTime, "", errStr, nil)
+				logError(testName, function, args, startTime, "", errStr, nil)
 				return
 			}
 
@@ -14191,12 +14201,12 @@ function := "SetBucketCors(bucketName, cors)"
 			if test.wantBodyContains != "" {
 				body, err := io.ReadAll(resp.Body)
 				if err != nil {
-					logFailure(testName, function, args, startTime, "", "Failed to read response body", err)
+					logError(testName, function, args, startTime, "", "Failed to read response body", err)
 					return
 				}
 				if !strings.Contains(string(body), test.wantBodyContains) {
 					errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body))
-					logFailure(testName, function, args, startTime, "", errStr, nil)
+					logError(testName, function, args, startTime, "", errStr, nil)
 					return
 				}
 			}
@@ -14213,7 +14223,7 @@ function := "SetBucketCors(bucketName, cors)"
 				gotVal = strings.ReplaceAll(gotVal, " ", "")
 				if gotVal != v {
 					errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal)
-					logFailure(testName, function, args, startTime, "", errStr, nil)
+					logError(testName, function, args, startTime, "", errStr, nil)
 					return
 				}
 			}
@@ -14241,7 +14251,7 @@ function := "SetBucketCors(bucketName, cors)"
 		Secure: mustParseBool(os.Getenv(enableHTTPS)),
 	})
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
@@ -14258,7 +14268,7 @@ function := "SetBucketCors(bucketName, cors)"
 	// Make a new bucket.
 	err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "MakeBucket failed", err)
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
 		return
 	}
 	defer cleanupBucket(bucketName, c)
@@ -14284,37 +14294,37 @@ function := "SetBucketCors(bucketName, cors)"
 	corsConfig := cors.NewConfig(corsRules)
 	err = c.SetBucketCors(ctx, bucketName, corsConfig)
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
+		logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
 		return
 	}
 
 	// Get the rules and check they match what we set
 	gotCorsConfig, err := c.GetBucketCors(ctx, bucketName)
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err)
+		logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
 		return
 	}
 	if !reflect.DeepEqual(corsConfig, gotCorsConfig) {
 		msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig)
-		logFailure(testName, function, args, startTime, "", msg, nil)
+		logError(testName, function, args, startTime, "", msg, nil)
 		return
 	}
 
 	// Delete the rules
 	err = c.SetBucketCors(ctx, bucketName, nil)
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "SetBucketCors failed to delete", err)
+		logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err)
 		return
 	}
 
 	// Get the rules and check they are now empty
 	gotCorsConfig, err = c.GetBucketCors(ctx, bucketName)
 	if err != nil {
-		logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err)
+		logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
		return
 	}
 	if gotCorsConfig != nil {
-		logFailure(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil)
+		logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil)
 		return
 	}
 
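The SetBucketCors hunks above only swap `logFailure` for `logError`; the API they exercise is the `SetBucketCors`/`GetBucketCors` round trip. A rough sketch of that round trip follows; the endpoint, credentials and the `cors.Rule`/`Config` field names are assumptions on my part, only the client calls themselves appear in the diff:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/cors"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	ctx := context.Background()

	c, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Field names assumed to mirror the S3 CORSRule schema.
	rules := []cors.Rule{{
		AllowedOrigin: []string{"https://example.com"},
		AllowedMethod: []string{"GET", "PUT"},
		AllowedHeader: []string{"*"},
	}}

	if err := c.SetBucketCors(ctx, "my-bucket", cors.NewConfig(rules)); err != nil {
		log.Fatal(err)
	}

	got, err := c.GetBucketCors(ctx, "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bucket has %d CORS rule(s)", len(got.CORSRules))

	// Passing nil removes the configuration again, as the last hunks check.
	if err := c.SetBucketCors(ctx, "my-bucket", nil); err != nil {
		log.Fatal(err)
	}
}
```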
3  vendor/github.com/rs/xid/.gitignore  generated  vendored  Normal file
@@ -0,0 +1,3 @@
+/.idea
+/.vscode
+.DS_Store
10  vendor/github.com/rs/xid/README.md  generated  vendored
@@ -4,7 +4,7 @@
 
 Package xid is a globally unique id generator library, ready to safely be used directly in your server code.
 
-Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string:
+Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization ([base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10)) to make it shorter when transported as a string:
 https://docs.mongodb.org/manual/reference/object-id/
 
 - 4-byte value representing the seconds since the Unix epoch,
@@ -13,7 +13,7 @@ https://docs.mongodb.org/manual/reference/object-id/
 - 3-byte counter, starting with a random value.
 
 The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
-The string representation is using base32 hex (w/o padding) for better space efficiency
+The string representation is using [base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10) (w/o padding) for better space efficiency
 when stored in that form (20 bytes). The hex variant of base32 is used to retain the
 sortable property of the id.
 
@@ -71,8 +71,10 @@ References:
 - Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid
 - Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid
 - PostgreSQL port by [Rasmus Holm](https://github.com/crholm): https://github.com/modfin/pg-xid
-- Swift port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/swift-xid
-- C++ port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/libxid
+- Swift port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/swift-xid
+- C++ port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/libxid
+- Typescript & Javascript port by [Yiwen AI](https://github.com/yiwen-ai): https://github.com/yiwen-ai/xid-ts
+- Gleam port by [Alexandre Del Vecchio](https://github.com/defgenx): https://github.com/defgenx/gxid
 
 ## Install
 
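Since the README hunks above describe the 12-byte id layout and its 20-character base32hex string form, here is a small usage sketch against the public xid API; the printed values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/rs/xid"
)

func main() {
	id := xid.New()
	fmt.Println(id.String())     // e.g. "9m4e2mr0ui3e8a215n4g" — 20 chars, base32hex, time-sortable
	fmt.Println(id.Time())       // the 4-byte timestamp component, second precision
	fmt.Println(len(id.Bytes())) // 12 raw bytes, compatible with Mongo ObjectIDs
}
```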
29  vendor/github.com/rs/xid/hostid_darwin.go  generated  vendored
@@ -2,8 +2,33 @@
 
 package xid
 
-import "syscall"
+import (
+	"errors"
+	"os/exec"
+	"strings"
+)
 
 func readPlatformMachineID() (string, error) {
-	return syscall.Sysctl("kern.uuid")
+	ioreg, err := exec.LookPath("ioreg")
+	if err != nil {
+		return "", err
+	}
+
+	cmd := exec.Command(ioreg, "-rd1", "-c", "IOPlatformExpertDevice")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", err
+	}
+
+	for _, line := range strings.Split(string(out), "\n") {
+		if strings.Contains(line, "IOPlatformUUID") {
+			parts := strings.SplitAfter(line, `" = "`)
+			if len(parts) == 2 {
+				uuid := strings.TrimRight(parts[1], `"`)
+				return strings.ToLower(uuid), nil
+			}
+		}
+	}
+
+	return "", errors.New("cannot find host id")
 }
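The new darwin implementation shells out to `ioreg -rd1 -c IOPlatformExpertDevice` and pulls the `IOPlatformUUID` value out of the output. To make the `SplitAfter`/`TrimRight` step concrete, here is an illustration on a made-up output line (standard library only; the sample line is not real ioreg output from any particular machine):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := `      "IOPlatformUUID" = "1C7E6A3F-98D1-4E2B-B0AA-112233445566"`

	// SplitAfter keeps the separator with the left piece, so parts[1] is the
	// UUID followed only by the trailing quote.
	parts := strings.SplitAfter(line, `" = "`)
	if len(parts) == 2 {
		uuid := strings.TrimRight(parts[1], `"`)
		fmt.Println(strings.ToLower(uuid)) // 1c7e6a3f-98d1-4e2b-b0aa-112233445566
	}
}
```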
20  vendor/github.com/rs/xid/hostid_windows.go  generated  vendored
@@ -11,11 +11,17 @@
 func readPlatformMachineID() (string, error) {
 	// source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go
 	var h syscall.Handle
-	err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h)
+
+	regKeyCryptoPtr, err := syscall.UTF16PtrFromString(`SOFTWARE\Microsoft\Cryptography`)
+	if err != nil {
+		return "", fmt.Errorf(`error reading registry key "SOFTWARE\Microsoft\Cryptography": %w`, err)
+	}
+
+	err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, regKeyCryptoPtr, 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h)
 	if err != nil {
 		return "", err
 	}
-	defer syscall.RegCloseKey(h)
+	defer func() { _ = syscall.RegCloseKey(h) }()
 
 	const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16
 	const uuidLen = 36
@@ -23,9 +29,15 @@ func readPlatformMachineID() (string, error) {
 	var regBuf [syscallRegBufLen]uint16
 	bufLen := uint32(syscallRegBufLen)
 	var valType uint32
-	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen)
+
+	mGuidPtr, err := syscall.UTF16PtrFromString(`MachineGuid`)
 	if err != nil {
-		return "", err
+		return "", fmt.Errorf("error reading machine GUID: %w", err)
+	}
+
+	err = syscall.RegQueryValueEx(h, mGuidPtr, nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen)
+	if err != nil {
+		return "", fmt.Errorf("error parsing ")
 	}
 
 	hostID := syscall.UTF16ToString(regBuf[:])
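The Windows change replaces the deprecated `syscall.StringToUTF16Ptr`, which panics on strings containing a NUL byte, with `syscall.UTF16PtrFromString`, which returns an error (EINVAL) instead. A minimal Windows-only sketch of that behaviour, illustration only:

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// An interior NUL makes UTF16PtrFromString return an error instead of
	// panicking the way the deprecated StringToUTF16Ptr would.
	if _, err := syscall.UTF16PtrFromString("SOFTWARE\x00Microsoft"); err != nil {
		fmt.Println("rejected:", err)
	}

	// A well-formed registry path converts cleanly to a *uint16.
	p, err := syscall.UTF16PtrFromString(`SOFTWARE\Microsoft\Cryptography`)
	fmt.Println("converted:", p != nil, err)
}
```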
13  vendor/github.com/rs/xid/id.go  generated  vendored
@@ -54,7 +54,6 @@
 	"sort"
 	"sync/atomic"
 	"time"
-	"unsafe"
 )
 
 // Code inspired from mgo/bson ObjectId
@@ -172,7 +171,7 @@ func FromString(id string) (ID, error) {
 func (id ID) String() string {
 	text := make([]byte, encodedLen)
 	encode(text, id[:])
-	return *(*string)(unsafe.Pointer(&text))
+	return string(text)
 }
 
 // Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it.
@@ -206,23 +205,23 @@ func encode(dst, id []byte) {
 
 	dst[19] = encoding[(id[11]<<4)&0x1F]
 	dst[18] = encoding[(id[11]>>1)&0x1F]
-	dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
+	dst[17] = encoding[(id[11]>>6)|(id[10]<<2)&0x1F]
 	dst[16] = encoding[id[10]>>3]
 	dst[15] = encoding[id[9]&0x1F]
 	dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F]
 	dst[13] = encoding[(id[8]>>2)&0x1F]
 	dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F]
-	dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F]
+	dst[11] = encoding[(id[7]>>4)|(id[6]<<4)&0x1F]
 	dst[10] = encoding[(id[6]>>1)&0x1F]
-	dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F]
+	dst[9] = encoding[(id[6]>>6)|(id[5]<<2)&0x1F]
 	dst[8] = encoding[id[5]>>3]
 	dst[7] = encoding[id[4]&0x1F]
 	dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F]
 	dst[5] = encoding[(id[3]>>2)&0x1F]
 	dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F]
-	dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F]
+	dst[3] = encoding[(id[2]>>4)|(id[1]<<4)&0x1F]
 	dst[2] = encoding[(id[1]>>1)&0x1F]
-	dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F]
+	dst[1] = encoding[(id[1]>>6)|(id[0]<<2)&0x1F]
 	dst[0] = encoding[id[0]>>3]
 }
 
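The `encode` hunk drops a redundant `&0x1F` on the left operand of each `|`: a byte shifted right by 6 (or 4) is already below 32, and since `&` binds tighter than `|` in Go, the remaining mask still applies only to the left-shifted operand. (The `String` hunk similarly trades the `unsafe` zero-copy conversion for a plain `string(text)` copy.) A quick brute-force check of the mask claim, illustration only:

```go
package main

import "fmt"

func main() {
	ok := true
	for b := 0; b < 256; b++ {
		v6 := byte(b) >> 6 // at most 3, already below 0x20
		v4 := byte(b) >> 4 // at most 15, already below 0x20
		if v6 != v6&0x1F || v4 != v4&0x1F {
			ok = false
		}
	}
	fmt.Println("masking after >>6 or >>4 never changes the value:", ok) // true
}
```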
6  vendor/modules.txt  vendored
@@ -491,7 +491,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.75
+# github.com/minio/minio-go/v7 v7.0.76
 ## explicit; go 1.21
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/cors
@@ -589,8 +589,8 @@ github.com/remyoudompheng/bigfft
 # github.com/rogpeppe/go-internal v1.12.0
 ## explicit; go 1.20
 github.com/rogpeppe/go-internal/fmtsort
-# github.com/rs/xid v1.5.0
-## explicit; go 1.12
+# github.com/rs/xid v1.6.0
+## explicit; go 1.16
 github.com/rs/xid
 # github.com/sagikazarmark/locafero v0.4.0
 ## explicit; go 1.20