Merge branch 'main' into inbox_post_and_fedi_db_updates
This commit is contained in: commit 1abb27b0e8
go.mod: 2 changes
@@ -35,7 +35,7 @@ require (
 	github.com/jackc/pgx/v5 v5.3.1
 	github.com/microcosm-cc/bluemonday v1.0.23
 	github.com/miekg/dns v1.1.54
-	github.com/minio/minio-go/v7 v7.0.52
+	github.com/minio/minio-go/v7 v7.0.53
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
 	github.com/spf13/cobra v1.7.0
go.sum: 4 changes
@@ -452,8 +452,8 @@ github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
 github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.52 h1:8XhG36F6oKQUDDSuz6dY3rioMzovKjW40W6ANuN0Dps=
-github.com/minio/minio-go/v7 v7.0.52/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU=
+github.com/minio/minio-go/v7 v7.0.53 h1:qtPyQ+b0Cc1ums3LsnVMAYULPNdAGz8qdX8R2zl9XMU=
+github.com/minio/minio-go/v7 v7.0.53/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU=
 github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
 github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
@@ -29,6 +29,7 @@
 	"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+	"github.com/superseriousbusiness/gotosocial/internal/id"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/state"
 	"github.com/superseriousbusiness/gotosocial/internal/util"
@@ -475,48 +476,41 @@ func (a *accountDB) CountAccountPinned(ctx context.Context, accountID string) (i
 }
 
 func (a *accountDB) GetAccountStatuses(ctx context.Context, accountID string, limit int, excludeReplies bool, excludeReblogs bool, maxID string, minID string, mediaOnly bool, publicOnly bool) ([]*gtsmodel.Status, db.Error) {
-	statusIDs := []string{}
+	// Ensure reasonable
+	if limit < 0 {
+		limit = 0
+	}
+
+	// Make educated guess for slice size
+	var (
+		statusIDs   = make([]string, 0, limit)
+		frontToBack = true
+	)
 
 	q := a.conn.
 		NewSelect().
 		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
+		// Select only IDs from table
 		Column("status.id").
-		Order("status.id DESC")
-
-	if accountID != "" {
-		q = q.Where("? = ?", bun.Ident("status.account_id"), accountID)
-	}
-
-	if limit != 0 {
-		q = q.Limit(limit)
-	}
+		Where("? = ?", bun.Ident("status.account_id"), accountID)
 
 	if excludeReplies {
-		// include self-replies (threads)
-		whereGroup := func(*bun.SelectQuery) *bun.SelectQuery {
+		q = q.WhereGroup(" AND ", func(*bun.SelectQuery) *bun.SelectQuery {
 			return q.
-				WhereOr("? = ?", bun.Ident("status.in_reply_to_account_id"), accountID).
-				WhereGroup(" OR ", whereEmptyOrNull("status.in_reply_to_uri"))
-		}
-
-		q = q.WhereGroup(" AND ", whereGroup)
+				// Do include self replies (threads), but
+				// don't include replies to other people.
+				Where("? = ?", bun.Ident("status.in_reply_to_account_id"), accountID).
+				WhereOr("? IS NULL", bun.Ident("status.in_reply_to_uri"))
+		})
 	}
 
 	if excludeReblogs {
-		q = q.WhereGroup(" AND ", whereEmptyOrNull("status.boost_of_id"))
-	}
-
-	if maxID != "" {
-		q = q.Where("? < ?", bun.Ident("status.id"), maxID)
-	}
-
-	if minID != "" {
-		q = q.Where("? > ?", bun.Ident("status.id"), minID)
+		q = q.Where("? IS NULL", bun.Ident("status.boost_of_id"))
 	}
 
 	if mediaOnly {
-		// attachments are stored as a json object;
-		// this implementation differs between sqlite and postgres,
+		// Attachments are stored as a json object; this
+		// implementation differs between SQLite and Postgres,
 		// so we have to be thorough to cover all eventualities
 		q = q.WhereGroup(" AND ", func(q *bun.SelectQuery) *bun.SelectQuery {
 			switch a.conn.Dialect().Name() {
@@ -542,10 +536,46 @@ func (a *accountDB) GetAccountStatuses(ctx context.Context, accountID string, li
 		q = q.Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic)
 	}
 
+	// return only statuses LOWER (ie., older) than maxID
+	if maxID == "" {
+		maxID = id.Highest
+	}
+	q = q.Where("? < ?", bun.Ident("status.id"), maxID)
+
+	if minID != "" {
+		// return only statuses HIGHER (ie., newer) than minID
+		q = q.Where("? > ?", bun.Ident("status.id"), minID)
+
+		// page up
+		frontToBack = false
+	}
+
+	if limit > 0 {
+		// limit amount of statuses returned
+		q = q.Limit(limit)
+	}
+
+	if frontToBack {
+		// Page down.
+		q = q.Order("status.id DESC")
+	} else {
+		// Page up.
+		q = q.Order("status.id ASC")
+	}
+
 	if err := q.Scan(ctx, &statusIDs); err != nil {
 		return nil, a.conn.ProcessError(err)
 	}
 
+	// If we're paging up, we still want statuses
+	// to be sorted by ID desc, so reverse ids slice.
+	// https://zchee.github.io/golang-wiki/SliceTricks/#reversing
+	if !frontToBack {
+		for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
+			statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
+		}
+	}
+
 	return a.statusesFromIDs(ctx, statusIDs)
 }
 
@@ -568,23 +598,45 @@ func (a *accountDB) GetAccountPinnedStatuses(ctx context.Context, accountID stri
 }
 
 func (a *accountDB) GetAccountWebStatuses(ctx context.Context, accountID string, limit int, maxID string) ([]*gtsmodel.Status, db.Error) {
-	statusIDs := []string{}
+	// Ensure reasonable
+	if limit < 0 {
+		limit = 0
+	}
+
+	// Make educated guess for slice size
+	statusIDs := make([]string, 0, limit)
 
 	q := a.conn.
 		NewSelect().
 		TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
+		// Select only IDs from table
 		Column("status.id").
 		Where("? = ?", bun.Ident("status.account_id"), accountID).
-		WhereGroup(" AND ", whereEmptyOrNull("status.in_reply_to_uri")).
-		WhereGroup(" AND ", whereEmptyOrNull("status.boost_of_id")).
+		// Don't show replies or boosts.
+		Where("? IS NULL", bun.Ident("status.in_reply_to_uri")).
+		Where("? IS NULL", bun.Ident("status.boost_of_id")).
+		// Only Public statuses.
 		Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
+		// Don't show local-only statuses on the web view.
 		Where("? = ?", bun.Ident("status.federated"), true)
 
-	if maxID != "" {
-		q = q.Where("? < ?", bun.Ident("status.id"), maxID)
+	// return only statuses LOWER (ie., older) than maxID
+	if maxID == "" {
+		maxID = id.Highest
+	}
+	q = q.Where("? < ?", bun.Ident("status.id"), maxID)
+
+	if limit > 0 {
+		// limit amount of statuses returned
+		q = q.Limit(limit)
 	}
 
-	q = q.Limit(limit).Order("status.id DESC")
+	if limit > 0 {
+		// limit amount of statuses returned
+		q = q.Limit(limit)
+	}
+
+	q = q.Order("status.id DESC")
 
 	if err := q.Scan(ctx, &statusIDs); err != nil {
 		return nil, a.conn.ProcessError(err)
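The paging scheme introduced above is plain keyset pagination over GoToSocial's lexically sortable IDs: maxID and minID act as exclusive bounds, the query direction follows whichever bound was supplied, and results are normalized back to newest-first before returning. A minimal standalone sketch of the same idea over an in-memory slice (the function and its names are illustrative, not code from this commit):

package paging

// pageIDs assumes ids is sorted descending (newest first), maxID/minID are
// exclusive bounds, and always returns results newest-first, mirroring the
// frontToBack handling and slice reversal in GetAccountStatuses.
func pageIDs(ids []string, maxID, minID string, limit int) []string {
	out := make([]string, 0, limit)

	if minID != "" {
		// Page up: walk oldest-to-newest, collecting IDs newer than minID...
		for i := len(ids) - 1; i >= 0 && len(out) < limit; i-- {
			if ids[i] > minID {
				out = append(out, ids[i])
			}
		}
		// ...then reverse so the caller still receives newest-first.
		for l, r := 0, len(out)-1; l < r; l, r = l+1, r-1 {
			out[l], out[r] = out[r], out[l]
		}
		return out
	}

	// Page down: IDs older than maxID, already newest-first.
	for _, id := range ids {
		if len(out) == limit {
			break
		}
		if maxID == "" || id < maxID {
			out = append(out, id)
		}
	}
	return out
}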
@@ -45,6 +45,34 @@ func (suite *AccountTestSuite) TestGetAccountStatuses() {
 	suite.Len(statuses, 5)
 }
 
+func (suite *AccountTestSuite) TestGetAccountStatusesPageDown() {
+	// get the first page
+	statuses, err := suite.db.GetAccountStatuses(context.Background(), suite.testAccounts["local_account_1"].ID, 2, false, false, "", "", false, false)
+	if err != nil {
+		suite.FailNow(err.Error())
+	}
+	suite.Len(statuses, 2)
+
+	// get the second page
+	statuses, err = suite.db.GetAccountStatuses(context.Background(), suite.testAccounts["local_account_1"].ID, 2, false, false, statuses[len(statuses)-1].ID, "", false, false)
+	if err != nil {
+		suite.FailNow(err.Error())
+	}
+	suite.Len(statuses, 2)
+
+	// get the third page
+	statuses, err = suite.db.GetAccountStatuses(context.Background(), suite.testAccounts["local_account_1"].ID, 2, false, false, statuses[len(statuses)-1].ID, "", false, false)
+	if err != nil {
+		suite.FailNow(err.Error())
+	}
+	suite.Len(statuses, 1)
+
+	// try to get the last page (should be empty)
+	statuses, err = suite.db.GetAccountStatuses(context.Background(), suite.testAccounts["local_account_1"].ID, 2, false, false, statuses[len(statuses)-1].ID, "", false, false)
+	suite.ErrorIs(err, db.ErrNoEntries)
+	suite.Empty(statuses)
+}
+
 func (suite *AccountTestSuite) TestGetAccountStatusesExcludeRepliesAndReblogs() {
 	statuses, err := suite.db.GetAccountStatuses(context.Background(), suite.testAccounts["local_account_1"].ID, 20, true, true, "", "", false, false)
 	suite.NoError(err)
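TestGetAccountStatusesPageDown above walks the whole timeline by feeding the last status ID of each page back in as maxID until the db returns db.ErrNoEntries. Any caller that needs an account's full history can use the same loop; the sketch below is illustrative (fetchPage stands in for whatever page-fetching call is in use), not code from this commit:

package paging

// walkAllPages repeatedly fetches pages, feeding the oldest ID of each page
// back in as the next maxID, and stops when a page comes back empty or the
// source reports an error (in GoToSocial's db layer an exhausted pager is
// db.ErrNoEntries, which a real caller would detect with errors.Is).
func walkAllPages(fetchPage func(maxID string) ([]string, error)) ([]string, error) {
	var (
		all   []string
		maxID string // empty means "start from the newest"
	)
	for {
		page, err := fetchPage(maxID)
		if err != nil {
			return all, err
		}
		if len(page) == 0 {
			return all, nil
		}
		all = append(all, page...)
		maxID = page[len(page)-1] // oldest ID on this page bounds the next one
	}
}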
@@ -26,16 +26,32 @@
 	"github.com/superseriousbusiness/gotosocial/internal/db"
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/util"
 )
 
-// StatusesGet fetches a number of statuses (in time descending order) from the given account, filtered by visibility for
-// the account given in authed.
-func (p *Processor) StatusesGet(ctx context.Context, requestingAccount *gtsmodel.Account, targetAccountID string, limit int, excludeReplies bool, excludeReblogs bool, maxID string, minID string, pinned bool, mediaOnly bool, publicOnly bool) (*apimodel.PageableResponse, gtserror.WithCode) {
+// StatusesGet fetches a number of statuses (in time descending order) from the
+// target account, filtered by visibility according to the requesting account.
+func (p *Processor) StatusesGet(
+	ctx context.Context,
+	requestingAccount *gtsmodel.Account,
+	targetAccountID string,
+	limit int,
+	excludeReplies bool,
+	excludeReblogs bool,
+	maxID string,
+	minID string,
+	pinned bool,
+	mediaOnly bool,
+	publicOnly bool,
+) (*apimodel.PageableResponse, gtserror.WithCode) {
 	if requestingAccount != nil {
-		if blocked, err := p.state.DB.IsEitherBlocked(ctx, requestingAccount.ID, targetAccountID); err != nil {
+		blocked, err := p.state.DB.IsEitherBlocked(ctx, requestingAccount.ID, targetAccountID)
+		if err != nil {
 			return nil, gtserror.NewErrorInternalError(err)
-		} else if blocked {
+		}
+
+		if blocked {
 			err := errors.New("block exists between accounts")
 			return nil, gtserror.NewErrorNotFound(err)
 		}
|
||||||
statuses []*gtsmodel.Status
|
statuses []*gtsmodel.Status
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
if pinned {
|
if pinned {
|
||||||
// Get *ONLY* pinned statuses.
|
// Get *ONLY* pinned statuses.
|
||||||
statuses, err = p.state.DB.GetAccountPinnedStatuses(ctx, targetAccountID)
|
statuses, err = p.state.DB.GetAccountPinnedStatuses(ctx, targetAccountID)
|
||||||
|
@@ -52,14 +69,17 @@ func (p *Processor) StatusesGet(ctx context.Context, requestingAccount *gtsmodel
 		// Get account statuses which *may* include pinned ones.
 		statuses, err = p.state.DB.GetAccountStatuses(ctx, targetAccountID, limit, excludeReplies, excludeReblogs, maxID, minID, mediaOnly, publicOnly)
 	}
-	if err != nil {
-		if err == db.ErrNoEntries {
-			return util.EmptyPageableResponse(), nil
-		}
+
+	if err != nil && !errors.Is(err, db.ErrNoEntries) {
 		return nil, gtserror.NewErrorInternalError(err)
 	}
 
-	// Filtering + serialization process is the same for either pinned status queries or 'normal' ones.
+	if len(statuses) == 0 {
+		return util.EmptyPageableResponse(), nil
+	}
+
+	// Filtering + serialization process is the same for
+	// both pinned status queries and 'normal' ones.
 	filtered, err := p.filter.StatusesVisible(ctx, requestingAccount, statuses)
 	if err != nil {
 		return nil, gtserror.NewErrorInternalError(err)
@@ -67,24 +87,32 @@ func (p *Processor) StatusesGet(ctx context.Context, requestingAccount *gtsmodel
 
 	count := len(filtered)
 	if count == 0 {
+		// After filtering there were
+		// no statuses left to serve.
 		return util.EmptyPageableResponse(), nil
 	}
 
-	items := make([]interface{}, 0, count)
-	nextMaxIDValue := ""
-	prevMinIDValue := ""
-	for i, s := range filtered {
-		item, err := p.tc.StatusToAPIStatus(ctx, s, requestingAccount)
-		if err != nil {
-			return nil, gtserror.NewErrorInternalError(fmt.Errorf("error converting status to api: %s", err))
-		}
+	var (
+		items          = make([]interface{}, 0, count)
+		nextMaxIDValue string
+		prevMinIDValue string
+	)
 
+	for i, s := range filtered {
+		// Set next + prev values before filtering and API
+		// converting, so caller can still page properly.
 		if i == count-1 {
-			nextMaxIDValue = item.GetID()
+			nextMaxIDValue = s.ID
 		}
 
 		if i == 0 {
-			prevMinIDValue = item.GetID()
+			prevMinIDValue = s.ID
+		}
+
+		item, err := p.tc.StatusToAPIStatus(ctx, s, requestingAccount)
+		if err != nil {
+			log.Debugf(ctx, "skipping status %s because it couldn't be converted to its api representation: %s", s.ID, err)
+			continue
 		}
 
 		items = append(items, item)
@@ -100,7 +128,7 @@ func (p *Processor) StatusesGet(ctx context.Context, requestingAccount *gtsmodel
 
 	return util.PackagePageableResponse(util.PageableResponseParams{
 		Items:          items,
-		Path:           fmt.Sprintf("/api/v1/accounts/%s/statuses", targetAccountID),
+		Path:           "/api/v1/accounts/" + targetAccountID + "/statuses",
 		NextMaxIDValue: nextMaxIDValue,
 		PrevMinIDValue: prevMinIDValue,
 		Limit:          limit,
@@ -114,62 +142,58 @@ func (p *Processor) StatusesGet(ctx context.Context, requestingAccount *gtsmodel
 	})
 }
 
-// WebStatusesGet fetches a number of statuses (in descending order) from the given account. It selects only
-// statuses which are suitable for showing on the public web profile of an account.
+// WebStatusesGet fetches a number of statuses (in descending order)
+// from the given account. It selects only statuses which are suitable
+// for showing on the public web profile of an account.
 func (p *Processor) WebStatusesGet(ctx context.Context, targetAccountID string, maxID string) (*apimodel.PageableResponse, gtserror.WithCode) {
-	acct, err := p.state.DB.GetAccountByID(ctx, targetAccountID)
+	account, err := p.state.DB.GetAccountByID(ctx, targetAccountID)
 	if err != nil {
-		if err == db.ErrNoEntries {
+		if errors.Is(err, db.ErrNoEntries) {
 			err := fmt.Errorf("account %s not found in the db, not getting web statuses for it", targetAccountID)
 			return nil, gtserror.NewErrorNotFound(err)
 		}
 		return nil, gtserror.NewErrorInternalError(err)
 	}
 
-	if acct.Domain != "" {
+	if account.Domain != "" {
 		err := fmt.Errorf("account %s was not a local account, not getting web statuses for it", targetAccountID)
 		return nil, gtserror.NewErrorNotFound(err)
 	}
 
 	statuses, err := p.state.DB.GetAccountWebStatuses(ctx, targetAccountID, 10, maxID)
-	if err != nil {
-		if err == db.ErrNoEntries {
-			return util.EmptyPageableResponse(), nil
-		}
+	if err != nil && !errors.Is(err, db.ErrNoEntries) {
 		return nil, gtserror.NewErrorInternalError(err)
 	}
 
 	count := len(statuses)
+
 	if count == 0 {
 		return util.EmptyPageableResponse(), nil
 	}
 
-	items := []interface{}{}
-	nextMaxIDValue := ""
-	prevMinIDValue := ""
+	var (
+		items          = make([]interface{}, 0, count)
+		nextMaxIDValue string
+	)
+
 	for i, s := range statuses {
+		// Set next value before API converting,
+		// so caller can still page properly.
+		if i == count-1 {
+			nextMaxIDValue = s.ID
+		}
+
 		item, err := p.tc.StatusToAPIStatus(ctx, s, nil)
 		if err != nil {
-			return nil, gtserror.NewErrorInternalError(fmt.Errorf("error converting status to api: %s", err))
-		}
-
-		if i == count-1 {
-			nextMaxIDValue = item.GetID()
-		}
-
-		if i == 0 {
-			prevMinIDValue = item.GetID()
+			log.Debugf(ctx, "skipping status %s because it couldn't be converted to its api representation: %s", s.ID, err)
+			continue
 		}
 
 		items = append(items, item)
 	}
 
 	return util.PackagePageableResponse(util.PageableResponseParams{
 		Items:          items,
-		Path:           "/@" + acct.Username,
+		Path:           "/@" + account.Username,
 		NextMaxIDValue: nextMaxIDValue,
-		PrevMinIDValue: prevMinIDValue,
-		ExtraQueryParams: []string{},
 	})
 }
 
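One detail worth calling out in the processor changes above: nextMaxIDValue and prevMinIDValue are now taken from the raw status IDs before API conversion, so a status that fails conversion is skipped (with a debug log) without shrinking the caller's paging window. A reduced sketch of that ordering, with placeholder names rather than the GoToSocial types:

package paging

// Status is a stand-in for the real status model; convert stands in for
// StatusToAPIStatus. The point is only the ordering: record paging cursors
// first, then drop items that fail conversion.
type Status struct{ ID string }

func buildPage(statuses []Status, convert func(Status) (interface{}, error)) (items []interface{}, nextMaxID, prevMinID string) {
	count := len(statuses)
	for i, s := range statuses {
		// Record cursors before attempting conversion, so skipping an
		// unconvertible status never changes the page boundaries.
		if i == count-1 {
			nextMaxID = s.ID
		}
		if i == 0 {
			prevMinID = s.ID
		}

		item, err := convert(s)
		if err != nil {
			continue // skip, as the processor now does via log.Debugf + continue
		}
		items = append(items, item)
	}
	return items, nextMaxID, prevMinID
}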
@@ -20,6 +20,7 @@
 import (
 	"fmt"
 	"net/url"
+	"strings"
 
 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
 	"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -47,6 +48,13 @@ type PageableResponseParams struct {
 // a bunch of pageable items (notifications, statuses, etc), as well
 // as a Link header to inform callers of where to find next/prev items.
 func PackagePageableResponse(params PageableResponseParams) (*apimodel.PageableResponse, gtserror.WithCode) {
+	if len(params.Items) == 0 {
+		// No items to page through.
+		return EmptyPageableResponse(), nil
+	}
+
+	// Set default paging values, if
+	// they weren't set by the caller.
 	if params.NextMaxIDKey == "" {
 		params.NextMaxIDKey = "max_id"
 	}
@@ -55,58 +63,70 @@ func PackagePageableResponse(params PageableResponseParams) (*apimodel.PageableR
 		params.PrevMinIDKey = "min_id"
 	}
 
-	pageableResponse := EmptyPageableResponse()
-
-	if len(params.Items) == 0 {
-		return pageableResponse, nil
-	}
-
-	// items
-	pageableResponse.Items = params.Items
-
-	protocol := config.GetProtocol()
-	host := config.GetHost()
-
-	// next
-	nextRaw := params.NextMaxIDKey + "=" + params.NextMaxIDValue
-	if params.Limit != 0 {
-		nextRaw = fmt.Sprintf("limit=%d&", params.Limit) + nextRaw
-	}
-	for _, p := range params.ExtraQueryParams {
-		nextRaw = nextRaw + "&" + p
-	}
-	nextLink := &url.URL{
-		Scheme:   protocol,
-		Host:     host,
-		Path:     params.Path,
-		RawQuery: nextRaw,
-	}
-	nextLinkString := nextLink.String()
-	pageableResponse.NextLink = nextLinkString
-
-	// prev
-	prevRaw := params.PrevMinIDKey + "=" + params.PrevMinIDValue
-	if params.Limit != 0 {
-		prevRaw = fmt.Sprintf("limit=%d&", params.Limit) + prevRaw
-	}
-	for _, p := range params.ExtraQueryParams {
-		prevRaw = prevRaw + "&" + p
-	}
-	prevLink := &url.URL{
-		Scheme:   protocol,
-		Host:     host,
-		Path:     params.Path,
-		RawQuery: prevRaw,
-	}
-	prevLinkString := prevLink.String()
-	pageableResponse.PrevLink = prevLinkString
-
-	// link header
-	next := fmt.Sprintf("<%s>; rel=\"next\"", nextLinkString)
-	prev := fmt.Sprintf("<%s>; rel=\"prev\"", prevLinkString)
-	pageableResponse.LinkHeader = next + ", " + prev
-
-	return pageableResponse, nil
+	var (
+		protocol        = config.GetProtocol()
+		host            = config.GetHost()
+		nextLink        string
+		prevLink        string
+		linkHeaderParts = make([]string, 0, 2)
+	)
+
+	// Parse next link.
+	if params.NextMaxIDValue != "" {
+		nextRaw := params.NextMaxIDKey + "=" + params.NextMaxIDValue
+
+		if params.Limit != 0 {
+			nextRaw = fmt.Sprintf("limit=%d&", params.Limit) + nextRaw
+		}
+
+		for _, p := range params.ExtraQueryParams {
+			nextRaw += "&" + p
+		}
+
+		nextLink = func() string {
+			u := &url.URL{
+				Scheme:   protocol,
+				Host:     host,
+				Path:     params.Path,
+				RawQuery: nextRaw,
+			}
+			return u.String()
+		}()
+
+		linkHeaderParts = append(linkHeaderParts, `<`+nextLink+`>; rel="next"`)
+	}
+
+	// Parse prev link.
+	if params.PrevMinIDValue != "" {
+		prevRaw := params.PrevMinIDKey + "=" + params.PrevMinIDValue
+
+		if params.Limit != 0 {
+			prevRaw = fmt.Sprintf("limit=%d&", params.Limit) + prevRaw
+		}
+
+		for _, p := range params.ExtraQueryParams {
+			prevRaw = prevRaw + "&" + p
+		}
+
+		prevLink = func() string {
+			u := &url.URL{
+				Scheme:   protocol,
+				Host:     host,
+				Path:     params.Path,
+				RawQuery: prevRaw,
+			}
+			return u.String()
+		}()
+
+		linkHeaderParts = append(linkHeaderParts, `<`+prevLink+`>; rel="prev"`)
+	}
+
+	return &apimodel.PageableResponse{
+		Items:      params.Items,
+		LinkHeader: strings.Join(linkHeaderParts, ", "),
+		NextLink:   nextLink,
+		PrevLink:   prevLink,
+	}, nil
 }
 
 // EmptyPageableResponse just returns an empty
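PackagePageableResponse now only emits the rel links it actually has values for, joined into a single RFC 8288 style Link header; the unit tests added below exercise exactly that output. On the client side this header is what drives paging. A small illustrative parser for it, not a GoToSocial function:

package main

import (
	"fmt"
	"strings"
)

// parseLinkHeader parses headers of the form produced above, e.g.
// <https://example.org/...?limit=10&max_id=A>; rel="next", <https://example.org/...?limit=10&min_id=B>; rel="prev"
// and returns a map from rel value ("next", "prev") to target URL.
func parseLinkHeader(header string) map[string]string {
	rels := make(map[string]string)
	for _, part := range strings.Split(header, ",") {
		var target, rel string
		for _, seg := range strings.Split(part, ";") {
			seg = strings.TrimSpace(seg)
			switch {
			case strings.HasPrefix(seg, "<") && strings.HasSuffix(seg, ">"):
				target = strings.Trim(seg, "<>")
			case strings.HasPrefix(seg, "rel="):
				rel = strings.Trim(strings.TrimPrefix(seg, "rel="), `"`)
			}
		}
		if rel != "" && target != "" {
			rels[rel] = target
		}
	}
	return rels
}

func main() {
	h := `<https://example.org/api/v1/accounts/X/statuses?limit=10&max_id=A>; rel="next", ` +
		`<https://example.org/api/v1/accounts/X/statuses?limit=10&min_id=B>; rel="prev"`
	fmt.Println(parseLinkHeader(h)["next"])
	// prints: https://example.org/api/v1/accounts/X/statuses?limit=10&max_id=A
}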
internal/util/paging_test.go: 139 additions (new file)

@@ -0,0 +1,139 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package util_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/suite"
+	"github.com/superseriousbusiness/gotosocial/internal/config"
+	"github.com/superseriousbusiness/gotosocial/internal/util"
+)
+
+type PagingSuite struct {
+	suite.Suite
+}
+
+func (suite *PagingSuite) TestPagingStandard() {
+	config.SetHost("example.org")
+
+	params := util.PageableResponseParams{
+		Items:          make([]interface{}, 10, 10),
+		Path:           "/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses",
+		NextMaxIDValue: "01H11KA1DM2VH3747YDE7FV5HN",
+		PrevMinIDValue: "01H11KBBVRRDYYC5KEPME1NP5R",
+		Limit:          10,
+	}
+
+	resp, errWithCode := util.PackagePageableResponse(params)
+	if errWithCode != nil {
+		suite.FailNow(errWithCode.Error())
+	}
+
+	suite.Equal(make([]interface{}, 10, 10), resp.Items)
+	suite.Equal(`<https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&max_id=01H11KA1DM2VH3747YDE7FV5HN>; rel="next", <https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&min_id=01H11KBBVRRDYYC5KEPME1NP5R>; rel="prev"`, resp.LinkHeader)
+	suite.Equal(`https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&max_id=01H11KA1DM2VH3747YDE7FV5HN`, resp.NextLink)
+	suite.Equal(`https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&min_id=01H11KBBVRRDYYC5KEPME1NP5R`, resp.PrevLink)
+}
+
+func (suite *PagingSuite) TestPagingNoLimit() {
+	config.SetHost("example.org")
+
+	params := util.PageableResponseParams{
+		Items:          make([]interface{}, 10, 10),
+		Path:           "/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses",
+		NextMaxIDValue: "01H11KA1DM2VH3747YDE7FV5HN",
+		PrevMinIDValue: "01H11KBBVRRDYYC5KEPME1NP5R",
+	}
+
+	resp, errWithCode := util.PackagePageableResponse(params)
+	if errWithCode != nil {
+		suite.FailNow(errWithCode.Error())
+	}
+
+	suite.Equal(make([]interface{}, 10, 10), resp.Items)
+	suite.Equal(`<https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?max_id=01H11KA1DM2VH3747YDE7FV5HN>; rel="next", <https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?min_id=01H11KBBVRRDYYC5KEPME1NP5R>; rel="prev"`, resp.LinkHeader)
+	suite.Equal(`https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?max_id=01H11KA1DM2VH3747YDE7FV5HN`, resp.NextLink)
+	suite.Equal(`https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?min_id=01H11KBBVRRDYYC5KEPME1NP5R`, resp.PrevLink)
+}
+
+func (suite *PagingSuite) TestPagingNoNextID() {
+	config.SetHost("example.org")
+
+	params := util.PageableResponseParams{
+		Items:          make([]interface{}, 10, 10),
+		Path:           "/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses",
+		PrevMinIDValue: "01H11KBBVRRDYYC5KEPME1NP5R",
+		Limit:          10,
+	}
+
+	resp, errWithCode := util.PackagePageableResponse(params)
+	if errWithCode != nil {
+		suite.FailNow(errWithCode.Error())
+	}
+
+	suite.Equal(make([]interface{}, 10, 10), resp.Items)
+	suite.Equal(`<https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&min_id=01H11KBBVRRDYYC5KEPME1NP5R>; rel="prev"`, resp.LinkHeader)
+	suite.Equal(``, resp.NextLink)
+	suite.Equal(`https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&min_id=01H11KBBVRRDYYC5KEPME1NP5R`, resp.PrevLink)
+}
+
+func (suite *PagingSuite) TestPagingNoPrevID() {
+	config.SetHost("example.org")
+
+	params := util.PageableResponseParams{
+		Items:          make([]interface{}, 10, 10),
+		Path:           "/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses",
+		NextMaxIDValue: "01H11KA1DM2VH3747YDE7FV5HN",
+		Limit:          10,
+	}
+
+	resp, errWithCode := util.PackagePageableResponse(params)
+	if errWithCode != nil {
+		suite.FailNow(errWithCode.Error())
+	}
+
+	suite.Equal(make([]interface{}, 10, 10), resp.Items)
+	suite.Equal(`<https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&max_id=01H11KA1DM2VH3747YDE7FV5HN>; rel="next"`, resp.LinkHeader)
+	suite.Equal(`https://example.org/api/v1/accounts/01H11KA68PM4NNYJEG0FJQ90R3/statuses?limit=10&max_id=01H11KA1DM2VH3747YDE7FV5HN`, resp.NextLink)
+	suite.Equal(``, resp.PrevLink)
+}
+
+func (suite *PagingSuite) TestPagingNoItems() {
+	config.SetHost("example.org")
+
+	params := util.PageableResponseParams{
+		NextMaxIDValue: "01H11KA1DM2VH3747YDE7FV5HN",
+		PrevMinIDValue: "01H11KBBVRRDYYC5KEPME1NP5R",
+		Limit:          10,
+	}
+
+	resp, errWithCode := util.PackagePageableResponse(params)
+	if errWithCode != nil {
+		suite.FailNow(errWithCode.Error())
+	}
+
+	suite.Empty(resp.Items)
+	suite.Empty(resp.LinkHeader)
+	suite.Empty(resp.NextLink)
+	suite.Empty(resp.PrevLink)
+}
+
+func TestPagingSuite(t *testing.T) {
+	suite.Run(t, &PagingSuite{})
+}
vendor/github.com/minio/minio-go/v7/api-bucket-notification.go: 7 changes (generated, vendored)

@@ -166,6 +166,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
 
 	// Prepare urlValues to pass into the request on every loop
 	urlValues := make(url.Values)
+	urlValues.Set("ping", "10")
 	urlValues.Set("prefix", prefix)
 	urlValues.Set("suffix", suffix)
 	urlValues["events"] = events
@@ -224,6 +225,12 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
 			closeResponse(resp)
 			continue
 		}
+
+		// Empty events pinged from the server
+		if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
+			continue
+		}
+
 		// Send notificationInfo
 		select {
 		case notificationInfoCh <- notificationInfo:
vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
generated
vendored
Normal file
149
vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
generated
vendored
Normal file
|
@ -0,0 +1,149 @@
|
||||||
|
/*
|
||||||
|
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||||
|
* Copyright 2023 MinIO, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PutObjectFanOutRequest this is the request structure sent
|
||||||
|
// to the server to fan-out the stream to multiple objects.
|
||||||
|
type PutObjectFanOutRequest struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
UserMetadata map[string]string `json:"metadata,omitempty"`
|
||||||
|
UserTags map[string]string `json:"tags,omitempty"`
|
||||||
|
ContentType string `json:"contentType,omitempty"`
|
||||||
|
ContentEncoding string `json:"contentEncoding,omitempty"`
|
||||||
|
ContentDisposition string `json:"contentDisposition,omitempty"`
|
||||||
|
ContentLanguage string `json:"contentLanguage,omitempty"`
|
||||||
|
CacheControl string `json:"cacheControl,omitempty"`
|
||||||
|
Retention RetentionMode `json:"retention,omitempty"`
|
||||||
|
RetainUntilDate *time.Time `json:"retainUntil,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutObjectFanOutResponse this is the response structure sent
|
||||||
|
// by the server upon success or failure for each object
|
||||||
|
// fan-out keys. Additionally this response carries ETag,
|
||||||
|
// VersionID and LastModified for each object fan-out.
|
||||||
|
type PutObjectFanOutResponse struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
ETag string `json:"etag,omitempty"`
|
||||||
|
VersionID string `json:"versionId,omitempty"`
|
||||||
|
LastModified *time.Time `json:"lastModified,omitempty"`
|
||||||
|
Error error `json:"error,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single
|
||||||
|
// stream multiple objects are written, defined via a list of PutObjectFanOutRequests. Each entry
|
||||||
|
// in PutObjectFanOutRequest carries an object keyname and its relevant metadata if any. `Key` is
|
||||||
|
// mandatory, rest of the other options in PutObjectFanOutRequest are optional.
|
||||||
|
func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, body io.Reader, fanOutReq ...PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
|
||||||
|
if len(fanOutReq) == 0 {
|
||||||
|
return nil, errInvalidArgument("fan out requests cannot be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
policy := NewPostPolicy()
|
||||||
|
policy.SetBucket(bucket)
|
||||||
|
policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16))
|
||||||
|
|
||||||
|
// Expires in 15 minutes.
|
||||||
|
policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
|
||||||
|
|
||||||
|
url, formData, err := c.PresignedPostPolicy(ctx, policy)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
r, w := io.Pipe()
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodPost, url.String(), r)
|
||||||
|
if err != nil {
|
||||||
|
w.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var b strings.Builder
|
||||||
|
enc := json.NewEncoder(&b)
|
||||||
|
for _, req := range fanOutReq {
|
||||||
|
if req.Key == "" {
|
||||||
|
w.Close()
|
||||||
|
return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty")
|
||||||
|
}
|
||||||
|
if err = enc.Encode(&req); err != nil {
|
||||||
|
w.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mwriter := multipart.NewWriter(w)
|
||||||
|
req.Header.Add("Content-Type", mwriter.FormDataContentType())
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer w.Close()
|
||||||
|
defer mwriter.Close()
|
||||||
|
|
||||||
|
for k, v := range formData {
|
||||||
|
if err := mwriter.WriteField(k, v); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
mw, err := mwriter.CreateFormFile("file", "fanout-content")
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.Copy(mw, body); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
resp, err := c.do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer closeResponse(resp)
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, httpRespToErrorResponse(resp, bucket, "fanout-content")
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(resp.Body)
|
||||||
|
fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq))
|
||||||
|
for dec.More() {
|
||||||
|
var m PutObjectFanOutResponse
|
||||||
|
if err = dec.Decode(&m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fanOutResp = append(fanOutResp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fanOutResp, nil
|
||||||
|
}
|
vendor/github.com/minio/minio-go/v7/api-stat.go: 10 changes (generated, vendored)

@@ -20,7 +20,6 @@
 import (
 	"context"
 	"net/http"
-	"net/url"
 
 	"github.com/minio/minio-go/v7/pkg/s3utils"
 )
@@ -57,7 +56,8 @@ func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, err
 	return true, nil
 }
 
-// StatObject verifies if object exists and you have permission to access.
+// StatObject verifies if object exists, you have permission to access it
+// and returns information about the object.
 func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -74,15 +74,11 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
 		headers.Set(isMinioTgtReplicationReady, "true")
 	}
 
-	urlValues := make(url.Values)
-	if opts.VersionID != "" {
-		urlValues.Set("versionId", opts.VersionID)
-	}
-
 	// Execute HEAD on objectName.
 	resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
 		bucketName:       bucketName,
 		objectName:       objectName,
-		queryValues:      urlValues,
+		queryValues:      opts.toQueryValues(),
 		contentSHA256Hex: emptySHA256Hex,
 		customHeader:     headers,
 	})
vendor/github.com/minio/minio-go/v7/api.go: 2 changes (generated, vendored)

@@ -124,7 +124,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.52"
+	libraryVersion = "v7.0.53"
 )
 
 // User Agent should always following the below style.
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
|
@ -240,9 +240,7 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
|
||||||
}
|
}
|
||||||
|
|
||||||
if signerType.IsV2() {
|
if signerType.IsV2() {
|
||||||
// Get Bucket Location calls should be always path style
|
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle)
|
||||||
isVirtualHost := false
|
|
||||||
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
|
|
||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
vendor/github.com/minio/minio-go/v7/functional_tests.go: 216 changes (generated, vendored)

@@ -2087,7 +2087,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 	}
 
 	for i, test := range tests {
-		bufSize := dataFileMap["datafile-129-MB"]
+		bufSize := dataFileMap["datafile-10-kB"]
 
 		// Save the data
 		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -2101,7 +2101,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 		}
 
 		meta := map[string]string{}
-		reader := getDataReader("datafile-129-MB")
+		reader := getDataReader("datafile-10-kB")
 		b, err := io.ReadAll(reader)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "Read failed", err)
@@ -2112,6 +2112,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 		// Wrong CRC.
 		meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
 		args["metadata"] = meta
+		args["range"] = "false"
 
 		resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
 			DisableMultipart: true,
@@ -2132,8 +2133,9 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 		reader.Close()
 
 		resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
-			DisableMultipart: true,
-			UserMetadata:     meta,
+			DisableMultipart:     true,
+			DisableContentSha256: true,
+			UserMetadata:         meta,
 		})
 		if err != nil {
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -2146,6 +2148,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 
 		// Read the data back
 		gopts := minio.GetObjectOptions{Checksum: true}
+
 		r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "GetObject failed", err)
@@ -2157,7 +2160,6 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 			logError(testName, function, args, startTime, "", "Stat failed", err)
 			return
 		}
-
 		cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
 		cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
 		cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
@@ -2176,6 +2178,209 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 			logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
 			return
 		}
+
+		args["range"] = "true"
+		err = gopts.SetRange(100, 1000)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "SetRange failed", err)
+			return
+		}
+		r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+
+		b, err = io.ReadAll(r)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+		st, err = r.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+
+		// Range requests should return empty checksums...
+		cmpChecksum(st.ChecksumSHA256, "")
+		cmpChecksum(st.ChecksumSHA1, "")
+		cmpChecksum(st.ChecksumCRC32, "")
+		cmpChecksum(st.ChecksumCRC32C, "")
+
+		delete(args, "range")
+		delete(args, "metadata")
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test PutObject with custom checksums.
+func testPutMultipartObjectWithChecksums() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader,size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+	}
+
+	if !isFullMode() {
+		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
+		r := bytes.NewReader(b)
+		tmp := make([]byte, partSize)
+		parts := 0
+		var all []byte
+		for {
+			n, err := io.ReadFull(r, tmp)
+			if err != nil && err != io.ErrUnexpectedEOF {
+				logError(testName, function, args, startTime, "", "Calc crc failed", err)
+			}
+			if n == 0 {
+				break
+			}
+			parts++
+			hasher.Reset()
+			hasher.Write(tmp[:n])
+			all = append(all, hasher.Sum(nil)...)
+			if err != nil {
+				break
+			}
+		}
+		hasher.Reset()
+		hasher.Write(all)
+		return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
+	}
+	defer cleanupBucket(bucketName, c)
+	tests := []struct {
+		header string
+		hasher hash.Hash
+
+		// Checksum values
+		ChecksumCRC32  string
+		ChecksumCRC32C string
+		ChecksumSHA1   string
+		ChecksumSHA256 string
+	}{
+		// Currently there is no way to override the checksum type.
+		{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"},
+	}
+
+	for _, test := range tests {
+		bufSize := dataFileMap["datafile-129-MB"]
+
+		// Save the data
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		args["objectName"] = objectName
+
+		cmpChecksum := func(got, want string) {
+			if want != got {
+				//logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+				fmt.Printf("want %s, got %s\n", want, got)
+				return
+			}
+		}
+
+		const partSize = 10 << 20
+		reader := getDataReader("datafile-129-MB")
+		b, err := io.ReadAll(reader)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+		reader.Close()
+		h := test.hasher
+		h.Reset()
+		test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher)
+
+		// Set correct CRC.
+
+		resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{
+			DisableContentSha256: true,
+			DisableMultipart:     false,
+			UserMetadata:         nil,
+			PartSize:             partSize,
+		})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+		cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
+		cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
+		cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
+		cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
+
+		// Read the data back
+		gopts := minio.GetObjectOptions{Checksum: true}
+		gopts.PartNumber = 2
+
+		// We cannot use StatObject, since it ignores partnumber.
+		r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+		io.Copy(io.Discard, r)
+		st, err := r.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+
+		// Test part 2 checksum...
+		h.Reset()
+		h.Write(b[partSize : 2*partSize])
+		got := base64.StdEncoding.EncodeToString(h.Sum(nil))
+		if test.ChecksumSHA256 != "" {
+			cmpChecksum(st.ChecksumSHA256, got)
+		}
+		if test.ChecksumSHA1 != "" {
+			cmpChecksum(st.ChecksumSHA1, got)
+		}
+		if test.ChecksumCRC32 != "" {
+			cmpChecksum(st.ChecksumCRC32, got)
+		}
+		if test.ChecksumCRC32C != "" {
+			cmpChecksum(st.ChecksumCRC32C, got)
+		}
+
 		delete(args, "metadata")
 	}
 
@@ -12318,6 +12523,7 @@ func main() {
 	testCompose10KSourcesV2()
 	testUserMetadataCopyingV2()
 	testPutObjectWithChecksums()
+	testPutMultipartObjectWithChecksums()
 	testPutObject0ByteV2()
 	testPutObjectNoLengthV2()
 	testPutObjectsUnknownV2()
vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go: 4 changes (generated, vendored)

@@ -54,8 +54,8 @@ func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.Sta
 // specific period in the object's lifetime.
 type NoncurrentVersionExpiration struct {
 	XMLName                 xml.Name       `xml:"NoncurrentVersionExpiration" json:"-"`
-	NoncurrentDays          ExpirationDays `xml:"NoncurrentDays,omitempty"`
-	NewerNoncurrentVersions int            `xml:"NewerNoncurrentVersions,omitempty"`
+	NoncurrentDays          ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
+	NewerNoncurrentVersions int            `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
 }
 
 // MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions.
vendor/modules.txt: 2 changes (vendored)

@@ -384,7 +384,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.52
+# github.com/minio/minio-go/v7 v7.0.53
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials