[performance] processing media and scheduled jobs improvements (#1482)

* replace media workers with just runners.WorkerPool, move to state structure, use go-sched for global task scheduling
* improved code comment
* fix worker tryUntil function, update go-runners/go-sched
* make preprocess functions package public, use these where possible to stop doubled up processing
* remove separate emoji worker pool
* limit calls to time.Now() during media preprocessing
* use Processor{} to manage singular runtime of processing media
* ensure workers get started when media manager is used
* improved error setting in processing media, fix media test
* port changes from processingmedia to processing emoji
* finish code commenting
* finish code commenting and comment-out client API + federator worker pools until concurrency worker pools replaced
* linterrrrrrrrrrrrrrrr

---------

Signed-off-by: kim <grufwub@gmail.com>

This commit is contained in:
parent 76d1b484d0
commit acc95923da
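In short, per-manager worker pools and the robfig/cron scheduler are replaced by a shared state structure carrying go-runners worker pools and a go-sched scheduler. A minimal sketch of the resulting pattern (hypothetical glue code; ctx, processing, pruneFunc, midnight, and day are stand-ins, and only the calls that appear in the diff below are taken from the real changes):

    // Sketch: callers own one state value; workers and the scheduler live on it.
    var state state.State
    state.Caches.Init()
    state.Workers.Start()
    defer state.Workers.Stop()

    // The media manager no longer owns pools, so construction cannot fail:
    manager := media.NewManager(&state)

    // Media/emoji processing is queued onto the shared pool...
    _ = state.Workers.Media.MustEnqueueCtx(ctx, processing.Process)

    // ...and recurring cleanup is a go-sched job instead of a cron entry:
    state.Workers.Scheduler.Schedule(sched.NewJob(pruneFunc).EveryAt(midnight, day))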
@@ -36,6 +36,7 @@
 var Create action.GTSAction = func(ctx context.Context) error {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbConn, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -97,6 +98,7 @@
 var Confirm action.GTSAction = func(ctx context.Context) error {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbConn, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -140,6 +142,7 @@
 var Promote action.GTSAction = func(ctx context.Context) error {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbConn, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -180,6 +183,7 @@
 var Demote action.GTSAction = func(ctx context.Context) error {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbConn, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -220,6 +224,7 @@
 var Disable action.GTSAction = func(ctx context.Context) error {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbConn, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -260,6 +265,7 @@
 var Password action.GTSAction = func(ctx context.Context) error {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbConn, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
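All six admin account actions above gain the same one-line change. Condensed, the boot sequence they now share looks like this (a summary of the hunks, not new code; presumably required because the media manager enqueues work onto state.Workers rather than owning its own pools):

    var state state.State
    state.Caches.Init()
    state.Workers.Start() // new: workers must be running before media.NewManager(&state) is used

    dbConn, err := bundb.NewBunDBService(ctx, &state)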
@@ -38,21 +38,24 @@ type prune struct {
 func setupPrune(ctx context.Context) (*prune, error) {
 	var state state.State
 	state.Caches.Init()
+	state.Workers.Start()

 	dbService, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
 		return nil, fmt.Errorf("error creating dbservice: %w", err)
 	}

-	storage, err := gtsstorage.AutoConfig() //nolint:contextcheck
+	//nolint:contextcheck
+	storage, err := gtsstorage.AutoConfig()
 	if err != nil {
 		return nil, fmt.Errorf("error creating storage backend: %w", err)
 	}

-	manager, err := media.NewManager(dbService, storage) //nolint:contextcheck
-	if err != nil {
-		return nil, fmt.Errorf("error instantiating mediamanager: %w", err)
-	}
+	state.DB = dbService
+	state.Storage = storage
+
+	//nolint:contextcheck
+	manager := media.NewManager(&state)

 	return &prune{
 		dbService: dbService,
@@ -70,9 +73,5 @@ func (p *prune) shutdown(ctx context.Context) error {
 		return fmt.Errorf("error closing dbservice: %w", err)
 	}

-	if err := p.manager.Stop(); err != nil {
-		return fmt.Errorf("error closing media manager: %w", err)
-	}
-
 	return nil
 }
@@ -91,33 +91,35 @@
 		return fmt.Errorf("error creating instance instance: %s", err)
 	}

-	// Create the client API and federator worker pools
-	// NOTE: these MUST NOT be used until they are passed to the
-	// processor and it is started. The reason being that the processor
-	// sets the Worker process functions and start the underlying pools
-	clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)
-	fedWorker := concurrency.NewWorkerPool[messages.FromFederator](-1, -1)
-
-	federatingDB := federatingdb.New(dbService, fedWorker)
-
-	// build converters and util
-	typeConverter := typeutils.NewConverter(dbService)
-
 	// Open the storage backend
 	storage, err := gtsstorage.AutoConfig()
 	if err != nil {
 		return fmt.Errorf("error creating storage backend: %w", err)
 	}

+	// Set the state storage driver
+	state.Storage = storage
+
 	// Build HTTP client (TODO: add configurables here)
 	client := httpclient.New(httpclient.Config{})

+	// Initialize workers.
+	state.Workers.Start()
+	defer state.Workers.Stop()
+
+	// Create the client API and federator worker pools
+	// NOTE: these MUST NOT be used until they are passed to the
+	// processor and it is started. The reason being that the processor
+	// sets the Worker process functions and start the underlying pools
+	// TODO: move these into state.Workers (and maybe reformat worker pools).
+	clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)
+	fedWorker := concurrency.NewWorkerPool[messages.FromFederator](-1, -1)

 	// build backend handlers
-	mediaManager, err := media.NewManager(dbService, storage)
-	if err != nil {
-		return fmt.Errorf("error creating media manager: %s", err)
-	}
+	mediaManager := media.NewManager(&state)
 	oauthServer := oauth.New(ctx, dbService)
+	typeConverter := typeutils.NewConverter(dbService)
+	federatingDB := federatingdb.New(dbService, fedWorker, typeConverter)
 	transportController := transport.NewController(dbService, federatingDB, &federation.Clock{}, client)
 	federator := federation.NewFederator(dbService, federatingDB, transportController, typeConverter, mediaManager)
@@ -20,9 +20,10 @@

 import (
 	"log"
-	"runtime/debug"
+	godebug "runtime/debug"
 	"strings"

+	"codeberg.org/gruf/go-debug"
 	"github.com/spf13/cobra"

 	_ "github.com/superseriousbusiness/gotosocial/docs"
@@ -60,9 +61,12 @@ func main() {

 	// add subcommands
 	rootCmd.AddCommand(serverCommands())
-	rootCmd.AddCommand(testrigCommands())
 	rootCmd.AddCommand(debugCommands())
 	rootCmd.AddCommand(adminCommands())
+	if debug.DEBUG {
+		// only add testrig if debug enabled.
+		rootCmd.AddCommand(testrigCommands())
+	}

 	// run
 	if err := rootCmd.Execute(); err != nil {
@@ -73,7 +77,7 @@ func main() {

 // version will build a version string from binary's stored build information.
 func version() string {
 	// Read build information from binary
-	build, ok := debug.ReadBuildInfo()
+	build, ok := godebug.ReadBuildInfo()
 	if !ok {
 		return ""
 	}
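The alias matters because two packages would otherwise both be imported as "debug": the standard library's runtime/debug (renamed godebug here) and codeberg.org/gruf/go-debug, whose DEBUG constant gates the testrig subcommand. A minimal standalone illustration of the collision and its fix (example code, not project code):

    import (
        godebug "runtime/debug"      // build info: godebug.ReadBuildInfo()
        "codeberg.org/gruf/go-debug" // compile-time flag: debug.DEBUG
    )

    func version() string {
        build, ok := godebug.ReadBuildInfo()
        if !ok {
            return ""
        }
        _ = debug.DEBUG // true only in debug builds
        return build.Main.Version
    }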
go.mod (9 changed lines)

@@ -6,13 +6,14 @@ require (
 	codeberg.org/gruf/go-bytesize v1.0.2
 	codeberg.org/gruf/go-byteutil v1.0.2
 	codeberg.org/gruf/go-cache/v3 v3.2.2
-	codeberg.org/gruf/go-debug v1.2.0
+	codeberg.org/gruf/go-debug v1.3.0
 	codeberg.org/gruf/go-errors/v2 v2.1.1
 	codeberg.org/gruf/go-fastcopy v1.1.2
 	codeberg.org/gruf/go-kv v1.5.2
 	codeberg.org/gruf/go-logger/v2 v2.2.1
 	codeberg.org/gruf/go-mutexes v1.1.5
-	codeberg.org/gruf/go-runners v1.5.1
+	codeberg.org/gruf/go-runners v1.6.0
+	codeberg.org/gruf/go-sched v1.2.3
 	codeberg.org/gruf/go-store/v2 v2.2.1
 	github.com/KimMachineGun/automemlimit v0.2.4
 	github.com/abema/go-mp4 v0.10.0
@@ -37,7 +38,6 @@ require (
 	github.com/minio/minio-go/v7 v7.0.48
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
-	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/viper v1.15.0
 	github.com/stretchr/testify v1.8.1
@@ -66,7 +66,7 @@ require (

 require (
 	codeberg.org/gruf/go-atomics v1.1.0 // indirect
-	codeberg.org/gruf/go-bitutil v1.0.1 // indirect
+	codeberg.org/gruf/go-bitutil v1.1.0 // indirect
 	codeberg.org/gruf/go-bytes v1.0.2 // indirect
 	codeberg.org/gruf/go-fastpath v1.0.3 // indirect
 	codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
@@ -75,7 +75,6 @@ require (
 	codeberg.org/gruf/go-mangler v1.2.2 // indirect
 	codeberg.org/gruf/go-maps v1.0.3 // indirect
 	codeberg.org/gruf/go-pools v1.1.0 // indirect
-	codeberg.org/gruf/go-sched v1.2.0 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/cilium/ebpf v0.4.0 // indirect
 	github.com/containerd/cgroups v1.0.4 // indirect
go.sum (18 changed lines)

@@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f
 codeberg.org/gruf/go-atomics v1.1.0 h1:ni9QXYoRUFYQMXE3akWaUb1wMcPBDc05Md6Rgml7W58=
 codeberg.org/gruf/go-atomics v1.1.0/go.mod h1:a/4/y/LgvjxjQVnpoy1VVkOSzLS1W9i1g4SJ0nflAa4=
 codeberg.org/gruf/go-bitutil v1.0.0/go.mod h1:sb8IjlDnjVTz8zPK/8lmHesKxY0Yb3iqHWjUM/SkphA=
-codeberg.org/gruf/go-bitutil v1.0.1 h1:l8z9nOvCpHhicU2LZyJ6jLK03UNzCF6bxVCwu+VEenQ=
-codeberg.org/gruf/go-bitutil v1.0.1/go.mod h1:3ezHnADoiRJs9jgn65AEZ3HY7dsabAYLmmnIvseCGJI=
+codeberg.org/gruf/go-bitutil v1.1.0 h1:U1Q+A1mtnPk+npqYrlRBc9ar2C5hYiBd17l1Wrp2Bt8=
+codeberg.org/gruf/go-bitutil v1.1.0/go.mod h1:rGibFevYTQfYKcPv0Df5KpG8n5xC3AfD4d/UgYeoNy0=
 codeberg.org/gruf/go-bytes v1.0.0/go.mod h1:1v/ibfaosfXSZtRdW2rWaVrDXMc9E3bsi/M9Ekx39cg=
 codeberg.org/gruf/go-bytes v1.0.2 h1:malqE42Ni+h1nnYWBUAJaDDtEzF4aeN4uPN8DfMNNvo=
 codeberg.org/gruf/go-bytes v1.0.2/go.mod h1:1v/ibfaosfXSZtRdW2rWaVrDXMc9E3bsi/M9Ekx39cg=
@@ -51,8 +51,8 @@ codeberg.org/gruf/go-byteutil v1.0.2 h1:OesVyK5VKWeWdeDR00zRJ+Oy8hjXx1pBhn7WVvcZ
 codeberg.org/gruf/go-byteutil v1.0.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
 codeberg.org/gruf/go-cache/v3 v3.2.2 h1:hq6/RITgpcArjzbYSyo3uFxfIw7wW3KqAQjEaN7dj58=
 codeberg.org/gruf/go-cache/v3 v3.2.2/go.mod h1:+Eje6nCvN8QF71VyYjMWMnkdv6t1kHnCO/SvyC4K12Q=
-codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
-codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
+codeberg.org/gruf/go-debug v1.3.0 h1:PIRxQiWUFKtGOGZFdZ3Y0pqyfI0Xr87j224IYe2snZs=
+codeberg.org/gruf/go-debug v1.3.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
 codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
 codeberg.org/gruf/go-errors/v2 v2.1.1 h1:oj7JUIvUBafF60HrwN74JrCMol1Ouh3gq1ggrH5hGTw=
 codeberg.org/gruf/go-errors/v2 v2.1.1/go.mod h1:LfzD9nkAAJpEDbkUqOZQ2jdaQ8VrK0pnR36zLOMFq6Y=
@@ -79,10 +79,10 @@ codeberg.org/gruf/go-mutexes v1.1.5 h1:8Y8DwCGf24MyzOSaPvLrtk/B4ecVx4z+fppL6dY+P
 codeberg.org/gruf/go-mutexes v1.1.5/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
 codeberg.org/gruf/go-pools v1.1.0 h1:LbYP24eQLl/YI1fSU2pafiwhGol1Z1zPjRrMsXpF88s=
 codeberg.org/gruf/go-pools v1.1.0/go.mod h1:ZMYpt/DjQWYC3zFD3T97QWSFKs62zAUGJ/tzvgB9D68=
-codeberg.org/gruf/go-runners v1.5.1 h1:ekhhxKvO6D/VC7nS/xpv71/iRX01JSqcBEbahqPUghg=
-codeberg.org/gruf/go-runners v1.5.1/go.mod h1:kUM6GYL7dC+f9Sc/XuwdvB/mB4FuI4fJFb150ADMsmw=
+codeberg.org/gruf/go-runners v1.6.0 h1:cAHKxMgtkb3v6it4qZZs4fo+yYgICNCrYvFlayuvSdk=
+codeberg.org/gruf/go-runners v1.6.0/go.mod h1:QRcSExqXX8DM0rm8Xs6qX7baOzyvw0JIe4mu3TsQT+Y=
-codeberg.org/gruf/go-sched v1.2.0 h1:utZl/7srVcbh30rFw42LC2/cMtak4UZRxtIOt/5riNA=
-codeberg.org/gruf/go-sched v1.2.0/go.mod h1:v4ueWq+fAtAw9JYt4aFXvadI1YoOqofgHQgszRYuslA=
+codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk=
+codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A=
 codeberg.org/gruf/go-store/v2 v2.2.1 h1:lbvMjhMLebefiaPNLtWvPySKSYM5xN1aztSxxz+vCzU=
 codeberg.org/gruf/go-store/v2 v2.2.1/go.mod h1:pxdyfSzau8fFs1TfZlyRzhDYvZWLaj1sXpcjXpzBB6k=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -478,8 +478,6 @@ github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY
 github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
-github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
@@ -356,7 +356,7 @@ func (d *deref) fetchRemoteAccountAvatar(ctx context.Context, tsport transport.T
 	}

 	// Create new media processing request from the media manager instance.
-	processing, err := d.mediaManager.ProcessMedia(ctx, data, nil, accountID, &media.AdditionalMediaInfo{
+	processing, err := d.mediaManager.PreProcessMedia(ctx, data, nil, accountID, &media.AdditionalMediaInfo{
 		Avatar:    func() *bool { v := false; return &v }(),
 		RemoteURL: &avatarURL,
 	})
@@ -407,7 +407,7 @@ func (d *deref) fetchRemoteAccountHeader(ctx context.Context, tsport transport.T
 	}

 	// Create new media processing request from the media manager instance.
-	processing, err := d.mediaManager.ProcessMedia(ctx, data, nil, accountID, &media.AdditionalMediaInfo{
+	processing, err := d.mediaManager.PreProcessMedia(ctx, data, nil, accountID, &media.AdditionalMediaInfo{
 		Header:    func() *bool { v := true; return &v }(),
 		RemoteURL: &headerURL,
 	})
@@ -61,7 +61,7 @@ func (d *deref) GetRemoteEmoji(ctx context.Context, requestingUsername string, r
 		return t.DereferenceMedia(innerCtx, derefURI)
 	}

-	newProcessing, err := d.mediaManager.ProcessEmoji(ctx, dataFunc, nil, shortcode, id, emojiURI, ai, refresh)
+	newProcessing, err := d.mediaManager.PreProcessEmoji(ctx, dataFunc, nil, shortcode, id, emojiURI, ai, refresh)
 	if err != nil {
 		return nil, fmt.Errorf("GetRemoteEmoji: error processing emoji %s: %s", shortcodeDomain, err)
 	}
@@ -146,7 +146,6 @@ func (d *deref) populateEmojis(ctx context.Context, rawEmojis []*gtsmodel.Emoji,
 			Disabled:        gotEmoji.Disabled,
 			VisibleInPicker: gotEmoji.VisibleInPicker,
 		}, refresh)
-
 		if err != nil {
 			log.Errorf("populateEmojis: couldn't refresh remote emoji %s: %s", shortcodeDomain, err)
 			continue
@@ -172,7 +171,6 @@ func (d *deref) populateEmojis(ctx context.Context, rawEmojis []*gtsmodel.Emoji,
 			Disabled:        e.Disabled,
 			VisibleInPicker: e.VisibleInPicker,
 		}, refresh)
-
 		if err != nil {
 			log.Errorf("populateEmojis: couldn't get remote emoji %s: %s", shortcodeDomain, err)
 			continue
@@ -49,12 +49,12 @@ type federatingDB struct {
 }

 // New returns a DB interface using the given database and config
-func New(db db.DB, fedWorker *concurrency.WorkerPool[messages.FromFederator]) DB {
+func New(db db.DB, fedWorker *concurrency.WorkerPool[messages.FromFederator], tc typeutils.TypeConverter) DB {
 	fdb := federatingDB{
 		locks:         mutexes.NewMap(-1, -1), // use defaults
 		db:            db,
 		fedWorker:     fedWorker,
-		typeConverter: typeutils.NewConverter(db),
+		typeConverter: tc,
 	}
 	return &fdb
 }
@@ -73,9 +73,6 @@ func (gts *gotosocial) Stop(ctx context.Context) error {
 	if err := gts.apiRouter.Stop(ctx); err != nil {
 		return err
 	}
-	if err := gts.mediaManager.Stop(); err != nil {
-		return err
-	}
 	if err := gts.db.Stop(ctx); err != nil {
 		return err
 	}
@@ -1,73 +0,0 @@
-/*
-   GoToSocial
-   Copyright (C) 2021-2023 GoToSocial Authors admin@gotosocial.org
-
-   This program is free software: you can redistribute it and/or modify
-   it under the terms of the GNU Affero General Public License as published by
-   the Free Software Foundation, either version 3 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU Affero General Public License for more details.
-
-   You should have received a copy of the GNU Affero General Public License
-   along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package media
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/robfig/cron/v3"
-	"github.com/superseriousbusiness/gotosocial/internal/config"
-	"github.com/superseriousbusiness/gotosocial/internal/log"
-)
-
-type cronLogger struct{}
-
-func (l *cronLogger) Info(msg string, keysAndValues ...interface{}) {
-	log.Info("media manager cron logger: ", msg, keysAndValues)
-}
-
-func (l *cronLogger) Error(err error, msg string, keysAndValues ...interface{}) {
-	log.Error("media manager cron logger: ", err, msg, keysAndValues)
-}
-
-func scheduleCleanup(m *manager) error {
-	pruneCtx, pruneCancel := context.WithCancel(context.Background())
-
-	c := cron.New(cron.WithLogger(new(cronLogger)))
-	defer c.Start()
-
-	if _, err := c.AddFunc("@midnight", func() {
-		if err := m.PruneAll(pruneCtx, config.GetMediaRemoteCacheDays(), true); err != nil {
-			log.Error(err)
-			return
-		}
-	}); err != nil {
-		pruneCancel()
-		return fmt.Errorf("error starting media manager cleanup job: %s", err)
-	}
-
-	m.stopCronJobs = func() error {
-		// Try to stop jobs gracefully by waiting til they're finished.
-		stopCtx := c.Stop()
-
-		select {
-		case <-stopCtx.Done():
-			log.Infof("media manager: cron finished jobs and stopped gracefully")
-		case <-time.After(1 * time.Minute):
-			log.Warnf("media manager: cron didn't stop after 60 seconds, force closing jobs")
-			pruneCancel()
-		}
-
-		return nil
-	}
-
-	return nil
-}
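The deleted file above was the robfig/cron-based cleanup scheduler; its "@midnight" job is replaced further down by a go-sched job on state.Workers.Scheduler. Side by side, the two registration styles look roughly like this (condensed from the old and new code in this diff, details elided):

    // Before: robfig/cron, string schedule, own cron goroutine + stop channel.
    c := cron.New(cron.WithLogger(new(cronLogger)))
    c.AddFunc("@midnight", func() { /* PruneAll */ })

    // After: go-sched, explicit first-run time and interval, shared scheduler.
    m.state.Workers.Scheduler.Schedule(sched.NewJob(func(now time.Time) {
        /* PruneAll */
    }).EveryAt(midnight, day))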
@@ -20,11 +20,19 @@

 import (
 	"context"
+	"errors"
 	"fmt"
+	"time"

-	"github.com/superseriousbusiness/gotosocial/internal/concurrency"
-	"github.com/superseriousbusiness/gotosocial/internal/db"
-	"github.com/superseriousbusiness/gotosocial/internal/storage"
+	"codeberg.org/gruf/go-runners"
+	"codeberg.org/gruf/go-sched"
+	"codeberg.org/gruf/go-store/v2/storage"
+	"github.com/superseriousbusiness/gotosocial/internal/config"
+	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+	"github.com/superseriousbusiness/gotosocial/internal/id"
+	"github.com/superseriousbusiness/gotosocial/internal/log"
+	"github.com/superseriousbusiness/gotosocial/internal/state"
+	"github.com/superseriousbusiness/gotosocial/internal/uris"
 )

 var SupportedMIMETypes = []string{
@@ -42,16 +50,11 @@

 // Manager provides an interface for managing media: parsing, storing, and retrieving media objects like photos, videos, and gifs.
 type Manager interface {
-	// Stop stops the underlying worker pool of the manager. It should be called
-	// when closing GoToSocial in order to cleanly finish any in-progress jobs.
-	// It will block until workers are finished processing.
-	Stop() error
-
 	/*
 		PROCESSING FUNCTIONS
 	*/

-	// ProcessMedia begins the process of decoding and storing the given data as an attachment.
+	// PreProcessMedia begins the process of decoding and storing the given data as an attachment.
 	// It will return a pointer to a ProcessingMedia struct upon which further actions can be performed, such as getting
 	// the finished media, thumbnail, attachment, etc.
 	//
@@ -63,8 +66,19 @@ type Manager interface {
 	// accountID should be the account that the media belongs to.
 	//
 	// ai is optional and can be nil. Any additional information about the attachment provided will be put in the database.
+	//
+	// Note: unlike ProcessMedia, this will NOT queue the media to be asynchronously processed.
+	PreProcessMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, accountID string, ai *AdditionalMediaInfo) (*ProcessingMedia, error)
+
+	// PreProcessMediaRecache refetches, reprocesses, and recaches an existing attachment that has been uncached via pruneRemote.
+	//
+	// Note: unlike ProcessMedia, this will NOT queue the media to be asynchronously processed.
+	PreProcessMediaRecache(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error)
+
+	// ProcessMedia will call PreProcessMedia, followed by queuing the media to be processed in the media worker queue.
 	ProcessMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, accountID string, ai *AdditionalMediaInfo) (*ProcessingMedia, error)
-	// ProcessEmoji begins the process of decoding and storing the given data as an emoji.
+
+	// PreProcessEmoji begins the process of decoding and storing the given data as an emoji.
 	// It will return a pointer to a ProcessingEmoji struct upon which further actions can be performed, such as getting
 	// the finished media, thumbnail, attachment, etc.
 	//
@@ -81,10 +95,11 @@ type Manager interface {
 	//
 	// ai is optional and can be nil. Any additional information about the emoji provided will be put in the database.
 	//
-	// If refresh is true, this indicates that the emoji image has changed and should be updated.
+	// Note: unlike ProcessEmoji, this will NOT queue the emoji to be asynchronously processed.
+	PreProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, id string, uri string, ai *AdditionalEmojiInfo, refresh bool) (*ProcessingEmoji, error)
+
+	// ProcessEmoji will call PreProcessEmoji, followed by queuing the emoji to be processed in the emoji worker queue.
 	ProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, id string, uri string, ai *AdditionalEmojiInfo, refresh bool) (*ProcessingEmoji, error)
-	// RecacheMedia refetches, reprocesses, and recaches an existing attachment that has been uncached via pruneRemote.
-	RecacheMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error)

 	/*
 		PRUNING/UNCACHING FUNCTIONS
@@ -139,11 +154,7 @@ type Manager interface {
 }

 type manager struct {
-	db           db.DB
-	storage      *storage.Driver
-	emojiWorker  *concurrency.WorkerPool[*ProcessingEmoji]
-	mediaWorker  *concurrency.WorkerPool[*ProcessingMedia]
-	stopCronJobs func() error
+	state *state.State
 }

 // NewManager returns a media manager with the given db and underlying storage.
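The practical difference for callers, as a usage sketch based on the interface comments above (ctx, data, and accountID stand in for real values):

    // PreProcessMedia: build the ProcessingMedia but do NOT queue it.
    // The caller decides when/whether to drive processing (e.g. a blocking load).
    processing, err := mgr.PreProcessMedia(ctx, data, nil, accountID, nil)

    // ProcessMedia: PreProcessMedia + enqueue onto the media worker queue,
    // so decoding/storing happens asynchronously.
    processing, err = mgr.ProcessMedia(ctx, data, nil, accountID, nil)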
@@ -152,88 +163,299 @@ type manager struct {
 // a limited number of media will be processed in parallel. The numbers of workers
 // is determined from the $GOMAXPROCS environment variable (usually no. CPU cores).
 // See internal/concurrency.NewWorkerPool() documentation for further information.
-func NewManager(database db.DB, storage *storage.Driver) (Manager, error) {
-	m := &manager{
-		db:      database,
-		storage: storage,
-	}
-
-	// Prepare the media worker pool.
-	m.mediaWorker = concurrency.NewWorkerPool[*ProcessingMedia](-1, 10)
-	m.mediaWorker.SetProcessor(func(ctx context.Context, media *ProcessingMedia) error {
-		if _, err := media.LoadAttachment(ctx); err != nil {
-			return fmt.Errorf("error loading media %s: %v", media.AttachmentID(), err)
-		}
-		return nil
-	})
-
-	// Prepare the emoji worker pool.
-	m.emojiWorker = concurrency.NewWorkerPool[*ProcessingEmoji](-1, 10)
-	m.emojiWorker.SetProcessor(func(ctx context.Context, emoji *ProcessingEmoji) error {
-		if _, err := emoji.LoadEmoji(ctx); err != nil {
-			return fmt.Errorf("error loading emoji %s: %v", emoji.EmojiID(), err)
-		}
-		return nil
-	})
-
-	// Start the worker pools.
-	if err := m.mediaWorker.Start(); err != nil {
-		return nil, err
-	}
-	if err := m.emojiWorker.Start(); err != nil {
-		return nil, err
-	}
-
-	// Schedule cron job(s) for clean up.
-	if err := scheduleCleanup(m); err != nil {
-		return nil, err
-	}
-
-	return m, nil
+func NewManager(state *state.State) Manager {
+	m := &manager{state: state}
+	scheduleCleanupJobs(m)
+	return m
 }

-func (m *manager) ProcessMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, accountID string, ai *AdditionalMediaInfo) (*ProcessingMedia, error) {
-	processingMedia, err := m.preProcessMedia(ctx, data, postData, accountID, ai)
+func (m *manager) PreProcessMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, accountID string, ai *AdditionalMediaInfo) (*ProcessingMedia, error) {
+	id, err := id.NewRandomULID()
 	if err != nil {
 		return nil, err
 	}
-	m.mediaWorker.Queue(processingMedia)
+
+	avatar := false
+	header := false
+	cached := false
+	now := time.Now()
+
+	// populate initial fields on the media attachment -- some of these will be overwritten as we proceed
+	attachment := &gtsmodel.MediaAttachment{
+		ID:                id,
+		CreatedAt:         now,
+		UpdatedAt:         now,
+		StatusID:          "",
+		URL:               "", // we don't know yet because it depends on the uncalled DataFunc
+		RemoteURL:         "",
+		Type:              gtsmodel.FileTypeUnknown, // we don't know yet because it depends on the uncalled DataFunc
+		FileMeta:          gtsmodel.FileMeta{},
+		AccountID:         accountID,
+		Description:       "",
+		ScheduledStatusID: "",
+		Blurhash:          "",
+		Processing:        gtsmodel.ProcessingStatusReceived,
+		File:              gtsmodel.File{UpdatedAt: now},
+		Thumbnail:         gtsmodel.Thumbnail{UpdatedAt: now},
+		Avatar:            &avatar,
+		Header:            &header,
+		Cached:            &cached,
+	}
+
+	// check if we have additional info to add to the attachment,
+	// and overwrite some of the attachment fields if so
+	if ai != nil {
+		if ai.CreatedAt != nil {
+			attachment.CreatedAt = *ai.CreatedAt
+		}
+
+		if ai.StatusID != nil {
+			attachment.StatusID = *ai.StatusID
+		}
+
+		if ai.RemoteURL != nil {
+			attachment.RemoteURL = *ai.RemoteURL
+		}
+
+		if ai.Description != nil {
+			attachment.Description = *ai.Description
+		}
+
+		if ai.ScheduledStatusID != nil {
+			attachment.ScheduledStatusID = *ai.ScheduledStatusID
+		}
+
+		if ai.Blurhash != nil {
+			attachment.Blurhash = *ai.Blurhash
+		}
+
+		if ai.Avatar != nil {
+			attachment.Avatar = ai.Avatar
+		}
+
+		if ai.Header != nil {
+			attachment.Header = ai.Header
+		}
+
+		if ai.FocusX != nil {
+			attachment.FileMeta.Focus.X = *ai.FocusX
+		}
+
+		if ai.FocusY != nil {
+			attachment.FileMeta.Focus.Y = *ai.FocusY
+		}
+	}
+
+	processingMedia := &ProcessingMedia{
+		media:  attachment,
+		dataFn: data,
+		postFn: postData,
+		mgr:    m,
+	}
+
 	return processingMedia, nil
 }

-func (m *manager) ProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, id string, uri string, ai *AdditionalEmojiInfo, refresh bool) (*ProcessingEmoji, error) {
-	processingEmoji, err := m.preProcessEmoji(ctx, data, postData, shortcode, id, uri, ai, refresh)
+func (m *manager) PreProcessMediaRecache(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error) {
+	// get the existing attachment from database.
+	attachment, err := m.state.DB.GetAttachmentByID(ctx, attachmentID)
 	if err != nil {
 		return nil, err
 	}
-	m.emojiWorker.Queue(processingEmoji)
+
+	processingMedia := &ProcessingMedia{
+		media:   attachment,
+		dataFn:  data,
+		postFn:  postData,
+		recache: true, // indicate it's a recache
+		mgr:     m,
+	}
+
+	return processingMedia, nil
+}
+
+func (m *manager) ProcessMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, accountID string, ai *AdditionalMediaInfo) (*ProcessingMedia, error) {
+	// Create a new processing media object for this media request.
+	media, err := m.PreProcessMedia(ctx, data, postData, accountID, ai)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attempt to add this media processing item to the worker queue.
+	_ = m.state.Workers.Media.MustEnqueueCtx(ctx, media.Process)
+
+	return media, nil
+}
+
+func (m *manager) PreProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, emojiID string, uri string, ai *AdditionalEmojiInfo, refresh bool) (*ProcessingEmoji, error) {
+	instanceAccount, err := m.state.DB.GetInstanceAccount(ctx, "")
+	if err != nil {
+		return nil, fmt.Errorf("preProcessEmoji: error fetching this instance account from the db: %s", err)
+	}
+
+	var (
+		newPathID string
+		emoji     *gtsmodel.Emoji
+		now       = time.Now()
+	)
+
+	if refresh {
+		emoji, err = m.state.DB.GetEmojiByID(ctx, emojiID)
+		if err != nil {
+			return nil, fmt.Errorf("preProcessEmoji: error fetching emoji to refresh from the db: %s", err)
+		}
+
+		// if this is a refresh, we will end up with new images
+		// stored for this emoji, so we can use the postData function
+		// to perform clean up of the old images from storage
+		originalPostData := postData
+		originalImagePath := emoji.ImagePath
+		originalImageStaticPath := emoji.ImageStaticPath
+		postData = func(innerCtx context.Context) error {
+			// trigger the original postData function if it was provided
+			if originalPostData != nil {
+				if err := originalPostData(innerCtx); err != nil {
+					return err
+				}
+			}
+
+			l := log.WithField("shortcode@domain", emoji.Shortcode+"@"+emoji.Domain)
+			l.Debug("postData: cleaning up old emoji files for refreshed emoji")
+			if err := m.state.Storage.Delete(innerCtx, originalImagePath); err != nil && !errors.Is(err, storage.ErrNotFound) {
+				l.Errorf("postData: error cleaning up old emoji image at %s for refreshed emoji: %s", originalImagePath, err)
+			}
+			if err := m.state.Storage.Delete(innerCtx, originalImageStaticPath); err != nil && !errors.Is(err, storage.ErrNotFound) {
+				l.Errorf("postData: error cleaning up old emoji static image at %s for refreshed emoji: %s", originalImageStaticPath, err)
+			}
+
+			return nil
+		}
+
+		newPathID, err = id.NewRandomULID()
+		if err != nil {
+			return nil, fmt.Errorf("preProcessEmoji: error generating alternateID for emoji refresh: %s", err)
+		}
+
+		// store + serve static image at new path ID
+		emoji.ImageStaticURL = uris.GenerateURIForAttachment(instanceAccount.ID, string(TypeEmoji), string(SizeStatic), newPathID, mimePng)
+		emoji.ImageStaticPath = fmt.Sprintf("%s/%s/%s/%s.%s", instanceAccount.ID, TypeEmoji, SizeStatic, newPathID, mimePng)
+
+		emoji.Shortcode = shortcode
+		emoji.URI = uri
+	} else {
+		disabled := false
+		visibleInPicker := true
+
+		// populate initial fields on the emoji -- some of these will be overwritten as we proceed
+		emoji = &gtsmodel.Emoji{
+			ID:                     emojiID,
+			CreatedAt:              now,
+			Shortcode:              shortcode,
+			Domain:                 "", // assume our own domain unless told otherwise
+			ImageRemoteURL:         "",
+			ImageStaticRemoteURL:   "",
+			ImageURL:               "", // we don't know yet
+			ImageStaticURL:         uris.GenerateURIForAttachment(instanceAccount.ID, string(TypeEmoji), string(SizeStatic), emojiID, mimePng), // all static emojis are encoded as png
+			ImagePath:              "", // we don't know yet
+			ImageStaticPath:        fmt.Sprintf("%s/%s/%s/%s.%s", instanceAccount.ID, TypeEmoji, SizeStatic, emojiID, mimePng), // all static emojis are encoded as png
+			ImageContentType:       "", // we don't know yet
+			ImageStaticContentType: mimeImagePng, // all static emojis are encoded as png
+			ImageFileSize:          0,
+			ImageStaticFileSize:    0,
+			Disabled:               &disabled,
+			URI:                    uri,
+			VisibleInPicker:        &visibleInPicker,
+			CategoryID:             "",
+		}
+	}
+
+	emoji.ImageUpdatedAt = now
+	emoji.UpdatedAt = now
+
+	// check if we have additional info to add to the emoji,
+	// and overwrite some of the emoji fields if so
+	if ai != nil {
+		if ai.CreatedAt != nil {
+			emoji.CreatedAt = *ai.CreatedAt
+		}
+
+		if ai.Domain != nil {
+			emoji.Domain = *ai.Domain
+		}
+
+		if ai.ImageRemoteURL != nil {
+			emoji.ImageRemoteURL = *ai.ImageRemoteURL
+		}
+
+		if ai.ImageStaticRemoteURL != nil {
+			emoji.ImageStaticRemoteURL = *ai.ImageStaticRemoteURL
+		}
+
+		if ai.Disabled != nil {
+			emoji.Disabled = ai.Disabled
+		}
+
+		if ai.VisibleInPicker != nil {
+			emoji.VisibleInPicker = ai.VisibleInPicker
+		}
+
+		if ai.CategoryID != nil {
+			emoji.CategoryID = *ai.CategoryID
+		}
+	}
+
+	processingEmoji := &ProcessingEmoji{
+		instAccID: instanceAccount.ID,
+		emoji:     emoji,
+		refresh:   refresh,
+		newPathID: newPathID,
+		dataFn:    data,
+		postFn:    postData,
+		mgr:       m,
+	}
+
 	return processingEmoji, nil
 }

-func (m *manager) RecacheMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error) {
-	processingRecache, err := m.preProcessRecache(ctx, data, postData, attachmentID)
+func (m *manager) ProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, id string, uri string, ai *AdditionalEmojiInfo, refresh bool) (*ProcessingEmoji, error) {
+	// Create a new processing emoji object for this emoji request.
+	emoji, err := m.PreProcessEmoji(ctx, data, postData, shortcode, id, uri, ai, refresh)
 	if err != nil {
 		return nil, err
 	}
-	m.mediaWorker.Queue(processingRecache)
-	return processingRecache, nil
+
+	// Attempt to add this emoji processing item to the worker queue.
+	_ = m.state.Workers.Media.MustEnqueueCtx(ctx, emoji.Process)
+
+	return emoji, nil
 }

-func (m *manager) Stop() error {
-	// Stop worker pools.
-	mediaErr := m.mediaWorker.Stop()
-	emojiErr := m.emojiWorker.Stop()
-
-	var cronErr error
-	if m.stopCronJobs != nil {
-		cronErr = m.stopCronJobs()
-	}
-
-	if mediaErr != nil {
-		return mediaErr
-	} else if emojiErr != nil {
-		return emojiErr
-	}
-
-	return cronErr
+func scheduleCleanupJobs(m *manager) {
+	const day = time.Hour * 24
+
+	// Calculate closest midnight.
+	now := time.Now()
+	midnight := now.Round(day)
+
+	if midnight.Before(now) {
+		// since <= 11:59am rounds down.
+		midnight = midnight.Add(day)
+	}
+
+	// Get ctx associated with scheduler run state.
+	done := m.state.Workers.Scheduler.Done()
+	doneCtx := runners.CancelCtx(done)
+
+	// TODO: we'll need to do some thinking to make these
+	// jobs restartable if we want to implement reloads in
+	// the future that make call to Workers.Stop() -> Workers.Start().
+
+	// Schedule the PruneAll task to execute every day at midnight.
+	m.state.Workers.Scheduler.Schedule(sched.NewJob(func(now time.Time) {
+		err := m.PruneAll(doneCtx, config.GetMediaRemoteCacheDays(), true)
+		if err != nil {
+			log.Errorf("error during prune: %v", err)
+		}
+		log.Infof("finished pruning all in %s", time.Since(now))
+	}).EveryAt(midnight, day))
 }
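The midnight calculation above relies on time.Time.Round's nearest-multiple behaviour: rounding to a 24h multiple snaps to the NEAREST midnight (in Go's leap-second-free timeline, measured from the zero time), which for times before noon lies in the past. A small standalone check of that edge case (illustrative only, using UTC so 24h multiples align with calendar midnights):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const day = 24 * time.Hour
        now := time.Date(2023, 2, 1, 9, 30, 0, 0, time.UTC) // 09:30, before noon
        midnight := now.Round(day)                          // rounds DOWN to 2023-02-01 00:00
        if midnight.Before(now) {
            midnight = midnight.Add(day) // push to the next midnight
        }
        fmt.Println(midnight) // 2023-02-02 00:00:00 +0000 UTC
    }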
|
@ -33,6 +33,7 @@
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||||
|
"github.com/superseriousbusiness/gotosocial/internal/state"
|
||||||
gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
|
gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -1189,15 +1190,19 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlockingWithDiskStorage() {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var state state.State
|
||||||
|
|
||||||
|
state.Workers.Start()
|
||||||
|
defer state.Workers.Stop()
|
||||||
|
|
||||||
storage := >sstorage.Driver{
|
storage := >sstorage.Driver{
|
||||||
KVStore: kv.New(disk),
|
KVStore: kv.New(disk),
|
||||||
Storage: disk,
|
Storage: disk,
|
||||||
}
|
}
|
||||||
|
state.Storage = storage
|
||||||
|
state.DB = suite.db
|
||||||
|
|
||||||
diskManager, err := media.NewManager(suite.db, storage)
|
diskManager := media.NewManager(&state)
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
suite.manager = diskManager
|
suite.manager = diskManager
|
||||||
|
|
||||||
// process the media with no additional info provided
|
// process the media with no additional info provided
|
||||||
|
|
|
@@ -21,18 +21,15 @@

 import (
 	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"io"
-	"sync"
-	"time"

 	"codeberg.org/gruf/go-bytesize"
-	gostore "codeberg.org/gruf/go-store/v2/storage"
+	"codeberg.org/gruf/go-errors/v2"
+	"codeberg.org/gruf/go-runners"
 	"github.com/h2non/filetype"
 	"github.com/superseriousbusiness/gotosocial/internal/config"
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
-	"github.com/superseriousbusiness/gotosocial/internal/id"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/uris"
 )
@@ -46,9 +43,10 @@ type ProcessingEmoji struct {
 	newPathID string               // new emoji path ID to use if refreshed
 	dataFn    DataFunc             // load-data function, returns media stream
 	postFn    PostDataCallbackFunc // post data callback function
-	err       error                // error encountered during processing
-	manager   *manager             // manager instance (access to db / storage)
-	once      sync.Once            // once ensures processing only occurs once
+	done      bool                 // done is set when process finishes with non ctx canceled type error
+	proc      runners.Processor    // proc helps synchronize only a singular running processing instance
+	err       error                // error stores permanent error value when done
+	mgr       *manager             // mgr instance (access to db / storage)
 }

 // EmojiID returns the ID of the underlying emoji without blocking processing.
@@ -56,40 +54,72 @@ func (p *ProcessingEmoji) EmojiID() string {
 	return p.emoji.ID // immutable, safe outside mutex.
 }

-// LoadEmoji blocks until the static and fullsize image
-// has been processed, and then returns the completed emoji.
+// LoadEmoji blocks until the static and fullsize image has been processed, and then returns the completed emoji.
 func (p *ProcessingEmoji) LoadEmoji(ctx context.Context) (*gtsmodel.Emoji, error) {
-	// only process once.
-	p.once.Do(func() {
-		var err error
-
-		defer func() {
-			if r := recover(); r != nil {
-				if err != nil {
-					rOld := r // wrap the panic so we don't lose existing returned error
-					r = fmt.Errorf("panic occured after error %q: %v", err.Error(), rOld)
-				}
-
-				// Catch any panics and wrap as error.
-				err = fmt.Errorf("caught panic: %v", r)
-			}
-
-			// Store error.
-			p.err = err
-		}()
+	// Attempt to load synchronously.
+	emoji, done, err := p.load(ctx)
+	if err == nil {
+		// No issue, return media.
+		return emoji, nil
+	}
+
+	if !done {
+		// Provided context was cancelled, e.g. request cancelled
+		// early. Queue this item for asynchronous processing.
+		log.Warnf("reprocessing emoji %s after canceled ctx", p.emoji.ID)
+		go p.mgr.state.Workers.Media.Enqueue(p.Process)
+	}
+
+	return nil, err
+}
+
+// Process allows the receiving object to fit the runners.WorkerFunc signature. It performs a (blocking) load and logs on error.
+func (p *ProcessingEmoji) Process(ctx context.Context) {
+	if _, _, err := p.load(ctx); err != nil {
+		log.Errorf("error processing emoji: %v", err)
+	}
+}
+
+// load performs a concurrency-safe load of ProcessingEmoji, only marking itself as complete when returned error is NOT a context cancel.
+func (p *ProcessingEmoji) load(ctx context.Context) (*gtsmodel.Emoji, bool, error) {
+	var (
+		done bool
+		err  error
+	)
+
+	err = p.proc.Process(func() error {
+		if p.done {
+			// Already proc'd.
+			return p.err
+		}
+
+		defer func() {
+			// This is only done when ctx NOT cancelled.
+			done = err == nil || !errors.Is(err,
+				context.Canceled,
+				context.DeadlineExceeded,
+			)
+
+			if !done {
+				return
+			}
+
+			// Store final values.
+			p.done = true
+			p.err = err
+		}()

 		// Attempt to store media and calculate
 		// full-size media attachment details.
 		if err = p.store(ctx); err != nil {
-			return
+			return err
 		}

 		// Finish processing by reloading media into
 		// memory to get dimension and generate a thumb.
 		if err = p.finish(ctx); err != nil {
-			return
+			return err
 		}

 		if p.refresh {
@@ -110,20 +140,20 @@ func (p *ProcessingEmoji) LoadEmoji(ctx context.Context) (*gtsmodel.Emoji, error
 		}

 		// Existing emoji we're refreshing, so only need to update.
-		_, err = p.manager.db.UpdateEmoji(ctx, p.emoji, columns...)
-		return
+		_, err = p.mgr.state.DB.UpdateEmoji(ctx, p.emoji, columns...)
+		return err
 	}

 	// New emoji media, first time caching.
-	err = p.manager.db.PutEmoji(ctx, p.emoji)
-	return //nolint shutup linter i like this here
+	err = p.mgr.state.DB.PutEmoji(ctx, p.emoji)
+	return err
 	})

-	if p.err != nil {
-		return nil, p.err
+	if err != nil {
+		return nil, done, err
 	}

-	return p.emoji, nil
+	return p.emoji, done, nil
 }

 // store calls the data function attached to p if it hasn't been called yet,
@@ -220,24 +250,24 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 	)

 	// This shouldn't already exist, but we do a check as it's worth logging.
-	if have, _ := p.manager.storage.Has(ctx, p.emoji.ImagePath); have {
+	if have, _ := p.mgr.state.Storage.Has(ctx, p.emoji.ImagePath); have {
 		log.Warnf("emoji already exists at storage path: %s", p.emoji.ImagePath)

 		// Attempt to remove existing emoji at storage path (might be broken / out-of-date)
-		if err := p.manager.storage.Delete(ctx, p.emoji.ImagePath); err != nil {
+		if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImagePath); err != nil {
 			return fmt.Errorf("error removing emoji from storage: %v", err)
 		}
 	}

 	// Write the final image reader stream to our storage.
-	sz, err = p.manager.storage.PutStream(ctx, p.emoji.ImagePath, r)
+	sz, err = p.mgr.state.Storage.PutStream(ctx, p.emoji.ImagePath, r)
 	if err != nil {
 		return fmt.Errorf("error writing emoji to storage: %w", err)
 	}

 	// Once again check size in case none was provided previously.
 	if size := bytesize.Size(sz); size > maxSize {
-		if err := p.manager.storage.Delete(ctx, p.emoji.ImagePath); err != nil {
+		if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImagePath); err != nil {
 			log.Errorf("error removing too-large-emoji from storage: %v", err)
 		}
 		return fmt.Errorf("calculated emoji size %s greater than max allowed %s", size, maxSize)
|
return fmt.Errorf("calculated emoji size %s greater than max allowed %s", size, maxSize)
|
||||||
|
@ -259,7 +289,7 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
|
||||||
|
|
||||||
func (p *ProcessingEmoji) finish(ctx context.Context) error {
|
func (p *ProcessingEmoji) finish(ctx context.Context) error {
|
||||||
// Fetch a stream to the original file in storage.
|
// Fetch a stream to the original file in storage.
|
||||||
rc, err := p.manager.storage.GetStream(ctx, p.emoji.ImagePath)
|
rc, err := p.mgr.state.Storage.GetStream(ctx, p.emoji.ImagePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error loading file from storage: %w", err)
|
return fmt.Errorf("error loading file from storage: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -277,11 +307,11 @@ func (p *ProcessingEmoji) finish(ctx context.Context) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// This shouldn't already exist, but we do a check as it's worth logging.
|
// This shouldn't already exist, but we do a check as it's worth logging.
|
||||||
if have, _ := p.manager.storage.Has(ctx, p.emoji.ImageStaticPath); have {
|
if have, _ := p.mgr.state.Storage.Has(ctx, p.emoji.ImageStaticPath); have {
|
||||||
log.Warnf("static emoji already exists at storage path: %s", p.emoji.ImagePath)
|
log.Warnf("static emoji already exists at storage path: %s", p.emoji.ImagePath)
|
||||||
|
|
||||||
// Attempt to remove static existing emoji at storage path (might be broken / out-of-date)
|
// Attempt to remove static existing emoji at storage path (might be broken / out-of-date)
|
||||||
if err := p.manager.storage.Delete(ctx, p.emoji.ImageStaticPath); err != nil {
|
if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImageStaticPath); err != nil {
|
||||||
return fmt.Errorf("error removing static emoji from storage: %v", err)
|
return fmt.Errorf("error removing static emoji from storage: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -290,7 +320,7 @@ func (p *ProcessingEmoji) finish(ctx context.Context) error {
|
||||||
enc := staticImg.ToPNG()
|
enc := staticImg.ToPNG()
|
||||||
|
|
||||||
// Stream-encode the PNG static image into storage.
|
// Stream-encode the PNG static image into storage.
|
||||||
sz, err := p.manager.storage.PutStream(ctx, p.emoji.ImageStaticPath, enc)
|
sz, err := p.mgr.state.Storage.PutStream(ctx, p.emoji.ImageStaticPath, enc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error stream-encoding static emoji to storage: %w", err)
|
return fmt.Errorf("error stream-encoding static emoji to storage: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -300,129 +330,3 @@ func (p *ProcessingEmoji) finish(ctx context.Context) error {
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *manager) preProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, emojiID string, uri string, ai *AdditionalEmojiInfo, refresh bool) (*ProcessingEmoji, error) {
|
|
||||||
instanceAccount, err := m.db.GetInstanceAccount(ctx, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("preProcessEmoji: error fetching this instance account from the db: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var newPathID string
|
|
||||||
var emoji *gtsmodel.Emoji
|
|
||||||
if refresh {
|
|
||||||
emoji, err = m.db.GetEmojiByID(ctx, emojiID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("preProcessEmoji: error fetching emoji to refresh from the db: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this is a refresh, we will end up with new images
|
|
||||||
// stored for this emoji, so we can use the postData function
|
|
||||||
// to perform clean up of the old images from storage
|
|
||||||
originalPostData := postData
|
|
||||||
originalImagePath := emoji.ImagePath
|
|
||||||
originalImageStaticPath := emoji.ImageStaticPath
|
|
||||||
postData = func(innerCtx context.Context) error {
|
|
||||||
// trigger the original postData function if it was provided
|
|
||||||
if originalPostData != nil {
|
|
||||||
if err := originalPostData(innerCtx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
l := log.WithField("shortcode@domain", emoji.Shortcode+"@"+emoji.Domain)
|
|
||||||
l.Debug("postData: cleaning up old emoji files for refreshed emoji")
|
|
||||||
if err := m.storage.Delete(innerCtx, originalImagePath); err != nil && !errors.Is(err, gostore.ErrNotFound) {
|
|
||||||
l.Errorf("postData: error cleaning up old emoji image at %s for refreshed emoji: %s", originalImagePath, err)
|
|
||||||
}
|
|
||||||
if err := m.storage.Delete(innerCtx, originalImageStaticPath); err != nil && !errors.Is(err, gostore.ErrNotFound) {
|
|
||||||
l.Errorf("postData: error cleaning up old emoji static image at %s for refreshed emoji: %s", originalImageStaticPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
newPathID, err = id.NewRandomULID()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("preProcessEmoji: error generating alternateID for emoji refresh: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// store + serve static image at new path ID
|
|
||||||
emoji.ImageStaticURL = uris.GenerateURIForAttachment(instanceAccount.ID, string(TypeEmoji), string(SizeStatic), newPathID, mimePng)
|
|
||||||
emoji.ImageStaticPath = fmt.Sprintf("%s/%s/%s/%s.%s", instanceAccount.ID, TypeEmoji, SizeStatic, newPathID, mimePng)
|
|
||||||
|
|
||||||
emoji.Shortcode = shortcode
|
|
||||||
emoji.URI = uri
|
|
||||||
} else {
|
|
||||||
disabled := false
|
|
||||||
visibleInPicker := true
|
|
||||||
|
|
||||||
// populate initial fields on the emoji -- some of these will be overwritten as we proceed
|
|
||||||
emoji = >smodel.Emoji{
|
|
||||||
ID: emojiID,
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
Shortcode: shortcode,
|
|
||||||
Domain: "", // assume our own domain unless told otherwise
|
|
||||||
ImageRemoteURL: "",
|
|
||||||
ImageStaticRemoteURL: "",
|
|
||||||
ImageURL: "", // we don't know yet
|
|
||||||
ImageStaticURL: uris.GenerateURIForAttachment(instanceAccount.ID, string(TypeEmoji), string(SizeStatic), emojiID, mimePng), // all static emojis are encoded as png
|
|
||||||
ImagePath: "", // we don't know yet
|
|
||||||
ImageStaticPath: fmt.Sprintf("%s/%s/%s/%s.%s", instanceAccount.ID, TypeEmoji, SizeStatic, emojiID, mimePng), // all static emojis are encoded as png
|
|
||||||
ImageContentType: "", // we don't know yet
|
|
||||||
ImageStaticContentType: mimeImagePng, // all static emojis are encoded as png
|
|
||||||
ImageFileSize: 0,
|
|
||||||
ImageStaticFileSize: 0,
|
|
||||||
Disabled: &disabled,
|
|
||||||
URI: uri,
|
|
||||||
VisibleInPicker: &visibleInPicker,
|
|
||||||
CategoryID: "",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
emoji.ImageUpdatedAt = time.Now()
|
|
||||||
emoji.UpdatedAt = time.Now()
|
|
||||||
|
|
||||||
// check if we have additional info to add to the emoji,
|
|
||||||
// and overwrite some of the emoji fields if so
|
|
||||||
if ai != nil {
|
|
||||||
if ai.CreatedAt != nil {
|
|
||||||
emoji.CreatedAt = *ai.CreatedAt
|
|
||||||
}
|
|
||||||
|
|
||||||
if ai.Domain != nil {
|
|
||||||
emoji.Domain = *ai.Domain
|
|
||||||
}
|
|
||||||
|
|
||||||
if ai.ImageRemoteURL != nil {
|
|
||||||
emoji.ImageRemoteURL = *ai.ImageRemoteURL
|
|
||||||
}
|
|
||||||
|
|
||||||
if ai.ImageStaticRemoteURL != nil {
|
|
||||||
emoji.ImageStaticRemoteURL = *ai.ImageStaticRemoteURL
|
|
||||||
}
|
|
||||||
|
|
||||||
if ai.Disabled != nil {
|
|
||||||
emoji.Disabled = ai.Disabled
|
|
||||||
}
|
|
||||||
|
|
||||||
if ai.VisibleInPicker != nil {
|
|
||||||
emoji.VisibleInPicker = ai.VisibleInPicker
|
|
||||||
}
|
|
||||||
|
|
||||||
if ai.CategoryID != nil {
|
|
||||||
emoji.CategoryID = *ai.CategoryID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
processingEmoji := &ProcessingEmoji{
|
|
||||||
instAccID: instanceAccount.ID,
|
|
||||||
emoji: emoji,
|
|
||||||
refresh: refresh,
|
|
||||||
newPathID: newPathID,
|
|
||||||
dataFn: data,
|
|
||||||
postFn: postData,
|
|
||||||
manager: m,
|
|
||||||
}
|
|
||||||
|
|
||||||
return processingEmoji, nil
|
|
||||||
}
|
|
||||||
|
|
|
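The LoadEmoji rewrite above is the heart of this commit: the old sync.Once permanently latched whatever happened on the first call, so a caller whose request context was cancelled mid-processing poisoned the result for every later caller. The new load() only records completion when the error is not a context cancel, and LoadEmoji requeues cancelled work onto the media worker pool. Below is a minimal, self-contained sketch of that pattern using only the standard library; the `processor` type is a stand-in for codeberg.org/gruf/go-runners' Processor, not its real implementation, and the committed code can pass both cancel sentinels to a single errors.Is call because gruf's go-errors/v2 variant is variadic.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// processor stands in for runners.Processor: it serializes Process calls
// so only one invocation of fn runs at a time.
type processor struct{ mu sync.Mutex }

func (p *processor) Process(fn func() error) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	return fn()
}

// loader mimics ProcessingEmoji: the work is retried by later callers
// until it finishes with success or a non-cancel error.
type loader struct {
	proc processor
	done bool
	err  error
}

func (l *loader) load(ctx context.Context) (bool, error) {
	var done bool
	var err error
	err = l.proc.Process(func() error {
		if l.done {
			return l.err // already processed; return stored result
		}
		defer func() {
			// Only latch the result when the error is NOT a context
			// cancel, so an impatient caller cannot poison the state.
			done = err == nil ||
				(!errors.Is(err, context.Canceled) &&
					!errors.Is(err, context.DeadlineExceeded))
			if done {
				l.done, l.err = true, err
			}
		}()
		// Stand-in for store()/finish(): honors ctx cancellation.
		select {
		case <-time.After(50 * time.Millisecond):
			err = nil
		case <-ctx.Done():
			err = ctx.Err()
		}
		return err
	})
	return done, err
}

func main() {
	var l loader

	// First caller gives up early: done=false, so the real code would
	// requeue the work on the media worker pool.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	fmt.Println(l.load(ctx)) // false context deadline exceeded

	// A later caller (e.g. the requeued worker func) completes it.
	fmt.Println(l.load(context.Background())) // true <nil>
}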
@@ -24,14 +24,14 @@
 	"fmt"
 	"image/jpeg"
 	"io"
-	"sync"
 	"time"
 
+	"codeberg.org/gruf/go-errors/v2"
+	"codeberg.org/gruf/go-runners"
 	"github.com/disintegration/imaging"
 	"github.com/h2non/filetype"
 	terminator "github.com/superseriousbusiness/exif-terminator"
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
-	"github.com/superseriousbusiness/gotosocial/internal/id"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/uris"
 )
@@ -40,12 +40,13 @@
 // various functions for retrieving data from the process.
 type ProcessingMedia struct {
 	media  *gtsmodel.MediaAttachment // processing media attachment details
-	recache bool                     // recaching existing (uncached) media
 	dataFn DataFunc                  // load-data function, returns media stream
 	postFn PostDataCallbackFunc      // post data callback function
-	err     error     // error encountered during processing
-	manager *manager  // manager instance (access to db / storage)
-	once    sync.Once // once ensures processing only occurs once
+	recache bool              // recaching existing (uncached) media
+	done    bool              // done is set when process finishes with non ctx canceled type error
+	proc    runners.Processor // proc helps synchronize only a singular running processing instance
+	err     error             // error stores permanent error value when done
+	mgr     *manager          // mgr instance (access to db / storage)
 }
 
 // AttachmentID returns the ID of the underlying media attachment without blocking processing.
@@ -53,58 +54,90 @@ func (p *ProcessingMedia) AttachmentID() string {
 	return p.media.ID // immutable, safe outside mutex.
 }
 
-// LoadAttachment blocks until the thumbnail and fullsize content
-// has been processed, and then returns the completed attachment.
+// LoadAttachment blocks until the thumbnail and fullsize content has been processed, and then returns the completed attachment.
 func (p *ProcessingMedia) LoadAttachment(ctx context.Context) (*gtsmodel.MediaAttachment, error) {
-	// only process once.
-	p.once.Do(func() {
-		var err error
+	// Attempt to load synchronously.
+	media, done, err := p.load(ctx)
+
+	if err == nil {
+		// No issue, return media.
+		return media, nil
+	}
+
+	if !done {
+		// Provided context was cancelled, e.g. request cancelled
+		// early. Queue this item for asynchronous processing.
+		log.Warnf("reprocessing media %s after canceled ctx", p.media.ID)
+		go p.mgr.state.Workers.Media.Enqueue(p.Process)
+	}
+
+	return nil, err
+}
+
+// Process allows the receiving object to fit the runners.WorkerFunc signature. It performs a (blocking) load and logs on error.
+func (p *ProcessingMedia) Process(ctx context.Context) {
+	if _, _, err := p.load(ctx); err != nil {
+		log.Errorf("error processing media: %v", err)
+	}
+}
+
+// load performs a concurrency-safe load of ProcessingMedia, only marking itself as complete when returned error is NOT a context cancel.
+func (p *ProcessingMedia) load(ctx context.Context) (*gtsmodel.MediaAttachment, bool, error) {
+	var (
+		done bool
+		err  error
+	)
+
+	err = p.proc.Process(func() error {
+		if p.done {
+			// Already proc'd.
+			return p.err
+		}
 
 		defer func() {
-			if r := recover(); r != nil {
-				if err != nil {
-					rOld := r // wrap the panic so we don't lose existing returned error
-					r = fmt.Errorf("panic occured after error %q: %v", err.Error(), rOld)
-				}
-
-				// Catch any panics and wrap as error.
-				err = fmt.Errorf("caught panic: %v", r)
-			}
-
-			if err != nil {
-				// Store error.
-				p.err = err
-			}
+			// This is only done when ctx NOT cancelled.
+			done = err == nil || !errors.Is(err,
+				context.Canceled,
+				context.DeadlineExceeded,
+			)
+
+			if !done {
+				return
+			}
+
+			// Store final values.
+			p.done = true
+			p.err = err
 		}()
 
 		// Attempt to store media and calculate
 		// full-size media attachment details.
 		if err = p.store(ctx); err != nil {
-			return
+			return err
 		}
 
 		// Finish processing by reloading media into
 		// memory to get dimension and generate a thumb.
 		if err = p.finish(ctx); err != nil {
-			return
+			return err
 		}
 
 		if p.recache {
 			// Existing attachment we're recaching, so only need to update.
-			err = p.manager.db.UpdateByID(ctx, p.media, p.media.ID)
-			return
+			err = p.mgr.state.DB.UpdateByID(ctx, p.media, p.media.ID)
+			return err
 		}
 
 		// New attachment, first time caching.
-		err = p.manager.db.Put(ctx, p.media)
-		return //nolint shutup linter i like this here
+		err = p.mgr.state.DB.Put(ctx, p.media)
+		return err
 	})
 
-	if p.err != nil {
-		return nil, p.err
+	if err != nil {
+		return nil, done, err
 	}
 
-	return p.media, nil
+	return p.media, done, nil
 }
 
 // store calls the data function attached to p if it hasn't been called yet,
@@ -186,17 +219,17 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 	)
 
 	// This shouldn't already exist, but we do a check as it's worth logging.
-	if have, _ := p.manager.storage.Has(ctx, p.media.File.Path); have {
+	if have, _ := p.mgr.state.Storage.Has(ctx, p.media.File.Path); have {
 		log.Warnf("media already exists at storage path: %s", p.media.File.Path)
 
 		// Attempt to remove existing media at storage path (might be broken / out-of-date)
-		if err := p.manager.storage.Delete(ctx, p.media.File.Path); err != nil {
+		if err := p.mgr.state.Storage.Delete(ctx, p.media.File.Path); err != nil {
 			return fmt.Errorf("error removing media from storage: %v", err)
 		}
 	}
 
 	// Write the final image reader stream to our storage.
-	sz, err = p.manager.storage.PutStream(ctx, p.media.File.Path, r)
+	sz, err = p.mgr.state.Storage.PutStream(ctx, p.media.File.Path, r)
 	if err != nil {
 		return fmt.Errorf("error writing media to storage: %w", err)
 	}
@@ -221,7 +254,7 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 
 func (p *ProcessingMedia) finish(ctx context.Context) error {
 	// Fetch a stream to the original file in storage.
-	rc, err := p.manager.storage.GetStream(ctx, p.media.File.Path)
+	rc, err := p.mgr.state.Storage.GetStream(ctx, p.media.File.Path)
 	if err != nil {
 		return fmt.Errorf("error loading file from storage: %w", err)
 	}
@@ -299,11 +332,11 @@ func (p *ProcessingMedia) finish(ctx context.Context) error {
 	p.media.Blurhash = hash
 
 	// This shouldn't already exist, but we do a check as it's worth logging.
-	if have, _ := p.manager.storage.Has(ctx, p.media.Thumbnail.Path); have {
+	if have, _ := p.mgr.state.Storage.Has(ctx, p.media.Thumbnail.Path); have {
 		log.Warnf("thumbnail already exists at storage path: %s", p.media.Thumbnail.Path)
 
 		// Attempt to remove existing thumbnail at storage path (might be broken / out-of-date)
-		if err := p.manager.storage.Delete(ctx, p.media.Thumbnail.Path); err != nil {
+		if err := p.mgr.state.Storage.Delete(ctx, p.media.Thumbnail.Path); err != nil {
 			return fmt.Errorf("error removing thumbnail from storage: %v", err)
 		}
 	}
@@ -314,7 +347,7 @@ func (p *ProcessingMedia) finish(ctx context.Context) error {
 	})
 
 	// Stream-encode the JPEG thumbnail image into storage.
-	sz, err := p.manager.storage.PutStream(ctx, p.media.Thumbnail.Path, enc)
+	sz, err := p.mgr.state.Storage.PutStream(ctx, p.media.Thumbnail.Path, enc)
 	if err != nil {
 		return fmt.Errorf("error stream-encoding thumbnail to storage: %w", err)
 	}
@@ -346,107 +379,3 @@ func (p *ProcessingMedia) finish(ctx context.Context) error {
 
 	return nil
 }
-
-func (m *manager) preProcessMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, accountID string, ai *AdditionalMediaInfo) (*ProcessingMedia, error) {
-	id, err := id.NewRandomULID()
-	if err != nil {
-		return nil, err
-	}
-
-	avatar := false
-	header := false
-	cached := false
-
-	// populate initial fields on the media attachment -- some of these will be overwritten as we proceed
-	attachment := &gtsmodel.MediaAttachment{
-		ID:                id,
-		CreatedAt:         time.Now(),
-		UpdatedAt:         time.Now(),
-		StatusID:          "",
-		URL:               "", // we don't know yet because it depends on the uncalled DataFunc
-		RemoteURL:         "",
-		Type:              gtsmodel.FileTypeUnknown, // we don't know yet because it depends on the uncalled DataFunc
-		FileMeta:          gtsmodel.FileMeta{},
-		AccountID:         accountID,
-		Description:       "",
-		ScheduledStatusID: "",
-		Blurhash:          "",
-		Processing:        gtsmodel.ProcessingStatusReceived,
-		File:              gtsmodel.File{UpdatedAt: time.Now()},
-		Thumbnail:         gtsmodel.Thumbnail{UpdatedAt: time.Now()},
-		Avatar:            &avatar,
-		Header:            &header,
-		Cached:            &cached,
-	}
-
-	// check if we have additional info to add to the attachment,
-	// and overwrite some of the attachment fields if so
-	if ai != nil {
-		if ai.CreatedAt != nil {
-			attachment.CreatedAt = *ai.CreatedAt
-		}
-
-		if ai.StatusID != nil {
-			attachment.StatusID = *ai.StatusID
-		}
-
-		if ai.RemoteURL != nil {
-			attachment.RemoteURL = *ai.RemoteURL
-		}
-
-		if ai.Description != nil {
-			attachment.Description = *ai.Description
-		}
-
-		if ai.ScheduledStatusID != nil {
-			attachment.ScheduledStatusID = *ai.ScheduledStatusID
-		}
-
-		if ai.Blurhash != nil {
-			attachment.Blurhash = *ai.Blurhash
-		}
-
-		if ai.Avatar != nil {
-			attachment.Avatar = ai.Avatar
-		}
-
-		if ai.Header != nil {
-			attachment.Header = ai.Header
-		}
-
-		if ai.FocusX != nil {
-			attachment.FileMeta.Focus.X = *ai.FocusX
-		}
-
-		if ai.FocusY != nil {
-			attachment.FileMeta.Focus.Y = *ai.FocusY
-		}
-	}
-
-	processingMedia := &ProcessingMedia{
-		media:   attachment,
-		dataFn:  data,
-		postFn:  postData,
-		manager: m,
-	}
-
-	return processingMedia, nil
-}
-
-func (m *manager) preProcessRecache(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, id string) (*ProcessingMedia, error) {
-	// get the existing attachment from database.
-	attachment, err := m.db.GetAttachmentByID(ctx, id)
-	if err != nil {
-		return nil, err
-	}
-
-	processingMedia := &ProcessingMedia{
-		media:   attachment,
-		dataFn:  data,
-		postFn:  postData,
-		manager: m,
-		recache: true, // indicate it's a recache
-	}
-
-	return processingMedia, nil
-}
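The attachment side mirrors the emoji rewrite exactly, including moving the old preProcess helpers out of this file. One behavior both store() implementations share: the byte size is only known after PutStream has written the whole stream, so an over-large file is stored first and deleted afterwards. For contrast, here is a hedged stdlib sketch of the fail-fast variant, which caps the copy itself; the names are illustrative, not part of the GoToSocial API:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeLimited copies r into w, but errors once more than max bytes arrive,
// instead of checking the total after the fact as store() above does.
func writeLimited(w io.Writer, r io.Reader, max int64) (int64, error) {
	// Allow one extra byte so "exactly max" and "too big" are distinguishable.
	n, err := io.Copy(w, io.LimitReader(r, max+1))
	if err != nil {
		return n, err
	}
	if n > max {
		return n, fmt.Errorf("stream size %d greater than max allowed %d", n, max)
	}
	return n, nil
}

func main() {
	var buf bytes.Buffer
	n, err := writeLimited(&buf, bytes.NewReader(make([]byte, 2048)), 1024)
	fmt.Println(n, err) // 1025 stream size 1025 greater than max allowed 1024
}

The trade-off is that a capped copy leaves a truncated object behind on failure, which may be why the committed code writes fully and then deletes instead.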
@@ -72,7 +72,7 @@ func (m *manager) PruneAll(ctx context.Context, mediaCacheRemoteDays int, blocki
 		log.Infof("pruned %d orphaned media", pruned)
 	}
 
-	if err := m.storage.Storage.Clean(innerCtx); err != nil {
+	if err := m.state.Storage.Storage.Clean(innerCtx); err != nil {
 		errs = append(errs, fmt.Sprintf("error cleaning storage: (%s)", err))
 	} else {
 		log.Info("cleaned storage")
@@ -116,7 +116,7 @@ func (m *manager) PruneUnusedRemote(ctx context.Context, dry bool) (int, error)
 		}
 	}
 
-	for attachments, err = m.db.GetAvatarsAndHeaders(ctx, maxID, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.db.GetAvatarsAndHeaders(ctx, maxID, selectPruneLimit) {
+	for attachments, err = m.state.DB.GetAvatarsAndHeaders(ctx, maxID, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.state.DB.GetAvatarsAndHeaders(ctx, maxID, selectPruneLimit) {
 		maxID = attachments[len(attachments)-1].ID // use the id of the last attachment in the slice as the next 'maxID' value
 
 		// Prune each attachment that meets one of the following criteria:
@@ -157,7 +157,7 @@ func (m *manager) PruneOrphaned(ctx context.Context, dry bool) (int, error) {
 		return false
 	}
 
-	iterator, err := m.storage.Iterator(ctx, match) // make sure this iterator is always released
+	iterator, err := m.state.Storage.Iterator(ctx, match) // make sure this iterator is always released
 	if err != nil {
 		return 0, fmt.Errorf("PruneOrphaned: error getting storage iterator: %w", err)
 	}
@@ -172,7 +172,7 @@ func (m *manager) PruneOrphaned(ctx context.Context, dry bool) (int, error) {
 	// Emojis are stored under the instance account,
 	// so we need the ID of the instance account for
 	// the next part.
-	instanceAccount, err := m.db.GetInstanceAccount(ctx, "")
+	instanceAccount, err := m.state.DB.GetInstanceAccount(ctx, "")
 	if err != nil {
 		iterator.Release()
 		return 0, fmt.Errorf("PruneOrphaned: error getting instance account: %w", err)
@@ -223,7 +223,7 @@ func (m *manager) orphaned(ctx context.Context, key string, instanceAccountID st
 	// Look for keys in storage that we don't have an attachment for.
 	switch Type(mediaType) {
 	case TypeAttachment, TypeHeader, TypeAvatar:
-		if _, err := m.db.GetAttachmentByID(ctx, mediaID); err != nil {
+		if _, err := m.state.DB.GetAttachmentByID(ctx, mediaID); err != nil {
 			if !errors.Is(err, db.ErrNoEntries) {
 				return false, fmt.Errorf("error calling GetAttachmentByID: %w", err)
 			}
@@ -234,7 +234,7 @@ func (m *manager) orphaned(ctx context.Context, key string, instanceAccountID st
 		// the MEDIA_ID part of the key for emojis will not necessarily correspond
 		// to the file that's currently being used as the emoji image.
 		staticURL := uris.GenerateURIForAttachment(instanceAccountID, string(TypeEmoji), string(SizeStatic), mediaID, mimePng)
-		if _, err := m.db.GetEmojiByStaticURL(ctx, staticURL); err != nil {
+		if _, err := m.state.DB.GetEmojiByStaticURL(ctx, staticURL); err != nil {
 			if !errors.Is(err, db.ErrNoEntries) {
 				return false, fmt.Errorf("error calling GetEmojiByStaticURL: %w", err)
 			}
@@ -254,7 +254,7 @@ func (m *manager) UncacheRemote(ctx context.Context, olderThanDays int, dry bool
 
 	if dry {
 		// Dry run, just count eligible entries without removing them.
-		return m.db.CountRemoteOlderThan(ctx, olderThan)
+		return m.state.DB.CountRemoteOlderThan(ctx, olderThan)
 	}
 
 	var (
@@ -263,7 +263,7 @@ func (m *manager) UncacheRemote(ctx context.Context, olderThanDays int, dry bool
 		err error
 	)
 
-	for attachments, err = m.db.GetRemoteOlderThan(ctx, olderThan, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.db.GetRemoteOlderThan(ctx, olderThan, selectPruneLimit) {
+	for attachments, err = m.state.DB.GetRemoteOlderThan(ctx, olderThan, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.state.DB.GetRemoteOlderThan(ctx, olderThan, selectPruneLimit) {
 		olderThan = attachments[len(attachments)-1].CreatedAt // use the created time of the last attachment in the slice as the next 'olderThan' value
 
 		for _, attachment := range attachments {
@@ -287,7 +287,7 @@ func (m *manager) PruneUnusedLocal(ctx context.Context, dry bool) (int, error) {
 
 	if dry {
 		// Dry run, just count eligible entries without removing them.
-		return m.db.CountLocalUnattachedOlderThan(ctx, olderThan)
+		return m.state.DB.CountLocalUnattachedOlderThan(ctx, olderThan)
 	}
 
 	var (
@@ -296,7 +296,7 @@ func (m *manager) PruneUnusedLocal(ctx context.Context, dry bool) (int, error) {
 		err error
 	)
 
-	for attachments, err = m.db.GetLocalUnattachedOlderThan(ctx, olderThan, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.db.GetLocalUnattachedOlderThan(ctx, olderThan, selectPruneLimit) {
+	for attachments, err = m.state.DB.GetLocalUnattachedOlderThan(ctx, olderThan, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.state.DB.GetLocalUnattachedOlderThan(ctx, olderThan, selectPruneLimit) {
 		olderThan = attachments[len(attachments)-1].CreatedAt // use the created time of the last attachment in the slice as the next 'olderThan' value
 
 		for _, attachment := range attachments {
@@ -325,7 +325,7 @@ func (m *manager) deleteAttachment(ctx context.Context, attachment *gtsmodel.Med
 	}
 
 	// Delete attachment completely.
-	return m.db.DeleteByID(ctx, attachment.ID, attachment)
+	return m.state.DB.DeleteByID(ctx, attachment.ID, attachment)
 }
 
 func (m *manager) uncacheAttachment(ctx context.Context, attachment *gtsmodel.MediaAttachment) error {
@@ -337,14 +337,14 @@ func (m *manager) uncacheAttachment(ctx context.Context, attachment *gtsmodel.Me
 	attachment.UpdatedAt = time.Now()
 	cached := false
 	attachment.Cached = &cached
-	return m.db.UpdateByID(ctx, attachment, attachment.ID, "updated_at", "cached")
+	return m.state.DB.UpdateByID(ctx, attachment, attachment.ID, "updated_at", "cached")
 }
 
 func (m *manager) removeFiles(ctx context.Context, keys ...string) error {
 	errs := make(gtserror.MultiError, 0, len(keys))
 
 	for _, key := range keys {
-		if err := m.storage.Delete(ctx, key); err != nil && !errors.Is(err, storage.ErrNotFound) {
+		if err := m.state.Storage.Delete(ctx, key); err != nil && !errors.Is(err, storage.ErrNotFound) {
 			errs = append(errs, "storage error removing "+key+": "+err.Error())
 		}
 	}
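A pattern shared by every prune loop above is worth a note: rather than OFFSET paging, each iteration re-runs the same query with the last row's ID (or CreatedAt) as the next cursor, so paging stays correct even while rows are being deleted underneath it. A self-contained sketch of that keyset loop follows; it uses ascending string IDs for simplicity, while the real queries page over ULIDs/timestamps and may run in the opposite direction:

package main

import "fmt"

type attachment struct{ ID string }

// getPage stands in for m.state.DB.GetAvatarsAndHeaders(ctx, maxID, limit):
// it returns up to limit items whose ID sorts after maxID.
func getPage(all []attachment, maxID string, limit int) []attachment {
	var out []attachment
	for _, a := range all {
		if a.ID > maxID {
			out = append(out, a)
			if len(out) == limit {
				break
			}
		}
	}
	return out
}

func main() {
	all := []attachment{{"01A"}, {"01B"}, {"01C"}, {"01D"}, {"01E"}}

	maxID := "" // cursor seeded below every ID
	for page := getPage(all, maxID, 2); len(page) != 0; page = getPage(all, maxID, 2) {
		maxID = page[len(page)-1].ID // next cursor = last item of this page
		fmt.Println("pruning page:", page)
	}
}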
@@ -313,7 +313,7 @@ func (suite *PruneTestSuite) TestUncacheAndRecache() {
 		testStatusAttachment,
 		testHeader,
 	} {
-		processingRecache, err := suite.manager.RecacheMedia(ctx, data, nil, original.ID)
+		processingRecache, err := suite.manager.PreProcessMediaRecache(ctx, data, nil, original.ID)
 		suite.NoError(err)
 
 		// synchronously load the recached attachment
@@ -47,7 +47,7 @@ func (m *manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 	// page through emojis 20 at a time, looking for those with missing images
 	for {
 		// Fetch next block of emojis from database
-		emojis, err := m.db.GetEmojis(ctx, domain, false, true, "", maxShortcodeDomain, "", 20)
+		emojis, err := m.state.DB.GetEmojis(ctx, domain, false, true, "", maxShortcodeDomain, "", 20)
 		if err != nil {
 			if !errors.Is(err, db.ErrNoEntries) {
 				// an actual error has occurred
@@ -86,7 +86,7 @@ func (m *manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 
 	var totalRefetched int
 	for _, emojiID := range refetchIDs {
-		emoji, err := m.db.GetEmojiByID(ctx, emojiID)
+		emoji, err := m.state.DB.GetEmojiByID(ctx, emojiID)
 		if err != nil {
 			// this shouldn't happen--since we know we have the emoji--so return if it does
 			return 0, fmt.Errorf("error getting emoji %s: %w", emojiID, err)
@@ -108,7 +108,7 @@ func (m *manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 			return dereferenceMedia(ctx, emojiImageIRI)
 		}
 
-		processingEmoji, err := m.ProcessEmoji(ctx, dataFunc, nil, emoji.Shortcode, emoji.ID, emoji.URI, &AdditionalEmojiInfo{
+		processingEmoji, err := m.PreProcessEmoji(ctx, dataFunc, nil, emoji.Shortcode, emoji.ID, emoji.URI, &AdditionalEmojiInfo{
 			Domain:               &emoji.Domain,
 			ImageRemoteURL:       &emoji.ImageRemoteURL,
 			ImageStaticRemoteURL: &emoji.ImageStaticRemoteURL,
@@ -133,13 +133,13 @@ func (m *manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 }
 
 func (m *manager) emojiRequiresRefetch(ctx context.Context, emoji *gtsmodel.Emoji) (bool, error) {
-	if has, err := m.storage.Has(ctx, emoji.ImagePath); err != nil {
+	if has, err := m.state.Storage.Has(ctx, emoji.ImagePath); err != nil {
 		return false, err
 	} else if !has {
 		return true, nil
 	}
 
-	if has, err := m.storage.Has(ctx, emoji.ImageStaticPath); err != nil {
+	if has, err := m.state.Storage.Has(ctx, emoji.ImageStaticPath); err != nil {
 		return false, err
 	} else if !has {
 		return true, nil
@@ -194,7 +194,7 @@ func (p *processor) UpdateAvatar(ctx context.Context, avatar *multipart.FileHead
 		Description: description,
 	}
 
-	processingMedia, err := p.mediaManager.ProcessMedia(ctx, dataFunc, nil, accountID, ai)
+	processingMedia, err := p.mediaManager.PreProcessMedia(ctx, dataFunc, nil, accountID, ai)
 	if err != nil {
 		return nil, fmt.Errorf("UpdateAvatar: error processing avatar: %s", err)
 	}
@@ -221,10 +221,7 @@ func (p *processor) UpdateHeader(ctx context.Context, header *multipart.FileHead
 		Header: &isHeader,
 	}
 
-	processingMedia, err := p.mediaManager.ProcessMedia(ctx, dataFunc, nil, accountID, ai)
-	if err != nil {
-		return nil, fmt.Errorf("UpdateHeader: error processing header: %s", err)
-	}
+	processingMedia, err := p.mediaManager.PreProcessMedia(ctx, dataFunc, nil, accountID, ai)
 	if err != nil {
 		return nil, fmt.Errorf("UpdateHeader: error processing header: %s", err)
 	}
@@ -70,7 +70,7 @@ func (p *processor) EmojiCreate(ctx context.Context, account *gtsmodel.Account,
 		}
 	}
 
-	processingEmoji, err := p.mediaManager.ProcessEmoji(ctx, data, nil, form.Shortcode, emojiID, emojiURI, ai, false)
+	processingEmoji, err := p.mediaManager.PreProcessEmoji(ctx, data, nil, form.Shortcode, emojiID, emojiURI, ai, false)
 	if err != nil {
 		return nil, gtserror.NewErrorInternalError(fmt.Errorf("error processing emoji: %s", err), "error processing emoji")
 	}
@@ -107,7 +107,7 @@ func (p *processor) emojiUpdateCopy(ctx context.Context, emoji *gtsmodel.Emoji,
 		}
 	}
 
-	processingEmoji, err := p.mediaManager.ProcessEmoji(ctx, data, nil, *shortcode, newEmojiID, newEmojiURI, ai, false)
+	processingEmoji, err := p.mediaManager.PreProcessEmoji(ctx, data, nil, *shortcode, newEmojiID, newEmojiURI, ai, false)
 	if err != nil {
 		err = fmt.Errorf("emojiUpdateCopy: error processing emoji %s: %s", emoji.ID, err)
 		return nil, gtserror.NewErrorInternalError(err)
@@ -213,7 +213,7 @@ func (p *processor) emojiUpdateModify(ctx context.Context, emoji *gtsmodel.Emoji
 		}
 	}
 
-	processingEmoji, err := p.mediaManager.ProcessEmoji(ctx, data, nil, emoji.Shortcode, emoji.ID, emoji.URI, ai, true)
+	processingEmoji, err := p.mediaManager.PreProcessEmoji(ctx, data, nil, emoji.Shortcode, emoji.ID, emoji.URI, ai, true)
 	if err != nil {
 		err = fmt.Errorf("emojiUpdateModify: error processing emoji %s: %s", emoji.ID, err)
 		return nil, gtserror.NewErrorInternalError(err)
@@ -42,7 +42,7 @@ func (p *processor) Create(ctx context.Context, account *gtsmodel.Account, form
 	}
 
 	// process the media attachment and load it immediately
-	media, err := p.mediaManager.ProcessMedia(ctx, data, nil, account.ID, &media.AdditionalMediaInfo{
+	media, err := p.mediaManager.PreProcessMedia(ctx, data, nil, account.ID, &media.AdditionalMediaInfo{
 		Description: &form.Description,
 		FocusX:      &focusX,
 		FocusY:      &focusY,
@@ -159,7 +159,7 @@ func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount
 	}
 
 	// Start recaching this media with the prepared data function.
-	processingMedia, err := p.mediaManager.RecacheMedia(ctx, dataFn, nil, wantedMediaID)
+	processingMedia, err := p.mediaManager.PreProcessMediaRecache(ctx, dataFn, nil, wantedMediaID)
 	if err != nil {
 		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error recaching media: %s", err))
 	}
@@ -117,7 +117,7 @@ func (r *router) Start() {
 	// "debug" or "debugenv" build-tag is set pprof stats will be served
 	// at the standard "/debug/pprof" URL.
 	r.srv.Handler = debug.WithPprof(r.srv.Handler)
-	if debug.DEBUG() {
+	if debug.DEBUG {
 		// Profiling requires timeouts longer than 30s, so reset these.
 		log.Warn("resetting http.Server{} timeout to support profiling")
 		r.srv.ReadTimeout = 0
@@ -21,6 +21,8 @@
 import (
 	"github.com/superseriousbusiness/gotosocial/internal/cache"
 	"github.com/superseriousbusiness/gotosocial/internal/db"
+	"github.com/superseriousbusiness/gotosocial/internal/storage"
+	"github.com/superseriousbusiness/gotosocial/internal/workers"
 )
 
 // State provides a means of dependency injection and sharing of resources
@@ -36,6 +38,12 @@ type State struct {
 	// DB provides access to the database.
 	DB db.DB
 
+	// Storage provides access to the storage driver.
+	Storage *storage.Driver
+
+	// Workers provides access to this state's collection of worker pools.
+	Workers workers.Workers
+
 	// prevent pass-by-value.
 	_ nocopy
 }
internal/workers/workers.go (new file, 90 lines)
@@ -0,0 +1,90 @@
+/*
+   GoToSocial
+   Copyright (C) 2021-2023 GoToSocial Authors admin@gotosocial.org
+
+   This program is free software: you can redistribute it and/or modify
+   it under the terms of the GNU Affero General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU Affero General Public License for more details.
+
+   You should have received a copy of the GNU Affero General Public License
+   along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package workers
+
+import (
+	"log"
+	"runtime"
+
+	"codeberg.org/gruf/go-runners"
+	"codeberg.org/gruf/go-sched"
+)
+
+type Workers struct {
+	// Main task scheduler instance.
+	Scheduler sched.Scheduler
+
+	// Processor / federator worker pools.
+	// ClientAPI runners.WorkerPool
+	// Federator runners.WorkerPool
+
+	// Media manager worker pools.
+	Media runners.WorkerPool
+
+	// prevent pass-by-value.
+	_ nocopy
+}
+
+// Start will start all of the contained worker pools (and global scheduler).
+func (w *Workers) Start() {
+	// Get currently set GOMAXPROCS.
+	maxprocs := runtime.GOMAXPROCS(0)
+
+	tryUntil("starting scheduler", 5, func() bool {
+		return w.Scheduler.Start(nil)
+	})
+
+	// tryUntil("starting client API workerpool", 5, func() bool {
+	// 	return w.ClientAPI.Start(4*maxprocs, 400*maxprocs)
+	// })
+
+	// tryUntil("starting federator workerpool", 5, func() bool {
+	// 	return w.Federator.Start(4*maxprocs, 400*maxprocs)
+	// })
+
+	tryUntil("starting media workerpool", 5, func() bool {
+		return w.Media.Start(8*maxprocs, 80*maxprocs)
+	})
+}
+
+// Stop will stop all of the contained worker pools (and global scheduler).
+func (w *Workers) Stop() {
+	tryUntil("stopping scheduler", 5, w.Scheduler.Stop)
+	// tryUntil("stopping client API workerpool", 5, w.ClientAPI.Stop)
+	// tryUntil("stopping federator workerpool", 5, w.Federator.Stop)
+	tryUntil("stopping media workerpool", 5, w.Media.Stop)
+}
+
+// nocopy when embedded will signal linter to
+// error on pass-by-value of parent struct.
+type nocopy struct{}
+
+func (*nocopy) Lock() {}
+
+func (*nocopy) Unlock() {}
+
+// tryUntil will attempt to call 'do' for 'count' attempts, before panicking with 'msg'.
+func tryUntil(msg string, count int, do func() bool) {
+	for i := 0; i < count; i++ {
+		if do() {
+			return
+		}
+	}
+	log.Panicf("failed %s after %d tries", msg, count)
+}
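The nocopy field at the bottom of the new file is a lint guard rather than runtime machinery: because the embedded zero-size type has Lock/Unlock methods, `go vet`'s copylocks check flags any pass-by-value of the parent struct, exactly as it does for sync.Mutex. A self-contained reproduction (vet's exact wording is approximate):

package main

// nocopy is zero-size, but its pointer-receiver Lock/Unlock methods make it
// a sync.Locker, which `go vet`'s copylocks analysis refuses to see copied.
type nocopy struct{}

func (*nocopy) Lock()   {}
func (*nocopy) Unlock() {}

type Workers struct {
	_ nocopy // prevent pass-by-value
}

// go vet: "use passes lock by value: main.Workers contains main.nocopy"
func use(w Workers) { _ = w }

func main() {
	var w Workers
	use(w) // compiles fine; only `go vet` complains
}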
@@ -27,5 +27,5 @@
 
 // NewTestFederatingDB returns a federating DB with the underlying db
 func NewTestFederatingDB(db db.DB, fedWorker *concurrency.WorkerPool[messages.FromFederator]) federatingdb.DB {
-	return federatingdb.New(db, fedWorker)
+	return federatingdb.New(db, fedWorker, NewTestTypeConverter(db))
 }
@@ -21,14 +21,15 @@
 import (
 	"github.com/superseriousbusiness/gotosocial/internal/db"
 	"github.com/superseriousbusiness/gotosocial/internal/media"
+	"github.com/superseriousbusiness/gotosocial/internal/state"
 	"github.com/superseriousbusiness/gotosocial/internal/storage"
 )
 
 // NewTestMediaManager returns a media handler with the default test config, and the given db and storage.
 func NewTestMediaManager(db db.DB, storage *storage.Driver) media.Manager {
-	m, err := media.NewManager(db, storage)
-	if err != nil {
-		panic(err)
-	}
-	return m
+	var state state.State
+	state.DB = db
+	state.Storage = storage
+	state.Workers.Start()
+	return media.NewManager(&state)
 }
vendor/codeberg.org/gruf/go-bitutil/abs.go (generated, vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
+package bitutil
+
+// Abs8 returns the absolute value of i (calculated without branching).
+func Abs8(i int8) int8 {
+	const bits = 8
+	u := uint64(i >> (bits - 1))
+	return (i ^ int8(u)) + int8(u&1)
+}
+
+// Abs16 returns the absolute value of i (calculated without branching).
+func Abs16(i int16) int16 {
+	const bits = 16
+	u := uint64(i >> (bits - 1))
+	return (i ^ int16(u)) + int16(u&1)
+}
+
+// Abs32 returns the absolute value of i (calculated without branching).
+func Abs32(i int32) int32 {
+	const bits = 32
+	u := uint64(i >> (bits - 1))
+	return (i ^ int32(u)) + int32(u&1)
+}
+
+// Abs64 returns the absolute value of i (calculated without branching).
+func Abs64(i int64) int64 {
+	const bits = 64
+	u := uint64(i >> (bits - 1))
+	return (i ^ int64(u)) + int64(u&1)
+}
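These Abs helpers work because `i >> (bits-1)` is an arithmetic shift: it smears the sign bit into a mask that is all ones for negatives and all zeros otherwise, so XOR with the mask plus the mask's low bit performs two's-complement negation with no branch. A worked check of the 8-bit case, reimplemented here for illustration:

package main

import "fmt"

// abs8 reproduces bitutil.Abs8: for i = -5, i>>7 is -1 (mask of all ones),
// so (i ^ -1) flips every bit (giving 4) and the +1 completes the negation;
// for i >= 0 the mask is 0 and the expression is the identity.
func abs8(i int8) int8 {
	const bits = 8
	u := uint64(i >> (bits - 1))
	return (i ^ int8(u)) + int8(u&1)
}

func main() {
	fmt.Println(abs8(-5), abs8(5), abs8(0)) // 5 5 0

	// Shared caveat of any two's-complement abs: the minimum value has no
	// positive counterpart, so it maps back to itself.
	fmt.Println(abs8(-128)) // -128
}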
vendor/codeberg.org/gruf/go-bitutil/flag.go (generated, vendored; 1246 lines changed, diff suppressed because it is too large)

vendor/codeberg.org/gruf/go-bitutil/flag.tpl (generated, vendored; 57 lines changed)
@ -2,14 +2,13 @@ package bitutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
|
"unsafe"
|
||||||
"codeberg.org/gruf/go-byteutil"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
{{ range $idx, $size := . }}
|
{{ range $idx, $size := . }}
|
||||||
|
|
||||||
// Flags{{ $size.Size }} is a type-casted unsigned integer with helper
|
// Flags{{ $size.Size }} is a type-casted unsigned integer with helper
|
||||||
// methods for easily managing up to {{ $size.Size }} bit flags.
|
// methods for easily managing up to {{ $size.Size }} bit-flags.
|
||||||
type Flags{{ $size.Size }} uint{{ $size.Size }}
|
type Flags{{ $size.Size }} uint{{ $size.Size }}
|
||||||
|
|
||||||
// Get will fetch the flag bit value at index 'bit'.
|
// Get will fetch the flag bit value at index 'bit'.
|
||||||
|
@ -54,34 +53,58 @@ func (f Flags{{ $size.Size }}) Unset{{ $idx }}() Flags{{ $size.Size }} {
|
||||||
|
|
||||||
// String returns a human readable representation of Flags{{ $size.Size }}.
|
// String returns a human readable representation of Flags{{ $size.Size }}.
|
||||||
func (f Flags{{ $size.Size }}) String() string {
|
func (f Flags{{ $size.Size }}) String() string {
|
||||||
var val bool
|
var (
|
||||||
var buf byteutil.Buffer
|
i int
|
||||||
|
val bool
|
||||||
|
buf []byte
|
||||||
|
)
|
||||||
|
|
||||||
|
// Make a prealloc est. based on longest-possible value
|
||||||
|
const prealloc = 1+(len("false ")*{{ $size.Size }})-1+1
|
||||||
|
buf = make([]byte, prealloc)
|
||||||
|
|
||||||
|
buf[i] = '{'
|
||||||
|
i++
|
||||||
|
|
||||||
buf.WriteByte('{')
|
|
||||||
{{ range $idx := .Bits }}
|
{{ range $idx := .Bits }}
|
||||||
val = f.Get{{ $idx }}()
|
val = f.Get{{ $idx }}()
|
||||||
buf.WriteString(bool2str(val) + " ")
|
i += copy(buf[i:], bool2str(val))
|
||||||
|
buf[i] = ' '
|
||||||
|
i++
|
||||||
{{ end }}
|
{{ end }}
|
||||||
buf.Truncate(1)
|
|
||||||
buf.WriteByte('}')
|
|
||||||
|
|
||||||
return buf.String()
|
buf[i-1] = '}'
|
||||||
|
buf = buf[:i]
|
||||||
|
|
||||||
|
return *(*string)(unsafe.Pointer(&buf))
|
||||||
}
|
}
|
||||||
|
|
||||||
// GoString returns a more verbose human readable representation of Flags{{ $size.Size }}.
|
// GoString returns a more verbose human readable representation of Flags{{ $size.Size }}.
|
||||||
func (f Flags{{ $size.Size }})GoString() string {
|
func (f Flags{{ $size.Size }})GoString() string {
|
||||||
var val bool
|
var (
|
||||||
var buf byteutil.Buffer
|
i int
|
||||||
|
val bool
|
||||||
|
buf []byte
|
||||||
|
)
|
||||||
|
|
||||||
|
// Make a prealloc est. based on longest-possible value
|
||||||
|
const prealloc = len("bitutil.Flags{{ $size.Size }}{")+(len("{{ sub $size.Size 1 }}=false ")*{{ $size.Size }})-1+1
|
||||||
|
buf = make([]byte, prealloc)
|
||||||
|
|
||||||
|
i += copy(buf[i:], "bitutil.Flags{{ $size.Size }}{")
|
||||||
|
|
||||||
buf.WriteString("bitutil.Flags{{ $size.Size }}{")
|
|
||||||
{{ range $idx := .Bits }}
|
{{ range $idx := .Bits }}
|
||||||
val = f.Get{{ $idx }}()
|
val = f.Get{{ $idx }}()
|
||||||
buf.WriteString("{{ $idx }}="+bool2str(val)+" ")
|
i += copy(buf[i:], "{{ $idx }}=")
|
||||||
|
i += copy(buf[i:], bool2str(val))
|
||||||
|
buf[i] = ' '
|
||||||
|
i++
|
||||||
{{ end }}
|
{{ end }}
|
||||||
buf.Truncate(1)
|
|
||||||
buf.WriteByte('}')
|
|
||||||
|
|
||||||
return buf.String()
|
buf[i-1] = '}'
|
||||||
|
buf = buf[:i]
|
||||||
|
|
||||||
|
return *(*string)(unsafe.Pointer(&buf))
|
||||||
}
|
}
|
||||||
|
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
|
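The rewrite above swaps a growable byteutil.Buffer for a single preallocated byte slice plus a zero-copy []byte-to-string cast. A minimal standalone sketch of that pattern, assuming at least one flag; the flagString helper is illustrative, not the generated code:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// bool2str mirrors the helper the template relies on.
	func bool2str(v bool) string {
		if v {
			return "true"
		}
		return "false"
	}

	// flagString renders flags the way the generated String() methods do:
	// one preallocated buffer, manual copies, then a zero-copy conversion.
	func flagString(flags []bool) string {
		// Preallocate for the longest possible value:
		// '{' + n * "false " (last space becomes '}').
		prealloc := 1 + (len("false ") * len(flags)) - 1 + 1
		buf := make([]byte, prealloc)

		i := 0
		buf[i] = '{'
		i++
		for _, v := range flags {
			i += copy(buf[i:], bool2str(v))
			buf[i] = ' '
			i++
		}

		// Overwrite the trailing space with the closing brace.
		buf[i-1] = '}'
		buf = buf[:i]

		// Reinterpret the byte slice as a string without copying.
		// Only sound because buf is never written to again.
		return *(*string)(unsafe.Pointer(&buf))
	}

	func main() {
		fmt.Println(flagString([]bool{true, false, true})) // {true false true}
	}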
7	vendor/codeberg.org/gruf/go-debug/debug.go (generated, vendored)

@@ -4,14 +4,9 @@
 	_debug "runtime/debug"
 )
 
-// DEBUG returns whether debugging is enabled.
-func DEBUG() bool {
-	return debug
-}
-
 // Run will only call fn if DEBUG is enabled.
 func Run(fn func()) {
-	if debug {
+	if DEBUG {
 		fn()
 	}
 }
4	vendor/codeberg.org/gruf/go-debug/debug_env.go (generated, vendored)

@@ -5,5 +5,5 @@
 
 import "os"
 
-// check if debug env variable is set
-var debug = (os.Getenv("DEBUG") != "")
+// DEBUG returns whether debugging is enabled.
+var DEBUG = (os.Getenv("DEBUG") != "")
4	vendor/codeberg.org/gruf/go-debug/debug_off.go (generated, vendored)

@@ -3,5 +3,5 @@
 
 package debug
 
-// debug always off.
-const debug = false
+// DEBUG returns whether debugging is enabled.
+const DEBUG = false
4	vendor/codeberg.org/gruf/go-debug/debug_on.go (generated, vendored)

@@ -3,5 +3,5 @@
 
 package debug
 
-// debug always on.
-const debug = true
+// DEBUG returns whether debugging is enabled.
+const DEBUG = true
4	vendor/codeberg.org/gruf/go-debug/pprof_on.go (generated, vendored)

@@ -11,7 +11,7 @@
 
 // ServePprof will start an HTTP server serving /debug/pprof only if debug enabled.
 func ServePprof(addr string) error {
-	if !debug {
+	if !DEBUG {
 		// debug disabled in env
 		return nil
 	}
@@ -21,7 +21,7 @@ func ServePprof(addr string) error {
 
 // WithPprof will add /debug/pprof handling (provided by "net/http/pprof") only if debug enabled.
 func WithPprof(handler http.Handler) http.Handler {
-	if !debug {
+	if !DEBUG {
 		// debug disabled in env
 		return handler
 	}
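Exporting DEBUG directly (a const in the on/off builds, a package-level var in the env build) instead of hiding it behind a function means caller-side branches can be eliminated entirely at compile time in the const builds. A minimal consumer sketch, assuming the vendored module is importable as-is:

	package main

	import (
		"fmt"

		"codeberg.org/gruf/go-debug"
	)

	func main() {
		// With the debug_off build (const DEBUG = false) this whole
		// branch is dead code the compiler can drop; with the env
		// build it is decided once at startup from $DEBUG.
		if debug.DEBUG {
			fmt.Println("debug diagnostics enabled")
		}

		// Run only calls its argument when debugging is enabled.
		debug.Run(func() {
			fmt.Println("expensive debug-only check")
		})
	}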
28	vendor/codeberg.org/gruf/go-runners/pool.go (generated, vendored)

@@ -157,6 +157,34 @@ func (pool *WorkerPool) EnqueueCtx(ctx context.Context, fn WorkerFunc) bool {
 	}
 }
 
+// MustEnqueueCtx functionally performs similarly to WorkerPool.EnqueueCtx(), but in the case
+// that the provided <-ctx.Done() is closed, it is passed asynchronously to WorkerPool.Enqueue().
+// Return boolean indicates whether function was executed in time before <-ctx.Done() is closed.
+func (pool *WorkerPool) MustEnqueueCtx(ctx context.Context, fn WorkerFunc) (ok bool) {
+	// Check valid fn
+	if fn == nil {
+		return false
+	}
+
+	select {
+	case <-ctx.Done():
+		// We failed to add this entry to the worker queue before the
+		// incoming context was cancelled. So to ensure processing
+		// we simply queue it asynchronously and return early to caller.
+		go pool.Enqueue(fn)
+		return false
+
+	case <-pool.svc.Done():
+		// Pool ctx cancelled
+		fn(closedctx)
+		return false
+
+	case pool.fns <- fn:
+		// Placed fn in queue
+		return true
+	}
+}
+
 // EnqueueNow attempts Enqueue but returns false if not executed.
 func (pool *WorkerPool) EnqueueNow(fn WorkerFunc) bool {
 	// Check valid fn
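MustEnqueueCtx closes the gap where EnqueueCtx could silently drop work when the caller's context expired before the function reached the queue. A rough usage sketch; only the MustEnqueueCtx behaviour is taken from the diff above, while the Start(workers, queue)/Stop calls and their signatures are an assumption about the rest of the go-runners API:

	package main

	import (
		"context"
		"fmt"
		"time"

		"codeberg.org/gruf/go-runners"
	)

	func main() {
		var pool runners.WorkerPool
		pool.Start(4, 16) // assumed signature: Start(workers, queue)
		defer pool.Stop()

		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
		defer cancel()

		// If ctx expires before fn reaches the queue, MustEnqueueCtx
		// re-queues it asynchronously instead of dropping it; ok only
		// reports whether it was queued before the deadline.
		ok := pool.MustEnqueueCtx(ctx, func(ctx context.Context) {
			fmt.Println("processing media")
		})
		fmt.Println("queued before deadline:", ok)

		time.Sleep(100 * time.Millisecond) // crude wait so the demo can print
	}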
75	vendor/codeberg.org/gruf/go-runners/process.go (generated, vendored, new file)

@@ -0,0 +1,75 @@
+package runners
+
+import (
+	"fmt"
+	"sync"
+)
+
+// Processable defines a runnable process with error return
+// that can be passed to a Processor instance for managed running.
+type Processable func() error
+
+// Processor acts similarly to a sync.Once object, except that it is reusable. After
+// the first call to Process(), any further calls before this first has returned will
+// block until the first call has returned, and return the same error. This ensures
+// that only a single instance of it is ever running at any one time.
+type Processor struct {
+	mutex sync.Mutex
+	state uint32
+	wait  sync.WaitGroup
+	err   *error
+}
+
+// Process will process the given function if first-call, else blocking until
+// the first function has returned, returning the same error result.
+func (p *Processor) Process(proc Processable) (err error) {
+	// Acquire state lock.
+	p.mutex.Lock()
+
+	if p.state != 0 {
+		// Already running.
+		//
+		// Get current err ptr.
+		errPtr := p.err
+
+		// Wait until finish.
+		p.mutex.Unlock()
+		p.wait.Wait()
+		return *errPtr
+	}
+
+	// Reset error ptr.
+	p.err = new(error)
+
+	// Set started.
+	p.wait.Add(1)
+	p.state = 1
+	p.mutex.Unlock()
+
+	defer func() {
+		if r := recover(); r != nil {
+			if err != nil {
+				rOld := r // wrap the panic so we don't lose existing returned error
+				r = fmt.Errorf("panic occured after error %q: %v", err.Error(), rOld)
+			}
+
+			// Catch any panics and wrap as error.
+			err = fmt.Errorf("caught panic: %v", r)
+		}
+
+		// Store error.
+		*p.err = err
+
+		// Mark done.
+		p.wait.Done()
+
+		// Set stopped.
+		p.mutex.Lock()
+		p.state = 0
+		p.mutex.Unlock()
+	}()
+
+	// Run process.
+	err = proc()
+	return
+}
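Processor is the piece that lets the new media manager collapse duplicate concurrent processing of the same attachment: whichever caller arrives first runs the function, and every concurrent caller blocks and receives that same error. A small sketch of the behaviour; the zero value is usable as shown above:

	package main

	import (
		"fmt"
		"sync"
		"time"

		"codeberg.org/gruf/go-runners"
	)

	func main() {
		var proc runners.Processor
		var executions int

		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Concurrent callers share ONE execution of this function
				// (and its error). A later call made after it has returned
				// would run it again; Processor is reusable, unlike sync.Once.
				err := proc.Process(func() error {
					executions++ // only ever one instance running at a time
					time.Sleep(50 * time.Millisecond)
					return nil
				})
				fmt.Println("err:", err)
			}()
		}
		wg.Wait()
		fmt.Println("executions:", executions) // typically 1 here
	}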
124	vendor/codeberg.org/gruf/go-runners/run.go (generated, vendored, deleted)

@@ -1,124 +0,0 @@
-package runners
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	"codeberg.org/gruf/go-atomics"
-)
-
-// FuncRunner provides a means of managing long-running functions e.g. main logic loops.
-type FuncRunner struct {
-	// HandOff is the time after which a blocking function will be considered handed off
-	HandOff time.Duration
-
-	// ErrorHandler is the function that errors are passed to when encountered by the
-	// provided function. This can be used both for logging, and for error filtering
-	ErrorHandler func(err error) error
-
-	svc Service // underlying service to manage start/stop
-	err atomics.Error
-}
-
-// Go will attempt to run 'fn' asynchronously. The provided context is used to propagate requested
-// cancel if FuncRunner.Stop() is called. Any returned error will be passed to FuncRunner.ErrorHandler
-// for filtering/logging/etc. Any blocking functions will be waited on for FuncRunner.HandOff amount of
-// time before considering the function as handed off. Returned bool is success state, i.e. returns true
-// if function is successfully handed off or returns within hand off time with nil error.
-func (r *FuncRunner) Go(fn func(ctx context.Context) error) bool {
-	var has bool
-
-	done := make(chan struct{})
-
-	go func() {
-		var cancelled bool
-
-		has = r.svc.Run(func(ctx context.Context) {
-			// reset error
-			r.err.Store(nil)
-
-			// Run supplied func and set errror if returned
-			if err := Run(func() error { return fn(ctx) }); err != nil {
-				r.err.Store(err)
-			}
-
-			// signal done
-			close(done)
-
-			// Check if cancelled
-			select {
-			case <-ctx.Done():
-				cancelled = true
-			default:
-				cancelled = false
-			}
-		})
-
-		switch has {
-		// returned after starting
-		case true:
-			// Load set error
-			err := r.err.Load()
-
-			// filter out errors due FuncRunner.Stop() being called
-			if cancelled && errors.Is(err, context.Canceled) {
-				// filter out errors from FuncRunner.Stop() being called
-				r.err.Store(nil)
-			} else if err != nil && r.ErrorHandler != nil {
-				// pass any non-nil error to set handler
-				r.err.Store(r.ErrorHandler(err))
-			}
-
-		// already running
-		case false:
-			close(done)
-		}
-	}()
-
-	// get valid handoff to use
-	handoff := r.HandOff
-	if handoff < 1 {
-		handoff = time.Second * 5
-	}
-
-	select {
-	// handed off (long-run successful)
-	case <-time.After(handoff):
-		return true
-
-	// 'fn' returned, check error
-	case <-done:
-		return has
-	}
-}
-
-// Stop will cancel the context supplied to the running function.
-func (r *FuncRunner) Stop() bool {
-	return r.svc.Stop()
-}
-
-// Err returns the last-set error value.
-func (r *FuncRunner) Err() error {
-	return r.err.Load()
-}
-
-// Run will execute the supplied 'fn' catching any panics. Returns either function-returned error or formatted panic.
-func Run(fn func() error) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			if e, ok := r.(error); ok {
-				// wrap and preserve existing error
-				err = fmt.Errorf("caught panic: %w", e)
-			} else {
-				// simply create new error fromt iface
-				err = fmt.Errorf("caught panic: %v", r)
-			}
-		}
-	}()
-
-	// run supplied func
-	err = fn()
-	return
-}
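The deleted Run helper's recover-into-error idiom survives in process.go above. A self-contained sketch of the same idiom, standard library only:

	package main

	import (
		"errors"
		"fmt"
	)

	// run executes fn, converting any panic into a returned error,
	// mirroring the removed runners.Run helper.
	func run(fn func() error) (err error) {
		defer func() {
			if r := recover(); r != nil {
				if e, ok := r.(error); ok {
					// wrap and preserve an existing error value
					err = fmt.Errorf("caught panic: %w", e)
				} else {
					// otherwise create a new error from the value
					err = fmt.Errorf("caught panic: %v", r)
				}
			}
		}()
		// If fn panics, the deferred recover sets the named return.
		return fn()
	}

	func main() {
		fmt.Println(run(func() error { return nil }))                 // <nil>
		fmt.Println(run(func() error { panic("boom") }))              // caught panic: boom
		fmt.Println(run(func() error { panic(errors.New("bang")) }))  // caught panic: bang
	}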
52	vendor/codeberg.org/gruf/go-runners/service.go (generated, vendored)

@@ -9,7 +9,7 @@
 // changes and preventing multiple instances running. Also providing service state information.
 type Service struct {
 	state uint32        // 0=stopped, 1=running, 2=stopping
-	mutex sync.Mutex    // mutext protects overall state changes
+	mutex sync.Mutex    // mutex protects overall state changes
 	wait  sync.Mutex    // wait is used as a single-entity wait-group, only ever locked within 'mutex'
 	ctx   chan struct{} // ctx is the current context for running function (or nil if not running)
 }
@@ -62,6 +62,29 @@ func (svc *Service) GoRun(fn func(context.Context)) bool {
 	return true
 }
 
+// RunWait is functionally the same as .Run(), but blocks until the first instance of .Run() returns.
+func (svc *Service) RunWait(fn func(context.Context)) bool {
+	// Attempt to start the svc
+	ctx, ok := svc.doStart()
+	if !ok {
+		<-ctx // block
+		return false
+	}
+
+	defer func() {
+		// unlock single wait
+		svc.wait.Unlock()
+
+		// ensure stopped
+		_ = svc.Stop()
+	}()
+
+	// Run with context.
+	fn(CancelCtx(ctx))
+
+	return true
+}
+
 // Stop will attempt to stop the service, cancelling the running function's context. Immediately
 // returns false if not running, and true only after Service is fully stopped.
 func (svc *Service) Stop() bool {
@@ -108,28 +131,29 @@ func (svc *Service) doStart() (chan struct{}, bool) {
 	// Protect startup
 	svc.mutex.Lock()
 
-	if svc.state != 0 /* not stopped */ {
-		svc.mutex.Unlock()
-		return nil, false
-	}
-
-	// state started
-	svc.state = 1
-
 	if svc.ctx == nil {
 		// this will only have been allocated
 		// if svc.Done() was already called.
 		svc.ctx = make(chan struct{})
 	}
 
-	// Start the waiter
+	// Take our own ptr
+	ctx := svc.ctx
+
+	if svc.state != 0 {
+		// State was not stopped.
+		svc.mutex.Unlock()
+		return ctx, false
+	}
+
+	// Set started.
+	svc.state = 1
+
+	// Start waiter.
 	svc.wait.Lock()
 
-	// Take our own ptr
-	// and unlock state
-	ctx := svc.ctx
+	// Unlock and return
 	svc.mutex.Unlock()
 
 	return ctx, true
 }
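RunWait rounds out the Service API for callers that want Run semantics but need to block: the first caller runs fn until the service context is cancelled, while any overlapping caller just waits for that run to finish and returns false. A sketch of a single run, assuming the package is importable as vendored:

	package main

	import (
		"context"
		"fmt"
		"time"

		"codeberg.org/gruf/go-runners"
	)

	func main() {
		var svc runners.Service // zero value is ready to use

		// Stop the service (cancelling the run context) after a moment.
		go func() {
			time.Sleep(100 * time.Millisecond)
			svc.Stop()
		}()

		// Blocks here: fn runs until ctx is cancelled by Stop().
		// A concurrent second RunWait would block until this run
		// ends and then return false.
		ok := svc.RunWait(func(ctx context.Context) {
			<-ctx.Done()
			fmt.Println("service stopped")
		})
		fmt.Println("was first run:", ok)
	}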
63	vendor/codeberg.org/gruf/go-sched/scheduler.go (generated, vendored)

@@ -2,7 +2,6 @@
 
 import (
 	"context"
-	"runtime"
 	"sort"
 	"sync"
 	"sync/atomic"
@@ -55,11 +54,6 @@ func (sch *Scheduler) Start(gorun func(func())) bool {
 		sch.rgo = func(f func()) { go f() }
 	}
 
-	// Set GC finalizer to ensure scheduler stopped
-	runtime.SetFinalizer(sch, func(sch *Scheduler) {
-		_ = sch.Stop()
-	})
-
 	// Unlock start routine
 	block.Unlock()
 
@@ -80,11 +74,16 @@ func (sch *Scheduler) Stop() bool {
 	return sch.svc.Stop()
 }
 
-// Running will return whether Scheduler is running.
+// Running will return whether Scheduler is running (i.e. NOT stopped / stopping).
 func (sch *Scheduler) Running() bool {
 	return sch.svc.Running()
 }
 
+// Done returns a channel that's closed when Scheduler.Stop() is called.
+func (sch *Scheduler) Done() <-chan struct{} {
+	return sch.svc.Done()
+}
+
 // Schedule will add provided Job to the Scheduler, returning a cancel function.
 func (sch *Scheduler) Schedule(job *Job) (cancel func()) {
 	switch {
@@ -127,20 +126,26 @@ func (sch *Scheduler) Schedule(job *Job) (cancel func()) {
 // run is the main scheduler run routine, which runs for as long as ctx is valid.
 func (sch *Scheduler) run(ctx context.Context) {
 	var (
+		// now stores the current time, and will only be
+		// set when the timer channel is set to be the
+		// 'alwaysticks' channel. this allows minimizing
+		// the number of calls required to time.Now().
+		now time.Time
+
 		// timerset represents whether timer was running
 		// for a particular run of the loop. false means
-		// that tch == neverticks || tch == alwaysticks
+		// that tch == neverticks || tch == alwaysticks.
 		timerset bool
 
-		// timer tick channel (or a never-tick channel)
+		// timer tick channel (or always / never ticks).
 		tch <-chan time.Time
 
 		// timer notifies this main routine to wake when
-		// the job queued needs to be checked for executions
+		// the job queued needs to be checked for executions.
 		timer *time.Timer
 
 		// stopdrain will stop and drain the timer
-		// if it has been running (i.e. timerset == true)
+		// if it has been running (i.e. timerset == true).
 		stopdrain = func() {
 			if timerset && !timer.Stop() {
 				<-timer.C
@@ -148,33 +153,33 @@ func (sch *Scheduler) run(ctx context.Context) {
 		}
 	)
 
-	// Create a stopped timer
+	// Create a stopped timer.
 	timer = time.NewTimer(1)
 	<-timer.C
 
 	for {
-		// Reset timer state
+		// Reset timer state.
 		timerset = false
 
 		if len(sch.jobs) > 0 {
-			// Sort jobs by next occurring
+			// Get now time.
+			now = time.Now()
+
+			// Sort jobs by next occurring.
 			sort.Sort(byNext(sch.jobs))
 
-			// Get execution time
-			now := time.Now()
-
-			// Get next job time
+			// Get next job time.
 			next := sch.jobs[0].Next()
 
-			// If this job is _just_ about to be ready, we
-			// don't bother sleeping. It's wasted cycles only
-			// sleeping for some obscenely tiny amount of time
-			// we can't guarantee precision for.
+			// If this job is _just_ about to be ready, we don't bother
+			// sleeping. It's wasted cycles only sleeping for some obscenely
+			// tiny amount of time we can't guarantee precision for.
 			if until := next.Sub(now); until <= precision/1e3 {
-				// This job is behind schedule, set to always tick.
+				// This job is behind,
+				// set to always tick.
 				tch = alwaysticks
 			} else {
-				// Reset timer to period
+				// Reset timer to period.
 				timer.Reset(until)
 				tch = timer.C
 				timerset = true
@@ -191,12 +196,14 @@ func (sch *Scheduler) run(ctx context.Context) {
 			return
 
 		// Timer ticked, run scheduled
-		case now := <-tch:
+		case t := <-tch:
 			if !timerset {
-				// alwaysticks returns zero times
-				now = time.Now()
+				// 'alwaysticks' returns zero
+				// times, BUT 'now' will have
+				// been set during above sort.
+				t = now
			}
-			sch.schedule(now)
+			sch.schedule(t)
 
 		// Received update, handle job/id
 		case v := <-sch.jch:
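The stopdrain helper above is the standard idiom for reusing a time.Timer: Stop() reports false once the timer has already fired, and in that case the stale tick must be drained from the channel before the next Reset. A standalone demonstration of both branches:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Create a stopped timer, exactly as the scheduler's run() does.
		timer := time.NewTimer(1)
		<-timer.C
		timerset := false

		// stopdrain mirrors scheduler.go: stop the timer and, if it had
		// already fired, drain the stale tick so a later Reset starts
		// from an empty channel.
		stopdrain := func() {
			if timerset && !timer.Stop() {
				<-timer.C
			}
		}

		// Case 1: timer armed but not yet fired. Stop() wins, no drain.
		timer.Reset(time.Hour)
		timerset = true
		stopdrain()
		timerset = false
		fmt.Println("pending timer cancelled cleanly")

		// Case 2: timer already fired, tick sitting in the channel.
		// Stop() reports false and the stale tick is drained instead,
		// so a future receive cannot see an old timestamp.
		timer.Reset(time.Millisecond)
		timerset = true
		time.Sleep(10 * time.Millisecond)
		stopdrain()
		timerset = false
		fmt.Println("stale tick drained; timer safe to Reset again")
	}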
22	vendor/github.com/robfig/cron/v3/.gitignore (generated, vendored, deleted)

@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
1	vendor/github.com/robfig/cron/v3/.travis.yml (generated, vendored, deleted)

@@ -1 +0,0 @@
-language: go
21	vendor/github.com/robfig/cron/v3/LICENSE (generated, vendored, deleted)

@@ -1,21 +0,0 @@
-Copyright (C) 2012 Rob Figueiredo
-All Rights Reserved.
-
-MIT LICENSE
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
125	vendor/github.com/robfig/cron/v3/README.md (generated, vendored, deleted)

@@ -1,125 +0,0 @@
-[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
-[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)
-
-# cron
-
-Cron V3 has been released!
-
-To download the specific tagged release, run:
-
-    go get github.com/robfig/cron/v3@v3.0.0
-
-Import it in your program as:
-
-    import "github.com/robfig/cron/v3"
-
-It requires Go 1.11 or later due to usage of Go Modules.
-
-Refer to the documentation here:
-http://godoc.org/github.com/robfig/cron
-
-The rest of this document describes the the advances in v3 and a list of
-breaking changes for users that wish to upgrade from an earlier version.
-
-## Upgrading to v3 (June 2019)
-
-cron v3 is a major upgrade to the library that addresses all outstanding bugs,
-feature requests, and rough edges. It is based on a merge of master which
-contains various fixes to issues found over the years and the v2 branch which
-contains some backwards-incompatible features like the ability to remove cron
-jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
-the timezone support, and fixes a number of bugs.
-
-New features:
-
-- Support for Go modules. Callers must now import this library as
-  `github.com/robfig/cron/v3`, instead of `gopkg.in/...`
-
-- Fixed bugs:
-  - 0f01e6b parser: fix combining of Dow and Dom (#70)
-  - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
-  - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
-  - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
-  - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)
-
-- Standard cron spec parsing by default (first field is "minute"), with an easy
-  way to opt into the seconds field (quartz-compatible). Although, note that the
-  year field (optional in Quartz) is not supported.
-
-- Extensible, key/value logging via an interface that complies with
-  the https://github.com/go-logr/logr project.
-
-- The new Chain & JobWrapper types allow you to install "interceptors" to add
-  cross-cutting behavior like the following:
-  - Recover any panics from jobs
-  - Delay a job's execution if the previous run hasn't completed yet
-  - Skip a job's execution if the previous run hasn't completed yet
-  - Log each job's invocations
-  - Notification when jobs are completed
-
-It is backwards incompatible with both v1 and v2. These updates are required:
-
-- The v1 branch accepted an optional seconds field at the beginning of the cron
-  spec. This is non-standard and has led to a lot of confusion. The new default
-  parser conforms to the standard as described by [the Cron wikipedia page].
-
-  UPDATING: To retain the old behavior, construct your Cron with a custom
-  parser:
-
-      // Seconds field, required
-      cron.New(cron.WithSeconds())
-
-      // Seconds field, optional
-      cron.New(
-          cron.WithParser(
-              cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))
-
-- The Cron type now accepts functional options on construction rather than the
-  previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).
-
-  UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
-  updated to provide those values on construction.
-
-- CRON_TZ is now the recommended way to specify the timezone of a single
-  schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
-  will continue to be supported since it is unambiguous and easy to do so.
-
-  UPDATING: No update is required.
-
-- By default, cron will no longer recover panics in jobs that it runs.
-  Recovering can be surprising (see issue #192) and seems to be at odds with
-  typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
-  has been removed to accommodate the more general JobWrapper type.
-
-  UPDATING: To opt into panic recovery and configure the panic logger:
-
-      cron.New(cron.WithChain(
-          cron.Recover(logger),  // or use cron.DefaultLogger
-      ))
-
-- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
-  removed, since it is duplicative with the leveled logging.
-
-  UPDATING: Callers should use `WithLogger` and specify a logger that does not
-  discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
-
-      cron.New(
-          cron.WithLogger(cron.VerbosePrintfLogger(logger)))
-
-
-### Background - Cron spec format
-
-There are two cron spec formats in common usage:
-
-- The "standard" cron format, described on [the Cron wikipedia page] and used by
-  the cron Linux system utility.
-
-- The cron format used by [the Quartz Scheduler], commonly used for scheduled
-  jobs in Java software
-
-[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
-[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html
-
-The original version of this package included an optional "seconds" field, which
-made it incompatible with both of these formats. Now, the "standard" format is
-the default format accepted, and the Quartz format is opt-in.
92	vendor/github.com/robfig/cron/v3/chain.go (generated, vendored, deleted)

@@ -1,92 +0,0 @@
-package cron
-
-import (
-	"fmt"
-	"runtime"
-	"sync"
-	"time"
-)
-
-// JobWrapper decorates the given Job with some behavior.
-type JobWrapper func(Job) Job
-
-// Chain is a sequence of JobWrappers that decorates submitted jobs with
-// cross-cutting behaviors like logging or synchronization.
-type Chain struct {
-	wrappers []JobWrapper
-}
-
-// NewChain returns a Chain consisting of the given JobWrappers.
-func NewChain(c ...JobWrapper) Chain {
-	return Chain{c}
-}
-
-// Then decorates the given job with all JobWrappers in the chain.
-//
-// This:
-//     NewChain(m1, m2, m3).Then(job)
-// is equivalent to:
-//     m1(m2(m3(job)))
-func (c Chain) Then(j Job) Job {
-	for i := range c.wrappers {
-		j = c.wrappers[len(c.wrappers)-i-1](j)
-	}
-	return j
-}
-
-// Recover panics in wrapped jobs and log them with the provided logger.
-func Recover(logger Logger) JobWrapper {
-	return func(j Job) Job {
-		return FuncJob(func() {
-			defer func() {
-				if r := recover(); r != nil {
-					const size = 64 << 10
-					buf := make([]byte, size)
-					buf = buf[:runtime.Stack(buf, false)]
-					err, ok := r.(error)
-					if !ok {
-						err = fmt.Errorf("%v", r)
-					}
-					logger.Error(err, "panic", "stack", "...\n"+string(buf))
-				}
-			}()
-			j.Run()
-		})
-	}
-}
-
-// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
-// previous one is complete. Jobs running after a delay of more than a minute
-// have the delay logged at Info.
-func DelayIfStillRunning(logger Logger) JobWrapper {
-	return func(j Job) Job {
-		var mu sync.Mutex
-		return FuncJob(func() {
-			start := time.Now()
-			mu.Lock()
-			defer mu.Unlock()
-			if dur := time.Since(start); dur > time.Minute {
-				logger.Info("delay", "duration", dur)
-			}
-			j.Run()
-		})
-	}
-}
-
-// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
-// still running. It logs skips to the given logger at Info level.
-func SkipIfStillRunning(logger Logger) JobWrapper {
-	return func(j Job) Job {
-		var ch = make(chan struct{}, 1)
-		ch <- struct{}{}
-		return FuncJob(func() {
-			select {
-			case v := <-ch:
-				j.Run()
-				ch <- v
-			default:
-				logger.Info("skip")
-			}
-		})
-	}
-}
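chain.go's JobWrapper is a plain decorator pattern, and the reversed loop in Then is what makes NewChain(m1, m2, m3).Then(job) behave as m1(m2(m3(job))). A self-contained sketch of the same mechanics, with local Job/FuncJob types mirroring the removed ones:

	package main

	import "fmt"

	// Job and JobWrapper mirror the (now removed) robfig/cron types:
	// a wrapper decorates a Job with cross-cutting behaviour.
	type Job interface{ Run() }

	type FuncJob func()

	func (f FuncJob) Run() { f() }

	type JobWrapper func(Job) Job

	// then applies wrappers so that chain-style ordering holds:
	// then(job, w1, w2) == w1(w2(job)).
	func then(j Job, wrappers ...JobWrapper) Job {
		for i := range wrappers {
			j = wrappers[len(wrappers)-i-1](j)
		}
		return j
	}

	func logged(name string) JobWrapper {
		return func(j Job) Job {
			return FuncJob(func() {
				fmt.Println("enter", name)
				j.Run()
				fmt.Println("leave", name)
			})
		}
	}

	func main() {
		job := then(FuncJob(func() { fmt.Println("work") }),
			logged("outer"), logged("inner"))
		job.Run()
		// Output:
		// enter outer
		// enter inner
		// work
		// leave inner
		// leave outer
	}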
27	vendor/github.com/robfig/cron/v3/constantdelay.go (generated, vendored, deleted)

@@ -1,27 +0,0 @@
-package cron
-
-import "time"
-
-// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
-// It does not support jobs more frequent than once a second.
-type ConstantDelaySchedule struct {
-	Delay time.Duration
-}
-
-// Every returns a crontab Schedule that activates once every duration.
-// Delays of less than a second are not supported (will round up to 1 second).
-// Any fields less than a Second are truncated.
-func Every(duration time.Duration) ConstantDelaySchedule {
-	if duration < time.Second {
-		duration = time.Second
-	}
-	return ConstantDelaySchedule{
-		Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
-	}
-}
-
-// Next returns the next time this should be run.
-// This rounds so that the next activation time will be on the second.
-func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
-	return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
-}
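Every's arithmetic is easy to mis-read: it clamps to a one-second minimum, strips any sub-second component from the delay, and Next then lands activations on whole-second boundaries. A quick check of all three; the every helper just reproduces the deleted code above:

	package main

	import (
		"fmt"
		"time"
	)

	// every reproduces cron.Every's delay normalisation: clamp to >= 1s,
	// then strip any sub-second component.
	func every(d time.Duration) time.Duration {
		if d < time.Second {
			d = time.Second
		}
		return d - time.Duration(d.Nanoseconds())%time.Second
	}

	func main() {
		fmt.Println(every(500 * time.Millisecond))                  // 1s (clamped up)
		fmt.Println(every(90*time.Second + 300*time.Millisecond))   // 1m30s (truncated)

		// Next rounds the activation onto the second boundary by
		// subtracting the current sub-second offset, as the deleted
		// ConstantDelaySchedule.Next does:
		t := time.Date(2023, 2, 6, 12, 0, 0, 250000000, time.UTC)
		next := t.Add(every(5*time.Minute) - time.Duration(t.Nanosecond())*time.Nanosecond)
		fmt.Println(next) // 2023-02-06 12:05:00 +0000 UTC
	}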
355	vendor/github.com/robfig/cron/v3/cron.go (generated, vendored, deleted)

@@ -1,355 +0,0 @@
-package cron
-
-import (
-	"context"
-	"sort"
-	"sync"
-	"time"
-)
-
-// Cron keeps track of any number of entries, invoking the associated func as
-// specified by the schedule. It may be started, stopped, and the entries may
-// be inspected while running.
-type Cron struct {
-	entries   []*Entry
-	chain     Chain
-	stop      chan struct{}
-	add       chan *Entry
-	remove    chan EntryID
-	snapshot  chan chan []Entry
-	running   bool
-	logger    Logger
-	runningMu sync.Mutex
-	location  *time.Location
-	parser    ScheduleParser
-	nextID    EntryID
-	jobWaiter sync.WaitGroup
-}
-
-// ScheduleParser is an interface for schedule spec parsers that return a Schedule
-type ScheduleParser interface {
-	Parse(spec string) (Schedule, error)
-}
-
-// Job is an interface for submitted cron jobs.
-type Job interface {
-	Run()
-}
-
-// Schedule describes a job's duty cycle.
-type Schedule interface {
-	// Next returns the next activation time, later than the given time.
-	// Next is invoked initially, and then each time the job is run.
-	Next(time.Time) time.Time
-}
-
-// EntryID identifies an entry within a Cron instance
-type EntryID int
-
-// Entry consists of a schedule and the func to execute on that schedule.
-type Entry struct {
-	// ID is the cron-assigned ID of this entry, which may be used to look up a
-	// snapshot or remove it.
-	ID EntryID
-
-	// Schedule on which this job should be run.
-	Schedule Schedule
-
-	// Next time the job will run, or the zero time if Cron has not been
-	// started or this entry's schedule is unsatisfiable
-	Next time.Time
-
-	// Prev is the last time this job was run, or the zero time if never.
-	Prev time.Time
-
-	// WrappedJob is the thing to run when the Schedule is activated.
-	WrappedJob Job
-
-	// Job is the thing that was submitted to cron.
-	// It is kept around so that user code that needs to get at the job later,
-	// e.g. via Entries() can do so.
-	Job Job
-}
-
-// Valid returns true if this is not the zero entry.
-func (e Entry) Valid() bool { return e.ID != 0 }
-
-// byTime is a wrapper for sorting the entry array by time
-// (with zero time at the end).
-type byTime []*Entry
-
-func (s byTime) Len() int      { return len(s) }
-func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byTime) Less(i, j int) bool {
-	// Two zero times should return false.
-	// Otherwise, zero is "greater" than any other time.
-	// (To sort it at the end of the list.)
-	if s[i].Next.IsZero() {
-		return false
-	}
-	if s[j].Next.IsZero() {
-		return true
-	}
-	return s[i].Next.Before(s[j].Next)
-}
-
-// New returns a new Cron job runner, modified by the given options.
-//
-// Available Settings
-//
-//   Time Zone
-//     Description: The time zone in which schedules are interpreted
-//     Default:     time.Local
-//
-//   Parser
-//     Description: Parser converts cron spec strings into cron.Schedules.
-//     Default:     Accepts this spec: https://en.wikipedia.org/wiki/Cron
-//
-//   Chain
-//     Description: Wrap submitted jobs to customize behavior.
-//     Default:     A chain that recovers panics and logs them to stderr.
-//
-// See "cron.With*" to modify the default behavior.
-func New(opts ...Option) *Cron {
-	c := &Cron{
-		entries:   nil,
-		chain:     NewChain(),
-		add:       make(chan *Entry),
-		stop:      make(chan struct{}),
-		snapshot:  make(chan chan []Entry),
-		remove:    make(chan EntryID),
-		running:   false,
-		runningMu: sync.Mutex{},
-		logger:    DefaultLogger,
-		location:  time.Local,
-		parser:    standardParser,
-	}
-	for _, opt := range opts {
-		opt(c)
-	}
-	return c
-}
-
-// FuncJob is a wrapper that turns a func() into a cron.Job
-type FuncJob func()
-
-func (f FuncJob) Run() { f() }
-
-// AddFunc adds a func to the Cron to be run on the given schedule.
-// The spec is parsed using the time zone of this Cron instance as the default.
-// An opaque ID is returned that can be used to later remove it.
-func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
-	return c.AddJob(spec, FuncJob(cmd))
-}
-
-// AddJob adds a Job to the Cron to be run on the given schedule.
-// The spec is parsed using the time zone of this Cron instance as the default.
-// An opaque ID is returned that can be used to later remove it.
-func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
-	schedule, err := c.parser.Parse(spec)
-	if err != nil {
-		return 0, err
-	}
-	return c.Schedule(schedule, cmd), nil
-}
-
-// Schedule adds a Job to the Cron to be run on the given schedule.
-// The job is wrapped with the configured Chain.
-func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
-	c.runningMu.Lock()
-	defer c.runningMu.Unlock()
-	c.nextID++
-	entry := &Entry{
-		ID:         c.nextID,
-		Schedule:   schedule,
-		WrappedJob: c.chain.Then(cmd),
-		Job:        cmd,
-	}
-	if !c.running {
-		c.entries = append(c.entries, entry)
-	} else {
-		c.add <- entry
-	}
-	return entry.ID
-}
-
-// Entries returns a snapshot of the cron entries.
-func (c *Cron) Entries() []Entry {
-	c.runningMu.Lock()
-	defer c.runningMu.Unlock()
-	if c.running {
-		replyChan := make(chan []Entry, 1)
-		c.snapshot <- replyChan
-		return <-replyChan
-	}
-	return c.entrySnapshot()
-}
-
-// Location gets the time zone location
-func (c *Cron) Location() *time.Location {
-	return c.location
-}
-
-// Entry returns a snapshot of the given entry, or nil if it couldn't be found.
-func (c *Cron) Entry(id EntryID) Entry {
-	for _, entry := range c.Entries() {
-		if id == entry.ID {
-			return entry
-		}
-	}
-	return Entry{}
-}
-
-// Remove an entry from being run in the future.
-func (c *Cron) Remove(id EntryID) {
-	c.runningMu.Lock()
-	defer c.runningMu.Unlock()
-	if c.running {
-		c.remove <- id
-	} else {
-		c.removeEntry(id)
-	}
-}
-
-// Start the cron scheduler in its own goroutine, or no-op if already started.
-func (c *Cron) Start() {
-	c.runningMu.Lock()
-	defer c.runningMu.Unlock()
-	if c.running {
-		return
-	}
-	c.running = true
-	go c.run()
-}
-
-// Run the cron scheduler, or no-op if already running.
-func (c *Cron) Run() {
-	c.runningMu.Lock()
-	if c.running {
-		c.runningMu.Unlock()
-		return
-	}
-	c.running = true
-	c.runningMu.Unlock()
-	c.run()
-}
-
-// run the scheduler.. this is private just due to the need to synchronize
-// access to the 'running' state variable.
-func (c *Cron) run() {
-	c.logger.Info("start")
-
-	// Figure out the next activation times for each entry.
-	now := c.now()
-	for _, entry := range c.entries {
-		entry.Next = entry.Schedule.Next(now)
-		c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
-	}
-
-	for {
-		// Determine the next entry to run.
-		sort.Sort(byTime(c.entries))
-
-		var timer *time.Timer
-		if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
-			// If there are no entries yet, just sleep - it still handles new entries
-			// and stop requests.
-			timer = time.NewTimer(100000 * time.Hour)
-		} else {
-			timer = time.NewTimer(c.entries[0].Next.Sub(now))
-		}
-
-		for {
-			select {
-			case now = <-timer.C:
-				now = now.In(c.location)
-				c.logger.Info("wake", "now", now)
-
-				// Run every entry whose next time was less than now
-				for _, e := range c.entries {
-					if e.Next.After(now) || e.Next.IsZero() {
-						break
-					}
-					c.startJob(e.WrappedJob)
-					e.Prev = e.Next
-					e.Next = e.Schedule.Next(now)
-					c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
-				}
-
-			case newEntry := <-c.add:
-				timer.Stop()
-				now = c.now()
-				newEntry.Next = newEntry.Schedule.Next(now)
-				c.entries = append(c.entries, newEntry)
-				c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)
-
-			case replyChan := <-c.snapshot:
-				replyChan <- c.entrySnapshot()
-				continue
-
-			case <-c.stop:
-				timer.Stop()
-				c.logger.Info("stop")
-				return
-
-			case id := <-c.remove:
-				timer.Stop()
-				now = c.now()
-				c.removeEntry(id)
-				c.logger.Info("removed", "entry", id)
-			}
-
-			break
-		}
-	}
-}
-
-// startJob runs the given job in a new goroutine.
-func (c *Cron) startJob(j Job) {
-	c.jobWaiter.Add(1)
-	go func() {
-		defer c.jobWaiter.Done()
-		j.Run()
-	}()
-}
-
-// now returns current time in c location
-func (c *Cron) now() time.Time {
-	return time.Now().In(c.location)
-}
-
-// Stop stops the cron scheduler if it is running; otherwise it does nothing.
-// A context is returned so the caller can wait for running jobs to complete.
-func (c *Cron) Stop() context.Context {
-	c.runningMu.Lock()
-	defer c.runningMu.Unlock()
-	if c.running {
-		c.stop <- struct{}{}
-		c.running = false
-	}
-	ctx, cancel := context.WithCancel(context.Background())
-	go func() {
-		c.jobWaiter.Wait()
-		cancel()
-	}()
-	return ctx
-}
-
-// entrySnapshot returns a copy of the current cron entry list.
-func (c *Cron) entrySnapshot() []Entry {
-	var entries = make([]Entry, len(c.entries))
-	for i, e := range c.entries {
-		entries[i] = *e
-	}
-	return entries
-}
-
-func (c *Cron) removeEntry(id EntryID) {
-	var entries []*Entry
-	for _, e := range c.entries {
-		if e.ID != id {
-			entries = append(entries, e)
-		}
-	}
-	c.entries = entries
-}
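For reference, this is the usage style the dependency being dropped here documents for itself (per the doc.go below); after this commit the equivalent task scheduling in GoToSocial goes through go-sched instead:

	package main

	import (
		"fmt"
		"time"

		"github.com/robfig/cron/v3"
	)

	func main() {
		c := cron.New()

		// Standard 5-field spec: minute, hour, day-of-month, month, day-of-week.
		c.AddFunc("30 * * * *", func() { fmt.Println("every hour on the half hour") })

		// Fixed-interval schedule via the @every descriptor.
		c.AddFunc("@every 1h30m", func() { fmt.Println("every ninety minutes") })

		c.Start()

		// Stop() returns a context that is done once running jobs finish.
		defer func() { <-c.Stop().Done() }()

		time.Sleep(time.Second) // keep the demo alive briefly
	}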
231
vendor/github.com/robfig/cron/v3/doc.go
generated
vendored
231
vendor/github.com/robfig/cron/v3/doc.go
generated
vendored
|
@ -1,231 +0,0 @@
|
||||||
/*
|
|
||||||
Package cron implements a cron spec parser and job runner.
|
|
||||||
|
|
||||||
Installation
|
|
||||||
|
|
||||||
To download the specific tagged release, run:
|
|
||||||
|
|
||||||
go get github.com/robfig/cron/v3@v3.0.0
|
|
||||||
|
|
||||||
Import it in your program as:
|
|
||||||
|
|
||||||
import "github.com/robfig/cron/v3"
|
|
||||||
|
|
||||||
It requires Go 1.11 or later due to usage of Go Modules.
|
|
||||||
|
|
||||||
Usage
|
|
||||||
|
|
||||||
Callers may register Funcs to be invoked on a given schedule. Cron will run
|
|
||||||
them in their own goroutines.
|
|
||||||
|
|
||||||
c := cron.New()
|
|
||||||
c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
|
|
||||||
c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
|
|
||||||
c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
|
|
||||||
c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
|
|
||||||
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
|
|
||||||
c.Start()
|
|
||||||
..
|
|
||||||
// Funcs are invoked in their own goroutine, asynchronously.
|
|
||||||
...
|
|
||||||
// Funcs may also be added to a running Cron
|
|
||||||
c.AddFunc("@daily", func() { fmt.Println("Every day") })
|
|
||||||
..
|
|
||||||
// Inspect the cron job entries' next and previous run times.
|
|
||||||
inspect(c.Entries())
|
|
||||||
..
|
|
||||||
c.Stop() // Stop the scheduler (does not stop any jobs already running).
|
|
||||||
|
|
||||||
CRON Expression Format
|
|
||||||
|
|
||||||
A cron expression represents a set of times, using 5 space-separated fields.
|
|
||||||
|
|
||||||
Field name | Mandatory? | Allowed values | Allowed special characters
|
|
||||||
---------- | ---------- | -------------- | --------------------------
|
|
||||||
Minutes | Yes | 0-59 | * / , -
|
|
||||||
Hours | Yes | 0-23 | * / , -
|
|
||||||
Day of month | Yes | 1-31 | * / , - ?
|
|
||||||
Month | Yes | 1-12 or JAN-DEC | * / , -
|
|
||||||
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
|
|
||||||
|
|
||||||
Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
|
|
||||||
"sun" are equally accepted.
|
|
||||||
|
|
||||||
The specific interpretation of the format is based on the Cron Wikipedia page:
|
|
||||||
https://en.wikipedia.org/wiki/Cron
|
|
||||||
|
|
||||||
Alternative Formats
|
|
||||||
|
|
||||||
Alternative Cron expression formats support other fields like seconds. You can
|
|
||||||
implement that by creating a custom Parser as follows.
|
|
||||||
|
|
||||||
cron.New(
|
|
||||||
cron.WithParser(
|
|
||||||
cron.NewParser(
|
|
||||||
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
|
|
||||||
|
|
||||||
Since adding Seconds is the most common modification to the standard cron spec,
|
|
||||||
cron provides a builtin function to do that, which is equivalent to the custom
|
|
||||||
parser you saw earlier, except that its seconds field is REQUIRED:
|
|
||||||
|
|
||||||
cron.New(cron.WithSeconds())
|
|
||||||
|
|
||||||
That emulates Quartz, the most popular alternative Cron schedule format:
|
|
||||||
http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
|
|
||||||
|
|
||||||
Special Characters
|
|
||||||
|
|
||||||
Asterisk ( * )
|
|
||||||
|
|
||||||
The asterisk indicates that the cron expression will match for all values of the
|
|
||||||
field; e.g., using an asterisk in the 5th field (month) would indicate every
|
|
||||||
month.
|
|
||||||
|
|
||||||
Slash ( / )
|
|
||||||
|
|
||||||
Slashes are used to describe increments of ranges. For example 3-59/15 in the
|
|
||||||
1st field (minutes) would indicate the 3rd minute of the hour and every 15
|
|
||||||
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
|
|
||||||
that is, an increment over the largest possible range of the field. The form
|
|
||||||
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
|
|
||||||
increment until the end of that specific range. It does not wrap around.
|
|
||||||
|
|
||||||
Comma ( , )
|
|
||||||
|
|
||||||
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
|
|
||||||
the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
|
|
||||||
|
|
||||||
Hyphen ( - )
|
|
||||||
|
|
||||||
Hyphens are used to define ranges. For example, 9-17 would indicate every
|
|
||||||
hour between 9am and 5pm inclusive.
|
|
||||||
|
|
||||||
Question mark ( ? )
|
|
||||||
|
|
||||||
Question mark may be used instead of '*' for leaving either day-of-month or
|
|
||||||
day-of-week blank.
|
|
||||||
|
|
||||||
Predefined schedules
|
|
||||||
|
|
||||||
You may use one of several pre-defined schedules in place of a cron expression.
|
|
||||||
|
|
||||||
Entry | Description | Equivalent To
|
|
||||||
----- | ----------- | -------------
|
|
||||||
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
|
|
||||||
@monthly | Run once a month, midnight, first of month | 0 0 1 * *
|
|
||||||
@weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
|
|
||||||
@daily (or @midnight) | Run once a day, midnight | 0 0 * * *
|
|
||||||
@hourly | Run once an hour, beginning of hour | 0 * * * *
|
|
||||||
|
|
||||||
Intervals
|
|
||||||
|
|
||||||
You may also schedule a job to execute at fixed intervals, starting at the time it's added
|
|
||||||
or cron is run. This is supported by formatting the cron spec like this:
|
|
||||||
|
|
||||||
@every <duration>
|
|
||||||
|
|
||||||
where "duration" is a string accepted by time.ParseDuration
|
|
||||||
(http://golang.org/pkg/time/#ParseDuration).
|
|
||||||
|
|
||||||
For example, "@every 1h30m10s" would indicate a schedule that activates after
|
|
||||||
1 hour, 30 minutes, 10 seconds, and then every interval after that.
|
|
||||||
|
|
||||||
Note: The interval does not take the job runtime into account. For example,
|
|
||||||
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
|
|
||||||
it will have only 2 minutes of idle time between each run.
|
|
||||||
|
|
||||||
Time zones
|
|
||||||
|
|
||||||
By default, all interpretation and scheduling is done in the machine's local
|
|
||||||
time zone (time.Local). You can specify a different time zone on construction:
|
|
||||||
|
|
||||||
cron.New(
|
|
||||||
cron.WithLocation(time.UTC))
|
|
||||||
|
|
||||||
Individual cron schedules may also override the time zone they are to be
|
|
||||||
interpreted in by providing an additional space-separated field at the beginning
|
|
||||||
of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
# Runs at 6am in time.Local
|
|
||||||
cron.New().AddFunc("0 6 * * ?", ...)
|
|
||||||
|
|
||||||
# Runs at 6am in America/New_York
|
|
||||||
nyc, _ := time.LoadLocation("America/New_York")
|
|
||||||
c := cron.New(cron.WithLocation(nyc))
|
|
||||||
c.AddFunc("0 6 * * ?", ...)
|
|
||||||
|
|
||||||
# Runs at 6am in Asia/Tokyo
|
|
||||||
cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
|
|
||||||
|
|
||||||
# Runs at 6am in Asia/Tokyo
|
|
||||||
c := cron.New(cron.WithLocation(nyc))
|
|
||||||
c.SetLocation("America/New_York")
|
|
||||||
c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
|
|
||||||
|
|
||||||
The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
|
|
||||||
|
|
||||||
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
|
|
||||||
not be run!
|
|
||||||
|
|
||||||
Job Wrappers
|
|
||||||
|
|
||||||
A Cron runner may be configured with a chain of job wrappers to add
|
|
||||||
cross-cutting functionality to all submitted jobs. For example, they may be used
|
|
||||||
to achieve the following effects:
|
|
||||||
|
|
||||||
- Recover any panics from jobs (activated by default)
|
|
||||||
- Delay a job's execution if the previous run hasn't completed yet
|
|
||||||
- Skip a job's execution if the previous run hasn't completed yet
|
|
||||||
- Log each job's invocations
|
|
||||||
|
|
||||||
Install wrappers for all jobs added to a cron using the `cron.WithChain` option:
|
|
||||||
|
|
||||||
cron.New(cron.WithChain(
|
|
||||||
cron.SkipIfStillRunning(logger),
|
|
||||||
))
|
|
||||||
|
|
||||||
Install wrappers for individual jobs by explicitly wrapping them:
|
|
||||||
|
|
||||||
job = cron.NewChain(
|
|
||||||
cron.SkipIfStillRunning(logger),
|
|
||||||
).Then(job)
|
|
||||||
|
|
||||||
Thread safety
|
|
||||||
|
|
||||||
Since the Cron service runs concurrently with the calling code, some amount of
|
|
||||||
care must be taken to ensure proper synchronization.
|
|
||||||
|
|
||||||
All cron methods are designed to be correctly synchronized as long as the caller
|
|
||||||
ensures that invocations have a clear happens-before ordering between them.
|
|
||||||
|
Logging

Cron defines a Logger interface that is a subset of the one defined in
github.com/go-logr/logr. It has two logging levels (Info and Error), and
parameters are key/value pairs. This makes it possible for cron logging to plug
into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
to wrap the standard library *log.Logger.

For additional insight into Cron operations, verbose logging may be activated,
which will record job runs, scheduling decisions, and added or removed jobs.
Activate it with a one-off logger as follows:

    cron.New(
        cron.WithLogger(
            cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))

Implementation

Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.

Upon waking:

  - it runs each entry that is active on that second
  - it calculates the next run times for the jobs that were run
  - it re-sorts the array of entries by next activation time.
  - it goes to sleep until the soonest job.
*/
package cron
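Taken together, typical use of the package documented above looks roughly like this (a sketch; in v3, Stop returns a context that is done once in-flight jobs have finished):

    c := cron.New(cron.WithSeconds())
    c.AddFunc("*/10 * * * * *", func() { fmt.Println("every ten seconds") })
    c.Start()

    time.Sleep(time.Minute)

    // Stop scheduling; wait for any running jobs to complete.
    <-c.Stop().Done()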

86	vendor/github.com/robfig/cron/v3/logger.go generated vendored
@@ -1,86 +0,0 @@
package cron

import (
	"io/ioutil"
	"log"
	"os"
	"strings"
	"time"
)

// DefaultLogger is used by Cron if none is specified.
var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))

// DiscardLogger can be used by callers to discard all log messages.
var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))

// Logger is the interface used in this package for logging, so that any backend
// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
type Logger interface {
	// Info logs routine messages about cron's operation.
	Info(msg string, keysAndValues ...interface{})
	// Error logs an error condition.
	Error(err error, msg string, keysAndValues ...interface{})
}

// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
// into an implementation of the Logger interface which logs errors only.
func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
	return printfLogger{l, false}
}

// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
// "log") into an implementation of the Logger interface which logs everything.
func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
	return printfLogger{l, true}
}

type printfLogger struct {
	logger  interface{ Printf(string, ...interface{}) }
	logInfo bool
}

func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
	if pl.logInfo {
		keysAndValues = formatTimes(keysAndValues)
		pl.logger.Printf(
			formatString(len(keysAndValues)),
			append([]interface{}{msg}, keysAndValues...)...)
	}
}

func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
	keysAndValues = formatTimes(keysAndValues)
	pl.logger.Printf(
		formatString(len(keysAndValues)+2),
		append([]interface{}{msg, "error", err}, keysAndValues...)...)
}

// formatString returns a logfmt-like format string for the number of
// key/values.
func formatString(numKeysAndValues int) string {
	var sb strings.Builder
	sb.WriteString("%s")
	if numKeysAndValues > 0 {
		sb.WriteString(", ")
	}
	for i := 0; i < numKeysAndValues/2; i++ {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString("%v=%v")
	}
	return sb.String()
}

// formatTimes formats any time.Time values as RFC3339.
func formatTimes(keysAndValues []interface{}) []interface{} {
	var formattedArgs []interface{}
	for _, arg := range keysAndValues {
		if t, ok := arg.(time.Time); ok {
			arg = t.Format(time.RFC3339)
		}
		formattedArgs = append(formattedArgs, arg)
	}
	return formattedArgs
}
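Since Logger is a plain two-method interface, any structured backend can satisfy it. A hedged sketch of a custom implementation (memLogger is illustrative, not part of the package):

    // memLogger captures log lines in memory, e.g. for tests.
    type memLogger struct{ lines []string }

    func (m *memLogger) Info(msg string, kv ...interface{}) {
        m.lines = append(m.lines, fmt.Sprintln(append([]interface{}{"info:", msg}, kv...)...))
    }

    func (m *memLogger) Error(err error, msg string, kv ...interface{}) {
        m.lines = append(m.lines, fmt.Sprintln(append([]interface{}{"error:", err, msg}, kv...)...))
    }

    // Plugged in via: cron.New(cron.WithLogger(&memLogger{}))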

45	vendor/github.com/robfig/cron/v3/option.go generated vendored
@@ -1,45 +0,0 @@
package cron

import (
	"time"
)

// Option represents a modification to the default behavior of a Cron.
type Option func(*Cron)

// WithLocation overrides the timezone of the cron instance.
func WithLocation(loc *time.Location) Option {
	return func(c *Cron) {
		c.location = loc
	}
}

// WithSeconds overrides the parser used for interpreting job schedules to
// include a seconds field as the first one.
func WithSeconds() Option {
	return WithParser(NewParser(
		Second | Minute | Hour | Dom | Month | Dow | Descriptor,
	))
}

// WithParser overrides the parser used for interpreting job schedules.
func WithParser(p ScheduleParser) Option {
	return func(c *Cron) {
		c.parser = p
	}
}

// WithChain specifies Job wrappers to apply to all jobs added to this cron.
// Refer to the Chain* functions in this package for provided wrappers.
func WithChain(wrappers ...JobWrapper) Option {
	return func(c *Cron) {
		c.chain = NewChain(wrappers...)
	}
}

// WithLogger uses the provided logger.
func WithLogger(logger Logger) Option {
	return func(c *Cron) {
		c.logger = logger
	}
}
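The options above compose at construction time. An illustrative combination (Recover is one of the bundled chain wrappers; DiscardLogger is defined in logger.go above):

    c := cron.New(
        cron.WithLocation(time.UTC), // interpret all schedules in UTC
        cron.WithSeconds(),          // accept 6-field specs with a seconds column
        cron.WithChain(
            cron.Recover(cron.DefaultLogger), // recover panics in jobs
        ),
        cron.WithLogger(cron.DiscardLogger), // silence routine logging
    )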

434	vendor/github.com/robfig/cron/v3/parser.go generated vendored
@@ -1,434 +0,0 @@
package cron

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

// Configuration options for creating a parser. Most options specify which
// fields should be included, while others enable features. If a field is not
// included the parser will assume a default value. These options do not change
// the order fields are parsed in.
type ParseOption int

const (
	Second         ParseOption = 1 << iota // Seconds field, default 0
	SecondOptional                         // Optional seconds field, default 0
	Minute                                 // Minutes field, default 0
	Hour                                   // Hours field, default 0
	Dom                                    // Day of month field, default *
	Month                                  // Month field, default *
	Dow                                    // Day of week field, default *
	DowOptional                            // Optional day of week field, default *
	Descriptor                             // Allow descriptors such as @monthly, @weekly, etc.
)

var places = []ParseOption{
	Second,
	Minute,
	Hour,
	Dom,
	Month,
	Dow,
}

var defaults = []string{
	"0",
	"0",
	"0",
	"*",
	"*",
	"*",
}

// A custom Parser that can be configured.
type Parser struct {
	options ParseOption
}

// NewParser creates a Parser with custom options.
//
// It panics if more than one Optional is given, since it would be impossible to
// correctly infer which optional is provided or missing in general.
//
// Examples
//
//	// Standard parser without descriptors
//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
//	sched, err := specParser.Parse("0 0 15 */3 *")
//
//	// Same as above, just excludes time fields
//	subsParser := NewParser(Dom | Month | Dow)
//	sched, err := specParser.Parse("15 */3 *")
//
//	// Same as above, just makes Dow optional
//	subsParser := NewParser(Dom | Month | DowOptional)
//	sched, err := specParser.Parse("15 */3")
func NewParser(options ParseOption) Parser {
	optionals := 0
	if options&DowOptional > 0 {
		optionals++
	}
	if options&SecondOptional > 0 {
		optionals++
	}
	if optionals > 1 {
		panic("multiple optionals may not be configured")
	}
	return Parser{options}
}

// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
// It accepts crontab specs and features configured by NewParser.
func (p Parser) Parse(spec string) (Schedule, error) {
	if len(spec) == 0 {
		return nil, fmt.Errorf("empty spec string")
	}

	// Extract timezone if present
	var loc = time.Local
	if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
		var err error
		i := strings.Index(spec, " ")
		eq := strings.Index(spec, "=")
		if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
			return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
		}
		spec = strings.TrimSpace(spec[i:])
	}

	// Handle named schedules (descriptors), if configured
	if strings.HasPrefix(spec, "@") {
		if p.options&Descriptor == 0 {
			return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
		}
		return parseDescriptor(spec, loc)
	}

	// Split on whitespace.
	fields := strings.Fields(spec)

	// Validate & fill in any omitted or optional fields
	var err error
	fields, err = normalizeFields(fields, p.options)
	if err != nil {
		return nil, err
	}

	field := func(field string, r bounds) uint64 {
		if err != nil {
			return 0
		}
		var bits uint64
		bits, err = getField(field, r)
		return bits
	}

	var (
		second     = field(fields[0], seconds)
		minute     = field(fields[1], minutes)
		hour       = field(fields[2], hours)
		dayofmonth = field(fields[3], dom)
		month      = field(fields[4], months)
		dayofweek  = field(fields[5], dow)
	)
	if err != nil {
		return nil, err
	}

	return &SpecSchedule{
		Second:   second,
		Minute:   minute,
		Hour:     hour,
		Dom:      dayofmonth,
		Month:    month,
		Dow:      dayofweek,
		Location: loc,
	}, nil
}

// normalizeFields takes a subset of the time fields and returns the full set
// with defaults (zeroes) populated for unset fields.
//
// As part of performing this function, it also validates that the provided
// fields are compatible with the configured options.
func normalizeFields(fields []string, options ParseOption) ([]string, error) {
	// Validate optionals & add their field to options
	optionals := 0
	if options&SecondOptional > 0 {
		options |= Second
		optionals++
	}
	if options&DowOptional > 0 {
		options |= Dow
		optionals++
	}
	if optionals > 1 {
		return nil, fmt.Errorf("multiple optionals may not be configured")
	}

	// Figure out how many fields we need
	max := 0
	for _, place := range places {
		if options&place > 0 {
			max++
		}
	}
	min := max - optionals

	// Validate number of fields
	if count := len(fields); count < min || count > max {
		if min == max {
			return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
		}
		return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
	}

	// Populate the optional field if not provided
	if min < max && len(fields) == min {
		switch {
		case options&DowOptional > 0:
			fields = append(fields, defaults[5]) // TODO: improve access to default
		case options&SecondOptional > 0:
			fields = append([]string{defaults[0]}, fields...)
		default:
			return nil, fmt.Errorf("unknown optional field")
		}
	}

	// Populate all fields not part of options with their defaults
	n := 0
	expandedFields := make([]string, len(places))
	copy(expandedFields, defaults)
	for i, place := range places {
		if options&place > 0 {
			expandedFields[i] = fields[n]
			n++
		}
	}
	return expandedFields, nil
}

var standardParser = NewParser(
	Minute | Hour | Dom | Month | Dow | Descriptor,
)

// ParseStandard returns a new crontab schedule representing the given
// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
// representing: minute, hour, day of month, month and day of week, in that
// order. It returns a descriptive error if the spec is not valid.
//
// It accepts
//   - Standard crontab specs, e.g. "* * * * ?"
//   - Descriptors, e.g. "@midnight", "@every 1h30m"
func ParseStandard(standardSpec string) (Schedule, error) {
	return standardParser.Parse(standardSpec)
}

// getField returns an Int with the bits set representing all of the times that
// the field represents or error parsing field value. A "field" is a comma-separated
// list of "ranges".
func getField(field string, r bounds) (uint64, error) {
	var bits uint64
	ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
	for _, expr := range ranges {
		bit, err := getRange(expr, r)
		if err != nil {
			return bits, err
		}
		bits |= bit
	}
	return bits, nil
}

// getRange returns the bits indicated by the given expression:
//
//	number | number "-" number [ "/" number ]
//
// or error parsing range.
func getRange(expr string, r bounds) (uint64, error) {
	var (
		start, end, step uint
		rangeAndStep     = strings.Split(expr, "/")
		lowAndHigh       = strings.Split(rangeAndStep[0], "-")
		singleDigit      = len(lowAndHigh) == 1
		err              error
	)

	var extra uint64
	if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
		start = r.min
		end = r.max
		extra = starBit
	} else {
		start, err = parseIntOrName(lowAndHigh[0], r.names)
		if err != nil {
			return 0, err
		}
		switch len(lowAndHigh) {
		case 1:
			end = start
		case 2:
			end, err = parseIntOrName(lowAndHigh[1], r.names)
			if err != nil {
				return 0, err
			}
		default:
			return 0, fmt.Errorf("too many hyphens: %s", expr)
		}
	}

	switch len(rangeAndStep) {
	case 1:
		step = 1
	case 2:
		step, err = mustParseInt(rangeAndStep[1])
		if err != nil {
			return 0, err
		}

		// Special handling: "N/step" means "N-max/step".
		if singleDigit {
			end = r.max
		}
		if step > 1 {
			extra = 0
		}
	default:
		return 0, fmt.Errorf("too many slashes: %s", expr)
	}

	if start < r.min {
		return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
	}
	if end > r.max {
		return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
	}
	if start > end {
		return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
	}
	if step == 0 {
		return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
	}

	return getBits(start, end, step) | extra, nil
}

// parseIntOrName returns the (possibly-named) integer contained in expr.
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
	if names != nil {
		if namedInt, ok := names[strings.ToLower(expr)]; ok {
			return namedInt, nil
		}
	}
	return mustParseInt(expr)
}

// mustParseInt parses the given expression as an int or returns an error.
func mustParseInt(expr string) (uint, error) {
	num, err := strconv.Atoi(expr)
	if err != nil {
		return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
	}
	if num < 0 {
		return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr)
	}

	return uint(num), nil
}

// getBits sets all bits in the range [min, max], modulo the given step size.
func getBits(min, max, step uint) uint64 {
	var bits uint64

	// If step is 1, use shifts.
	if step == 1 {
		return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
	}

	// Else, use a simple loop.
	for i := min; i <= max; i += step {
		bits |= 1 << i
	}
	return bits
}

// all returns all bits within the given bounds. (plus the star bit)
func all(r bounds) uint64 {
	return getBits(r.min, r.max, 1) | starBit
}

// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
	switch descriptor {
	case "@yearly", "@annually":
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      1 << dom.min,
			Month:    1 << months.min,
			Dow:      all(dow),
			Location: loc,
		}, nil

	case "@monthly":
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      1 << dom.min,
			Month:    all(months),
			Dow:      all(dow),
			Location: loc,
		}, nil

	case "@weekly":
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      all(dom),
			Month:    all(months),
			Dow:      1 << dow.min,
			Location: loc,
		}, nil

	case "@daily", "@midnight":
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     1 << hours.min,
			Dom:      all(dom),
			Month:    all(months),
			Dow:      all(dow),
			Location: loc,
		}, nil

	case "@hourly":
		return &SpecSchedule{
			Second:   1 << seconds.min,
			Minute:   1 << minutes.min,
			Hour:     all(hours),
			Dom:      all(dom),
			Month:    all(months),
			Dow:      all(dow),
			Location: loc,
		}, nil
	}

	const every = "@every "
	if strings.HasPrefix(descriptor, every) {
		duration, err := time.ParseDuration(descriptor[len(every):])
		if err != nil {
			return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
		}
		return Every(duration), nil
	}

	return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
}
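Tying the parser pieces together, a sketch of a custom parser exercising the optional-field machinery implemented by normalizeFields above (error handling elided; comments describe assumed behaviour):

    p := cron.NewParser(
        cron.SecondOptional | cron.Minute | cron.Hour |
            cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
    )

    // With SecondOptional, 5- and 6-field specs are both accepted;
    // an omitted seconds field is filled with the default "0".
    s5, _ := p.Parse("30 6 * * *")    // 06:30:00 every day
    s6, _ := p.Parse("15 30 6 * * *") // 06:30:15 every day
    _, _ = s5, s6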

188	vendor/github.com/robfig/cron/v3/spec.go generated vendored
@@ -1,188 +0,0 @@
package cron

import "time"

// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
type SpecSchedule struct {
	Second, Minute, Hour, Dom, Month, Dow uint64

	// Override location for this schedule.
	Location *time.Location
}

// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
	min, max uint
	names    map[string]uint
}

// The bounds for each field.
var (
	seconds = bounds{0, 59, nil}
	minutes = bounds{0, 59, nil}
	hours   = bounds{0, 23, nil}
	dom     = bounds{1, 31, nil}
	months  = bounds{1, 12, map[string]uint{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		"oct": 10,
		"nov": 11,
		"dec": 12,
	}}
	dow = bounds{0, 6, map[string]uint{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
	}}
)

const (
	// Set the top bit if a star was included in the expression.
	starBit = 1 << 63
)

// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
	// General approach
	//
	// For Month, Day, Hour, Minute, Second:
	// Check if the time value matches. If yes, continue to the next field.
	// If the field doesn't match the schedule, then increment the field until it matches.
	// While incrementing the field, a wrap-around brings it back to the beginning
	// of the field list (since it is necessary to re-verify previous field
	// values)

	// Convert the given time into the schedule's timezone, if one is specified.
	// Save the original timezone so we can convert back after we find a time.
	// Note that schedules without a time zone specified (time.Local) are treated
	// as local to the time provided.
	origLocation := t.Location()
	loc := s.Location
	if loc == time.Local {
		loc = t.Location()
	}
	if s.Location != time.Local {
		t = t.In(s.Location)
	}

	// Start at the earliest possible time (the upcoming second).
	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)

	// This flag indicates whether a field has been incremented.
	added := false

	// If no time is found within five years, return zero.
	yearLimit := t.Year() + 5

WRAP:
	if t.Year() > yearLimit {
		return time.Time{}
	}

	// Find the first applicable month.
	// If it's this month, then do nothing.
	for 1<<uint(t.Month())&s.Month == 0 {
		// If we have to add a month, reset the other parts to 0.
		if !added {
			added = true
			// Otherwise, set the date at the beginning (since the current time is irrelevant).
			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
		}
		t = t.AddDate(0, 1, 0)

		// Wrapped around.
		if t.Month() == time.January {
			goto WRAP
		}
	}

	// Now get a day in that month.
	//
	// NOTE: This causes issues for daylight savings regimes where midnight does
	// not exist. For example: Sao Paulo has DST that transforms midnight on
	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
	for !dayMatches(s, t) {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
		}
		t = t.AddDate(0, 0, 1)
		// Notice if the hour is no longer midnight due to DST.
		// Add an hour if it's 23, subtract an hour if it's 1.
		if t.Hour() != 0 {
			if t.Hour() > 12 {
				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
			} else {
				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
			}
		}

		if t.Day() == 1 {
			goto WRAP
		}
	}

	for 1<<uint(t.Hour())&s.Hour == 0 {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
		}
		t = t.Add(1 * time.Hour)

		if t.Hour() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Minute())&s.Minute == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Minute)
		}
		t = t.Add(1 * time.Minute)

		if t.Minute() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Second())&s.Second == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Second)
		}
		t = t.Add(1 * time.Second)

		if t.Second() == 0 {
			goto WRAP
		}
	}

	return t.In(origLocation)
}

// dayMatches returns true if the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time.
func dayMatches(s *SpecSchedule, t time.Time) bool {
	var (
		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
	)
	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
		return domMatch && dowMatch
	}
	return domMatch || dowMatch
}
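One subtlety worth calling out in dayMatches: per classic cron behaviour, when both the day-of-month and day-of-week fields are restricted, a day satisfying either one matches. A small sketch:

    // "0 0 1 * MON" fires at midnight on the 1st of the month OR on any
    // Monday, because neither Dom nor Dow carries the star bit.
    s, _ := cron.ParseStandard("0 0 1 * MON")
    next := s.Next(time.Now()) // earliest of: the next Monday, the next 1st
    fmt.Println(next)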

15	vendor/modules.txt vendored
@@ -1,8 +1,8 @@
 # codeberg.org/gruf/go-atomics v1.1.0
 ## explicit; go 1.16
 codeberg.org/gruf/go-atomics
-# codeberg.org/gruf/go-bitutil v1.0.1
-## explicit; go 1.16
+# codeberg.org/gruf/go-bitutil v1.1.0
+## explicit; go 1.19
 codeberg.org/gruf/go-bitutil
 # codeberg.org/gruf/go-bytes v1.0.2
 ## explicit; go 1.14
@@ -18,7 +18,7 @@ codeberg.org/gruf/go-byteutil
 codeberg.org/gruf/go-cache/v3
 codeberg.org/gruf/go-cache/v3/result
 codeberg.org/gruf/go-cache/v3/ttl
-# codeberg.org/gruf/go-debug v1.2.0
+# codeberg.org/gruf/go-debug v1.3.0
 ## explicit; go 1.16
 codeberg.org/gruf/go-debug
 # codeberg.org/gruf/go-errors/v2 v2.1.1
@@ -58,10 +58,10 @@ codeberg.org/gruf/go-mutexes
 # codeberg.org/gruf/go-pools v1.1.0
 ## explicit; go 1.16
 codeberg.org/gruf/go-pools
-# codeberg.org/gruf/go-runners v1.5.1
-## explicit; go 1.14
+# codeberg.org/gruf/go-runners v1.6.0
+## explicit; go 1.19
 codeberg.org/gruf/go-runners
-# codeberg.org/gruf/go-sched v1.2.0
+# codeberg.org/gruf/go-sched v1.2.3
 ## explicit; go 1.19
 codeberg.org/gruf/go-sched
 # codeberg.org/gruf/go-store/v2 v2.2.1
@@ -379,9 +379,6 @@ github.com/quasoft/memstore
 # github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0
 ## explicit; go 1.12
 github.com/remyoudompheng/bigfft
-# github.com/robfig/cron/v3 v3.0.1
-## explicit; go 1.12
-github.com/robfig/cron/v3
 # github.com/rs/xid v1.4.0
 ## explicit; go 1.12
 github.com/rs/xid