Mirror of https://github.com/superseriousbusiness/gotosocial.git (synced 2024-11-26 05:36:38 +00:00)

Compare commits: 10 commits, ecd77b8782...145fe416b6

Commits in this range (newest first):
145fe416b6
3f7dc10449
c17abea921
4be1f780a1
8db3d6b700
666b8bc4f2
7c6c74243b
75d3fca08c
bd4c4d79fe
c1543c029b
.drone.yml (12 lines changed)

@@ -12,7 +12,7 @@ steps:
 # We use golangci-lint for linting.
 # See: https://golangci-lint.run/
 - name: lint
-image: golangci/golangci-lint:v1.57.2
+image: golangci/golangci-lint:v1.60.3
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -28,7 +28,7 @@ steps:
 - pull_request

 - name: test
-image: golang:1.22-alpine
+image: golang:1.23.0-alpine
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -94,7 +94,7 @@ steps:
 - pull_request

 - name: snapshot
-image: superseriousbusiness/gotosocial-drone-build:0.6.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
+image: superseriousbusiness/gotosocial-drone-build:0.7.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -135,7 +135,7 @@ steps:
 - main

 - name: release
-image: superseriousbusiness/gotosocial-drone-build:0.6.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
+image: superseriousbusiness/gotosocial-drone-build:0.7.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -194,7 +194,7 @@ clone:

 steps:
 - name: mirror
-image: superseriousbusiness/gotosocial-drone-build:0.6.0
+image: superseriousbusiness/gotosocial-drone-build:0.7.0
 environment:
 ORIGIN_REPO: https://github.com/superseriousbusiness/gotosocial
 TARGET_REPO: https://codeberg.org/superseriousbusiness/gotosocial
@@ -207,6 +207,6 @@ steps:

 ---
 kind: signature
-hmac: f4008d87e4e5b67251eb89f255c1224e6ab5818828cab24fc319b8f829176058
+hmac: 9810bf692fb1029c13b0a1e2f556e2306d16f7d3eec9ca6163a0499c147280c1

 ...

@@ -1,4 +1,5 @@
-# https://goreleaser.com
+# Version 2 of GoReleaser: https://goreleaser.com/errors/version/
+version: 2
 project_name: gotosocial
 before:
 # https://goreleaser.com/customization/hooks/
@@ -185,7 +186,7 @@ checksum:
 name_template: 'checksums.txt'
 snapshot:
 # https://goreleaser.com/customization/snapshots/
-name_template: "{{ incpatch .Version }}-SNAPSHOT"
+version_template: "{{ incpatch .Version }}-SNAPSHOT"
 source:
 # https://goreleaser.com/customization/source/
 enabled: true

@@ -2,7 +2,7 @@
 # Dockerfile reference: https://docs.docker.com/engine/reference/builder/

 # stage 1: generate up-to-date swagger.yaml to put in the final container
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS swagger
+FROM --platform=${BUILDPLATFORM} golang:1.23.0-alpine AS swagger

 RUN \
 ### Installs goswagger for building swagger definitions inside this container
@@ -28,7 +28,7 @@ RUN yarn --cwd ./web/source install && \
 rm -rf ./web/source

 # stage 3: build the executor container
-FROM --platform=${TARGETPLATFORM} alpine:3.19.1 as executor
+FROM --platform=${TARGETPLATFORM} alpine:3.20.2 as executor

 # switch to non-root user:group for GtS
 USER 1000:1000

@@ -8,6 +8,18 @@ In order to make GoToSocial email sending work, you need an smtp-compatible mail

 To validate your configuration, you can use the "Administration -> Actions -> Email" section of the settings panel to send a test email.

+!!! warning
+    Pending an smtp library update, currently only email providers that work with STARTTLS will work with GoToSocial. STARTTLS is generally available over **port 587**.
+
+    For more info, see:
+
+    - [STARTTLS vs SSL vs TLS](https://mailtrap.io/blog/starttls-ssl-tls/)
+    - [Understanding Ports](https://www.mailgun.com/blog/email/which-smtp-port-understanding-ports-25-465-587/)
+    - [Port 587](https://www.mailgun.com/blog/deliverability/smtp-port-587/)
+
+!!! info
+    For safety reasons, the smtp library used by GoToSocial will refuse to send authentication credentials over an unencrypted connection, unless the mail provider is running on localhost.
+
 ## Settings

 The configuration options for smtp are as follows:
@@ -26,6 +38,7 @@ The configuration options for smtp are as follows:
 smtp-host: ""

 # Int. Port to use to connect to the smtp server.
+# In the majority of cases, you should use port 587.
 # Examples: []
 # Default: 0
 smtp-port: 0
@@ -63,27 +76,16 @@ smtp-disclose-recipients: false

 Note that if you don't set `Host`, then email sending via smtp will be disabled, and the other settings will be ignored. GoToSocial will still log (at trace level) emails that *would* have been sent if smtp was enabled.

-## Behavior
+## When are emails sent?

-### SSL
-
-GoToSocial requires your smtp server to present valid SSL certificates. Most of the big services like Mailgun do this anyway, but if you're running your own mail server without SSL for some reason, and you're trying to connect GoToSocial to it, it will not work.
-
-The exception to this requirement is if you're running your mail server (or bridge to a mail server) on `localhost`, in which case SSL certs are not required.
-
-### When are emails sent?
-
 Currently, emails are sent:

-- To the provided email address of a new user to request email confirmation when a new account is created via the API.
+- To the provided email address of a new user to request email confirmation when a new account is created via the sign up page or API.
+- To instance admins when a new account is created in this way.
 - To all active instance moderators + admins when a new moderation report is received. By default, recipients are Bcc'd, but you can change this behavior with the setting `smtp-disclose-recipients`.
 - To the creator of a report (on this instance) when the report is closed by a moderator.

-### Can I test if my SMTP configuration is correct?
+## HTML versus Plaintext

-Yes, you can use the API to send a test email to yourself. Check the API documentation for the `/api/v1/admin/email/test` endpoint.
-
-### HTML versus Plaintext
-
 Emails are sent in plaintext by default. At this point, there is no option to send emails in html, but this is something that might be added later if there's enough demand for it.
@@ -817,6 +817,7 @@ oidc-admin-groups: []
 smtp-host: ""

 # Int. Port to use to connect to the smtp server.
+# In the majority of cases, you should use port 587.
 # Examples: []
 # Default: 0
 smtp-port: 0

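For context on the STARTTLS note added above, here is a minimal sketch of an SMTP submission over port 587 using Go's standard library. This is illustrative only, not GoToSocial's actual mailer code; the host, credentials, and addresses are placeholders. `net/smtp.SendMail` upgrades the connection with STARTTLS when the server advertises it, and `PlainAuth` refuses to send credentials over an unencrypted connection unless the server is localhost, which matches the behaviour the docs describe.

```go
package main

import (
	"log"
	"net/smtp"
)

func main() {
	// Hypothetical provider details; replace with your own.
	host := "smtp.example.org"
	addr := host + ":587" // submission port, STARTTLS

	// PlainAuth credentials are only sent once the connection has
	// been upgraded to TLS (or when the server is localhost).
	auth := smtp.PlainAuth("", "user@example.org", "password", host)

	msg := []byte("To: admin@example.org\r\n" +
		"Subject: GoToSocial test\r\n" +
		"\r\n" +
		"If you can read this, SMTP is working.\r\n")

	// SendMail issues STARTTLS automatically when advertised,
	// then authenticates and sends the message.
	if err := smtp.SendMail(addr, auth, "gts@example.org", []string{"admin@example.org"}, msg); err != nil {
		log.Fatal(err)
	}
}
```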
go.mod (4 lines changed)

@@ -1,6 +1,6 @@
 module github.com/superseriousbusiness/gotosocial

-go 1.22.2
+go 1.23

 replace modernc.org/sqlite => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.33.1-concurrency-workaround

@@ -22,7 +22,7 @@ require (
 codeberg.org/gruf/go-runners v1.6.3
 codeberg.org/gruf/go-sched v1.2.4
 codeberg.org/gruf/go-storage v0.2.0
-codeberg.org/gruf/go-structr v0.8.10
+codeberg.org/gruf/go-structr v0.8.11
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0
 github.com/DmitriyVTitov/size v1.5.0
 github.com/KimMachineGun/automemlimit v0.6.1

go.sum (4 lines changed)

@@ -72,8 +72,8 @@ codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw
 codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk=
 codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI=
 codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU=
-codeberg.org/gruf/go-structr v0.8.10 h1:uSapW97/StRnYEhCtycaM0isCsEMYC+tx/knYr6SiVo=
-codeberg.org/gruf/go-structr v0.8.10/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM=
+codeberg.org/gruf/go-structr v0.8.11 h1:I3cQCHpK3fQSXWaaUfksAJRN4+efULiuF11Oi/m8c+o=
+codeberg.org/gruf/go-structr v0.8.11/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM=
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0 h1:/EfyGI6HIrbkhFwgXGSjZ9o1kr/+k8v4mKdfXTH02Go=
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=

@@ -145,8 +145,8 @@ func validateCreateEmoji(form *apimodel.EmojiCreateRequest) error {
 return errors.New("no emoji given")
 }

-maxSize := config.GetMediaEmojiLocalMaxSize()
-if form.Image.Size > int64(maxSize) {
+maxSize := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
+if form.Image.Size > maxSize {
 return fmt.Errorf("emoji image too large: image is %dKB but size limit for custom emojis is %dKB", form.Image.Size/1024, maxSize/1024)
 }

@@ -208,8 +208,8 @@ func validateUpdateEmoji(form *apimodel.EmojiUpdateRequest) error {
 }

 if hasImage {
-maxSize := config.GetMediaEmojiLocalMaxSize()
-if form.Image.Size > int64(maxSize) {
+maxSize := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
+if form.Image.Size > maxSize {
 return fmt.Errorf("emoji image too large: image is %dKB but size limit for custom emojis is %dKB", form.Image.Size/1024, maxSize/1024)
 }
 }

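The recurring change in these and the later processor hunks is to hoist the int64 conversion of the configured size limit into the variable itself (annotated for gosec's G115 integer-conversion rule) and compare against it directly, instead of converting inline at every comparison. A small illustrative sketch of that shape, with a hypothetical getLimit helper standing in for the config getters:

```go
package main

import "fmt"

// getLimit stands in for a config getter returning an unsigned
// size limit (e.g. a bytesize value); hypothetical here.
func getLimit() uint64 { return 50 * 1024 }

func validate(size int64) error {
	// Convert once, up front; the configured value is assumed to
	// have been validated at startup, so the narrowing is safe.
	maxSize := int64(getLimit()) // #nosec G115 -- Already validated.
	if size > maxSize {
		return fmt.Errorf("image is %dKB but limit is %dKB", size/1024, maxSize/1024)
	}
	return nil
}

func main() {
	fmt.Println(validate(10 * 1024)) // <nil>
	fmt.Println(validate(80 * 1024)) // error
}
```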
@@ -160,7 +160,7 @@ type MediaDimensions struct {
 Duration float32 `json:"duration,omitempty"`
 // Bitrate of the media in bits per second.
 // example: 1000000
-Bitrate int `json:"bitrate,omitempty"`
+Bitrate uint64 `json:"bitrate,omitempty"`
 // Size of the media, in the format `[width]x[height]`.
 // Not set for audio.
 // example: 1920x1080

internal/cache/domain/domain.go (vendored; 2 lines changed)

@@ -220,7 +220,7 @@ func (n *node) getChild(part string) *node {

 for i < j {
 // avoid overflow when computing h
-h := int(uint(i+j) >> 1)
+h := int(uint(i+j) >> 1) // #nosec G115
 // i ≤ h < j

 if n.child[h].part < part {

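The "avoid overflow when computing h" line refers to the classic binary-search midpoint trick: `(i + j) / 2` can overflow for very large indices, whereas summing through `uint` and shifting keeps the result well-defined; the new comment only silences gosec on the back-conversion. A standalone sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sort"
)

// search returns the first index in xs whose value is >= target,
// using the overflow-safe midpoint from the hunk above.
func search(xs []string, target string) int {
	i, j := 0, len(xs)
	for i < j {
		// avoid overflow when computing h
		h := int(uint(i+j) >> 1) // #nosec G115
		// i <= h < j
		if xs[h] < target {
			i = h + 1
		} else {
			j = h
		}
	}
	return i
}

func main() {
	xs := []string{"alpha", "beta", "delta", "gamma"}
	fmt.Println(search(xs, "delta"))                                    // 2
	fmt.Println(search(xs, "delta") == sort.SearchStrings(xs, "delta")) // true
}
```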
@@ -25,6 +25,7 @@
 "encoding/pem"
 "errors"
 "fmt"
+"math"
 "net/url"
 "os"
 "runtime"
@@ -407,13 +408,12 @@ func maxOpenConns() int {
 // deriveBunDBPGOptions takes an application config and returns either a ready-to-use set of options
 // with sensible defaults, or an error if it's not satisfied by the provided config.
 func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
-url := config.GetDbPostgresConnectionString()
-
-// if database URL is defined, ignore other DB related configuration fields
-if url != "" {
-cfg, err := pgx.ParseConfig(url)
-return cfg, err
+// If database URL is defined, ignore
+// other DB-related configuration fields.
+if url := config.GetDbPostgresConnectionString(); url != "" {
+return pgx.ParseConfig(url)
 }

 // these are all optional, the db adapter figures out defaults
 address := config.GetDbAddress()

@@ -477,7 +477,10 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
 cfg.Host = address
 }
 if port := config.GetDbPort(); port > 0 {
-cfg.Port = uint16(port)
+if port > math.MaxUint16 {
+return nil, errors.New("invalid port, must be in range 1-65535")
+}
+cfg.Port = uint16(port) // #nosec G115 -- Just validated above.
 }
 if u := config.GetDbUser(); u != "" {
 cfg.User = u

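The port hunk shows the general shape this changeset uses when gosec's G115 rule flags a narrowing conversion that cannot be proven safe locally: check the range explicitly, return an error when it is out of bounds, and annotate the now-safe conversion. A minimal sketch of the same idea, independent of pgx:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// toPort narrows a configured int to the uint16 a driver expects,
// rejecting values outside the valid TCP port range.
func toPort(port int) (uint16, error) {
	if port <= 0 || port > math.MaxUint16 {
		return 0, errors.New("invalid port, must be in range 1-65535")
	}
	return uint16(port), nil // #nosec G115 -- Just validated above.
}

func main() {
	fmt.Println(toPort(5432))   // 5432 <nil>
	fmt.Println(toPort(700000)) // 0, error
}
```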
@@ -97,11 +97,11 @@ func() (*media.ProcessingEmoji, error) {
 }

 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }

 // Create new emoji with prepared info.
@@ -189,11 +189,11 @@ func() (*media.ProcessingEmoji, error) {
 }

 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }

 // Update emoji with prepared info.
@@ -255,11 +255,11 @@ func() (*media.ProcessingEmoji, error) {
 }

 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }

 // Recache emoji with prepared info.

@@ -77,14 +77,14 @@ func() (*media.ProcessingMedia, error) {
 }

 // Get maximum supported remote media size.
-maxsz := config.GetMediaRemoteMaxSize()
+maxsz := int64(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Create media with prepared info.
 return d.mediaManager.CreateMedia(
 ctx,
 accountID,
 func(ctx context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 },
 info,
 )
@@ -168,14 +168,14 @@ func() (*media.ProcessingMedia, error) {
 }

 // Get maximum supported remote media size.
-maxsz := config.GetMediaRemoteMaxSize()
+maxsz := int64(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Recache media with prepared info,
 // this will also update media in db.
 return d.mediaManager.CacheMedia(
 attach,
 func(ctx context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 },
 ), nil
 },

@@ -340,14 +340,14 @@ func (c *Client) do(r *Request) (rsp *http.Response, retry bool, err error) {

 if u, _ := strconv.ParseUint(after, 10, 32); u != 0 {
 // An integer no. of backoff seconds was provided.
-r.backoff = time.Duration(u) * time.Second
+r.backoff = time.Duration(u) * time.Second // #nosec G115 -- We clamp backoff below.
 } else if at, _ := http.ParseTime(after); !at.Before(now) {
 // An HTTP formatted future date-time was provided.
 r.backoff = at.Sub(now)
 }

 // Don't let their provided backoff exceed our max.
-if max := baseBackoff * time.Duration(c.retries); //
+if max := baseBackoff * time.Duration(c.retries); // #nosec G115 -- We control c.retries.
 r.backoff > max {
 r.backoff = max
 }

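For readers unfamiliar with the code being annotated here: a Retry-After header may carry either an integer number of seconds or an HTTP date, and the client clamps whatever the server asks for to its own maximum. A self-contained sketch of that parsing and clamping logic; the constants are made up for illustration and the error handling is simplified relative to the real client:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

const (
	baseBackoff = 2 * time.Second // illustrative values,
	retries     = 5               // not the client's real config
)

// parseBackoff interprets a Retry-After value as either seconds
// or an HTTP date, then clamps the result to a maximum.
func parseBackoff(after string, now time.Time) time.Duration {
	var backoff time.Duration

	if u, _ := strconv.ParseUint(after, 10, 32); u != 0 {
		// An integer number of backoff seconds was provided.
		backoff = time.Duration(u) * time.Second
	} else if at, err := http.ParseTime(after); err == nil && !at.Before(now) {
		// An HTTP formatted future date-time was provided.
		backoff = at.Sub(now)
	}

	// Don't let the provided backoff exceed our max.
	if max := baseBackoff * time.Duration(retries); backoff > max {
		backoff = max
	}
	return backoff
}

func main() {
	now := time.Now()
	fmt.Println(parseBackoff("3", now))   // 3s
	fmt.Println(parseBackoff("120", now)) // clamped to 10s
	date := now.Add(5 * time.Second).UTC().Format(http.TimeFormat)
	fmt.Println(parseBackoff(date, now)) // roughly 5s
}
```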
@@ -21,6 +21,7 @@
 "context"
 "encoding/json"
 "errors"
+"math"
 "os"
 "path"
 "strconv"
@@ -556,10 +557,18 @@ func (res *ffprobeResult) Process() (*result, error) {
 if p := strings.SplitN(str, "/", 2); len(p) == 2 {
 n, _ := strconv.ParseUint(p[0], 10, 32)
 d, _ := strconv.ParseUint(p[1], 10, 32)
-num, den = uint32(n), uint32(d)
+
+if n > math.MaxUint32 || d > math.MaxUint32 {
+return nil, gtserror.Newf("overflowed numerator or denominator")
+}
+num, den = uint32(n), uint32(d) // #nosec G115 -- Just checked.
 } else {
 n, _ := strconv.ParseUint(p[0], 10, 32)
-num = uint32(n)
+
+if n > math.MaxUint32 {
+return nil, gtserror.Newf("overflowed numerator")
+}
+num = uint32(n) // #nosec G115 -- Just checked.
 }

 // Set final divised framerate.

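The hunk above guards the conversion of ffprobe's rational frame-rate strings (such as "30000/1001") before narrowing to uint32. A standalone sketch of parsing such a value with the same bounds checks, using plain errors instead of gtserror; the helper name and the final division to a float are illustrative, not the upstream function:

```go
package main

import (
	"errors"
	"fmt"
	"math"
	"strconv"
	"strings"
)

// parseFramerate parses ffprobe-style "num/den" (or plain "num")
// into a float, rejecting components that overflow uint32.
func parseFramerate(str string) (float32, error) {
	var num uint32
	var den uint32 = 1

	if p := strings.SplitN(str, "/", 2); len(p) == 2 {
		n, _ := strconv.ParseUint(p[0], 10, 32)
		d, _ := strconv.ParseUint(p[1], 10, 32)
		if n > math.MaxUint32 || d > math.MaxUint32 {
			return 0, errors.New("overflowed numerator or denominator")
		}
		num, den = uint32(n), uint32(d) // #nosec G115 -- Just checked.
	} else {
		n, _ := strconv.ParseUint(p[0], 10, 32)
		if n > math.MaxUint32 {
			return 0, errors.New("overflowed numerator")
		}
		num = uint32(n) // #nosec G115 -- Just checked.
	}

	if den == 0 {
		return 0, errors.New("zero denominator")
	}
	return float32(num) / float32(den), nil
}

func main() {
	fmt.Println(parseFramerate("30000/1001")) // about 29.97
	fmt.Println(parseFramerate("25"))         // 25
}
```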
@@ -399,9 +399,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 g16 := uint16(s[1])
 b16 := uint16(s[2])
 a16 := uint16(a)
-d[0] = uint8(r16 * 0xff / a16)
-d[1] = uint8(g16 * 0xff / a16)
-d[2] = uint8(b16 * 0xff / a16)
+d[0] = uint8(r16 * 0xff / a16) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g16 * 0xff / a16) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b16 * 0xff / a16) // #nosec G115 -- Overflow desired.
 d[3] = a
 }
 j += 4
@@ -431,9 +431,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 g32 := uint32(s[2])<<8 | uint32(s[3])
 b32 := uint32(s[4])<<8 | uint32(s[5])
 a32 := uint32(s[6])<<8 | uint32(s[7])
-d[0] = uint8((r32 * 0xffff / a32) >> 8)
-d[1] = uint8((g32 * 0xffff / a32) >> 8)
-d[2] = uint8((b32 * 0xffff / a32) >> 8)
+d[0] = uint8((r32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8((g32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8((b32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
 }
 d[3] = a
 j += 4
@@ -530,9 +530,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 }

 d := dst[j : j+4 : j+4]
-d[0] = uint8(r)
-d[1] = uint8(g)
-d[2] = uint8(b)
+d[0] = uint8(r) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b) // #nosec G115 -- Overflow desired.
 d[3] = 0xff

 iy++
@@ -569,9 +569,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 d := dst[j : j+4 : j+4]
 switch a16 {
 case 0xffff:
-d[0] = uint8(r16 >> 8)
-d[1] = uint8(g16 >> 8)
-d[2] = uint8(b16 >> 8)
+d[0] = uint8(r16 >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g16 >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b16 >> 8) // #nosec G115 -- Overflow desired.
 d[3] = 0xff
 case 0:
 d[0] = 0
@@ -579,10 +579,10 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 d[2] = 0
 d[3] = 0
 default:
-d[0] = uint8(((r16 * 0xffff) / a16) >> 8)
-d[1] = uint8(((g16 * 0xffff) / a16) >> 8)
-d[2] = uint8(((b16 * 0xffff) / a16) >> 8)
-d[3] = uint8(a16 >> 8)
+d[0] = uint8(((r16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8(((g16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8(((b16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[3] = uint8(a16 >> 8) // #nosec G115 -- Overflow desired.
 }
 j += 4
 }
@@ -617,7 +617,7 @@ func clampFloat(x float64) uint8 {
 return 255
 }
 if v > 0 {
-return uint8(v)
+return uint8(v) // #nosec G115 -- Just checked.
 }
 return 0
 }

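The expressions being annotated in this scanner convert premultiplied-alpha pixel data back to straight 8-bit channels; the narrowing to uint8 is left unchecked on purpose, hence the "Overflow desired" notes rather than bounds checks. A small self-contained sketch of the 8-bit un-premultiply step on a single pixel (not the vendored scanner itself):

```go
package main

import (
	"fmt"
	"image/color"
)

// unpremultiply converts one premultiplied-alpha RGBA pixel back
// to straight (non-premultiplied) 8-bit channels, mirroring the
// d[i] = uint8(c16 * 0xff / a16) arithmetic from the hunk above.
func unpremultiply(c color.RGBA) color.NRGBA {
	if c.A == 0 {
		return color.NRGBA{}
	}
	r16 := uint16(c.R)
	g16 := uint16(c.G)
	b16 := uint16(c.B)
	a16 := uint16(c.A)
	return color.NRGBA{
		R: uint8(r16 * 0xff / a16), // #nosec G115 -- narrowing is intentional
		G: uint8(g16 * 0xff / a16), // #nosec G115
		B: uint8(b16 * 0xff / a16), // #nosec G115
		A: c.A,
	}
}

func main() {
	// Half-transparent pure red, premultiplied: R=128, A=128.
	fmt.Println(unpremultiply(color.RGBA{R: 128, A: 128})) // {255 0 0 128}
}
```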
@@ -49,9 +49,6 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 refetchIDs []string
 )

-// Get max supported remote emoji media size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
-
 // page through emojis 20 at a time, looking for those with missing images
 for {
 // Fetch next block of emojis from database
@@ -111,8 +108,10 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 continue
 }

+// Get max supported remote emoji media size.
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.
 dataFunc := func(ctx context.Context) (reader io.ReadCloser, err error) {
-return dereferenceMedia(ctx, emojiImageIRI, int64(maxsz))
+return dereferenceMedia(ctx, emojiImageIRI, maxsz)
 }

 processingEmoji, err := m.UpdateEmoji(ctx, emoji, dataFunc, AdditionalEmojiInfo{

@@ -462,11 +462,11 @@ func (p *Processor) UpdateAvatar(
 gtserror.WithCode,
 ) {
 // Get maximum supported local media size.
-maxsz := config.GetMediaLocalMaxSize()
+maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if avatar.Size > int64(maxsz) {
-text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+if avatar.Size > maxsz {
+text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -478,7 +478,7 @@ func (p *Processor) UpdateAvatar(
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)

 // Write to instance storage.
 return p.c.StoreLocalMedia(ctx,
@@ -507,11 +507,11 @@ func (p *Processor) UpdateHeader(
 gtserror.WithCode,
 ) {
 // Get maximum supported local media size.
-maxsz := config.GetMediaLocalMaxSize()
+maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if header.Size > int64(maxsz) {
-text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+if header.Size > maxsz {
+text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -523,7 +523,7 @@ func (p *Processor) UpdateHeader(
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)

 // Write to instance storage.
 return p.c.StoreLocalMedia(ctx,

@@ -45,11 +45,11 @@ func (p *Processor) EmojiCreate(
 ) (*apimodel.Emoji, gtserror.WithCode) {

 // Get maximum supported local emoji size.
-maxsz := config.GetMediaEmojiLocalMaxSize()
+maxsz := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if form.Image.Size > int64(maxsz) {
-text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz)
+if form.Image.Size > maxsz {
+text := fmt.Sprintf("emoji exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -61,7 +61,7 @@ func (p *Processor) EmojiCreate(
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 data := func(context.Context) (io.ReadCloser, error) {
 return rc, nil
 }
@@ -441,11 +441,11 @@ func (p *Processor) emojiUpdateModify(
 // We can do both at the same time :)

 // Get maximum supported local emoji size.
-maxsz := config.GetMediaEmojiLocalMaxSize()
+maxsz := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if image.Size > int64(maxsz) {
-text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz)
+if image.Size > maxsz {
+text := fmt.Sprintf("emoji exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -457,7 +457,7 @@ func (p *Processor) emojiUpdateModify(
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 data := func(context.Context) (io.ReadCloser, error) {
 return rc, nil
 }

@@ -35,11 +35,11 @@
 func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form *apimodel.AttachmentRequest) (*apimodel.Attachment, gtserror.WithCode) {

 // Get maximum supported local media size.
-maxsz := config.GetMediaLocalMaxSize()
+maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if form.File.Size > int64(maxsz) {
-text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+if form.File.Size > maxsz {
+text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -58,7 +58,7 @@ func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)

 // Create local media and write to instance storage.
 attachment, errWithCode := p.c.StoreLocalMedia(ctx,

@@ -647,7 +647,7 @@ func (c *Converter) AttachmentToAPIAttachment(ctx context.Context, media *gtsmod
 Size: toAPISize(media.FileMeta.Original.Width, media.FileMeta.Original.Height),
 FrameRate: toAPIFrameRate(media.FileMeta.Original.Framerate),
 Duration: util.PtrOrZero(media.FileMeta.Original.Duration),
-Bitrate: int(util.PtrOrZero(media.FileMeta.Original.Bitrate)),
+Bitrate: util.PtrOrZero(media.FileMeta.Original.Bitrate),
 }

 // Copy over local file URL.
@@ -1551,9 +1551,9 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL
 instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes
 instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes
-instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit
-instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit
 instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit
 instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions()
@@ -1563,7 +1563,7 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Accounts.AllowCustomCSS = config.GetAccountsAllowCustomCSS()
 instance.Configuration.Accounts.MaxFeaturedTags = instanceAccountsMaxFeaturedTags
 instance.Configuration.Accounts.MaxProfileFields = instanceAccountsMaxProfileFields
-instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize())
+instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.OIDCEnabled = config.GetOIDCEnabled()

 // URLs
@@ -1695,9 +1695,9 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL
 instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes
 instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes
-instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit
-instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit
 instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit
 instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions()
@@ -1707,7 +1707,7 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Accounts.AllowCustomCSS = config.GetAccountsAllowCustomCSS()
 instance.Configuration.Accounts.MaxFeaturedTags = instanceAccountsMaxFeaturedTags
 instance.Configuration.Accounts.MaxProfileFields = instanceAccountsMaxProfileFields
-instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize())
+instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.OIDCEnabled = config.GetOIDCEnabled()

 // registrations

vendor/codeberg.org/gruf/go-structr/cache.go (generated, vendored; 47 lines changed)

@@ -119,9 +119,9 @@ func (c *Cache[T]) Init(config CacheConfig[T]) {

 // Index selects index with given name from cache, else panics.
 func (c *Cache[T]) Index(name string) *Index {
-for i := range c.indices {
-if c.indices[i].name == name {
-return &c.indices[i]
+for i, idx := range c.indices {
+if idx.name == name {
+return &(c.indices[i])
 }
 }
 panic("unknown index: " + name)
@@ -337,13 +337,16 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
 panic("not initialized")
 }

-for i := 0; i < len(keys); {
+// Iterate keys and catch uncached.
+toLoad := make([]Key, 0, len(keys))
+for _, key := range keys {

 // Value length before
 // any below appends.
 before := len(values)

 // Concatenate all *values* from cached items.
-index.get(keys[i].key, func(item *indexed_item) {
+index.get(key.key, func(item *indexed_item) {
 if value, ok := item.data.(T); ok {
 // Append value COPY.
 value = c.copy(value)
@@ -358,30 +361,22 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))

 // Only if values changed did
 // we actually find anything.
-if len(values) != before {
-
-// We found values at key,
-// drop key from the slice.
-copy(keys[i:], keys[i+1:])
-keys = keys[:len(keys)-1]
-continue
+if len(values) == before {
+toLoad = append(toLoad, key)
 }
-
-// Iter
-i++
 }

 // Done with
 // the lock.
 unlock()

-if len(keys) == 0 {
+if len(toLoad) == 0 {
 // We loaded everything!
 return values, nil
 }

-// Load uncached values.
-uncached, err := load(keys)
+// Load uncached key values.
+uncached, err := load(toLoad)
 if err != nil {
 return nil, err
 }
@@ -515,8 +510,8 @@ func (c *Cache[T]) Trim(perc float64) {
 }

 // Compact index data stores.
-for i := range c.indices {
-c.indices[i].data.Compact()
+for _, idx := range c.indices {
+(&idx).data.Compact()
 }

 // Done with lock.
@@ -536,17 +531,17 @@ func (c *Cache[T]) Len() int {

 // Debug returns debug stats about cache.
 func (c *Cache[T]) Debug() map[string]any {
-m := make(map[string]any)
+m := make(map[string]any, 2)
 c.mutex.Lock()
 m["lru"] = c.lru.len
-indices := make(map[string]any)
+indices := make(map[string]any, len(c.indices))
 m["indices"] = indices
-for i := range c.indices {
+for _, idx := range c.indices {
 var n uint64
-for _, l := range c.indices[i].data.m {
+for _, l := range idx.data.m {
 n += uint64(l.len)
 }
-indices[c.indices[i].name] = n
+indices[idx.name] = n
 }
 c.mutex.Unlock()
 return m
@@ -588,7 +583,7 @@ func (c *Cache[T]) store_value(index *Index, key string, value T) {

 for i := range c.indices {
 // Get current index ptr.
-idx := &(c.indices[i])
+idx := (&c.indices[i])
 if idx == index {

 // Already stored under

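The Load change above replaces in-place deletion from `keys` while iterating with a second slice that simply collects the keys that missed the cache, which is easier to reason about and avoids index bookkeeping. A generic sketch of that pattern, with a toy map standing in for go-structr's index:

```go
package main

import "fmt"

// loadThrough returns cached values for keys and collects the
// misses into a separate slice, rather than deleting hits from
// the input slice while iterating over it.
func loadThrough(cache map[string]int, keys []string, load func([]string) map[string]int) []int {
	values := make([]int, 0, len(keys))

	// Iterate keys and catch uncached.
	toLoad := make([]string, 0, len(keys))
	for _, key := range keys {
		if v, ok := cache[key]; ok {
			values = append(values, v)
			continue
		}
		toLoad = append(toLoad, key)
	}

	if len(toLoad) == 0 {
		// Everything was cached.
		return values
	}

	// Load uncached key values and fill the cache.
	for key, v := range load(toLoad) {
		cache[key] = v
		values = append(values, v)
	}
	return values
}

func main() {
	cache := map[string]int{"a": 1}
	loader := func(miss []string) map[string]int {
		out := make(map[string]int, len(miss))
		for _, k := range miss {
			out[k] = len(k) // stand-in for a database load
		}
		return out
	}
	// Prints [1 2 3]; order of the loaded values may vary.
	fmt.Println(loadThrough(cache, []string{"a", "bb", "ccc"}, loader))
}
```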
vendor/codeberg.org/gruf/go-structr/index.go (generated, vendored; 42 lines changed)

@@ -197,8 +197,13 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
 return
 }

-// Iterate all entries in list.
-l.rangefn(func(elem *list_elem) {
+// Iterate the list.
+for elem := l.head; //
+elem != nil; //
+{
+// Get next before
+// any modification.
+next := elem.next
+
 // Extract element entry + item.
 entry := (*index_entry)(elem.data)
@@ -206,18 +211,21 @@ func (i *Index) get(key string, hook func(*indexed_item)) {

 // Pass to hook.
 hook(item)
-})
+
+// Set next.
+elem = next
+}
 }

 // key uses hasher to generate Key{} from given raw parts.
 func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
+buf.B = buf.B[:0]
 if len(parts) != len(i.fields) {
 panicf("incorrect number key parts: want=%d received=%d",
 len(i.fields),
 len(parts),
 )
 }
-buf.B = buf.B[:0]
 if !allow_zero(i.flags) {
 for x, field := range i.fields {
 before := len(buf.B)
@@ -301,8 +309,13 @@ func (i *Index) delete(key string, hook func(*indexed_item)) {
 // Delete at hash.
 i.data.Delete(key)

-// Iterate entries in list.
-l.rangefn(func(elem *list_elem) {
+// Iterate the list.
+for elem := l.head; //
+elem != nil; //
+{
+// Get next before
+// any modification.
+next := elem.next
+
 // Remove elem.
 l.remove(elem)
@@ -319,7 +332,10 @@ func (i *Index) delete(key string, hook func(*indexed_item)) {

 // Pass to hook.
 hook(item)
-})
+
+// Set next.
+elem = next
+}

 // Release list.
 free_list(l)
@@ -375,17 +391,21 @@ type index_entry struct {
 func new_index_entry() *index_entry {
 v := index_entry_pool.Get()
 if v == nil {
-v = new(index_entry)
+e := new(index_entry)
+e.elem.data = unsafe.Pointer(e)
+v = e
 }
 entry := v.(*index_entry)
-ptr := unsafe.Pointer(entry)
-entry.elem.data = ptr
 return entry
 }

 // free_index_entry releases the index_entry.
 func free_index_entry(entry *index_entry) {
-entry.elem.data = nil
+if entry.elem.next != nil ||
+entry.elem.prev != nil {
+should_not_reach()
+return
+}
 entry.key = ""
 entry.index = nil
 entry.item = nil

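Two ideas recur in this file and in item.go below: pooled objects now set their `elem.data` self-pointer once, in the allocation branch, instead of on every Get from the pool, and the free functions sanity-check that the object is fully unlinked before returning it to the pool. A simplified sketch of the same sync.Pool pattern with a hypothetical node type (should_not_reach is replaced by an early return here):

```go
package main

import (
	"fmt"
	"sync"
	"unsafe"
)

type elem struct {
	data       unsafe.Pointer
	next, prev *elem
}

// node embeds its own list element; elem.data always points
// back at the node that owns it.
type node struct {
	elem elem
	key  string
}

var nodePool sync.Pool

func newNode() *node {
	v := nodePool.Get()
	if v == nil {
		n := new(node)
		// Set the self-pointer once, at allocation time,
		// instead of on every Get.
		n.elem.data = unsafe.Pointer(n)
		v = n
	}
	return v.(*node)
}

func freeNode(n *node) {
	if n.elem.next != nil || n.elem.prev != nil {
		// Still linked into a list: refusing to pool it
		// avoids recycling an object that is in use.
		return
	}
	n.key = ""
	nodePool.Put(n)
}

func main() {
	n := newNode()
	n.key = "example"
	fmt.Println((*node)(n.elem.data).key) // "example"
	freeNode(n)
}
```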
vendor/codeberg.org/gruf/go-structr/item.go (generated, vendored; 16 lines changed)

@@ -24,18 +24,22 @@ type indexed_item struct {
 func new_indexed_item() *indexed_item {
 v := indexed_item_pool.Get()
 if v == nil {
-v = new(indexed_item)
+i := new(indexed_item)
+i.elem.data = unsafe.Pointer(i)
+v = i
 }
 item := v.(*indexed_item)
-ptr := unsafe.Pointer(item)
-item.elem.data = ptr
 return item
 }

 // free_indexed_item releases the indexed_item.
 func free_indexed_item(item *indexed_item) {
-item.elem.data = nil
-item.indexed = item.indexed[:0]
+if len(item.indexed) > 0 ||
+item.elem.next != nil ||
+item.elem.prev != nil {
+should_not_reach()
+return
+}
 item.data = nil
 indexed_item_pool.Put(item)
 }
@@ -50,7 +54,7 @@ func (i *indexed_item) drop_index(entry *index_entry) {
 continue
 }

-// Move all index entries down + reslice.
+// Reslice index entries minus 'x'.
 _ = copy(i.indexed[x:], i.indexed[x+1:])
 i.indexed[len(i.indexed)-1] = nil
 i.indexed = i.indexed[:len(i.indexed)-1]

vendor/codeberg.org/gruf/go-structr/list.go (generated, vendored; 46 lines changed)

@@ -40,9 +40,12 @@ func new_list() *list {

 // free_list releases the list.
 func free_list(list *list) {
-list.head = nil
-list.tail = nil
-list.len = 0
+if list.head != nil ||
+list.tail != nil ||
+list.len != 0 {
+should_not_reach()
+return
+}
 list_pool.Put(list)
 }

@@ -115,20 +118,27 @@ func (l *list) remove(elem *list_elem) {
 elem.prev = nil

 switch {
+case next == nil:
+if prev == nil {
+// next == nil && prev == nil
+//
 // elem is ONLY one in list.
-case next == nil && prev == nil:
 l.head = nil
 l.tail = nil
-
-// elem is front in list.
-case next != nil && prev == nil:
-l.head = next
-next.prev = nil
-
+} else {
+// next == nil && prev != nil
+//
 // elem is last in list.
-case prev != nil && next == nil:
 l.tail = prev
 prev.next = nil
+}
+
+case prev == nil:
+// next != nil && prev == nil
+//
+// elem is front in list.
+l.head = next
+next.prev = nil

 // elem in middle of list.
 default:
@@ -139,17 +149,3 @@ func (l *list) remove(elem *list_elem) {
 // Decr count
 l.len--
 }
-
-// rangefn will range all elems in list, passing each to fn.
-func (l *list) rangefn(fn func(*list_elem)) {
-if fn == nil {
-panic("nil fn")
-}
-for e := l.head; //
-e != nil; //
-{
-n := e.next
-fn(e)
-e = n
-}
-}

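The restructured remove switch covers the same four unlink cases as before (only element, tail, head, middle), just grouped by `next == nil` first. A compact standalone sketch of the same doubly-linked-list removal logic, with a toy pushBack added so it runs on its own:

```go
package main

import "fmt"

type listElem struct {
	value      string
	next, prev *listElem
}

type list struct {
	head, tail *listElem
	len        int
}

func (l *list) pushBack(e *listElem) {
	if l.tail == nil {
		l.head, l.tail = e, e
	} else {
		e.prev = l.tail
		l.tail.next = e
		l.tail = e
	}
	l.len++
}

// remove unlinks elem, handling the only/last/front/middle cases
// in the same grouping as the hunk above.
func (l *list) remove(elem *listElem) {
	next, prev := elem.next, elem.prev
	elem.next, elem.prev = nil, nil

	switch {
	case next == nil:
		if prev == nil {
			// elem is the ONLY one in the list.
			l.head, l.tail = nil, nil
		} else {
			// elem is last in the list.
			l.tail = prev
			prev.next = nil
		}
	case prev == nil:
		// elem is front of the list.
		l.head = next
		next.prev = nil
	default:
		// elem is in the middle of the list.
		prev.next = next
		next.prev = prev
	}
	l.len--
}

func main() {
	var l list
	a, b, c := &listElem{value: "a"}, &listElem{value: "b"}, &listElem{value: "c"}
	l.pushBack(a)
	l.pushBack(b)
	l.pushBack(c)
	l.remove(b)
	fmt.Println(l.head.value, l.tail.value, l.len) // a c 2
}
```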
180
vendor/codeberg.org/gruf/go-structr/ordered_list.bak
generated
vendored
Normal file
180
vendor/codeberg.org/gruf/go-structr/ordered_list.bak
generated
vendored
Normal file
|
@@ -0,0 +1,180 @@
+package structr
+
+import "sync"
+
+type Timeline[StructType any, PK comparable] struct {
+
+	// hook functions.
+	pkey func(StructType) PK
+	gte  func(PK, PK) bool
+	lte  func(PK, PK) bool
+	copy func(StructType) StructType
+
+	// main underlying
+	// ordered item list.
+	list list
+
+	// indices used in storing passed struct
+	// types by user defined sets of fields.
+	indices []Index
+
+	// protective mutex, guards:
+	// - TODO
+	mutex sync.Mutex
+}
+
+func (t *Timeline[T, PK]) Init(config any) {
+
+}
+
+func (t *Timeline[T, PK]) Index(name string) *Index {
+	for i := range t.indices {
+		if t.indices[i].name == name {
+			return &t.indices[i]
+		}
+	}
+	panic("unknown index: " + name)
+}
+
+func (t *Timeline[T, PK]) Insert(values ...T) {
+
+}
+
+func (t *Timeline[T, PK]) LoadTop(min, max PK, length int, load func(min, max PK, length int) ([]T, error)) ([]T, error) {
+	// Allocate expected no. values.
+	values := make([]T, 0, length)
+
+	// Acquire lock.
+	t.mutex.Lock()
+
+	// Wrap unlock to only do once.
+	unlock := once(t.mutex.Unlock)
+	defer unlock()
+
+	// Check init'd.
+	if t.copy == nil {
+		panic("not initialized")
+	}
+
+	// Iterate through linked list from top (i.e. head).
+	for next := t.list.head; next != nil; next = next.next {
+
+		// Check if we've gathered
+		// enough values from timeline.
+		if len(values) >= length {
+			return values, nil
+		}
+
+		item := (*indexed_item)(next.data)
+		value := item.data.(T)
+		pkey := t.pkey(value)
+
+		// Check if below min.
+		if t.lte(pkey, min) {
+			continue
+		}
+
+		// Update min.
+		min = pkey
+
+		// Check if above max.
+		if t.gte(pkey, max) {
+			break
+		}
+
+		// Append value copy.
+		value = t.copy(value)
+		values = append(values, value)
+	}
+}
+
+func (t *Timeline[T, PK]) LoadBottom(min, max PK, length int, load func(min, max PK, length int) ([]T, error)) ([]T, error) {
+	// Allocate expected no. values.
+	values := make([]T, 0, length)
+
+	// Acquire lock.
+	t.mutex.Lock()
+
+	// Wrap unlock to only do once.
+	unlock := once(t.mutex.Unlock)
+	defer unlock()
+
+	// Check init'd.
+	if t.copy == nil {
+		panic("not initialized")
+	}
+
+	// Iterate through linked list from bottom (i.e. tail).
+	for next := t.list.tail; next != nil; next = next.prev {
+
+		// Check if we've gathered
+		// enough values from timeline.
+		if len(values) >= length {
+			return values, nil
+		}
+
+		item := (*indexed_item)(next.data)
+		value := item.data.(T)
+		pkey := t.pkey(value)
+
+		// Check if above max.
+		if t.gte(pkey, max) {
+			continue
+		}
+
+		// Update max.
+		max = pkey
+
+		// Check if below min.
+		if t.lte(pkey, min) {
+			break
+		}
+
+		// Append value copy.
+		value = t.copy(value)
+		values = append(values, value)
+	}
+
+	// Done with
+	// the lock.
+	unlock()
+
+	// Attempt to load values up to given length.
+	next, err := load(min, max, length-len(values))
+	if err != nil {
+		return nil, err
+	}
+
+	// Acquire lock.
+	t.mutex.Lock()
+
+	// Store uncached values.
+	for i := range next {
+		t.store_value(
+			nil, "",
+			uncached[i],
+		)
+	}
+
+	// Done with lock.
+	t.mutex.Unlock()
+
+	// Append uncached to return values.
+	values = append(values, next...)
+
+	return values, nil
+}
+
+func (t *Timeline[T, PK]) index(value T) *indexed_item {
+	pk := t.pkey(value)
+
+	switch {
+	case t.list.len == 0:
+
+	case pk < t.list.head.data:
+	}
+}
+
+func (t *Timeline[T, PK]) delete(item *indexed_item) {
+
+}
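ordered_list.bak is clearly a work-in-progress scratch file rather than compiled code: LoadTop never returns outside its loop, index() and delete() are stubs, and LoadBottom refers to store_value and uncached, which are not defined in the file. The shape it sketches is a bounded scan over a primary-key-ordered list, with a load callback filling in whatever the cache cannot supply. A loose, self-contained illustration of just the bounded scan over plain int keys (not the vendored logic):

package main

import "fmt"

// scanBetween walks keys already sorted in ascending order, skips anything
// at or below min, stops at or above max, and returns at most length keys.
// This only illustrates the bounded-scan shape; it is not the vendored code.
func scanBetween(sorted []int, min, max, length int) []int {
	out := make([]int, 0, length)
	for _, pk := range sorted {
		if len(out) >= length {
			break
		}
		if pk <= min {
			continue // still below the requested window.
		}
		if pk >= max {
			break // past the requested window.
		}
		out = append(out, pk)
	}
	return out
}

func main() {
	keys := []int{10, 20, 30, 40, 50, 60}
	fmt.Println(scanBetween(keys, 15, 55, 3)) // [20 30 40]
}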
16  vendor/codeberg.org/gruf/go-structr/queue.go  (generated, vendored)
@@ -68,9 +68,9 @@ func (q *Queue[T]) Init(config QueueConfig[T]) {
 
 // Index selects index with given name from queue, else panics.
 func (q *Queue[T]) Index(name string) *Index {
-	for i := range q.indices {
-		if q.indices[i].name == name {
-			return &q.indices[i]
+	for i, idx := range q.indices {
+		if idx.name == name {
+			return &(q.indices[i])
 		}
 	}
 	panic("unknown index: " + name)
@@ -207,17 +207,17 @@ func (q *Queue[T]) Len() int {
 
 // Debug returns debug stats about queue.
 func (q *Queue[T]) Debug() map[string]any {
-	m := make(map[string]any)
+	m := make(map[string]any, 2)
 	q.mutex.Lock()
 	m["queue"] = q.queue.len
-	indices := make(map[string]any)
+	indices := make(map[string]any, len(q.indices))
 	m["indices"] = indices
-	for i := range q.indices {
+	for _, idx := range q.indices {
 		var n uint64
-		for _, l := range q.indices[i].data.m {
+		for _, l := range idx.data.m {
 			n += uint64(l.len)
 		}
-		indices[q.indices[i].name] = n
+		indices[idx.name] = n
 	}
 	q.mutex.Unlock()
 	return m
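The Queue changes are readability tweaks plus pre-sizing of the Debug maps. Note that ranging with `for i, idx := range q.indices` reads each Index by value, while the returned pointer still has to be `&q.indices[i]`: the address of the slice element, not of the loop variable's copy. A small standalone demonstration of that distinction (types here are illustrative):

package main

import "fmt"

type index struct{ name string }

// pick returns a pointer into the backing array of indices. Returning &idx
// would instead return the address of the loop variable's copy, so writes
// through it would never be visible in the slice.
func pick(indices []index, name string) *index {
	for i, idx := range indices {
		if idx.name == name {
			return &indices[i]
		}
	}
	return nil
}

func main() {
	s := []index{{"a"}, {"b"}}
	p := pick(s, "b")
	p.name = "B"
	fmt.Println(s[1].name) // B: the slice element itself was updated.
}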
28  vendor/codeberg.org/gruf/go-structr/runtime.go  (generated, vendored)
@@ -2,7 +2,10 @@
 
 import (
 	"fmt"
+	"os"
 	"reflect"
+	"runtime"
+	"strings"
 	"unicode"
 	"unicode/utf8"
 	"unsafe"
@@ -182,7 +185,32 @@ func deref(p unsafe.Pointer, n uint) unsafe.Pointer {
 	return p
 }
 
+// eface_data returns the data ptr from an empty interface.
+func eface_data(a any) unsafe.Pointer {
+	type eface struct{ _, data unsafe.Pointer }
+	return (*eface)(unsafe.Pointer(&a)).data
+}
+
 // panicf provides a panic with string formatting.
 func panicf(format string, args ...any) {
 	panic(fmt.Sprintf(format, args...))
 }
+
+// should_not_reach can be called to indicated a
+// block of code should not be able to be reached,
+// else it prints callsite info with a BUG report.
+//
+//go:noinline
+func should_not_reach() {
+	pcs := make([]uintptr, 1)
+	_ = runtime.Callers(2, pcs)
+	fn := runtime.FuncForPC(pcs[0])
+	funcname := "go-structr" // by default use just our library name
+	if fn != nil {
+		funcname = fn.Name()
+		if i := strings.LastIndexByte(funcname, '/'); i != -1 {
+			funcname = funcname[i+1:]
+		}
+	}
+	os.Stderr.WriteString("BUG: assertion failed in " + funcname + "\n")
+}
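should_not_reach is a soft assertion: rather than panicking, it resolves the caller's function name via runtime.Callers and runtime.FuncForPC and writes a single BUG line to stderr, which is how free_list in list.go now flags a non-empty list being handed back to the pool. A standalone sketch of the same reporting pattern (the helper and caller names below are illustrative, not the vendored ones):

package main

import (
	"fmt"
	"os"
	"runtime"
	"strings"
)

// reportBug mirrors the shape of should_not_reach: resolve the caller's
// function name and write a BUG line to stderr without panicking.
//
//go:noinline
func reportBug() {
	pcs := make([]uintptr, 1)
	_ = runtime.Callers(2, pcs) // skip runtime.Callers and reportBug itself.
	name := "unknown"
	if fn := runtime.FuncForPC(pcs[0]); fn != nil {
		name = fn.Name()
		if i := strings.LastIndexByte(name, '/'); i != -1 {
			name = name[i+1:]
		}
	}
	fmt.Fprintln(os.Stderr, "BUG: assertion failed in "+name)
}

// freeList refuses to pool a list that still looks populated,
// reporting a bug instead of silently zeroing it.
func freeList(head, tail *struct{}, length int) {
	if head != nil || tail != nil || length != 0 {
		reportBug()
		return
	}
	// ... return list to pool ...
}

func main() {
	freeList(nil, nil, 1) // writes a BUG line to stderr and refuses to pool.
}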
8  vendor/codeberg.org/gruf/go-structr/util.go  (generated, vendored)
@@ -1,7 +1,5 @@
 package structr
 
-import "unsafe"
-
 // once only executes 'fn' once.
 func once(fn func()) func() {
 	var once int32
@@ -13,9 +11,3 @@ func once(fn func()) func() {
 		fn()
 	}
 }
-
-// eface_data returns the data ptr from an empty interface.
-func eface_data(a any) unsafe.Pointer {
-	type eface struct{ _, data unsafe.Pointer }
-	return (*eface)(unsafe.Pointer(&a)).data
-}
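util.go now only keeps the once helper (eface_data moved into runtime.go above). once wraps a function so repeated calls become no-ops, which is what lets LoadTop/LoadBottom in the .bak sketch both defer an unlock and release the lock early before calling out to load. A self-contained sketch of that pattern (onceFn is an illustrative stand-in, not the vendored helper):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// onceFn wraps fn so it runs at most once, however many times the
// returned closure is invoked.
func onceFn(fn func()) func() {
	var done int32
	return func() {
		if atomic.CompareAndSwapInt32(&done, 0, 1) {
			fn()
		}
	}
}

func main() {
	var mu sync.Mutex
	mu.Lock()
	unlock := onceFn(mu.Unlock)
	defer unlock() // safe even though we also unlock early below.

	// ... read state under the lock ...
	unlock() // release before doing slow work (e.g. a load callback).

	fmt.Println("done")
}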
2  vendor/modules.txt  (vendored)
@@ -66,7 +66,7 @@ codeberg.org/gruf/go-storage/disk
 codeberg.org/gruf/go-storage/internal
 codeberg.org/gruf/go-storage/memory
 codeberg.org/gruf/go-storage/s3
-# codeberg.org/gruf/go-structr v0.8.10
+# codeberg.org/gruf/go-structr v0.8.11
 ## explicit; go 1.21
 codeberg.org/gruf/go-structr
 # codeberg.org/superseriousbusiness/exif-terminator v0.9.0