Mirror of https://github.com/superseriousbusiness/gotosocial.git (synced 2024-11-26 13:46:39 +00:00)

Compare commits: 11 commits, 4057daf6fe ... 454ed99516

Commits (SHA1; author and date columns were empty in the capture):
454ed99516
095663f5cc
18b7e00fef
504b3a9162
4be1f780a1
8db3d6b700
666b8bc4f2
7c6c74243b
75d3fca08c
bd4c4d79fe
c1543c029b
.drone.yml (12 changed lines)

@@ -12,7 +12,7 @@ steps:
 # We use golangci-lint for linting.
 # See: https://golangci-lint.run/
 - name: lint
-image: golangci/golangci-lint:v1.57.2
+image: golangci/golangci-lint:v1.60.3
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -28,7 +28,7 @@ steps:
 - pull_request
 
 - name: test
-image: golang:1.22-alpine
+image: golang:1.23.0-alpine
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -94,7 +94,7 @@ steps:
 - pull_request
 
 - name: snapshot
-image: superseriousbusiness/gotosocial-drone-build:0.6.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
+image: superseriousbusiness/gotosocial-drone-build:0.7.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -135,7 +135,7 @@ steps:
 - main
 
 - name: release
-image: superseriousbusiness/gotosocial-drone-build:0.6.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
+image: superseriousbusiness/gotosocial-drone-build:0.7.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build
@@ -194,7 +194,7 @@ clone:
 
 steps:
 - name: mirror
-image: superseriousbusiness/gotosocial-drone-build:0.6.0
+image: superseriousbusiness/gotosocial-drone-build:0.7.0
 environment:
 ORIGIN_REPO: https://github.com/superseriousbusiness/gotosocial
 TARGET_REPO: https://codeberg.org/superseriousbusiness/gotosocial
@@ -207,6 +207,6 @@ steps:
 
 ---
 kind: signature
-hmac: f4008d87e4e5b67251eb89f255c1224e6ab5818828cab24fc319b8f829176058
+hmac: 9810bf692fb1029c13b0a1e2f556e2306d16f7d3eec9ca6163a0499c147280c1
 
 ...
@@ -1,4 +1,5 @@
-# https://goreleaser.com
+# Version 2 of GoReleaser: https://goreleaser.com/errors/version/
+version: 2
 project_name: gotosocial
 before:
 # https://goreleaser.com/customization/hooks/
@@ -185,7 +186,7 @@ checksum:
 name_template: 'checksums.txt'
 snapshot:
 # https://goreleaser.com/customization/snapshots/
-name_template: "{{ incpatch .Version }}-SNAPSHOT"
+version_template: "{{ incpatch .Version }}-SNAPSHOT"
 source:
 # https://goreleaser.com/customization/source/
 enabled: true
@@ -2,7 +2,7 @@
 # Dockerfile reference: https://docs.docker.com/engine/reference/builder/
 
 # stage 1: generate up-to-date swagger.yaml to put in the final container
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS swagger
+FROM --platform=${BUILDPLATFORM} golang:1.23.0-alpine AS swagger
 
 RUN \
 ### Installs goswagger for building swagger definitions inside this container
@@ -28,7 +28,7 @@ RUN yarn --cwd ./web/source install && \
 rm -rf ./web/source
 
 # stage 3: build the executor container
-FROM --platform=${TARGETPLATFORM} alpine:3.19.1 as executor
+FROM --platform=${TARGETPLATFORM} alpine:3.20.2 as executor
 
 # switch to non-root user:group for GtS
 USER 1000:1000
@@ -16,3 +16,4 @@ We consider these topics advanced because applying them incorrectly does have th
 * [Tracing](tracing.md)
 * [Metrics](metrics.md)
 * [Replicating SQLite](replicating-sqlite.md)
+* [SQLite on networked storage](sqlite-networked-storage.md)
docs/advanced/sqlite-networked-storage.md (new file, 35 lines)

@@ -0,0 +1,35 @@
+# SQLite on networked storage
+
+SQLite's operating model assumes the database and the processes or applications using it are colocated on the same host. When running the database in WAL-mode, which is GoToSocial's default, it relies on shared memory between processes to ensure the integrity of your database.
+
+!!! quote
+All processes using a database must be on the same host computer; WAL does not work over a network filesystem. This is because WAL requires all processes to share a small amount of memory and processes on separate host machines obviously cannot share memory with each other.
+
+— SQLite.org [Write-Ahead Logging](https://www.sqlite.org/wal.html)
+
+This also means that any other processes accessing the database need to run in the same namespace or container context.
+
+It is in theory possible to run SQLite over Samba, NFS, iSCSI or other forms of filesystems accessed over the network. But it is neither recommended nor supported by the SQLite maintainers, irrespective of whether you're running with write-ahead logging or not. Doing so puts you at risk of database corruption. There is a long history of networked storage having synchronisation issues in their locking primitives, or implementing them with weaker guarantees than what a local filesystem can provide.
+
+Your cloud provider's external volumes, like Hetzner Cloud Volumes, AWS EBS, GCP Persistent Disk etc. may also cause problems, and add variable latency. This has a tendency to severely degrade SQLite's performance.
+
+If you're going to access your database over the network, it's better to use a database with a client-server architecture. GoToSocial supports Postgres for such use-cases.
+
+For the purpose of having a copy of the SQLite database on durable long-term storage, refer to [SQLite streaming replication](replicating-sqlite.md) instead. Remember that neither replication nor using a networked filesystem are a substitute [for having backups](../admin/backup_and_restore.md).
+
+## Settings
+
+!!! danger "Corrupted database"
+We do not support running GoToSocial with SQLite on a networked filesystem and we will not be able to help you if you damage your database this way.
+
+Should you really want to take this risk, you'll need to adjust the SQLite [synchronous][sqlite-sync] mode and [journal][sqlite-journal] mode to match the limitations of the filesystem.
+
+[sqlite-sync]: https://www.sqlite.org/pragma.html#pragma_synchronous
+[sqlite-journal]: https://www.sqlite.org/pragma.html#pragma_journal_mode
+
+You'll need to update the following settings:
+
+* `db-sqlite-journal-mode`
+* `db-sqlite-synchronous`
+
+We don't provide any recommendations as this will vary based on the solution you're using. See [this issue](https://github.com/superseriousbusiness/gotosocial/issues/3360#issuecomment-2380332027) for what you could potentially set those values to.
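For orientation, the `db-sqlite-journal-mode` and `db-sqlite-synchronous` settings listed in the new page map onto SQLite's `journal_mode` and `synchronous` pragmas that the page links to. Below is a minimal, illustrative Go sketch of setting those pragmas directly through `database/sql`, using the `modernc.org/sqlite` driver that already appears in go.mod further down this compare; the pragma values shown are placeholders, not recommendations.

```go
package main

import (
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // registers the "sqlite" driver
)

func main() {
	db, err := sql.Open("sqlite", "file:example.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// journal_mode reports the resulting mode, so read it back with QueryRow.
	// TRUNCATE and FULL are placeholder values only; pick settings that match
	// the guarantees of the filesystem you are (reluctantly) using.
	var mode string
	if err := db.QueryRow(`PRAGMA journal_mode = TRUNCATE;`).Scan(&mode); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`PRAGMA synchronous = FULL;`); err != nil {
		log.Fatal(err)
	}
	log.Printf("journal_mode=%s", mode)
}
```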
@@ -65,14 +65,16 @@ If you decide to use a VPS instead, you can spin yourself up something cheap wit
 
 [Greenhost](https://greenhost.net) is also great: it has zero CO2 emissions, but is a bit more costly. Their 1GB, 1-cpu VPS works great for a single-user or small instance.
 
+!!! warning "Cloud storage volumes"
+Not all cloud VPS storage offerings are equal, and just because something claims to be backed by an SSD doesn't mean that it will necessarily be suitable to run a GoToSocial instance.
+
+The [performance of Hetzner Cloud Volumes](https://github.com/superseriousbusiness/gotosocial/issues/2471#issuecomment-1891098323) is not guaranteed and seems to have very volatile latency. This will result in your GoToSocial instance performing poorly.
+
 !!! danger "Oracle Free Tier"
 [Oracle Cloud Free Tier](https://www.oracle.com/cloud/free/) servers are not suitable for a GoToSocial deployment if you intend to federate with more than a handful of other instances and users.
 
 GoToSocial admins running on Oracle Cloud Free Tier have reported that their instances become extremely slow or unresponsive during periods of moderate load. This is most likely due to memory or storage latency, which causes even simple database queries to take a long time to run.
 
-!!! danger "Hetzner Cloud Volume"
-The [performance of Hetzner Cloud Volumes](https://github.com/superseriousbusiness/gotosocial/issues/2471#issuecomment-1891098323) is not guaranteed and seems to have very volatile latency. You're going to have a bad time running your database on those, with extremely poor query performance for even the most basic operations. Before filing performance issues against GoToSocial, make sure the problems reproduce with local storage.
-
 ### Distribution system requirements
 
 Please make sure to check on your distribution system requirments, especially memory. Many distributions have baseline requirements and running them on a system that doesn't meet them will cause problems without further tuning and tweaking on your part.
@@ -99,13 +101,15 @@ GoToSocial supports both SQLite and Postgres as database drivers. Though it is p
 
 SQLite is the default driver and it's been shown to work brilliantly for instances in the range of 1-30 users (or maybe more).
 
+!!! danger "SQLite on networked storage"
+Don't put your SQLite database on remote storage, whether that's NFS/Samba, iSCSI volumes, things like Ceph/Gluster or your cloud provider's network volume storage solution.
+
+See [SQLite on networked storage](../advanced/sqlite-networked-storage.md) for further information.
+
 If you're planning on hosting more people than this on an instance, you may wish to use Postgres instead, as it offers the possibility of database clustering and redundancy, at the cost of some complexity.
 
 Regardless of which database driver you choose, for proper performance they should be run on fast storage that operates with low and stable latency. It is possible to run databases on network attached storage, but this adds variable latency and network congestion to the mix, as well as potential I/O contention on the origin storage.
 
-!!! danger "Cloud Storage Volumes"
-Not all cloud VPS storage offerings are equal, and just because something claims to be backed by an SSD doesn't mean that it will necessarily be suitable to run a GoToSocial instance on. Please see the [Server/VPS section](#vps) section below.
-
 !!! tip
 Please [backup your database](../admin/backup_and_restore.md). The database contains encryption keys for the instance and any user accounts. You won't be able to federate again from the same domain if you lose these keys!
 
go.mod (4 changed lines)

@@ -1,6 +1,6 @@
 module github.com/superseriousbusiness/gotosocial
 
-go 1.22.2
+go 1.23
 
 replace modernc.org/sqlite => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.33.1-concurrency-workaround
 
@@ -22,7 +22,7 @@ require (
 codeberg.org/gruf/go-runners v1.6.3
 codeberg.org/gruf/go-sched v1.2.4
 codeberg.org/gruf/go-storage v0.2.0
-codeberg.org/gruf/go-structr v0.8.9
+codeberg.org/gruf/go-structr v0.8.10
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0
 github.com/DmitriyVTitov/size v1.5.0
 github.com/KimMachineGun/automemlimit v0.6.1
go.sum (4 changed lines)

@@ -72,8 +72,8 @@ codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw
 codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk=
 codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI=
 codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU=
-codeberg.org/gruf/go-structr v0.8.9 h1:OyiSspWYCeJOm356fFPd+bDRumPrard2VAUXAPqZiJ0=
-codeberg.org/gruf/go-structr v0.8.9/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM=
+codeberg.org/gruf/go-structr v0.8.10 h1:uSapW97/StRnYEhCtycaM0isCsEMYC+tx/knYr6SiVo=
+codeberg.org/gruf/go-structr v0.8.10/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM=
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0 h1:/EfyGI6HIrbkhFwgXGSjZ9o1kr/+k8v4mKdfXTH02Go=
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -73,7 +73,7 @@ func (suite *AccountStatusesTestSuite) TestGetStatusesPublicOnly() {
 suite.Equal(apimodel.VisibilityPublic, s.Visibility)
 }
 
-suite.Equal(`<http://localhost:8080/api/v1/accounts/01F8MH17FWEB39HZJ76B6VXSKF/statuses?limit=20&max_id=01F8MH75CBF9JFX4ZAD54N0W0R&exclude_replies=false&exclude_reblogs=false&pinned=false&only_media=false&only_public=true>; rel="next", <http://localhost:8080/api/v1/accounts/01F8MH17FWEB39HZJ76B6VXSKF/statuses?limit=20&min_id=01G36SF3V6Y6V5BF9P4R7PQG7G&exclude_replies=false&exclude_reblogs=false&pinned=false&only_media=false&only_public=true>; rel="prev"`, result.Header.Get("link"))
+suite.Equal(`<http://localhost:8080/api/v1/accounts/01F8MH17FWEB39HZJ76B6VXSKF/statuses?limit=20&max_id=01F8MH75CBF9JFX4ZAD54N0W0R&exclude_replies=false&exclude_reblogs=false&pinned=false&only_media=false&only_public=true>; rel="next", <http://localhost:8080/api/v1/accounts/01F8MH17FWEB39HZJ76B6VXSKF/statuses?limit=20&min_id=01J5QVB9VC76NPPRQ207GG4DRZ&exclude_replies=false&exclude_reblogs=false&pinned=false&only_media=false&only_public=true>; rel="prev"`, result.Header.Get("link"))
 }
 
 func (suite *AccountStatusesTestSuite) TestGetStatusesPublicOnlyMediaOnly() {
@@ -145,8 +145,8 @@ func validateCreateEmoji(form *apimodel.EmojiCreateRequest) error {
 return errors.New("no emoji given")
 }
 
-maxSize := config.GetMediaEmojiLocalMaxSize()
-if form.Image.Size > int64(maxSize) {
+maxSize := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
+if form.Image.Size > maxSize {
 return fmt.Errorf("emoji image too large: image is %dKB but size limit for custom emojis is %dKB", form.Image.Size/1024, maxSize/1024)
 }
 
@@ -208,8 +208,8 @@ func validateUpdateEmoji(form *apimodel.EmojiUpdateRequest) error {
 }
 
 if hasImage {
-maxSize := config.GetMediaEmojiLocalMaxSize()
-if form.Image.Size > int64(maxSize) {
+maxSize := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
+if form.Image.Size > maxSize {
 return fmt.Errorf("emoji image too large: image is %dKB but size limit for custom emojis is %dKB", form.Image.Size/1024, maxSize/1024)
 }
 }
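The two hunks above share one shape: convert the configured limit to int64 once, annotate the conversion for gosec's G115 (integer conversion overflow) rule, and compare against it directly instead of converting at every comparison. Here is a self-contained sketch of that validate-then-convert pattern; the function and parameter names are invented for illustration and are not GoToSocial's config API.

```go
package main

import (
	"fmt"
	"math"
)

// validateEmojiSize checks an upload against a configured limit. The limit is
// assumed to arrive as an unsigned byte count, as sizes usually do.
func validateEmojiSize(imageSize int64, maxEmojiSizeBytes uint64) error {
	// Reject configs that wouldn't fit in an int64 before converting, so the
	// conversion below cannot overflow. This is the invariant a
	// "#nosec G115 -- Already validated." comment asserts was checked elsewhere.
	if maxEmojiSizeBytes > math.MaxInt64 {
		return fmt.Errorf("configured max size %d does not fit in int64", maxEmojiSizeBytes)
	}
	maxSize := int64(maxEmojiSizeBytes) // #nosec G115 -- just validated above

	if imageSize > maxSize {
		return fmt.Errorf("emoji image too large: image is %dKB but size limit is %dKB",
			imageSize/1024, maxSize/1024)
	}
	return nil
}

func main() {
	fmt.Println(validateEmojiSize(100*1024, 50*1024)) // emoji image too large: ...
	fmt.Println(validateEmojiSize(10*1024, 50*1024))  // <nil>
}
```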
@@ -591,7 +591,7 @@ func (suite *StatusBoostTestSuite) TestPostBoostImplicitAccept() {
 "text": "Hi @1happyturtle, can I reply?",
 "uri": "http://localhost:8080/some/determinate/url",
 "url": "http://localhost:8080/some/determinate/url",
-"visibility": "unlisted"
+"visibility": "public"
 },
 "reblogged": true,
 "reblogs_count": 0,
@@ -601,7 +601,7 @@ func (suite *StatusBoostTestSuite) TestPostBoostImplicitAccept() {
 "tags": [],
 "uri": "http://localhost:8080/some/determinate/url",
 "url": "http://localhost:8080/some/determinate/url",
-"visibility": "unlisted"
+"visibility": "public"
 }`, out)
 
 // Target status should no
@@ -27,6 +27,7 @@
 "github.com/gin-gonic/gin"
 "github.com/stretchr/testify/suite"
 "github.com/superseriousbusiness/gotosocial/internal/api/client/statuses"
+"github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
 "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 
 apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
@@ -185,13 +186,24 @@ func (suite *StatusFaveTestSuite) TestPostUnfaveable() {
 // Fave a status that's pending approval by us.
 func (suite *StatusFaveTestSuite) TestPostFaveImplicitAccept() {
 var (
+ctx = context.Background()
 targetStatus = suite.testStatuses["admin_account_status_5"]
 app = suite.testApplications["application_1"]
 token = suite.testTokens["local_account_2"]
 user = suite.testUsers["local_account_2"]
 account = suite.testAccounts["local_account_2"]
+visFilter = visibility.NewFilter(&suite.state)
 )
 
+// Check visibility of status to public before posting fave.
+visible, err := visFilter.StatusVisible(ctx, nil, targetStatus)
+if err != nil {
+suite.FailNow(err.Error())
+}
+if visible {
+suite.FailNow("status should not be visible yet")
+}
+
 out, recorder := suite.postStatusFave(
 targetStatus.ID,
 app,
@@ -268,30 +280,40 @@ func (suite *StatusFaveTestSuite) TestPostFaveImplicitAccept() {
 "text": "Hi @1happyturtle, can I reply?",
 "uri": "http://localhost:8080/some/determinate/url",
 "url": "http://localhost:8080/some/determinate/url",
-"visibility": "unlisted"
+"visibility": "public"
 }`, out)
 
 // Target status should no
 // longer be pending approval.
 dbStatus, err := suite.state.DB.GetStatusByID(
-context.Background(),
+ctx,
 targetStatus.ID,
 )
 if err != nil {
 suite.FailNow(err.Error())
 }
 suite.False(*dbStatus.PendingApproval)
+suite.NotEmpty(dbStatus.ApprovedByURI)
 
 // There should be an Accept
 // stored for the target status.
 intReq, err := suite.state.DB.GetInteractionRequestByInteractionURI(
-context.Background(), targetStatus.URI,
+ctx, targetStatus.URI,
 )
 if err != nil {
 suite.FailNow(err.Error())
 }
 suite.NotZero(intReq.AcceptedAt)
 suite.NotEmpty(intReq.URI)
+
+// Check visibility of status to public after posting fave.
+visible, err = visFilter.StatusVisible(ctx, nil, dbStatus)
+if err != nil {
+suite.FailNow(err.Error())
+}
+if !visible {
+suite.FailNow("status should be visible")
+}
 }
 
 func TestStatusFaveTestSuite(t *testing.T) {
@@ -160,7 +160,7 @@ type MediaDimensions struct {
 Duration float32 `json:"duration,omitempty"`
 // Bitrate of the media in bits per second.
 // example: 1000000
-Bitrate int `json:"bitrate,omitempty"`
+Bitrate uint64 `json:"bitrate,omitempty"`
 // Size of the media, in the format `[width]x[height]`.
 // Not set for audio.
 // example: 1920x1080
internal/cache/domain/domain.go (vendored, 2 changed lines)

@@ -220,7 +220,7 @@ func (n *node) getChild(part string) *node {
 
 for i < j {
 // avoid overflow when computing h
-h := int(uint(i+j) >> 1)
+h := int(uint(i+j) >> 1) // #nosec G115
 // i ≤ h < j
 
 if n.child[h].part < part {
internal/cache/util.go (vendored, 2 changed lines)

@@ -18,7 +18,6 @@
 package cache
 
 import (
-"database/sql"
 "errors"
 "time"
 
@@ -42,7 +41,6 @@ func ignoreErrors(err error) bool {
 // (until invalidation).
 db.ErrNoEntries,
 db.ErrAlreadyExists,
-sql.ErrNoRows,
 )
 }
 
internal/cache/visibility.go (vendored, 12 changed lines)

@@ -48,9 +48,15 @@ func (c *Caches) initVisibility() {
 {Fields: "RequesterID", Multiple: true},
 {Fields: "Type,RequesterID,ItemID"},
 },
 MaxSize: cap,
-IgnoreErr: ignoreErrors,
-Copy: copyF,
+IgnoreErr: func(err error) bool {
+// don't cache any errors,
+// it gets a little too tricky
+// otherwise with ensuring
+// errors are cleared out
+return true
+},
+Copy: copyF,
 })
 }
 

@@ -25,6 +25,7 @@
 "encoding/pem"
 "errors"
 "fmt"
+"math"
 "net/url"
 "os"
 "runtime"
@@ -407,13 +408,12 @@ func maxOpenConns() int {
 // deriveBunDBPGOptions takes an application config and returns either a ready-to-use set of options
 // with sensible defaults, or an error if it's not satisfied by the provided config.
 func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
-url := config.GetDbPostgresConnectionString()
-// if database URL is defined, ignore other DB related configuration fields
-if url != "" {
-cfg, err := pgx.ParseConfig(url)
-return cfg, err
+// If database URL is defined, ignore
+// other DB-related configuration fields.
+if url := config.GetDbPostgresConnectionString(); url != "" {
+return pgx.ParseConfig(url)
 }
 
 // these are all optional, the db adapter figures out defaults
 address := config.GetDbAddress()
 
@@ -477,7 +477,10 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
 cfg.Host = address
 }
 if port := config.GetDbPort(); port > 0 {
-cfg.Port = uint16(port)
+if port > math.MaxUint16 {
+return nil, errors.New("invalid port, must be in range 1-65535")
+}
+cfg.Port = uint16(port) // #nosec G115 -- Just validated above.
 }
 if u := config.GetDbUser(); u != "" {
 cfg.User = u
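The port hunk above replaces a silent int→uint16 truncation with an explicit range check before narrowing. A standalone sketch of that guard, with an invented function name rather than GoToSocial's actual config accessor:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// portFromConfig narrows an int-typed config value to the uint16 a Postgres
// connection config expects, refusing out-of-range values instead of letting
// the conversion wrap around.
func portFromConfig(port int) (uint16, error) {
	if port < 1 || port > math.MaxUint16 {
		return 0, errors.New("invalid port, must be in range 1-65535")
	}
	return uint16(port), nil // #nosec G115 -- just validated above
}

func main() {
	fmt.Println(portFromConfig(5432))  // 5432 <nil>
	fmt.Println(portFromConfig(70000)) // 0 invalid port, must be in range 1-65535
}
```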
@@ -26,8 +26,10 @@
 "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
 "github.com/superseriousbusiness/gotosocial/internal/gtserror"
 "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+"github.com/superseriousbusiness/gotosocial/internal/log"
 "github.com/superseriousbusiness/gotosocial/internal/paging"
 "github.com/superseriousbusiness/gotosocial/internal/state"
+"github.com/superseriousbusiness/gotosocial/internal/util"
 "github.com/uptrace/bun"
 )
 
@@ -84,6 +86,53 @@ func(request *gtsmodel.InteractionRequest) error {
 )
 }
 
+func (i *interactionDB) GetInteractionRequestsByIDs(ctx context.Context, ids []string) ([]*gtsmodel.InteractionRequest, error) {
+// Load all interaction request IDs via cache loader callbacks.
+requests, err := i.state.Caches.DB.InteractionRequest.LoadIDs("ID",
+ids,
+func(uncached []string) ([]*gtsmodel.InteractionRequest, error) {
+// Preallocate expected length of uncached interaction requests.
+requests := make([]*gtsmodel.InteractionRequest, 0, len(uncached))
+
+// Perform database query scanning
+// the remaining (uncached) IDs.
+if err := i.db.NewSelect().
+Model(&requests).
+Where("? IN (?)", bun.Ident("id"), bun.In(uncached)).
+Scan(ctx); err != nil {
+return nil, err
+}
+
+return requests, nil
+},
+)
+if err != nil {
+return nil, err
+}
+
+// Reorder the requests by their
+// IDs to ensure in correct order.
+getID := func(r *gtsmodel.InteractionRequest) string { return r.ID }
+util.OrderBy(requests, ids, getID)
+
+if gtscontext.Barebones(ctx) {
+// no need to fully populate.
+return requests, nil
+}
+
+// Populate all loaded interaction requests, removing those we
+// fail to populate (removes needing so many nil checks everywhere).
+requests = slices.DeleteFunc(requests, func(request *gtsmodel.InteractionRequest) bool {
+if err := i.PopulateInteractionRequest(ctx, request); err != nil {
+log.Errorf(ctx, "error populating %s: %v", request.ID, err)
+return true
+}
+return false
+})
+
+return requests, nil
+}
+
 func (i *interactionDB) getInteractionRequest(
 ctx context.Context,
 lookup string,
@@ -205,13 +254,18 @@ func (i *interactionDB) UpdateInteractionRequest(ctx context.Context, request *g
 }
 
 func (i *interactionDB) DeleteInteractionRequestByID(ctx context.Context, id string) error {
-defer i.state.Caches.DB.InteractionRequest.Invalidate("ID", id)
+// Delete interaction request by ID.
+if _, err := i.db.NewDelete().
+Table("interaction_requests").
+Where("? = ?", bun.Ident("id"), id).
+Exec(ctx); err != nil {
+return err
+}
 
-_, err := i.db.NewDelete().
-TableExpr("? AS ?", bun.Ident("interaction_requests"), bun.Ident("interaction_request")).
-Where("? = ?", bun.Ident("interaction_request.id"), id).
-Exec(ctx)
-return err
+// Invalidate cached interaction request with ID.
+i.state.Caches.DB.InteractionRequest.Invalidate("ID", id)
+
+return nil
 }
 
 func (i *interactionDB) GetInteractionsRequestsForAcct(
@@ -317,19 +371,8 @@ func (i *interactionDB) GetInteractionsRequestsForAcct(
 slices.Reverse(reqIDs)
 }
 
-// For each interaction request ID,
-// select the interaction request.
-reqs := make([]*gtsmodel.InteractionRequest, 0, len(reqIDs))
-for _, id := range reqIDs {
-req, err := i.GetInteractionRequestByID(ctx, id)
-if err != nil {
-return nil, err
-}
-
-reqs = append(reqs, req)
-}
-
-return reqs, nil
+// Load all interaction requests by their IDs.
+return i.GetInteractionRequestsByIDs(ctx, reqIDs)
 }
 
 func (i *interactionDB) IsInteractionRejected(ctx context.Context, interactionURI string) (bool, error) {
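The new GetInteractionRequestsByIDs above is an instance of a common batch-load shape: serve what the cache already holds, fetch the remaining IDs in a single query, then reorder the results to the caller's ID order. Below is a dependency-free sketch of that shape; the types are invented and `fetchMany` stands in for the one database query, so this is illustrative rather than GoToSocial's cache API.

```go
package main

import "fmt"

// loadByIDs returns one value per requested ID, serving what it can from the
// cache, fetching the misses in a single batch, and reassembling results in
// the caller's order.
func loadByIDs[V any](
	ids []string,
	cache map[string]V,
	fetchMany func(uncached []string) (map[string]V, error),
) ([]V, error) {
	uncached := make([]string, 0, len(ids))
	for _, id := range ids {
		if _, ok := cache[id]; !ok {
			uncached = append(uncached, id)
		}
	}

	if len(uncached) > 0 {
		fetched, err := fetchMany(uncached)
		if err != nil {
			return nil, err
		}
		for id, v := range fetched {
			cache[id] = v
		}
	}

	// Reorder results to match the requested ID order.
	out := make([]V, 0, len(ids))
	for _, id := range ids {
		if v, ok := cache[id]; ok {
			out = append(out, v)
		}
	}
	return out, nil
}

func main() {
	cache := map[string]string{"b": "cached-b"}
	got, _ := loadByIDs([]string{"a", "b", "c"}, cache, func(uncached []string) (map[string]string, error) {
		fmt.Println("fetching:", uncached) // fetching: [a c]
		m := map[string]string{}
		for _, id := range uncached {
			m[id] = "db-" + id
		}
		return m, nil
	})
	fmt.Println(got) // [db-a cached-b db-c]
}
```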
@@ -74,7 +74,8 @@ func (suite *TimelineTestSuite) publicCount() int {
 var publicCount int
 for _, status := range suite.testStatuses {
 if status.Visibility == gtsmodel.VisibilityPublic &&
-status.BoostOfID == "" {
+status.BoostOfID == "" &&
+!util.PtrOrZero(status.PendingApproval) {
 publicCount++
 }
 }
@@ -97,11 +97,11 @@ func() (*media.ProcessingEmoji, error) {
 }
 
 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.
 
 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }
 
 // Create new emoji with prepared info.
@@ -189,11 +189,11 @@ func() (*media.ProcessingEmoji, error) {
 }
 
 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.
 
 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }
 
 // Update emoji with prepared info.
@@ -255,11 +255,11 @@ func() (*media.ProcessingEmoji, error) {
 }
 
 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.
 
 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }
 
 // Recache emoji with prepared info.
@@ -77,14 +77,14 @@ func() (*media.ProcessingMedia, error) {
 }
 
 // Get maximum supported remote media size.
-maxsz := config.GetMediaRemoteMaxSize()
+maxsz := int64(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 
 // Create media with prepared info.
 return d.mediaManager.CreateMedia(
 ctx,
 accountID,
 func(ctx context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 },
 info,
 )
@@ -168,14 +168,14 @@ func() (*media.ProcessingMedia, error) {
 }
 
 // Get maximum supported remote media size.
-maxsz := config.GetMediaRemoteMaxSize()
+maxsz := int64(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 
 // Recache media with prepared info,
 // this will also update media in db.
 return d.mediaManager.CacheMedia(
 attach,
 func(ctx context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 },
 ), nil
 },
@@ -168,6 +168,9 @@ func (suite *StatusVisibleTestSuite) TestVisiblePending() {
 testStatus := new(gtsmodel.Status)
 *testStatus = *suite.testStatuses["admin_account_status_3"]
 testStatus.PendingApproval = util.Ptr(true)
+if err := suite.state.DB.UpdateStatus(ctx, testStatus); err != nil {
+suite.FailNow(err.Error())
+}
 
 for _, testCase := range []struct {
 acct *gtsmodel.Account
@@ -198,6 +201,43 @@ func (suite *StatusVisibleTestSuite) TestVisiblePending() {
 suite.NoError(err)
 suite.Equal(testCase.visible, visible)
 }
+
+// Update the status to mark it as approved.
+testStatus.PendingApproval = util.Ptr(false)
+testStatus.ApprovedByURI = "http://localhost:8080/some/accept/uri"
+if err := suite.state.DB.UpdateStatus(ctx, testStatus); err != nil {
+suite.FailNow(err.Error())
+}
+
+for _, testCase := range []struct {
+acct *gtsmodel.Account
+visible bool
+}{
+{
+acct: suite.testAccounts["admin_account"],
+visible: true, // Own status, always visible.
+},
+{
+acct: suite.testAccounts["local_account_1"],
+visible: true, // Reply to zork, always visible.
+},
+{
+acct: suite.testAccounts["local_account_2"],
+visible: true, // Should be visible now.
+},
+{
+acct: suite.testAccounts["remote_account_1"],
+visible: true, // Should be visible now.
+},
+{
+acct: nil, // Unauthed request.
+visible: true, // Should be visible now (public status).
+},
+} {
+visible, err := suite.filter.StatusVisible(ctx, testCase.acct, testStatus)
+suite.NoError(err)
+suite.Equal(testCase.visible, visible)
+}
 }
 
 func (suite *StatusVisibleTestSuite) TestVisibleLocalOnly() {
@@ -340,14 +340,14 @@ func (c *Client) do(r *Request) (rsp *http.Response, retry bool, err error) {
 
 if u, _ := strconv.ParseUint(after, 10, 32); u != 0 {
 // An integer no. of backoff seconds was provided.
-r.backoff = time.Duration(u) * time.Second
+r.backoff = time.Duration(u) * time.Second // #nosec G115 -- We clamp backoff below.
 } else if at, _ := http.ParseTime(after); !at.Before(now) {
 // An HTTP formatted future date-time was provided.
 r.backoff = at.Sub(now)
 }
 
 // Don't let their provided backoff exceed our max.
-if max := baseBackoff * time.Duration(c.retries); //
+if max := baseBackoff * time.Duration(c.retries); // #nosec G115 -- We control c.retries.
 r.backoff > max {
 r.backoff = max
 }
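For context on the lines annotated above: a Retry-After header can carry either an integer number of seconds or an HTTP date, and whichever it yields is then clamped to a maximum backoff. The following standalone sketch shows that parsing and clamping with only the standard library; it is illustrative, not the GoToSocial implementation.

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// parseRetryAfter interprets a Retry-After value as either an integer number
// of seconds or an HTTP date, then clamps the result to maxBackoff.
func parseRetryAfter(after string, now time.Time, maxBackoff time.Duration) time.Duration {
	var backoff time.Duration

	if u, _ := strconv.ParseUint(after, 10, 32); u != 0 {
		// An integer number of backoff seconds was provided.
		backoff = time.Duration(u) * time.Second
	} else if at, _ := http.ParseTime(after); !at.Before(now) {
		// An HTTP-formatted future date-time was provided.
		backoff = at.Sub(now)
	}

	// Never exceed the configured maximum backoff.
	if backoff > maxBackoff {
		backoff = maxBackoff
	}
	return backoff
}

func main() {
	now := time.Now()
	fmt.Println(parseRetryAfter("5", now, 30*time.Second)) // 5s
	future := now.Add(2 * time.Minute).UTC().Format(http.TimeFormat)
	fmt.Println(parseRetryAfter(future, now, 30*time.Second)) // 30s (clamped)
}
```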
@@ -19,6 +19,7 @@
 
 import (
 "net/http"
+"strconv"
 "time"
 
 "github.com/superseriousbusiness/gotosocial/internal/log"
@@ -32,6 +33,7 @@
 // Request wraps an HTTP request
 // to add our own retry / backoff.
 type Request struct {
 
 // Current backoff dur.
 backoff time.Duration
+
@@ -57,8 +59,7 @@ func WrapRequest(r *http.Request) *Request {
 // Only add content-type header if a request body exists.
 entry = entry.WithField("contentType", r.Header.Get("Content-Type"))
 }
-// note our formatting library follows ptr values
-entry = entry.WithField("attempt", &rr.attempts)
+entry = entry.WithField("attempt", uintPtr{&rr.attempts})
 rr.Entry = entry
 return rr
 }
@@ -73,3 +74,12 @@ func (r *Request) BackOff() time.Duration {
 }
 return r.backoff
 }
+
+type uintPtr struct{ u *uint }
+
+func (f uintPtr) String() string {
+if f.u == nil {
+return "<nil>"
+}
+return strconv.FormatUint(uint64(*f.u), 10)
+}
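The uintPtr wrapper added above implements fmt.Stringer over a *uint, so the attempt counter appears to be read at the moment a log line is formatted rather than when the field is first attached. A small sketch demonstrating that behaviour with plain fmt (the variable names are invented):

```go
package main

import (
	"fmt"
	"strconv"
)

// uintPtr renders the value behind a *uint at formatting time.
type uintPtr struct{ u *uint }

func (f uintPtr) String() string {
	if f.u == nil {
		return "<nil>"
	}
	return strconv.FormatUint(uint64(*f.u), 10)
}

func main() {
	var attempts uint
	field := uintPtr{&attempts} // captured once, before any attempts

	attempts = 1
	fmt.Printf("attempt=%s\n", field) // attempt=1

	attempts = 2
	fmt.Printf("attempt=%s\n", field) // attempt=2
}
```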
@@ -21,6 +21,7 @@
 "context"
 "encoding/json"
 "errors"
+"math"
 "os"
 "path"
 "strconv"
@@ -556,10 +557,18 @@ func (res *ffprobeResult) Process() (*result, error) {
 if p := strings.SplitN(str, "/", 2); len(p) == 2 {
 n, _ := strconv.ParseUint(p[0], 10, 32)
 d, _ := strconv.ParseUint(p[1], 10, 32)
-num, den = uint32(n), uint32(d)
+
+if n > math.MaxUint32 || d > math.MaxUint32 {
+return nil, gtserror.Newf("overflowed numerator or denominator")
+}
+num, den = uint32(n), uint32(d) // #nosec G115 -- Just checked.
 } else {
 n, _ := strconv.ParseUint(p[0], 10, 32)
-num = uint32(n)
+
+if n > math.MaxUint32 {
+return nil, gtserror.Newf("overflowed numerator")
+}
+num = uint32(n) // #nosec G115 -- Just checked.
 }
 
 // Set final divised framerate.
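The ffprobe hunk above guards the uint64→uint32 narrowing when parsing a rational frame rate such as "30000/1001". Here is a standalone sketch of the same parse-and-guard logic; note that strconv.ParseUint with bit size 32 already bounds the value, so the explicit comparison mainly documents the invariant for gosec's G115 check. The function name and return type are chosen for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"math"
	"strconv"
	"strings"
)

// parseFrameRate parses an ffprobe-style frame rate like "30000/1001" or "25".
func parseFrameRate(str string) (float32, error) {
	var num, den uint32 = 0, 1

	if p := strings.SplitN(str, "/", 2); len(p) == 2 {
		n, _ := strconv.ParseUint(p[0], 10, 32)
		d, _ := strconv.ParseUint(p[1], 10, 32)
		if n > math.MaxUint32 || d > math.MaxUint32 {
			return 0, errors.New("overflowed numerator or denominator")
		}
		num, den = uint32(n), uint32(d) // #nosec G115 -- just checked
	} else {
		n, _ := strconv.ParseUint(p[0], 10, 32)
		if n > math.MaxUint32 {
			return 0, errors.New("overflowed numerator")
		}
		num = uint32(n) // #nosec G115 -- just checked
	}

	if den == 0 {
		return 0, errors.New("zero denominator")
	}
	return float32(num) / float32(den), nil
}

func main() {
	fmt.Println(parseFrameRate("30000/1001")) // ~29.97 <nil>
	fmt.Println(parseFrameRate("25"))         // 25 <nil>
}
```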
@@ -399,9 +399,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 g16 := uint16(s[1])
 b16 := uint16(s[2])
 a16 := uint16(a)
-d[0] = uint8(r16 * 0xff / a16)
-d[1] = uint8(g16 * 0xff / a16)
-d[2] = uint8(b16 * 0xff / a16)
+d[0] = uint8(r16 * 0xff / a16) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g16 * 0xff / a16) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b16 * 0xff / a16) // #nosec G115 -- Overflow desired.
 d[3] = a
 }
 j += 4
@@ -431,9 +431,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 g32 := uint32(s[2])<<8 | uint32(s[3])
 b32 := uint32(s[4])<<8 | uint32(s[5])
 a32 := uint32(s[6])<<8 | uint32(s[7])
-d[0] = uint8((r32 * 0xffff / a32) >> 8)
-d[1] = uint8((g32 * 0xffff / a32) >> 8)
-d[2] = uint8((b32 * 0xffff / a32) >> 8)
+d[0] = uint8((r32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8((g32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8((b32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
 }
 d[3] = a
 j += 4
@@ -530,9 +530,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 }
 
 d := dst[j : j+4 : j+4]
-d[0] = uint8(r)
-d[1] = uint8(g)
-d[2] = uint8(b)
+d[0] = uint8(r) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b) // #nosec G115 -- Overflow desired.
 d[3] = 0xff
 
 iy++
@@ -569,9 +569,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 d := dst[j : j+4 : j+4]
 switch a16 {
 case 0xffff:
-d[0] = uint8(r16 >> 8)
-d[1] = uint8(g16 >> 8)
-d[2] = uint8(b16 >> 8)
+d[0] = uint8(r16 >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g16 >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b16 >> 8) // #nosec G115 -- Overflow desired.
 d[3] = 0xff
 case 0:
 d[0] = 0
@@ -579,10 +579,10 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 d[2] = 0
 d[3] = 0
 default:
-d[0] = uint8(((r16 * 0xffff) / a16) >> 8)
-d[1] = uint8(((g16 * 0xffff) / a16) >> 8)
-d[2] = uint8(((b16 * 0xffff) / a16) >> 8)
-d[3] = uint8(a16 >> 8)
+d[0] = uint8(((r16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8(((g16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8(((b16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[3] = uint8(a16 >> 8) // #nosec G115 -- Overflow desired.
 }
 j += 4
 }
@@ -617,7 +617,7 @@ func clampFloat(x float64) uint8 {
 return 255
 }
 if v > 0 {
-return uint8(v)
+return uint8(v) // #nosec G115 -- Just checked.
 }
 return 0
 }
@@ -49,9 +49,6 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 refetchIDs []string
 )
 
-// Get max supported remote emoji media size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
-
 // page through emojis 20 at a time, looking for those with missing images
 for {
 // Fetch next block of emojis from database
@@ -111,8 +108,10 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 continue
 }
 
+// Get max supported remote emoji media size.
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.
 dataFunc := func(ctx context.Context) (reader io.ReadCloser, err error) {
-return dereferenceMedia(ctx, emojiImageIRI, int64(maxsz))
+return dereferenceMedia(ctx, emojiImageIRI, maxsz)
 }
 
 processingEmoji, err := m.UpdateEmoji(ctx, emoji, dataFunc, AdditionalEmojiInfo{
@@ -462,11 +462,11 @@ func (p *Processor) UpdateAvatar(
 gtserror.WithCode,
 ) {
 // Get maximum supported local media size.
-maxsz := config.GetMediaLocalMaxSize()
+maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.
 
 // Ensure media within size bounds.
-if avatar.Size > int64(maxsz) {
-text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+if avatar.Size > maxsz {
+text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }
 
@@ -478,7 +478,7 @@ func (p *Processor) UpdateAvatar(
 }
 
 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 
 // Write to instance storage.
 return p.c.StoreLocalMedia(ctx,
@@ -507,11 +507,11 @@ func (p *Processor) UpdateHeader(
 gtserror.WithCode,
 ) {
 // Get maximum supported local media size.
-maxsz := config.GetMediaLocalMaxSize()
+maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.
 
 // Ensure media within size bounds.
-if header.Size > int64(maxsz) {
-text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+if header.Size > maxsz {
+text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }
 
@@ -523,7 +523,7 @@ func (p *Processor) UpdateHeader(
 }
 
 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 
 // Write to instance storage.
 return p.c.StoreLocalMedia(ctx,
@ -45,11 +45,11 @@ func (p *Processor) EmojiCreate(
|
||||||
) (*apimodel.Emoji, gtserror.WithCode) {
|
) (*apimodel.Emoji, gtserror.WithCode) {
|
||||||
|
|
||||||
// Get maximum supported local emoji size.
|
// Get maximum supported local emoji size.
|
||||||
maxsz := config.GetMediaEmojiLocalMaxSize()
|
maxsz := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
|
||||||
|
|
||||||
// Ensure media within size bounds.
|
// Ensure media within size bounds.
|
||||||
if form.Image.Size > int64(maxsz) {
|
if form.Image.Size > maxsz {
|
||||||
text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz)
|
text := fmt.Sprintf("emoji exceeds configured max size: %d", maxsz)
|
||||||
return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
|
return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -61,7 +61,7 @@ func (p *Processor) EmojiCreate(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wrap the multipart file reader to ensure is limited to max.
|
// Wrap the multipart file reader to ensure is limited to max.
|
||||||
rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
|
rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
|
||||||
data := func(context.Context) (io.ReadCloser, error) {
|
data := func(context.Context) (io.ReadCloser, error) {
|
||||||
return rc, nil
|
return rc, nil
|
||||||
}
|
}
|
||||||
|
@@ -441,11 +441,11 @@ func (p *Processor) emojiUpdateModify(
 	// We can do both at the same time :)

 	// Get maximum supported local emoji size.
-	maxsz := config.GetMediaEmojiLocalMaxSize()
+	maxsz := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.

 	// Ensure media within size bounds.
-	if image.Size > int64(maxsz) {
-		text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz)
+	if image.Size > maxsz {
+		text := fmt.Sprintf("emoji exceeds configured max size: %d", maxsz)
 		return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 	}

@@ -457,7 +457,7 @@ func (p *Processor) emojiUpdateModify(
 	}

 	// Wrap the multipart file reader to ensure is limited to max.
-	rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+	rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 	data := func(context.Context) (io.ReadCloser, error) {
 		return rc, nil
 	}
@@ -35,11 +35,11 @@
 func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form *apimodel.AttachmentRequest) (*apimodel.Attachment, gtserror.WithCode) {

 	// Get maximum supported local media size.
-	maxsz := config.GetMediaLocalMaxSize()
+	maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.

 	// Ensure media within size bounds.
-	if form.File.Size > int64(maxsz) {
-		text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+	if form.File.Size > maxsz {
+		text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 		return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 	}

@@ -58,7 +58,7 @@ func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form
 	}

 	// Wrap the multipart file reader to ensure is limited to max.
-	rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+	rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)

 	// Create local media and write to instance storage.
 	attachment, errWithCode := p.c.StoreLocalMedia(ctx,
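Taken together, the hunks above converge on one pattern: convert the configured maximum to int64 exactly once (annotated for gosec, since the config value is validated elsewhere), compare the declared upload size against it, then cap the reader itself so the stream can never exceed the limit regardless of the declared size. A minimal sketch of that pattern; the getter and helper names below are illustrative stand-ins, not GoToSocial's API:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // getMediaLocalMaxSize stands in for the real config getter, which is
    // assumed to return an already-validated unsigned byte count.
    func getMediaLocalMaxSize() uint64 { return 40 * 1024 * 1024 }

    // limitUpload rejects uploads whose declared size is over the limit,
    // then caps the reader so it cannot yield more than the limit anyway.
    func limitUpload(declared int64, body io.ReadCloser) (io.ReadCloser, error) {
        // Convert once; validated at startup, so the cast cannot overflow.
        maxsz := int64(getMediaLocalMaxSize()) // #nosec G115 -- Already validated.

        if declared > maxsz {
            return nil, fmt.Errorf("media exceeds configured max size: %d", maxsz)
        }

        // Cap the byte stream, in the spirit of iotools.UpdateReadCloserLimit.
        type limited struct {
            io.Reader
            io.Closer
        }
        return limited{io.LimitReader(body, maxsz), body}, nil
    }

    func main() {
        body := io.NopCloser(strings.NewReader("fake image bytes"))
        if _, err := limitUpload(16, body); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }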
@@ -647,7 +647,7 @@ func (c *Converter) AttachmentToAPIAttachment(ctx context.Context, media *gtsmod
 			Size: toAPISize(media.FileMeta.Original.Width, media.FileMeta.Original.Height),
 			FrameRate: toAPIFrameRate(media.FileMeta.Original.Framerate),
 			Duration: util.PtrOrZero(media.FileMeta.Original.Duration),
-			Bitrate: int(util.PtrOrZero(media.FileMeta.Original.Bitrate)),
+			Bitrate: util.PtrOrZero(media.FileMeta.Original.Bitrate),
 		}

 		// Copy over local file URL.
@@ -1551,9 +1551,9 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins
 	instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL
 	instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes
 	instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes
-	instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize())
+	instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 	instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit
-	instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize())
+	instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 	instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit
 	instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit
 	instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions()
@@ -1563,7 +1563,7 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins
 	instance.Configuration.Accounts.AllowCustomCSS = config.GetAccountsAllowCustomCSS()
 	instance.Configuration.Accounts.MaxFeaturedTags = instanceAccountsMaxFeaturedTags
 	instance.Configuration.Accounts.MaxProfileFields = instanceAccountsMaxProfileFields
-	instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize())
+	instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
 	instance.Configuration.OIDCEnabled = config.GetOIDCEnabled()

 	// URLs
@@ -1695,9 +1695,9 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins
 	instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL
 	instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes
 	instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes
-	instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize())
+	instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 	instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit
-	instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize())
+	instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 	instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit
 	instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit
 	instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions()
@@ -1707,7 +1707,7 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins
 	instance.Configuration.Accounts.AllowCustomCSS = config.GetAccountsAllowCustomCSS()
 	instance.Configuration.Accounts.MaxFeaturedTags = instanceAccountsMaxFeaturedTags
 	instance.Configuration.Accounts.MaxProfileFields = instanceAccountsMaxProfileFields
-	instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize())
+	instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
 	instance.Configuration.OIDCEnabled = config.GetOIDCEnabled()

 	// registrations
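These instance-config hunks only add the suppression comments: gosec's G115 rule flags integer conversions that could overflow or truncate (uint64 to int here), and the inline annotation silences it with a stated reason because the config values are range-checked when loaded. A tiny illustrative sketch of the annotation idiom, with made-up values:

    package main

    import "fmt"

    func main() {
        // Pretend this came from config and was validated to fit in an int.
        var remoteMaxSize uint64 = 8 * 1024 * 1024

        // gosec's G115 rule flags uint64 -> int conversions as potential
        // overflows; the inline annotation suppresses it with a reason.
        sizeLimit := int(remoteMaxSize) // #nosec G115 -- Already validated.

        fmt.Println(sizeLimit)
    }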
@@ -1744,7 +1744,7 @@ func (suite *InternalToFrontendTestSuite) TestStatusToAPIStatusPendingApproval()
   "in_reply_to_account_id": "01F8MH5NBDF2MV7CTC4Q5128HF",
   "sensitive": false,
   "spoiler_text": "",
-  "visibility": "unlisted",
+  "visibility": "public",
   "language": null,
   "uri": "http://localhost:8080/users/admin/statuses/01J5QVB9VC76NPPRQ207GG4DRZ",
   "url": "http://localhost:8080/@admin/statuses/01J5QVB9VC76NPPRQ207GG4DRZ",
@@ -3177,7 +3177,7 @@ func (suite *InternalToFrontendTestSuite) TestIntReqToAPI() {
   "in_reply_to_account_id": null,
   "sensitive": true,
   "spoiler_text": "you won't be able to reply to this without my approval",
-  "visibility": "unlisted",
+  "visibility": "public",
   "language": "en",
   "uri": "http://localhost:8080/users/1happyturtle/statuses/01F8MHC8VWDRBQR0N1BATDDEM5",
   "url": "http://localhost:8080/@1happyturtle/statuses/01F8MHC8VWDRBQR0N1BATDDEM5",
@@ -3269,7 +3269,7 @@ func (suite *InternalToFrontendTestSuite) TestIntReqToAPI() {
   "in_reply_to_account_id": "01F8MH5NBDF2MV7CTC4Q5128HF",
   "sensitive": false,
   "spoiler_text": "",
-  "visibility": "unlisted",
+  "visibility": "public",
   "language": null,
   "uri": "http://localhost:8080/users/admin/statuses/01J5QVB9VC76NPPRQ207GG4DRZ",
   "url": "http://localhost:8080/@admin/statuses/01J5QVB9VC76NPPRQ207GG4DRZ",
@@ -116,6 +116,7 @@ nav:
     - "advanced/tracing.md"
     - "advanced/metrics.md"
     - "advanced/replicating-sqlite.md"
+    - "advanced/sqlite-networked-storage.md"

   - "Admin":
     - "admin/settings.md"
@@ -1531,7 +1531,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status {
 		BoostOfID: "",
 		BoostOfAccountID: "",
 		ThreadID: "01HCWE4P0EW9HBA5WHW97D5YV0",
-		Visibility: gtsmodel.VisibilityUnlocked,
+		Visibility: gtsmodel.VisibilityPublic,
 		Sensitive: util.Ptr(false),
 		CreatedWithApplicationID: "01F8MGXQRHYF5QPMTMXP78QC2F",
 		Federated: util.Ptr(true),
@@ -1811,7 +1811,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status {
 		BoostOfID: "",
 		ThreadID: "01HCWE4P0EW9HBA5WHW97D5YV0",
 		ContentWarning: "you won't be able to reply to this without my approval",
-		Visibility: gtsmodel.VisibilityUnlocked,
+		Visibility: gtsmodel.VisibilityPublic,
 		Sensitive: util.Ptr(true),
 		Language: "en",
 		CreatedWithApplicationID: "01F8MGYG9E893WRHW0TAEXR8GJ",
vendor/codeberg.org/gruf/go-structr/cache.go (generated, vendored): 14 changed lines

@@ -575,8 +575,9 @@ func (c *Cache[T]) store_value(index *Index, key string, value T) {
 	item.data = value

 	if index != nil {
-		// Append item to index.
-		index.append(key, item)
+		// Append item to index a key
+		// was already generated for.
+		index.append(&c.lru, key, item)
 	}

 	// Get ptr to value data.
@@ -607,8 +608,8 @@ func (c *Cache[T]) store_value(index *Index, key string, value T) {
 			continue
 		}

-		// Append item to index.
-		idx.append(key, item)
+		// Append item to this index.
+		idx.append(&c.lru, key, item)
 	}

 	// Add item to main lru list.
@@ -645,8 +646,9 @@ func (c *Cache[T]) store_error(index *Index, key string, err error) {
 	// Set error val.
 	item.data = err

-	// Append item to index.
-	index.append(key, item)
+	// Append item to index a key
+	// was already generated for.
+	index.append(&c.lru, key, item)

 	// Add item to main lru list.
 	c.lru.push_front(&item.elem)
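The new call sites thread the cache's main LRU list into Index.append because, per the updated doc comment in index.go below, appending under a "unique" key can evict a colliding entry, and if that entry was the last index still holding its item, the item must also be removed from the outer list. A reduced sketch of that ownership rule, with illustrative types rather than go-structr's own:

    package main

    import "fmt"

    // item is held both by per-index buckets and by one global LRU list.
    type item struct {
        key     string
        indexes int // how many index buckets still reference it
    }

    // lru is a stand-in for the cache's outer linked list.
    type lru struct{ items map[string]*item }

    func (l *lru) remove(it *item) { delete(l.items, it.key) }

    // evictFromIndex models what the new Index.append does after it
    // overwrites a colliding "unique" entry: drop the index reference,
    // and if that was the last one, drop the item from the outer list.
    func evictFromIndex(l *lru, it *item) {
        it.indexes--
        if it.indexes == 0 {
            // No index references the item any more; without this step
            // it would linger in the LRU, unreachable through any index.
            l.remove(it)
        }
    }

    func main() {
        it := &item{key: "status:123", indexes: 1}
        l := &lru{items: map[string]*item{it.key: it}}

        evictFromIndex(l, it)
        fmt.Println(len(l.items)) // 0
    }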
vendor/codeberg.org/gruf/go-structr/index.go (generated, vendored): 37 changed lines

@@ -174,7 +174,7 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
 // get_one will fetch one indexed item under key.
 func (i *Index) get_one(key Key) *indexed_item {
 	// Get list at hash.
-	l, _ := i.data.Get(key.key)
+	l := i.data.Get(key.key)
 	if l == nil {
 		return nil
 	}
@@ -192,7 +192,7 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
 	}

 	// Get list at hash.
-	l, _ := i.data.Get(key)
+	l := i.data.Get(key)
 	if l == nil {
 		return
 	}
@@ -237,11 +237,12 @@ func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
 }

 // append will append the given index entry to appropriate
-// doubly-linked-list in index hashmap. this handles case
-// of key collisions and overwriting 'unique' entries.
-func (i *Index) append(key string, item *indexed_item) {
+// doubly-linked-list in index hashmap. this handles case of
+// overwriting "unique" index entries, and removes from given
+// outer linked-list in the case that it is no longer indexed.
+func (i *Index) append(ll *list, key string, item *indexed_item) {
 	// Look for existing.
-	l, _ := i.data.Get(key)
+	l := i.data.Get(key)

 	if l == nil {

@@ -255,12 +256,21 @@ func (i *Index) append(key string, item *indexed_item) {
 		elem := l.head
 		l.remove(elem)

-		// Drop index from inner item.
+		// Drop index from inner item,
+		// catching the evicted item.
 		e := (*index_entry)(elem.data)
-		e.item.drop_index(e)
+		evicted := e.item
+		evicted.drop_index(e)

 		// Free unused entry.
 		free_index_entry(e)
+
+		if len(evicted.indexed) == 0 {
+			// Evicted item is not indexed,
+			// remove from outer linked list.
+			ll.remove(&evicted.elem)
+			free_indexed_item(evicted)
+		}
 	}

 	// Prepare new index entry.
@@ -283,7 +293,7 @@ func (i *Index) delete(key string, hook func(*indexed_item)) {
 	}

 	// Get list at hash.
-	l, _ := i.data.Get(key)
+	l := i.data.Get(key)
 	if l == nil {
 		return
 	}
@@ -292,10 +302,9 @@ func (i *Index) delete(key string, hook func(*indexed_item)) {
 	i.data.Delete(key)

 	// Iterate entries in list.
-	for x := 0; x < l.len; x++ {
+	l.rangefn(func(elem *list_elem) {

-		// Pop list head.
-		elem := l.head
+		// Remove elem.
 		l.remove(elem)

 		// Extract element entry + item.
@@ -310,7 +319,7 @@ func (i *Index) delete(key string, hook func(*indexed_item)) {

 		// Pass to hook.
 		hook(item)
-	}
+	})

 	// Release list.
 	free_list(l)
@@ -319,7 +328,7 @@ func (i *Index) delete(key string, hook func(*indexed_item)) {
 // delete_entry deletes the given index entry.
 func (i *Index) delete_entry(entry *index_entry) {
 	// Get list at hash sum.
-	l, _ := i.data.Get(entry.key)
+	l := i.data.Get(entry.key)
 	if l == nil {
 		return
 	}
vendor/codeberg.org/gruf/go-structr/item.go (generated, vendored): 5 changed lines

@@ -50,12 +50,9 @@ func (i *indexed_item) drop_index(entry *index_entry) {
 			continue
 		}

-		// Unset tptr value to
-		// ensure GC can take it.
-		i.indexed[x] = nil
-
 		// Move all index entries down + reslice.
 		_ = copy(i.indexed[x:], i.indexed[x+1:])
+		i.indexed[len(i.indexed)-1] = nil
 		i.indexed = i.indexed[:len(i.indexed)-1]
 		break
 	}
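The old code nilled i.indexed[x] before the copy, but the copy immediately overwrites that slot; the slot that actually keeps a stale pointer alive is the tail one left duplicated after the shift. The new order (shift down, nil the tail, reslice) is the standard idiom. A self-contained sketch of that idiom, not go-structr's own code:

    package main

    import "fmt"

    // removeAt drops s[x] from a slice of pointers without leaking the
    // trailing element to the garbage collector: shift later entries down,
    // nil the now-duplicated tail slot, then reslice.
    func removeAt[T any](s []*T, x int) []*T {
        copy(s[x:], s[x+1:])
        s[len(s)-1] = nil
        return s[:len(s)-1]
    }

    func main() {
        a, b, c := 1, 2, 3
        s := []*int{&a, &b, &c}
        s = removeAt(s, 1)
        fmt.Println(len(s), *s[0], *s[1]) // 2 1 3
    }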
vendor/codeberg.org/gruf/go-structr/list.go (generated, vendored): 120 changed lines

@@ -48,27 +48,17 @@ func free_list(list *list) {
 // push_front will push the given elem to front (head) of list.
 func (l *list) push_front(elem *list_elem) {
-	if l.len == 0 {
-		// Set new tail + head
-		l.head = elem
-		l.tail = elem
-
-		// Link elem to itself
-		elem.next = elem
-		elem.prev = elem
-	} else {
-		oldHead := l.head
-
+	// Set new head.
+	oldHead := l.head
+	l.head = elem
+
+	if oldHead != nil {
 		// Link to old head
 		elem.next = oldHead
 		oldHead.prev = elem
-
-		// Link up to tail
-		elem.prev = l.tail
-		l.tail.next = elem
-
-		// Set new head
-		l.head = elem
+	} else {
+		// First in list.
+		l.tail = elem
 	}

 	// Incr count
@@ -77,27 +67,17 @@ func (l *list) push_front(elem *list_elem) {
 // push_back will push the given elem to back (tail) of list.
 func (l *list) push_back(elem *list_elem) {
-	if l.len == 0 {
-		// Set new tail + head
-		l.head = elem
-		l.tail = elem
-
-		// Link elem to itself
-		elem.next = elem
-		elem.prev = elem
-	} else {
-		oldTail := l.tail
-
+	// Set new tail.
+	oldTail := l.tail
+	l.tail = elem
+
+	if oldTail != nil {
 		// Link to old tail
 		elem.prev = oldTail
 		oldTail.next = elem
-
-		// Link up to head
-		elem.next = l.head
-		l.head.prev = elem
-
-		// Set new tail
-		l.tail = elem
+	} else {
+		// First in list.
+		l.head = elem
 	}

 	// Incr count
@@ -105,53 +85,57 @@ func (l *list) push_back(elem *list_elem) {
 }

 // move_front will move given elem to front (head) of list.
+// if it is already at front this call is a no-op.
 func (l *list) move_front(elem *list_elem) {
+	if elem == l.head {
+		return
+	}
 	l.remove(elem)
 	l.push_front(elem)
 }

-// move_back will move given elem to back (tail) of list.
+// move_back will move given elem to back (tail) of list,
+// if it is already at back this call is a no-op.
 func (l *list) move_back(elem *list_elem) {
+	if elem == l.tail {
+		return
+	}
 	l.remove(elem)
 	l.push_back(elem)
 }

 // remove will remove given elem from list.
 func (l *list) remove(elem *list_elem) {
-	if l.len <= 1 {
-		// Drop elem's links
-		elem.next = nil
-		elem.prev = nil
-
-		// Only elem in list
-		l.head = nil
-		l.tail = nil
-		l.len = 0
-		return
-	}
-
-	// Get surrounding elems
+	// Get linked elems.
 	next := elem.next
 	prev := elem.prev

-	// Relink chain
-	next.prev = prev
-	prev.next = next
-
-	switch elem {
-	// Set new head
-	case l.head:
-		l.head = next
-
-	// Set new tail
-	case l.tail:
-		l.tail = prev
-	}
-
-	// Drop elem's links
+	// Unset elem.
 	elem.next = nil
 	elem.prev = nil

+	switch {
+	// elem is ONLY one in list.
+	case next == nil && prev == nil:
+		l.head = nil
+		l.tail = nil
+
+	// elem is front in list.
+	case next != nil && prev == nil:
+		l.head = next
+		next.prev = nil
+
+	// elem is last in list.
+	case prev != nil && next == nil:
+		l.tail = prev
+		prev.next = nil
+
+	// elem in middle of list.
+	default:
+		next.prev = prev
+		prev.next = next
+	}
+
 	// Decr count
 	l.len--
 }
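The rewrite drops the old "single element links to itself" trick: the list is now plainly nil-terminated, so remove can classify an element purely by its neighbours instead of special-casing l.len <= 1. A reduced, self-contained sketch of the same four cases, with illustrative types rather than go-structr's:

    package main

    import "fmt"

    type node struct {
        prev, next *node
        val        int
    }

    type dlist struct {
        head, tail *node
        len        int
    }

    func (l *dlist) pushBack(n *node) {
        oldTail := l.tail
        l.tail = n
        if oldTail != nil {
            n.prev = oldTail
            oldTail.next = n
        } else {
            l.head = n
        }
        l.len++
    }

    // remove classifies the element by its neighbours, mirroring the
    // four cases of the new list.remove above.
    func (l *dlist) remove(n *node) {
        next, prev := n.next, n.prev
        n.next, n.prev = nil, nil
        switch {
        case next == nil && prev == nil: // only element
            l.head, l.tail = nil, nil
        case prev == nil: // front element
            l.head, next.prev = next, nil
        case next == nil: // last element
            l.tail, prev.next = prev, nil
        default: // middle element
            next.prev, prev.next = prev, next
        }
        l.len--
    }

    func main() {
        l := &dlist{}
        a, b, c := &node{val: 1}, &node{val: 2}, &node{val: 3}
        l.pushBack(a)
        l.pushBack(b)
        l.pushBack(c)
        l.remove(b)                                // middle case
        fmt.Println(l.head.val, l.tail.val, l.len) // 1 3 2
    }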
@@ -161,9 +145,11 @@ func (l *list) rangefn(fn func(*list_elem)) {
 	if fn == nil {
 		panic("nil fn")
 	}
-	elem := l.head
-	for i := 0; i < l.len; i++ {
-		fn(elem)
-		elem = elem.next
+	for e := l.head; //
+	e != nil; //
+	{
+		n := e.next
+		fn(e)
+		e = n
 	}
 }
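The rewritten loop walks to the nil terminator and saves the successor before invoking the callback, which is what lets Index.delete above call l.remove(elem) from inside the hook without derailing iteration. A minimal sketch of the pattern, reusing nothing from go-structr:

    package main

    import "fmt"

    type elem struct {
        next *elem
        val  int
    }

    // forEach captures the successor before calling fn, so fn may unlink
    // or reset the current element without breaking the walk.
    func forEach(head *elem, fn func(*elem)) {
        for e := head; e != nil; {
            n := e.next // grab the successor first
            fn(e)       // fn may now unlink e, clear its links, etc.
            e = n
        }
    }

    func main() {
        c := &elem{val: 3}
        b := &elem{next: c, val: 2}
        a := &elem{next: b, val: 1}

        forEach(a, func(e *elem) {
            e.next = nil // simulate removal during iteration
            fmt.Println(e.val)
        })
    }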
vendor/codeberg.org/gruf/go-structr/map.go (generated, vendored): 5 changed lines

@@ -10,9 +10,8 @@ func (m *hashmap) init(cap int) {
 	m.n = cap
 }

-func (m *hashmap) Get(key string) (*list, bool) {
-	list, ok := m.m[key]
-	return list, ok
+func (m *hashmap) Get(key string) *list {
+	return m.m[key]
 }

 func (m *hashmap) Put(key string, list *list) {
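With pointer values, the comma-ok form is redundant: indexing a missing key already returns the zero value nil, which is exactly what every caller of hashmap.Get tests with `if l == nil`. A tiny illustration (the bucket type is a placeholder):

    package main

    import "fmt"

    type bucket struct{ entries []string }

    func main() {
        m := map[string]*bucket{}

        // Comma-ok form: b is nil and ok is false for a missing key.
        b, ok := m["missing"]
        fmt.Println(b == nil, ok) // true false

        // Plain indexing gives the same nil, so a nil check suffices,
        // which is what the simplified Get above relies on.
        if m["missing"] == nil {
            fmt.Println("not present")
        }
    }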
vendor/codeberg.org/gruf/go-structr/queue.go (generated, vendored): 4 changed lines

@@ -308,8 +308,8 @@ func (q *Queue[T]) index(value T) *indexed_item {
 			continue
 		}

-		// Append item to index.
-		idx.append(key, item)
+		// Append item to this index.
+		idx.append(&q.queue, key, item)
 	}

 	// Done with buf.
vendor/modules.txt (vendored): 2 changed lines

@@ -66,7 +66,7 @@ codeberg.org/gruf/go-storage/disk
 codeberg.org/gruf/go-storage/internal
 codeberg.org/gruf/go-storage/memory
 codeberg.org/gruf/go-storage/s3
-# codeberg.org/gruf/go-structr v0.8.9
+# codeberg.org/gruf/go-structr v0.8.10
 ## explicit; go 1.21
 codeberg.org/gruf/go-structr
 # codeberg.org/superseriousbusiness/exif-terminator v0.9.0