Mirror of https://github.com/superseriousbusiness/gotosocial.git (synced 2024-11-22 11:46:40 +00:00)

Comparing 14 commits: 55e15431fb...fe83516c8d

Commits included: fe83516c8d, 400fd8c62b, 6a76b9d609, 157ee3193d, be3b8076ca, 020da28daa, 2076f7d85f, 096767bb3b, 3f68e13dc7, 40c33ccc49, 90b773ae2a, 4b7d7f9b8b, af5a766f62, d9e59820ed
GoReleaser build configuration:

@@ -39,7 +39,6 @@ builds:
     goos:
       - linux
       - freebsd
-      - openbsd
     goarch:
       - 386
       - amd64
@@ -49,19 +48,14 @@ builds:
       - 6
       - 7
     ignore:
-      # build freebsd + openbsd only for amd64
+      # Build FreeBSD
+      # only for amd64.
       - goos: freebsd
         goarch: arm64
       - goos: freebsd
         goarch: arm
       - goos: freebsd
         goarch: 386
-      - goos: openbsd
-        goarch: arm64
-      - goos: openbsd
-        goarch: arm
-      - goos: openbsd
-        goarch: 386
     mod_timestamp: "{{ .CommitTimestamp }}"

   # MODERNC SQLITE BINARY BUILDS
   -
@@ -88,7 +82,6 @@ builds:
     goos:
       - linux
       - freebsd
-      - openbsd
     goarch:
       - 386
       - amd64
@@ -98,19 +91,14 @@ builds:
       - 6
       - 7
     ignore:
-      # build freebsd + openbsd only for amd64
+      # Build FreeBSD
+      # only for amd64.
       - goos: freebsd
         goarch: arm64
       - goos: freebsd
         goarch: arm
       - goos: freebsd
         goarch: 386
-      - goos: openbsd
-        goarch: arm64
-      - goos: openbsd
-        goarch: arm
-      - goos: openbsd
-        goarch: 386
     mod_timestamp: "{{ .CommitTimestamp }}"

 # https://goreleaser.com/customization/docker/
@@ -474,17 +462,42 @@ release:

 Tl;dr: Regardless of whether you're using SQLite or Postgres as your DB driver, you most likely want the regular version without `moderncsqlite` in the name.

-However, if you're on FreeBSD or OpenBSD, use the `moderncsqlite` version instead. See the table below:
-
-| OS      | Architecture          | Binary archive | Docker |
-| ------- | --------------------- | -------------- | ------ |
-| Linux   | x86-64/AMD64 (64-bit) | [linux_amd64.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_amd64.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
-| Linux   | x86-32/i386 (32-bit)  | [linux_386.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_386.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
-| Linux   | Armv8/ARM64 (64-bit)  | [linux_arm64.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_arm64.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
-| Linux   | Armv7/ARM32 (32-bit)  | [linux_armv7.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_armv7.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
-| Linux   | Armv6/ARM32 (32-bit)  | [linux_armv6.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_armv6.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
-| OpenBSD | x86-64/AMD64 (64-bit) | [openbsd_amd64_moderncsqlite.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_openbsd_amd64_moderncsqlite.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}-moderncsqlite` |
-| FreeBSD | x86-64/AMD64 (64-bit) | [freebsd_amd64_moderncsqlite.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_freebsd_amd64_moderncsqlite.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}-moderncsqlite` |
+However, if you're on FreeBSD, 32-bit Linux or 32-bit ARM, we recommend using the `moderncsqlite` version instead.
+
+You may need to change some configuration options too. See the table below:
+
+| OS      | Architecture          | Support level                     | Binary archive | Docker |
+| ------- | --------------------- | --------------------------------- | -------------- | ------ |
+| Linux   | x86-64/AMD64 (64-bit) | 🟢 Full                           | [linux_amd64.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_amd64.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
+| Linux   | Armv8/ARM64 (64-bit)  | 🟢 Full                           | [linux_arm64.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_arm64.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}` |
+| FreeBSD | x86-64/AMD64 (64-bit) | 🟢 Full<sup>[1](#freebsd)</sup>   | [freebsd_amd64_moderncsqlite.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_freebsd_amd64_moderncsqlite.tar.gz) | None provided |
+| Linux   | x86-32/i386 (32-bit)  | 🟡 Partial<sup>[2](#32-bit)</sup> | [linux_386_moderncsqlite.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_386_moderncsqlite.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}-moderncsqlite` |
+| Linux   | Armv7/ARM32 (32-bit)  | 🟡 Partial<sup>[2](#32-bit)</sup> | [linux_armv7_moderncsqlite.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_armv7_moderncsqlite.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}-moderncsqlite` |
+| Linux   | Armv6/ARM32 (32-bit)  | 🟡 Partial<sup>[2](#32-bit)</sup> | [linux_armv6_moderncsqlite.tar.gz](https://github.com/superseriousbusiness/gotosocial/releases/download/{{ .Tag }}/gotosocial_{{ .Version }}_linux_armv6_moderncsqlite.tar.gz) | `superseriousbusiness/gotosocial:{{ .Version }}-moderncsqlite` |
+
+#### FreeBSD
+
+`moderncsqlite` version currently recommended, though you might have success with the regular WASM SQLite version.
+
+If running with regular WASM SQLite and having instability or memory issues, the following settings *may* help:
+
+```yaml
+db-max-open-conns-multiplier: 0
+db-sqlite-journal-mode: "TRUNCATE"
+db-sqlite-synchronous: "FULL"
+```
+
+#### 32-bit
+
+`moderncsqlite` version is needed, as performance with regular WASM SQLite is not guaranteed when running on 32-bit.
+
+Remote media processing will likely not work with reasonable performance, so you should set the following config variables to prevent download of remote media onto your instance:
+
+```yaml
+media-remote-max-size: 0
+media-emoji-remote-max-size: 0
+```

 # https://goreleaser.com/customization/changelog/
 changelog:
Dockerfile:

@@ -39,9 +39,10 @@ USER 1000:1000
 #
 # See https://docs.docker.com/engine/reference/builder/#workdir
 #
-# First make sure storage exists + is owned by 1000:1000, then go back
-# to just /gotosocial, where we'll run from
+# First make sure storage + cache exist and are owned by 1000:1000,
+# then go back to just /gotosocial, where we'll actually run from.
 WORKDIR "/gotosocial/storage"
+WORKDIR "/gotosocial/.cache"
 WORKDIR "/gotosocial"

 # copy the dist binary created by goreleaser or build.sh
@@ -51,5 +52,5 @@ COPY --chown=1000:1000 gotosocial /gotosocial/gotosocial
 COPY --chown=1000:1000 --from=bundler web /gotosocial/web
 COPY --chown=1000:1000 --from=swagger /go/src/github.com/superseriousbusiness/gotosocial/swagger.yaml web/assets/swagger.yaml

-VOLUME [ "/gotosocial/storage" ]
+VOLUME [ "/gotosocial/storage", "/gotosocial/.cache" ]
 ENTRYPOINT [ "/gotosocial/gotosocial", "server", "start" ]
README.md:

@@ -41,7 +41,11 @@ Here's a screenshot of the instance landing page!
 - [OIDC integration](#oidc-integration)
 - [Backend-first design](#backend-first-design)
 - [Known Issues](#known-issues)
-- [Getting Started](#getting-started)
+- [Installing GoToSocial](#installing-gotosocial)
+  - [Supported Platforms](#supported-platforms)
+    - [FreeBSD](#freebsd)
+    - [32-bit](#32-bit)
+    - [OpenBSD](#openbsd)
 - [Stable Releases](#stable-releases)
 - [Snapshot Releases](#snapshot-releases)
 - [Docker](#docker)
@@ -258,9 +262,47 @@ Since every ActivityPub server implementation has a slightly different interpretation

 ---

-## Getting Started
+## Installing GoToSocial

-Check our [releases page](https://github.com/superseriousbusiness/gotosocial/releases) and our [getting started](https://docs.gotosocial.org/en/latest/getting_started/) documentation.
+Check our [getting started](https://docs.gotosocial.org/en/latest/getting_started/) documentation! And have a peruse of our [releases page](https://github.com/superseriousbusiness/gotosocial/releases).
+
+<!--releases-start-->
+### Supported Platforms
+
+While we try to support a reasonable number of architectures and operating systems, it's not always possible to support a given platform due to library constraints or performance issues.
+
+Platforms that we don't officially support *may* still work, but we can't test or guarantee performance or stability.
+
+This is the current status of support offered by GoToSocial for different platforms (if something is unlisted it means we haven't checked yet so we don't know):
+
+| OS      | Architecture          | Support level                     | Binary archive | Docker container |
+| ------- | --------------------- | --------------------------------- | -------------- | ---------------- |
+| Linux   | x86-64/AMD64 (64-bit) | 🟢 Full                           | Yes            | Yes              |
+| Linux   | Armv8/ARM64 (64-bit)  | 🟢 Full                           | Yes            | Yes              |
+| FreeBSD | x86-64/AMD64 (64-bit) | 🟢 Full<sup>[1](#freebsd)</sup>   | Yes            | No               |
+| Linux   | x86-32/i386 (32-bit)  | 🟡 Partial<sup>[2](#32-bit)</sup> | Yes            | Yes              |
+| Linux   | Armv7/ARM32 (32-bit)  | 🟡 Partial<sup>[2](#32-bit)</sup> | Yes            | Yes              |
+| Linux   | Armv6/ARM32 (32-bit)  | 🟡 Partial<sup>[2](#32-bit)</sup> | Yes            | Yes              |
+| OpenBSD | Any                   | 🔴 None<sup>[3](#openbsd)</sup>   | No             | No               |
+
+#### FreeBSD
+
+Mostly works, just a few issues with WASM SQLite; check release notes carefully when installing on FreeBSD. If running with Postgres you should have no issues.
+
+#### 32-bit
+
+GtS doesn't work well on 32-bit systems like i386, or Armv6/v7, mainly due to performance of media decoding.
+
+We don't recommend running GtS on 32-bit, but you may have some success either turning off remote media processing altogether, or building a binary yourself with the totally **unsupported, experimental** [nowasm](https://docs.gotosocial.org/en/latest/advanced/builds/nowasm/) tag.
+
+For more guidance, check release notes when trying to install on 32-bit.
+
+#### OpenBSD
+
+Marked as unsupported due to performance issues (high memory usage when idle, crashes while processing media).
+
+While we don't support running GtS on OpenBSD, you may have some success building a binary yourself with the totally **unsupported, experimental** [nowasm](https://docs.gotosocial.org/en/latest/advanced/builds/nowasm/) tag.
+
 ### Stable Releases
@@ -302,6 +344,7 @@ You can also deploy your own instance of GoToSocial with the help of:
 - [Ansible Playbook (MASH)](https://github.com/mother-of-all-self-hosting/mash-playbook): The playbook supports many services, including GoToSocial. [Documentation](https://github.com/mother-of-all-self-hosting/mash-playbook/blob/main/docs/services/gotosocial.md)
 - [GoToSocial Helm Chart](https://github.com/fSocietySocial/charts/tree/main/charts/gotosocial) by [0hlov3](https://github.com/0hlov3).

+<!--releases-end-->
 ---

 ## Contributing
docs/advanced/builds/nowasm.md (new file, 27 lines):

@@ -0,0 +1,27 @@
+# Build without Wazero / WASM
+
+!!! Danger "This is unsupported"
+    We do not offer any kind of support for deployments of GoToSocial built with the `nowasm` tag described in this section. Such builds should be considered strictly experimental, and any issues that come when running them are none of our business! Please don't open issues on the repo looking for help debugging deployments of `nowasm` builds.
+
+On [supported platforms](../../getting_started/releases.md#supported-platforms), GoToSocial uses the WebAssembly runtime [Wazero](https://wazero.io/) to sandbox `ffmpeg`, `ffprobe`, and `sqlite3` WebAssembly binaries, allowing these applications to be packaged and run inside the GoToSocial binary, without requiring admins to install + manage any external dependencies.
+
+This has the advantage of making it easier for admins to maintain their GoToSocial instance, as their GtS binary is completely isolated from any changes to their system-installed `ffmpeg`, `ffprobe`, and `sqlite`. It's also a bit safer to run `ffmpeg` in this way, as GoToSocial wraps the `ffmpeg` binary in a very constrained file system that doesn't permit the `ffmpeg` binary to access any files other than the ones it's decoding + reencoding. In other words, GoToSocial on supported platforms offers most of the functionality of `ffmpeg` and so on, without some of the headaches.
+
+However, not all platforms are capable of running Wazero in the much-faster "compiler" mode, and have to fall back to the very slow (and resource-heavy) "interpreter" mode. See [this table](https://github.com/tetratelabs/wazero?tab=readme-ov-file#conformance) from Wazero for conformance.
+
+"Interpreter" mode runs so poorly for GoToSocial's use case that it's simply not feasible to run a GoToSocial instance in a stable manner on platforms that aren't 64-bit Linux or 64-bit FreeBSD, as all the memory and CPU get gobbled up by media processing.
+
+However! To enable folks to run **experimental, unsupported deployments of GoToSocial**, we expose the `nowasm` build tag, which can be used to compile a build of GoToSocial that does not use Wazero or WASM at all.
+
+A GoToSocial binary built with `nowasm` will use the [modernc version of SQLite](https://pkg.go.dev/modernc.org/sqlite) instead of the WASM one, and will use on-system `ffmpeg` and `ffprobe` binaries for media processing.
+
+To build GoToSocial with the `nowasm` tag, you can pass the tag into our convenience `build.sh` script like so:
+
+```bash
+GO_BUILDTAGS=nowasm ./scripts/build.sh
+```
+
+In order to run a version of GoToSocial built in this way, you must ensure that `ffmpeg` and `ffprobe` are installed on the host. This is usually as simple as running a command like `doas -u root pkg_add ffmpeg` (OpenBSD), or `sudo apt install ffmpeg` (Debian etc.).
+
+!!! Danger "No really though, it's unsupported"
+    Again, if running builds of GoToSocial with `nowasm` works for your OS/Arch combination, that's great, but we do not support such builds and we won't be able to help debugging why something doesn't work.
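Since a `nowasm` build depends on host-installed `ffmpeg` and `ffprobe`, it can be handy to verify they are present before starting the server. This is only an illustrative sketch; the `preflightCheck` helper is hypothetical and not part of GoToSocial:

```go
package main

import (
	"fmt"
	"os/exec"
)

// preflightCheck fails early if the system binaries that a nowasm-style
// build relies on are not available on $PATH.
func preflightCheck() error {
	for _, bin := range []string{"ffmpeg", "ffprobe"} {
		if _, err := exec.LookPath(bin); err != nil {
			return fmt.Errorf("%s not found on $PATH: %w", bin, err)
		}
	}
	return nil
}

func main() {
	if err := preflightCheck(); err != nil {
		panic(err)
	}
	fmt.Println("ffmpeg and ffprobe are available")
}
```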
Advanced docs index page:

@@ -1,6 +1,6 @@
 # Advanced

-In this section we touch on a number of more advanced topics, primarily related around deploying, operating and tuning GoToSocial.
+In this section we touch on a number of more advanced topics, primarily related around building, deploying, operating and tuning GoToSocial.

 We consider these topics advanced because applying them incorrectly does have the possibility of causing client and federation issues. Applying any of these configuration changes may also make it harder for you to debug an issue with your GoToSocial instance if you don't understand the changes that you're making.
swagger.yaml API definitions:

@@ -284,6 +284,12 @@ definitions:
       example: https://example.org/media/some_user/header/static/header.png
       type: string
       x-go-name: HeaderStatic
+    hide_boosts:
+      description: |-
+        Account has opted to hide boosts from their profile.
+        Key/value omitted if false.
+      type: boolean
+      x-go-name: HideBoosts
     hide_collections:
       description: |-
         Account has opted to hide their followers/following collections.
@@ -2289,6 +2295,12 @@ definitions:
       example: https://example.org/media/some_user/header/static/header.png
       type: string
       x-go-name: HeaderStatic
+    hide_boosts:
+      description: |-
+        Account has opted to hide boosts from their profile.
+        Key/value omitted if false.
+      type: boolean
+      x-go-name: HideBoosts
     hide_collections:
       description: |-
         Account has opted to hide their followers/following collections.
Configuration docs (environment variables section):

@@ -126,3 +126,19 @@ This means in cases where you want to just try changing one thing, but don't want
 Reasonable default values are provided for *most* of the configuration parameters, except in cases where a custom value is absolutely required.

 See the [example config file](https://github.com/superseriousbusiness/gotosocial/blob/main/example/config.yaml) for the default values, or run `gotosocial --help`.
+
+## `GTS_WAZERO_COMPILATION_CACHE`
+
+On startup, GoToSocial compiles embedded WebAssembly `ffmpeg` and `ffprobe` binaries into [Wazero](https://wazero.io/)-compatible modules, which are used for media processing without requiring any external dependencies.
+
+To speed up startup time of GoToSocial, you can cache the compiled modules between restarts so that GoToSocial doesn't have to compile them on every startup from scratch.
+
+You can instruct GoToSocial on where to store the Wazero artifacts by setting the environment variable `GTS_WAZERO_COMPILATION_CACHE` to a directory, which will be used by GtS to store two smallish artifacts of ~50MiB or so each (~100MiB total).
+
+For an example of this in action, see the [docker-compose.yaml](https://raw.githubusercontent.com/superseriousbusiness/gotosocial/main/example/docker-compose/docker-compose.yaml), and the [gotosocial.service](https://raw.githubusercontent.com/superseriousbusiness/gotosocial/main/example/gotosocial.service) example files.
+
+If you want to provide this value to GtS outside of systemd or Docker, you can do so in the following manner when starting up your GtS server:
+
+```bash
+GTS_WAZERO_COMPILATION_CACHE=~/gotosocial/.cache ./gotosocial --config-path ./config.yaml server start
+```
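Under the hood, the directory named by `GTS_WAZERO_COMPILATION_CACHE` corresponds to Wazero's on-disk compilation cache. The following is a minimal, hedged sketch of how such a cache is wired into a Wazero runtime using the standard wazero API; it is illustrative only and not GoToSocial's actual startup code:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/tetratelabs/wazero"
)

func main() {
	ctx := context.Background()

	// Same environment variable the docs above describe.
	dir := os.Getenv("GTS_WAZERO_COMPILATION_CACHE")

	cfg := wazero.NewRuntimeConfig()
	if dir != "" {
		// Persist compiled module artifacts on disk so later startups
		// can skip recompiling the embedded ffmpeg/ffprobe modules.
		cache, err := wazero.NewCompilationCacheWithDir(dir)
		if err != nil {
			log.Fatal(err)
		}
		defer cache.Close(ctx)
		cfg = cfg.WithCompilationCache(cache)
	}

	// Modules compiled by this runtime are cached in dir (if set).
	r := wazero.NewRuntimeWithConfig(ctx, cfg)
	defer r.Close(ctx)
}
```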
HTTP signatures docs:

@@ -6,7 +6,7 @@ GoToSocial will also sign all outgoing `GET` and `POST` requests that it makes

 This behavior is the equivalent of Mastodon's [AUTHORIZED_FETCH / "secure mode"](https://docs.joinmastodon.org/admin/config/#authorized_fetch).

-GoToSocial uses the [superseriousbusiness/httpsig](https://github.com/superseriousbusiness/httpsign) library (forked from go-fed) for signing outgoing requests, and for parsing and validating the signatures of incoming requests. This library strictly follows the [Cavage http signature RFC](https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12), which is the same RFC used by other implementations like Mastodon, Pixelfed, Akkoma/Pleroma, etc. (This RFC has since been superseded by the [httpbis http signature RFC](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-message-signatures), but this is not yet widely implemented.)
+GoToSocial uses the [superseriousbusiness/httpsig](https://github.com/superseriousbusiness/httpsig) library (forked from go-fed) for signing outgoing requests, and for parsing and validating the signatures of incoming requests. This library strictly follows the [Cavage http signature RFC](https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12), which is the same RFC used by other implementations like Mastodon, Pixelfed, Akkoma/Pleroma, etc. (This RFC has since been superseded by the [httpbis http signature RFC](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-message-signatures), but this is not yet widely implemented.)

 ## Query Parameters
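For a sense of what "signing outgoing requests" looks like in code, here is a hedged sketch using the go-fed-style API that the httpsig fork exposes; the key, URLs, and header list are assumptions for illustration, and the exact function signatures should be checked against the library itself:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/superseriousbusiness/httpsig"
)

func main() {
	// Hypothetical actor key; a real server loads the account's stored key.
	privKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	body := []byte(`{"hello":"world"}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://remote.example/users/someone/inbox",
		strings.NewReader(string(body)))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	req.Header.Set("Host", req.URL.Host)

	// Sign "(request-target)", host, date, and digest, cavage-style.
	signer, _, err := httpsig.NewSigner(
		[]httpsig.Algorithm{httpsig.RSA_SHA256},
		httpsig.DigestSha256,
		[]string{httpsig.RequestTarget, "host", "date", "digest"},
		httpsig.Signature,
		60, // signature expiry, in seconds
	)
	if err != nil {
		log.Fatal(err)
	}

	// pubKeyId points receivers at the actor's public key document.
	if err := signer.SignRequest(privKey, "https://my.example/users/me#main-key", req, body); err != nil {
		log.Fatal(err)
	}
}
```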
Docker installation docs:

@@ -91,6 +91,32 @@ If you want to use [LetsEncrypt](../../configuration/tls.md) for TLS certificates

 For help translating variable names from the config.yaml file to environment variables, refer to the [configuration section](../../configuration/index.md#environment-variables).
+
+### Wazero Compilation Cache (optional)
+
+On startup, GoToSocial compiles embedded WebAssembly `ffmpeg` and `ffprobe` binaries into [Wazero](https://wazero.io/)-compatible modules, which are used for media processing without requiring any external dependencies.
+
+To speed up startup time of GoToSocial, you can cache the compiled modules between restarts so that GoToSocial doesn't have to compile them on every startup from scratch.
+
+If you'd like to do this in your Docker container, first create a `.cache` directory in your working folder to store the modules:
+
+```bash
+mkdir -p ~/gotosocial/.cache
+```
+
+Then, uncomment the second volume in the docker-compose.yaml file by removing the leading `#` symbol, so that instead of
+
+```yaml
+#- ~/gotosocial/.cache:/gotosocial/.cache
+```
+
+it reads
+
+```yaml
+- ~/gotosocial/.cache:/gotosocial/.cache
+```
+
+This will instruct Docker to mount the `~/gotosocial/.cache` directory at `/gotosocial/.cache` inside the Docker container.

 ## Start GoToSocial

 With those small changes out of the way, you can now start GoToSocial with the following command:
docs/getting_started/releases.md:

@@ -1,47 +1,11 @@
 # Releases

-GoToSocial can be installed in a number of different ways. We publish official binary releases as well as container images. A number of third-party packages are maintained by different distributions and some people have created additional deployment tooling to make it easy to deploy GoToSocial yourself.
-
-## Binary releases
-
-We publish binary builds for Linux to [our GitHub project](https://github.com/superseriousbusiness/gotosocial/releases):
-
-* 32-bit Intel/AMD (i386/x86)
-* 64-bit Intel/AMD (amd64/x86_64)
-* 32-bit ARM (v6 and v7)
-* 64-bit ARM64
-
-For FreeBSD we publish:
-
-* 64-bit Intel/AMD (amd64/x86_64)
-
-## Containers
-
-We also publish container images [on the Docker Hub](https://hub.docker.com/r/superseriousbusiness/gotosocial).
-
-Containers are released for the same Linux platforms as our binary releases, with the exception of 32-bit Intel/AMD.
-
-## Snapshots
-
-We publish snapshot binary builds and Docker images of whatever is currently on main.
-
-We always recommend using a stable release instead, but if you want to live on the edge (at your own risk!) then see the [snapshots](https://github.com/superseriousbusiness/gotosocial#snapshots) section on our GitHub repo for more information.
-
-## Third-party
-
-Some folks have created distribution packages for GoToSocial or additional tooling to aid in installing GoToSocial.
-
-### Distribution packages
-
-These packages are not maintained by GoToSocial, so please direct questions and issues to the repository maintainers (and donate to them!).
-
-[![Packaging status](https://repology.org/badge/vertical-allrepos/gotosocial.svg)](https://repology.org/project/gotosocial/versions)
-
-### Deployment tools
-
-You can deploy your own instance of GoToSocial with the help of:
-
-- [YunoHost GoToSocial Packaging](https://github.com/YunoHost-Apps/gotosocial_ynh) by [OniriCorpe](https://github.com/OniriCorpe).
-- [Ansible Playbook (MASH)](https://github.com/mother-of-all-self-hosting/mash-playbook): The playbook supports a many services, including GoToSocial. [Documentation](https://github.com/mother-of-all-self-hosting/mash-playbook/blob/main/docs/services/gotosocial.md)
-- GoToSocial Helm Charts:
-  - [GoToSocial Helm Chart](https://github.com/fSocietySocial/charts/tree/main/charts/gotosocial) by [0hlov3](https://github.com/0hlov3).
+GoToSocial can be installed in a number of different ways. We publish official binary releases as well as container images.
+
+A number of third-party packages are maintained by different distributions, and some people have created additional deployment tooling to make it easy to deploy GoToSocial yourself.
+
+{%
+include "../../README.md"
+start='<!--releases-start-->'
+end='<!--releases-end-->'
+%}
User settings docs:

@@ -88,9 +88,9 @@ This setting does not affect visibility of your posts over the ActivityPub protocol

 !!! warning
     Be aware that changes to this setting also apply retroactively.

     That is, if you previously made a post on Unlisted visibility, while set to show only Public posts on your profile, and you change this setting to show Public and Unlisted, then the Unlisted post you previously made will be visible on your profile alongside your Public posts.

     Likewise, if you change this setting to show no posts, then all your posts will be hidden from your profile, regardless of when you created them, and what this option was set to at the time. This will apply until you change this setting again.

 !!! tip
@@ -134,6 +134,11 @@ This feed only includes posts set as 'Public' (see [Privacy Settings](./posts.md))
 !!! warning
     Exposing your RSS feed allows *anyone* to subscribe to updates on your Public posts anonymously, bypassing follows and follow requests.

+#### Hide boosts from your public page
+
+By default, GoToSocial will display posts boosted by you on your public web profile. If you do not wish to display them, you can hide them by checking this box.
+
+
 #### Hide Who You Follow / Are Followed By

 By default, GoToSocial shows your following/followers counts on your public web profile, and allows others to see who you follow and are followed by. This can be useful for account discovery purposes. However, for privacy + safety reasons you may wish to hide these counts, and to hide your following/followers lists from other accounts. You can do this by checking this box.
@@ -196,7 +201,7 @@ If you want to reset all your policies to the initial defaults, you can click on

 !!! danger
     While GoToSocial respects interaction policies, it is not guaranteed that other server softwares will, and it is possible that accounts on other servers will still send out replies and boosts of your post to their followers, even if your instance forbids these interactions.

     As more ActivityPub servers roll out support for interaction policies, this issue will hopefully diminish, but in the meantime GoToSocial can offer only a "best effort" attempt to restrict interactions with your posts according to the policies you have set.

 ## Email & Password
example/docker-compose/docker-compose.yaml:

@@ -8,11 +8,21 @@ services:
     networks:
       - gotosocial
     environment:
+      # Change this to your actual host value.
       GTS_HOST: example.org
       GTS_DB_TYPE: sqlite
+      # Path in the GtS Docker container where
+      # the sqlite.db file will be stored.
       GTS_DB_ADDRESS: /gotosocial/storage/sqlite.db
+      # Change this to true if you're not running
+      # GoToSocial behind a reverse proxy.
       GTS_LETSENCRYPT_ENABLED: "false"
+      # Set your email address here if you
+      # want to receive letsencrypt notices.
       GTS_LETSENCRYPT_EMAIL_ADDRESS: ""
+      # Path in the GtS Docker container where the
+      # Wazero compilation cache will be stored.
+      GTS_WAZERO_COMPILATION_CACHE: /gotosocial/.cache
       ## For reverse proxy setups:
       # GTS_TRUSTED_PROXIES: "172.x.x.x"
       ## Set the timezone of your server:
@@ -24,7 +34,13 @@ services:
       ## For reverse proxy setups:
       #- "127.0.0.1:8080:8080"
     volumes:
+      # Your data volume, for your
+      # sqlite.db file and media files.
       - ~/gotosocial/data:/gotosocial/storage
+      # OPTIONAL: To mount volume for the WAZERO
+      # compilation cache, for speedier restart
+      # times, uncomment the below line:
+      #- ~/gotosocial/.cache:/gotosocial/.cache
     restart: "always"

 networks:
example/gotosocial.service:

@@ -13,6 +13,15 @@ Group=gotosocial
 Type=exec
 Restart=on-failure
+
+# For speedier restart times, you can uncomment the following Environment line to have GoToSocial cache compiled
+# Wazero artifacts in the given directory between restarts, so that it doesn't need to compile on startup every time.
+#
+# You may need to change the exact path depending on where you've got GoToSocial installed, for example if you've
+# installed at "~/gotosocial" then change the value to "GTS_WAZERO_COMPILATION_CACHE=~/gotosocial/.cache".
+#
+# Whatever you do, make sure the dir exists and that the gotosocial user has permission to write + read from it.
+#Environment="GTS_WAZERO_COMPILATION_CACHE=/gotosocial/.cache"

 # change if your path to the GoToSocial binary is different
 ExecStart=/gotosocial/gotosocial --config-path config.yaml server start
 WorkingDirectory=/gotosocial
go.mod:

@@ -42,7 +42,7 @@ require (
 	github.com/k3a/html2text v1.2.1
 	github.com/microcosm-cc/bluemonday v1.0.27
 	github.com/miekg/dns v1.1.62
-	github.com/minio/minio-go/v7 v7.0.77
+	github.com/minio/minio-go/v7 v7.0.78
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/ncruces/go-sqlite3 v0.19.0
 	github.com/oklog/ulid v1.3.1
@@ -63,7 +63,7 @@ require (
 	github.com/uptrace/bun/dialect/sqlitedialect v1.2.1
 	github.com/uptrace/bun/extra/bunotel v1.2.1
 	github.com/wagslane/go-password-validator v0.3.0
-	github.com/yuin/goldmark v1.7.4
+	github.com/yuin/goldmark v1.7.6
 	go.opentelemetry.io/otel v1.29.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0
@@ -158,7 +158,7 @@ require (
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
go.sum:

@@ -381,8 +381,8 @@ github.com/k3a/html2text v1.2.1/go.mod h1:ieEXykM67iT8lTvEWBh6fhpH4B23kB9OMKPdIB
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
@@ -413,8 +413,8 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
 github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
-github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/minio/minio-go/v7 v7.0.78 h1:LqW2zy52fxnI4gg8C2oZviTaKHcBV36scS+RzJnxUFs=
+github.com/minio/minio-go/v7 v7.0.78/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -621,8 +621,8 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yuin/goldmark v1.7.4 h1:BDXOHExt+A7gwPCJgPIIq7ENvceR7we7rOS9TNoLZeg=
-github.com/yuin/goldmark v1.7.4/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
+github.com/yuin/goldmark v1.7.6 h1:cZgJxVh5mL5cu8KOnwxvFJy5TFB0BHUskZZyq7TYbDg=
+github.com/yuin/goldmark v1.7.6/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
 gitlab.com/NyaaaWhatsUpDoc/sqlite v1.33.1-concurrency-workaround h1:pFMJnlc1PuH+jcVz4vz53vcpnoZG+NqFBr3qikDmEB4=
 gitlab.com/NyaaaWhatsUpDoc/sqlite v1.33.1-concurrency-workaround/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k=
 go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
Account update form parsing (client API):

@@ -348,6 +348,7 @@ func parseUpdateAccountForm(c *gin.Context) (*apimodel.UpdateCredentialsRequest, error)
 		form.Theme == nil &&
 		form.CustomCSS == nil &&
 		form.EnableRSS == nil &&
+		form.HideBoosts == nil &&
 		form.HideCollections == nil &&
 		form.WebVisibility == nil) {
 		return nil, errors.New("empty form submitted")
API model (apimodel package):

@@ -104,6 +104,9 @@ type Account struct {
 	// Account has enabled RSS feed.
 	// Key/value omitted if false.
 	EnableRSS bool `json:"enable_rss,omitempty"`
+	// Account has opted to hide boosts from their profile.
+	// Key/value omitted if false.
+	HideBoosts bool `json:"hide_boosts,omitempty"`
 	// Account has opted to hide their followers/following collections.
 	// Key/value omitted if false.
 	HideCollections bool `json:"hide_collections,omitempty"`
@@ -225,6 +228,8 @@ type UpdateCredentialsRequest struct {
 	CustomCSS *string `form:"custom_css" json:"custom_css"`
 	// Enable RSS feed of public toots for this account at /@[username]/feed.rss
 	EnableRSS *bool `form:"enable_rss" json:"enable_rss"`
+	// Hide boosts from this account's profile page.
+	HideBoosts *bool `form:"hide_boosts" json:"hide_boosts"`
 	// Hide this account's following/followers collections.
 	HideCollections *bool `form:"hide_collections" json:"hide_collections"`
 	// Visibility of statuses to show via the web view.
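Because `HideBoosts` carries `form` and `json` tags on `UpdateCredentialsRequest`, a client toggles it through the same credentials-update endpoint as the other profile settings. A hedged sketch follows; the `/api/v1/accounts/update_credentials` path is the Mastodon-style client API that GoToSocial serves, and the host and token are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Form-encoded body matching the `form:"hide_boosts"` tag.
	form := url.Values{}
	form.Set("hide_boosts", "true")

	req, err := http.NewRequest(
		http.MethodPatch,
		"https://example.org/api/v1/accounts/update_credentials",
		strings.NewReader(form.Encode()),
	)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Authorization", "Bearer YOUR_ACCESS_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// On success the returned account JSON includes "hide_boosts": true.
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```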
Web template model (WebStatus):

@@ -118,6 +118,10 @@ type WebStatus struct {
 	// Override API account with web account.
 	Account *WebAccount `json:"account"`

+	// Account that reblogged the status.
+	// needed to properly render reblogged statuses on profile pages.
+	ReblogAccount *WebAccount `json:"reblog_account"`
+
 	// Web version of media
 	// attached to this status.
 	MediaAttachments []*WebAttachment `json:"media_attachments"`
Account database queries (accountDB):

@@ -1017,6 +1017,7 @@ func (a *accountDB) GetAccountWebStatuses(
 ) ([]*gtsmodel.Status, error) {
 	// Check for an easy case: account exposes no statuses via the web.
 	webVisibility := account.Settings.WebVisibility
+	hideBoosts := *account.Settings.HideBoosts
 	if webVisibility == gtsmodel.VisibilityNone {
 		return nil, db.ErrNoEntries
 	}
@@ -1035,9 +1036,12 @@ func (a *accountDB) GetAccountWebStatuses(
 		// Select only IDs from table
 		Column("status.id").
 		Where("? = ?", bun.Ident("status.account_id"), account.ID).
-		// Don't show replies or boosts.
-		Where("? IS NULL", bun.Ident("status.in_reply_to_uri")).
-		Where("? IS NULL", bun.Ident("status.boost_of_id"))
+		// Don't show replies.
+		Where("? IS NULL", bun.Ident("status.in_reply_to_uri"))
+
+	if hideBoosts {
+		q = q.Where("? IS NULL", bun.Ident("status.boost_of_id"))
+	}

 	// Select statuses for this account according
 	// to their web visibility preference.
Database connection setup:

@@ -401,6 +401,18 @@ func maxOpenConns() int {
 	if multiplier < 1 {
 		return 1
 	}
+
+	// Specifically for SQLite databases with
+	// a journal mode of anything EXCEPT "wal",
+	// only 1 concurrent connection is supported.
+	if strings.ToLower(config.GetDbType()) == "sqlite" {
+		journalMode := config.GetDbSqliteJournalMode()
+		journalMode = strings.ToLower(journalMode)
+		if journalMode != "wal" {
+			return 1
+		}
+	}
+
 	return multiplier * runtime.GOMAXPROCS(0)
 }
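The reasoning behind the new branch is that SQLite only tolerates a single concurrent connection unless the journal mode is WAL, so the configured multiplier is ignored in that case. A small self-contained sketch of the same decision logic, with hypothetical parameters standing in for GoToSocial's config getters:

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// maxOpenConns mirrors the logic shown in the diff above; dbType,
// journalMode, and multiplier stand in for the real config lookups.
func maxOpenConns(dbType, journalMode string, multiplier int) int {
	if multiplier < 1 {
		return 1
	}
	// Non-WAL SQLite journal modes support only one concurrent connection.
	if strings.EqualFold(dbType, "sqlite") && !strings.EqualFold(journalMode, "wal") {
		return 1
	}
	return multiplier * runtime.GOMAXPROCS(0)
}

func main() {
	fmt.Println(maxOpenConns("sqlite", "WAL", 8))      // 8 * GOMAXPROCS
	fmt.Println(maxOpenConns("sqlite", "TRUNCATE", 8)) // 1
	fmt.Println(maxOpenConns("postgres", "", 8))       // 8 * GOMAXPROCS
}
```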
New database migration (package migrations), adding the `hide_boosts` column:

@@ -0,0 +1,44 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package migrations
+
+import (
+	"context"
+	"strings"
+
+	"github.com/uptrace/bun"
+)
+
+func init() {
+	up := func(ctx context.Context, db *bun.DB) error {
+		_, err := db.ExecContext(ctx, "ALTER TABLE ? ADD COLUMN ? BOOLEAN DEFAULT FALSE", bun.Ident("account_settings"), bun.Ident("hide_boosts"))
+		if err != nil && !(strings.Contains(err.Error(), "already exists") || strings.Contains(err.Error(), "duplicate column name") || strings.Contains(err.Error(), "SQLSTATE 42701")) {
+			return err
+		}
+		return nil
+	}
+
+	down := func(ctx context.Context, db *bun.DB) error {
+		_, err := db.ExecContext(ctx, "ALTER TABLE ? DROP COLUMN ?", bun.Ident("account_settings"), bun.Ident("hide_boosts"))
+		return err
+	}
+
+	if err := Migrations.Register(up, down); err != nil {
+		panic(err)
+	}
+}
SQLite driver build constraints (package sqlite), changed in four files:

@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-//go:build !moderncsqlite3
+//go:build !moderncsqlite3 && !nowasm

 package sqlite
@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-//go:build moderncsqlite3
+//go:build moderncsqlite3 || nowasm

 package sqlite
@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-//go:build !moderncsqlite3
+//go:build !moderncsqlite3 && !nowasm

 package sqlite
@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-//go:build moderncsqlite3
+//go:build moderncsqlite3 || nowasm

 package sqlite
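These constraints follow the standard Go pattern of selecting one of several implementations at compile time: files providing the same symbols carry mutually exclusive `//go:build` lines, and the tag set decides which one is built. A minimal illustrative sketch (file and function names here are hypothetical, not the GoToSocial sources):

```go
// driver_wasm.go
//go:build !moderncsqlite3 && !nowasm

package sqlite

// driverName reports which SQLite driver this build uses.
func driverName() string { return "WASM sqlite3 (ncruces/go-sqlite3)" }
```

```go
// driver_modernc.go
//go:build moderncsqlite3 || nowasm

package sqlite

// driverName reports which SQLite driver this build uses.
func driverName() string { return "modernc.org/sqlite" }
```

Building with `-tags nowasm` (or `moderncsqlite3`) therefore swaps in the modernc-based file without any runtime switches.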
Account settings model (gtsmodel):

@@ -33,6 +33,7 @@ type AccountSettings struct {
 	Theme           string     `bun:",nullzero"`                        // Preset CSS theme filename selected by this Account (empty string if nothing set).
 	CustomCSS       string     `bun:",nullzero"`                        // Custom CSS that should be displayed for this Account's profile and statuses.
 	EnableRSS       *bool      `bun:",nullzero,notnull,default:false"`  // enable RSS feed subscription for this account's public posts at [URL]/feed
+	HideBoosts      *bool      `bun:",nullzero,notnull,default:false"`  // Hide boosts from this accounts profile page.
 	HideCollections *bool      `bun:",nullzero,notnull,default:false"`  // Hide this account's followers/following collections.
 	WebVisibility   Visibility `bun:",nullzero,notnull,default:public"` // Visibility level of statuses that visitors can view via the web profile.
 	InteractionPolicyDirect *InteractionPolicy `bun:""` // Interaction policy to use for new direct visibility statuses by this account. If null, assume default policy.
internal/media/ffmpeg/args.go (new file, 28 lines):

@@ -0,0 +1,28 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ffmpeg
+
+import (
+	"codeberg.org/gruf/go-ffmpreg/wasm"
+)
+
+// Args encapsulates the passing of common
+// configuration options to run an instance
+// of a compiled WebAssembly module that is
+// run in a typical CLI manner.
+type Args = wasm.Args
internal/media/ffmpeg/exec_nowasm.go (new file, 142 lines)
@@ -0,0 +1,142 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build nowasm
+
+package ffmpeg
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/fs"
+	"os/exec"
+
+	"codeberg.org/gruf/go-ffmpreg/wasm"
+	"github.com/tetratelabs/wazero"
+	"github.com/tetratelabs/wazero/sys"
+)
+
+func init() {
+	fmt.Println("!! you are using an unsupported build configuration of gotosocial with WebAssembly disabled !!")
+	fmt.Println("!! please do not file bug reports regarding media processing with this configuration !!")
+	fmt.Println("!! it is also less secure; this does not enforce version checks on ffmpeg / ffprobe versions !!")
+}
+
+// runCmd will run 'name' with the given arguments, returning exit code or error.
+func runCmd(ctx context.Context, name string, args wasm.Args) (uint32, error) {
+	cmd := exec.CommandContext(ctx, name, args.Args...) //nolint:gosec
+
+	// Set provided std files.
+	cmd.Stdin = args.Stdin
+	cmd.Stdout = args.Stdout
+	cmd.Stderr = args.Stderr
+
+	if args.Config != nil {
+		// Gather some information
+		// from module config func.
+		var cfg falseModuleConfig
+		_ = args.Config(&cfg)
+
+		// Extract from conf.
+		cmd.Env = cfg.env
+	}
+
+	// Run prepared command, catching err type.
+	switch err := cmd.Run(); err := err.(type) {
+
+	// Extract code from
+	// any exit error type.
+	case *exec.ExitError:
+		rc := err.ExitCode()
+		return uint32(rc), err
+
+	default:
+		return 0, err
+	}
+}
+
+type falseModuleConfig struct{ env []string }
+
+func (cfg *falseModuleConfig) WithArgs(...string) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithEnv(key string, value string) wazero.ModuleConfig {
+	cfg.env = append(cfg.env, key+"="+value)
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithFS(fs.FS) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithFSConfig(wazero.FSConfig) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithName(string) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithStartFunctions(...string) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithStderr(io.Writer) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithStdin(io.Reader) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithStdout(io.Writer) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithWalltime(sys.Walltime, sys.ClockResolution) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithSysWalltime() wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithNanotime(sys.Nanotime, sys.ClockResolution) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithSysNanotime() wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithNanosleep(sys.Nanosleep) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithOsyield(sys.Osyield) wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithSysNanosleep() wazero.ModuleConfig {
+	return cfg // noop
+}
+
+func (cfg *falseModuleConfig) WithRandSource(io.Reader) wazero.ModuleConfig {
+	return cfg // noop
+}
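runCmd above reports a command's exit status through the same (uint32, error) pair that the WebAssembly runner returns, by type-switching on *exec.ExitError. A small standalone sketch of that error-handling shape, using a hypothetical command name rather than ffmpeg or ffprobe:

package main

import (
	"context"
	"fmt"
	"os/exec"
)

// run executes a command and returns its exit code, mirroring the
// *exec.ExitError type switch used by runCmd in the new file above.
func run(ctx context.Context, name string, args ...string) (uint32, error) {
	cmd := exec.CommandContext(ctx, name, args...)
	switch err := cmd.Run(); err := err.(type) {
	case *exec.ExitError:
		// The command started but exited non-zero: surface the code.
		return uint32(err.ExitCode()), err
	default:
		// nil on success, or a startup error (e.g. binary not found).
		return 0, err
	}
}

func main() {
	code, err := run(context.Background(), "false") // illustrative command
	fmt.Println(code, err)
}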
@@ -15,10 +15,14 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

+//go:build !nowasm
+
 package ffmpeg

 import (
 	"context"
+
+	"codeberg.org/gruf/go-ffmpreg/wasm"
 )

 // ffmpegRunner limits the number of
@@ -36,5 +40,7 @@ func InitFfmpeg(ctx context.Context, max int) error {

 // Ffmpeg runs the given arguments with an instance of ffmpeg.
 func Ffmpeg(ctx context.Context, args Args) (uint32, error) {
-	return ffmpegRunner.Run(ctx, ffmpeg, args)
+	return ffmpegRunner.Run(ctx, func() (uint32, error) {
+		return wasm.Run(ctx, runtime, ffmpeg, args)
+	})
 }
internal/media/ffmpeg/ffmpeg_nowasm.go (new file, 49 lines)
@@ -0,0 +1,49 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build nowasm
+
+package ffmpeg
+
+import (
+	"context"
+	"os/exec"
+)
+
+// ffmpegRunner limits the number of
+// ffmpeg WebAssembly instances that
+// may be concurrently running, in
+// order to reduce memory usage.
+var ffmpegRunner runner
+
+// InitFfmpeg looks for a local copy of ffmpeg in path, and prepares
+// the runner to only allow max given concurrent running instances.
+func InitFfmpeg(ctx context.Context, max int) error {
+	_, err := exec.LookPath("ffmpeg")
+	if err != nil {
+		return err
+	}
+	ffmpegRunner.Init(max)
+	return nil
+}
+
+// Ffmpeg runs the given arguments with an instance of ffmpeg.
+func Ffmpeg(ctx context.Context, args Args) (uint32, error) {
+	return ffmpegRunner.Run(ctx, func() (uint32, error) {
+		return runCmd(ctx, "ffmpeg", args)
+	})
+}
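With the `nowasm` tag, InitFfmpeg only checks that an ffmpeg binary exists on $PATH before sizing the concurrency limiter, and Ffmpeg then shells out through runCmd. A hedged usage sketch from inside the gotosocial module follows; the wasm.Args field names (Args, Stdout, Stderr) are inferred from their use in exec_nowasm.go above, and the media file paths are made up for illustration, not taken from the codebase.

package main

import (
	"context"
	"log"
	"os"

	"codeberg.org/gruf/go-ffmpreg/wasm"

	"github.com/superseriousbusiness/gotosocial/internal/media/ffmpeg"
)

func main() {
	ctx := context.Background()

	// Allow at most one concurrently running ffmpeg instance.
	if err := ffmpeg.InitFfmpeg(ctx, 1); err != nil {
		log.Fatalf("ffmpeg not usable: %v", err)
	}

	// Transcode in.webm to out.mp4 (paths are illustrative).
	rc, err := ffmpeg.Ffmpeg(ctx, wasm.Args{
		Args:   []string{"-i", "in.webm", "out.mp4"},
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
	log.Printf("ffmpeg exited with code %d, err=%v", rc, err)
}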
@@ -15,10 +15,14 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

+//go:build !nowasm
+
 package ffmpeg

 import (
 	"context"
+
+	"codeberg.org/gruf/go-ffmpreg/wasm"
 )

 // ffprobeRunner limits the number of
@@ -36,5 +40,7 @@ func InitFfprobe(ctx context.Context, max int) error {

 // Ffprobe runs the given arguments with an instance of ffprobe.
 func Ffprobe(ctx context.Context, args Args) (uint32, error) {
-	return ffprobeRunner.Run(ctx, ffprobe, args)
+	return ffprobeRunner.Run(ctx, func() (uint32, error) {
+		return wasm.Run(ctx, runtime, ffprobe, args)
+	})
 }
internal/media/ffmpeg/ffprobe_nowasm.go (new file, 49 lines)
@@ -0,0 +1,49 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build nowasm
+
+package ffmpeg
+
+import (
+	"context"
+	"os/exec"
+)
+
+// ffprobeRunner limits the number of
+// ffprobe WebAssembly instances that
+// may be concurrently running, in
+// order to reduce memory usage.
+var ffprobeRunner runner
+
+// InitFfprobe looks for a local copy of ffprobe in path, and prepares
+// the runner to only allow max given concurrent running instances.
+func InitFfprobe(ctx context.Context, max int) error {
+	_, err := exec.LookPath("ffprobe")
+	if err != nil {
+		return err
+	}
+	ffprobeRunner.Init(max)
+	return nil
+}
+
+// Ffprobe runs the given arguments with an instance of ffprobe.
+func Ffprobe(ctx context.Context, args Args) (uint32, error) {
+	return ffprobeRunner.Run(ctx, func() (uint32, error) {
+		return runCmd(ctx, "ffprobe", args)
+	})
+}
@@ -19,9 +19,6 @@

 import (
 	"context"
-
-	"codeberg.org/gruf/go-ffmpreg/wasm"
-	"github.com/tetratelabs/wazero"
 )

 // runner simply abstracts away the complexities
@@ -53,7 +50,7 @@ func (r *runner) Init(n int) {

 // Run will attempt to pass the given compiled WebAssembly module with args to run(), waiting on
 // the receiving runner until a free slot is available to run an instance, (if a limit is enabled).
-func (r *runner) Run(ctx context.Context, cmod wazero.CompiledModule, args Args) (uint32, error) {
+func (r *runner) Run(ctx context.Context, run func() (uint32, error)) (uint32, error) {
 	select {
 	// Context canceled.
 	case <-ctx.Done():
@@ -66,6 +63,6 @@ func (r *runner) Run(ctx context.Context, cmod wazero.CompiledModule, args Args)
 	// Release slot back to pool on end.
 	defer func() { r.pool <- struct{}{} }()

-	// Pass to main module runner function.
-	return wasm.Run(ctx, runtime, cmod, args)
+	// Call run.
+	return run()
 }
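After this refactor, runner.Run takes an opaque func() (uint32, error), so the same slot-limiting logic can wrap either a wazero module run or a plain exec call. A minimal self-contained sketch of that buffered-channel semaphore pattern follows; names are illustrative, and the real runner above also handles the no-limit case.

package main

import (
	"context"
	"fmt"
)

// limiter caps how many run callbacks may execute at once,
// using a buffered channel as a counting semaphore.
type limiter struct{ pool chan struct{} }

func newLimiter(n int) *limiter {
	l := &limiter{pool: make(chan struct{}, n)}
	for i := 0; i < n; i++ {
		l.pool <- struct{}{} // fill the available slots
	}
	return l
}

func (l *limiter) Run(ctx context.Context, run func() (uint32, error)) (uint32, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-l.pool: // acquire a slot
	}
	// Release the slot back to the pool on return.
	defer func() { l.pool <- struct{}{} }()
	return run()
}

func main() {
	l := newLimiter(1)
	rc, err := l.Run(context.Background(), func() (uint32, error) { return 0, nil })
	fmt.Println(rc, err)
}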
@@ -15,6 +15,8 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

+//go:build !nowasm
+
 package ffmpeg

 import (
@@ -41,12 +43,6 @@
 	ffprobe wazero.CompiledModule
 )

-// Args encapsulates the passing of common
-// configuration options to run an instance
-// of a compiled WebAssembly module that is
-// run in a typical CLI manner.
-type Args = wasm.Args
-
 // compileFfmpeg ensures the ffmpeg WebAssembly has been
 // pre-compiled into memory. If already compiled is a no-op.
 func compileFfmpeg(ctx context.Context) error {
@@ -42,6 +42,16 @@ func (suite *GetRSSTestSuite) TestGetAccountRSSAdmin() {
 <description>Posts from @admin@localhost:8080</description>
 <pubDate>Wed, 20 Oct 2021 10:41:37 +0000</pubDate>
 <lastBuildDate>Wed, 20 Oct 2021 10:41:37 +0000</lastBuildDate>
+<item>
+<title>introduction post</title>
+<link>http://localhost:8080/@the_mighty_zork/statuses/01F8MHAMCHF6Y650WCRSCP4WMY</link>
+<description>@the_mighty_zork@localhost:8080 made a new post: "hello everyone!"</description>
+<content:encoded><![CDATA[hello everyone!]]></content:encoded>
+<author>@the_mighty_zork@localhost:8080</author>
+<guid isPermaLink="true">http://localhost:8080/@the_mighty_zork/statuses/01F8MHAMCHF6Y650WCRSCP4WMY</guid>
+<pubDate>Wed, 20 Oct 2021 10:40:37 +0000</pubDate>
+<source>http://localhost:8080/@the_mighty_zork/feed.rss</source>
+</item>
 <item>
 <title>open to see some puppies</title>
 <link>http://localhost:8080/@admin/statuses/01F8MHAAY43M6RJ473VQFCVH37</link>
@@ -274,6 +274,11 @@ func (p *Processor) Update(ctx context.Context, account *gtsmodel.Account, form
 		settingsColumns = append(settingsColumns, "enable_rss")
 	}

+	if form.HideBoosts != nil {
+		account.Settings.HideBoosts = form.HideBoosts
+		settingsColumns = append(settingsColumns, "hide_boosts")
+	}
+
 	if form.HideCollections != nil {
 		account.Settings.HideCollections = form.HideCollections
 		settingsColumns = append(settingsColumns, "hide_collections")
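The new HideBoosts branch follows the same convention as the neighbouring fields: a nil pointer in the form means "not supplied", while a non-nil pointer both updates the in-memory settings and records the column name for a partial database update. A small sketch of that pattern with hypothetical stand-in types, not GoToSocial's real structs:

package main

import "fmt"

// settings and updateForm stand in for gtsmodel.AccountSettings
// and the account update form; they are illustrative only.
type settings struct {
	EnableRSS  *bool
	HideBoosts *bool
}

type updateForm struct {
	EnableRSS  *bool
	HideBoosts *bool
}

// applyUpdate copies only the supplied (non-nil) fields and returns
// the column names to include in the partial UPDATE statement.
func applyUpdate(s *settings, form updateForm) (columns []string) {
	if form.EnableRSS != nil {
		s.EnableRSS = form.EnableRSS
		columns = append(columns, "enable_rss")
	}
	if form.HideBoosts != nil {
		s.HideBoosts = form.HideBoosts
		columns = append(columns, "hide_boosts")
	}
	return columns
}

func main() {
	hide := true
	var s settings
	fmt.Println(applyUpdate(&s, updateForm{HideBoosts: &hide})) // [hide_boosts]
}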
@@ -302,7 +302,7 @@ func (c *Converter) accountToAPIAccountPublic(ctx context.Context, a *gtsmodel.A
 	// Bits that vary between remote + local accounts:
 	// - Account (acct) string.
 	// - Role.
-	// - Settings things (enableRSS, theme, customCSS, hideCollections).
+	// - Settings things (enableRSS, theme, customCSS, hideBoosts, hideCollections).

 	var (
 		acct            string
@@ -310,6 +310,7 @@ func (c *Converter) accountToAPIAccountPublic(ctx context.Context, a *gtsmodel.A
 		enableRSS       bool
 		theme           string
 		customCSS       string
+		hideBoosts      bool
 		hideCollections bool
 	)

@@ -338,6 +339,7 @@ func (c *Converter) accountToAPIAccountPublic(ctx context.Context, a *gtsmodel.A
 		enableRSS = *a.Settings.EnableRSS
 		theme = a.Settings.Theme
 		customCSS = a.Settings.CustomCSS
+		hideBoosts = *a.Settings.HideBoosts
 		hideCollections = *a.Settings.HideCollections
 	}

@@ -380,6 +382,7 @@ func (c *Converter) accountToAPIAccountPublic(ctx context.Context, a *gtsmodel.A
 		Theme:           theme,
 		CustomCSS:       customCSS,
 		EnableRSS:       enableRSS,
+		HideBoosts:      hideBoosts,
 		HideCollections: hideCollections,
 		Roles:           roles,
 	}
@@ -1092,7 +1095,15 @@ func (c *Converter) StatusToWebStatus(
 	ctx context.Context,
 	s *gtsmodel.Status,
 ) (*apimodel.WebStatus, error) {
-	apiStatus, err := c.statusToFrontend(ctx, s,
+	isBoost := s.BoostOf != nil
+	status := s
+
+	if isBoost {
+		status = s.BoostOf
+	}
+
+	apiStatus, err := c.statusToFrontend(ctx, status,
 		nil,                            // No authed requester.
 		statusfilter.FilterContextNone, // No filters.
 		nil,                            // No filters.
@@ -1103,7 +1114,7 @@ func (c *Converter) StatusToWebStatus(
 	}

 	// Convert status author to web model.
-	acct, err := c.AccountToWebAccount(ctx, s.Account)
+	acct, err := c.AccountToWebAccount(ctx, status.Account)
 	if err != nil {
 		return nil, err
 	}
@@ -1113,6 +1124,14 @@ func (c *Converter) StatusToWebStatus(
 		Account: acct,
 	}

+	if isBoost {
+		reblogAcct, err := c.AccountToWebAccount(ctx, s.Account)
+		if err != nil {
+			return nil, err
+		}
+		webStatus.ReblogAccount = reblogAcct
+	}
+
 	// Whack a newline before and after each "pre" to make it easier to outdent it.
 	webStatus.Content = strings.ReplaceAll(webStatus.Content, "<pre>", "\n<pre>")
 	webStatus.Content = strings.ReplaceAll(webStatus.Content, "</pre>", "</pre>\n")
@@ -1402,6 +1402,7 @@ func (suite *InternalToFrontendTestSuite) TestStatusToWebStatus() {
     "emojis": [],
     "fields": []
   },
+  "reblog_account": null,
   "media_attachments": [
     {
       "id": "01HE7Y3C432WRSNS10EZM86SA5",
@@ -39,6 +39,12 @@
 func (c *Converter) StatusToRSSItem(ctx context.Context, s *gtsmodel.Status) (*feeds.Item, error) {
 	// see https://cyber.harvard.edu/rss/rss.html

+	// If status is a boost,
+	// display the boost instead.
+	if s.BoostOf != nil {
+		s = s.BoostOf
+	}
+
 	// Title -- The title of the item.
 	// example: Venice Film Festival Tries to Quit Sinking
 	var title string
@@ -100,7 +100,7 @@ nav:
     - "configuration/advanced.md"
     - "configuration/observability.md"
   - "Advanced":
-    - "advanced/index.md"
+    - "Overview": "advanced/index.md"
    - "advanced/host-account-domain.md"
    - "advanced/outgoing-proxy.md"
    - "Caching":
@@ -117,6 +117,8 @@ nav:
    - "advanced/metrics.md"
    - "advanced/replicating-sqlite.md"
    - "advanced/sqlite-networked-storage.md"
+    - "Advanced builds":
+      - "advanced/builds/nowasm.md"

  - "Admin":
    - "admin/settings.md"
@@ -15,14 +15,16 @@ GO_GCFLAGS=${GO_GCFLAGS-}
 GO_BUILDTAGS="${GO_BUILDTAGS} debugenv"

 # Available Go build tags, with explanation, followed by benefits of enabling it:
 # - kvformat: enables prettier output of log fields (slightly better performance)
 # - timetzdata: embed timezone database inside binary (allow setting local time inside Docker containers, at cost of 450KB)
 # - notracing: disables compiling-in otel tracing support (reduced binary size, better performance)
 # - nometrics: disables compiling-in otel metrics support (reduced binary size, better performance)
 # - noerrcaller: disables caller function prefix in errors (slightly better performance, at cost of err readability)
 # - debug: enables /debug/pprof endpoint (adds debug, at performance cost)
 # - debugenv: enables /debug/pprof endpoint if DEBUG=1 env during runtime (adds debug, at performance cost)
 # - moderncsqlite3: reverts to using the C-to-Go transpiled SQLite driver (disables the WASM-based SQLite driver)
+# - nowasm: [UNSUPPORTED] removes all WebAssembly from builds including
+#           ffmpeg, ffprobe and SQLite (instead falling back to modernc).
 log_exec env CGO_ENABLED=0 go build -trimpath -v \
 	-tags "${GO_BUILDTAGS}" \
 	-ldflags="${GO_LDFLAGS}" \
@@ -657,6 +657,7 @@ func NewTestAccountSettings() map[string]*gtsmodel.AccountSettings {
 			Sensitive:       util.Ptr(false),
 			Language:        "en",
 			EnableRSS:       util.Ptr(false),
+			HideBoosts:      util.Ptr(false),
 			HideCollections: util.Ptr(false),
 			WebVisibility:   gtsmodel.VisibilityPublic,
 		},
@@ -668,6 +669,7 @@ func NewTestAccountSettings() map[string]*gtsmodel.AccountSettings {
 			Sensitive:       util.Ptr(false),
 			Language:        "en",
 			EnableRSS:       util.Ptr(true),
+			HideBoosts:      util.Ptr(false),
 			HideCollections: util.Ptr(false),
 			WebVisibility:   gtsmodel.VisibilityPublic,
 		},
@@ -679,6 +681,7 @@ func NewTestAccountSettings() map[string]*gtsmodel.AccountSettings {
 			Sensitive:       util.Ptr(false),
 			Language:        "en",
 			EnableRSS:       util.Ptr(true),
+			HideBoosts:      util.Ptr(false),
 			HideCollections: util.Ptr(false),
 			WebVisibility:   gtsmodel.VisibilityUnlocked,
 		},
@@ -690,6 +693,7 @@ func NewTestAccountSettings() map[string]*gtsmodel.AccountSettings {
 			Sensitive:       util.Ptr(true),
 			Language:        "fr",
 			EnableRSS:       util.Ptr(false),
+			HideBoosts:      util.Ptr(false),
 			HideCollections: util.Ptr(true),
 			WebVisibility:   gtsmodel.VisibilityPublic,
 		},
vendor/github.com/klauspost/compress/.goreleaser.yml (generated, vendored, 6 lines changed)
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
@@ -99,7 +99,7 @@ archives:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
vendor/github.com/klauspost/compress/README.md (generated, vendored, 29 lines changed)
@@ -16,6 +16,27 @@ This package provides various compression algorithms.

 # changelog

+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+  * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+  * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+  * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+  * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+  * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+  * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+  * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+  * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+  * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+  * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+  * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+  * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+  * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
 * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
   * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
   * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -81,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
   * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
   * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
-  * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+  * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799

 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
   * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
   * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
   * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
-  * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+  * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
   * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
   * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
   * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when
   * s2: Fix binaries.

 * Feb 25, 2021 (v1.11.8)
-  * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+  * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
   * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
   * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
   * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when
 * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
 * Feb 19, 2016: Handle small payloads faster in level 1-3.
 * Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
 * Feb 14, 2016: Snappy: Merge upstream changes.
 * Feb 14, 2016: Snappy: Fix aggressive skipping.
 * Feb 14, 2016: Snappy: Update benchmark.
vendor/github.com/klauspost/compress/fse/decompress.go (generated, vendored, 2 lines changed)
@@ -15,7 +15,7 @@
 // It is possible, but by no way guaranteed that corrupt data will
 // return an error.
 // It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
 func Decompress(b []byte, s *Scratch) ([]byte, error) {
 	s, err := s.prepare(b)
 	if err != nil {
vendor/github.com/klauspost/compress/huff0/decompress.go (generated, vendored, 4 lines changed)
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
 			errs++
 		}
 		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
 			continue
 		}
 		// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
 				errs++
 			}
 			if errs > 20 {
-				fmt.Fprintf(w, "%d errros, stopping\n", errs)
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
 				break
 			}
 		}
 	}
vendor/github.com/klauspost/compress/s2/encode.go (generated, vendored, 25 lines changed)
@@ -9,6 +9,9 @@
 	"encoding/binary"
 	"math"
 	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/internal/race"
 )

 // Encode returns the encoded form of src. The returned slice may be a sub-
@@ -52,6 +55,8 @@ func Encode(dst, src []byte) []byte {
 	return dst[:d]
 }

+var estblockPool [2]sync.Pool
+
 // EstimateBlockSize will perform a very fast compression
 // without outputting the result and return the compressed output size.
 // The function returns -1 if no improvement could be achieved.
@@ -61,9 +66,25 @@ func EstimateBlockSize(src []byte) (d int) {
 		return -1
 	}
 	if len(src) <= 1024 {
-		d = calcBlockSizeSmall(src)
+		const sz, pool = 2048, 0
+		tmp, ok := estblockPool[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer estblockPool[pool].Put(tmp)
+
+		d = calcBlockSizeSmall(src, tmp)
 	} else {
-		d = calcBlockSize(src)
+		const sz, pool = 32768, 1
+		tmp, ok := estblockPool[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer estblockPool[pool].Put(tmp)
+
+		d = calcBlockSize(src, tmp)
 	}

 	if d == 0 {
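The vendored s2 change swaps per-call scratch tables for per-size sync.Pool entries: each call tries to reuse a pooled *[N]byte, allocates a fresh one on a miss, and returns it when done. A stripped-down sketch of that reuse pattern follows; the size and function names are illustrative, and the race.WriteSlice call in the vendored code is a race-detector annotation that is omitted here.

package main

import (
	"fmt"
	"sync"
)

var scratchPool sync.Pool // holds *[32768]byte scratch tables

// withScratch borrows a fixed-size scratch table from the pool,
// allocating a new one if the pool is empty.
func withScratch(fn func(tmp *[32768]byte) int) int {
	tmp, ok := scratchPool.Get().(*[32768]byte)
	if !ok {
		tmp = &[32768]byte{}
	}
	defer scratchPool.Put(tmp)
	return fn(tmp)
}

func main() {
	n := withScratch(func(tmp *[32768]byte) int {
		// Pretend to use tmp as a hash table, as calcBlockSize does.
		return len(tmp)
	})
	fmt.Println(n) // 32768
}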
vendor/github.com/klauspost/compress/s2/encode_amd64.go (generated, vendored, 201 lines changed)
@@ -3,10 +3,16 @@

 package s2

-import "github.com/klauspost/compress/internal/race"
+import (
+	"sync"
+
+	"github.com/klauspost/compress/internal/race"
+)

 const hasAmd64Asm = true

+var encPools [4]sync.Pool
+
 // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
 // assumes that the varint-encoded length of the decompressed bytes has already
 // been written.
@@ -29,23 +35,60 @@ func encodeBlock(dst, src []byte) (d int) {
 	)

 	if len(src) >= 4<<20 {
-		return encodeBlockAsm(dst, src)
+		const sz, pool = 65536, 0
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeBlockAsm(dst, src, tmp)
 	}
 	if len(src) >= limit12B {
-		return encodeBlockAsm4MB(dst, src)
+		const sz, pool = 65536, 0
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeBlockAsm4MB(dst, src, tmp)
 	}
 	if len(src) >= limit10B {
-		return encodeBlockAsm12B(dst, src)
+		const sz, pool = 16384, 1
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeBlockAsm12B(dst, src, tmp)
 	}
 	if len(src) >= limit8B {
-		return encodeBlockAsm10B(dst, src)
+		const sz, pool = 4096, 2
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeBlockAsm10B(dst, src, tmp)
 	}
 	if len(src) < minNonLiteralBlockSize {
 		return 0
 	}
-	return encodeBlockAsm8B(dst, src)
+	const sz, pool = 1024, 3
+	tmp, ok := encPools[pool].Get().(*[sz]byte)
+	if !ok {
+		tmp = &[sz]byte{}
+	}
+	race.WriteSlice(tmp[:])
+	defer encPools[pool].Put(tmp)
+	return encodeBlockAsm8B(dst, src, tmp)
 }

+var encBetterPools [5]sync.Pool
+
 // encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
 // assumes that the varint-encoded length of the decompressed bytes has already
 // been written.
@@ -68,21 +111,59 @@ func encodeBlockBetter(dst, src []byte) (d int) {
 	)

 	if len(src) > 4<<20 {
-		return encodeBetterBlockAsm(dst, src)
+		const sz, pool = 589824, 0
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+		return encodeBetterBlockAsm(dst, src, tmp)
 	}
 	if len(src) >= limit12B {
-		return encodeBetterBlockAsm4MB(dst, src)
+		const sz, pool = 589824, 0
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+
+		return encodeBetterBlockAsm4MB(dst, src, tmp)
 	}
 	if len(src) >= limit10B {
-		return encodeBetterBlockAsm12B(dst, src)
+		const sz, pool = 81920, 0
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+
+		return encodeBetterBlockAsm12B(dst, src, tmp)
 	}
 	if len(src) >= limit8B {
-		return encodeBetterBlockAsm10B(dst, src)
+		const sz, pool = 20480, 1
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+		return encodeBetterBlockAsm10B(dst, src, tmp)
 	}
 	if len(src) < minNonLiteralBlockSize {
 		return 0
 	}
-	return encodeBetterBlockAsm8B(dst, src)
+
+	const sz, pool = 5120, 2
+	tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+	if !ok {
+		tmp = &[sz]byte{}
+	}
+	race.WriteSlice(tmp[:])
+	defer encBetterPools[pool].Put(tmp)
+	return encodeBetterBlockAsm8B(dst, src, tmp)
 }

 // encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -105,22 +186,57 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
 	// Use 8 bit table when less than...
 	limit8B = 512
 	)
-	if len(src) >= 64<<10 {
-		return encodeSnappyBlockAsm(dst, src)
+	if len(src) > 65536 {
+		const sz, pool = 65536, 0
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeSnappyBlockAsm(dst, src, tmp)
 	}
 	if len(src) >= limit12B {
-		return encodeSnappyBlockAsm64K(dst, src)
+		const sz, pool = 65536, 0
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeSnappyBlockAsm64K(dst, src, tmp)
 	}
 	if len(src) >= limit10B {
-		return encodeSnappyBlockAsm12B(dst, src)
+		const sz, pool = 16384, 1
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeSnappyBlockAsm12B(dst, src, tmp)
 	}
 	if len(src) >= limit8B {
-		return encodeSnappyBlockAsm10B(dst, src)
+		const sz, pool = 4096, 2
+		tmp, ok := encPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encPools[pool].Put(tmp)
+		return encodeSnappyBlockAsm10B(dst, src, tmp)
 	}
 	if len(src) < minNonLiteralBlockSize {
 		return 0
 	}
-	return encodeSnappyBlockAsm8B(dst, src)
+	const sz, pool = 1024, 3
+	tmp, ok := encPools[pool].Get().(*[sz]byte)
+	if !ok {
+		tmp = &[sz]byte{}
+	}
+	race.WriteSlice(tmp[:])
+	defer encPools[pool].Put(tmp)
+	return encodeSnappyBlockAsm8B(dst, src, tmp)
 }

 // encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -143,20 +259,59 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) {
 	// Use 8 bit table when less than...
 	limit8B = 512
 	)
-	if len(src) >= 64<<10 {
-		return encodeSnappyBetterBlockAsm(dst, src)
+	if len(src) > 65536 {
+		const sz, pool = 589824, 0
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+		return encodeSnappyBetterBlockAsm(dst, src, tmp)
 	}

 	if len(src) >= limit12B {
-		return encodeSnappyBetterBlockAsm64K(dst, src)
+		const sz, pool = 294912, 4
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+
+		return encodeSnappyBetterBlockAsm64K(dst, src, tmp)
 	}
 	if len(src) >= limit10B {
-		return encodeSnappyBetterBlockAsm12B(dst, src)
+		const sz, pool = 81920, 0
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+
+		return encodeSnappyBetterBlockAsm12B(dst, src, tmp)
 	}
 	if len(src) >= limit8B {
-		return encodeSnappyBetterBlockAsm10B(dst, src)
+		const sz, pool = 20480, 1
+		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+		if !ok {
+			tmp = &[sz]byte{}
+		}
+		race.WriteSlice(tmp[:])
+		defer encBetterPools[pool].Put(tmp)
+		return encodeSnappyBetterBlockAsm10B(dst, src, tmp)
 	}
 	if len(src) < minNonLiteralBlockSize {
 		return 0
 	}
-	return encodeSnappyBetterBlockAsm8B(dst, src)
+
+	const sz, pool = 5120, 2
+	tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+	if !ok {
+		tmp = &[sz]byte{}
+	}
+	race.WriteSlice(tmp[:])
+	defer encBetterPools[pool].Put(tmp)
+	return encodeSnappyBetterBlockAsm8B(dst, src, tmp)
 }
vendor/github.com/klauspost/compress/s2/encode_go.go (generated, vendored, 4 lines changed)
@@ -317,7 +317,7 @@ func matchLen(a []byte, b []byte) int {
 }

 // input must be > inputMargin
-func calcBlockSize(src []byte) (d int) {
+func calcBlockSize(src []byte, _ *[32768]byte) (d int) {
 	// Initialize the hash table.
 	const (
 		tableBits = 13
@@ -503,7 +503,7 @@ func calcBlockSize(src []byte) (d int) {
 }

 // length must be > inputMargin.
-func calcBlockSizeSmall(src []byte) (d int) {
+func calcBlockSizeSmall(src []byte, _ *[2048]byte) (d int) {
 	// Initialize the hash table.
 	const (
 		tableBits = 9
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go (generated, vendored, 44 lines changed)
@@ -11,154 +11,154 @@ func _dummy_()
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBlockAsm(dst []byte, src []byte) int
+func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int

 // encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 4194304 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBlockAsm4MB(dst []byte, src []byte) int
+func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int

 // encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 16383 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBlockAsm12B(dst []byte, src []byte) int
+func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int

 // encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 4095 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBlockAsm10B(dst []byte, src []byte) int
+func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int

 // encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 511 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBlockAsm8B(dst []byte, src []byte) int
+func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int

 // encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 4294967295 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBetterBlockAsm(dst []byte, src []byte) int
+func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int

 // encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 4194304 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int

 // encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 16383 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int

 // encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 4095 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int

 // encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
 // Maximum input 511 bytes.
 // It assumes that the varint-encoded length of the decompressed bytes has already been written.
 //
 //go:noescape
-func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int

 // encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 4294967295 bytes.
|
// Maximum input 4294967295 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBlockAsm(dst []byte, src []byte) int
|
func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
|
||||||
|
|
||||||
// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 65535 bytes.
|
// Maximum input 65535 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
|
func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int
|
||||||
|
|
||||||
// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 16383 bytes.
|
// Maximum input 16383 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
|
func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
|
||||||
|
|
||||||
// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 4095 bytes.
|
// Maximum input 4095 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
|
func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
|
||||||
|
|
||||||
// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 511 bytes.
|
// Maximum input 511 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
|
func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
|
||||||
|
|
||||||
// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 4294967295 bytes.
|
// Maximum input 4294967295 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
|
func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
|
||||||
|
|
||||||
// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 65535 bytes.
|
// Maximum input 65535 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
|
func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int
|
||||||
|
|
||||||
// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 16383 bytes.
|
// Maximum input 16383 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
|
func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
|
||||||
|
|
||||||
// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 4095 bytes.
|
// Maximum input 4095 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
|
func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
|
||||||
|
|
||||||
// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 511 bytes.
|
// Maximum input 511 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
|
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
|
||||||
|
|
||||||
// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst.
|
// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 4294967295 bytes.
|
// Maximum input 4294967295 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func calcBlockSize(src []byte) int
|
func calcBlockSize(src []byte, tmp *[32768]byte) int
|
||||||
|
|
||||||
// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst.
|
// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
// Maximum input 1024 bytes.
|
// Maximum input 1024 bytes.
|
||||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func calcBlockSizeSmall(src []byte) int
|
func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int
|
||||||
|
|
||||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
|
|
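The assembly stubs above now receive a caller-supplied scratch table, but that change stays internal to the s2 package; the public block API is unchanged. A minimal, hedged sketch of the unchanged caller-side usage (the sample data is illustrative, not from this diff):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("gotosocial "), 1024)

	// Block-level compression; the tmp tables threaded through the
	// assembly stubs above are allocated and managed inside the package.
	dst := s2.Encode(nil, src)

	out, err := s2.Decode(nil, dst)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), "->", len(dst), "roundtrip ok:", bytes.Equal(src, out))
}
```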
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s (generated, vendored; 25898 lines changed): file diff suppressed because it is too large.
vendor/github.com/klauspost/compress/s2/writer.go (generated, vendored; 31 lines changed)

@@ -83,11 +83,14 @@ type Writer struct {
 	snappy       bool
 	flushOnWrite bool
 	appendIndex  bool
+	bufferCB     func([]byte)
 	level        uint8
 }

 type result struct {
 	b []byte
+	// return when writing
+	ret []byte
 	// Uncompressed start offset
 	startOffset int64
 }
@@ -146,6 +149,10 @@ func (w *Writer) Reset(writer io.Writer) {
 		for write := range toWrite {
 			// Wait for the data to be available.
 			input := <-write
+			if input.ret != nil && w.bufferCB != nil {
+				w.bufferCB(input.ret)
+				input.ret = nil
+			}
 			in := input.b
 			if len(in) > 0 {
 				if w.err(nil) == nil {
@@ -341,7 +348,8 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
 // but the input buffer cannot be written to by the caller
 // until Flush or Close has been called when concurrency != 1.
 //
-// If you cannot control that, use the regular Write function.
+// Use the WriterBufferDone to receive a callback when the buffer is done
+// Processing.
 //
 // Note that input is not buffered.
 // This means that each write will result in discrete blocks being created.
@@ -364,6 +372,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
 	}
 	if w.concurrency == 1 {
 		_, err := w.writeSync(buf)
+		if w.bufferCB != nil {
+			w.bufferCB(buf)
+		}
 		return err
 	}
@@ -378,7 +389,7 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
 			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
 		}
 	}
-
+	orgBuf := buf
 	for len(buf) > 0 {
 		// Cut input.
 		uncompressed := buf
@@ -397,6 +408,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
 			startOffset: w.uncompWritten,
 		}
 		w.uncompWritten += int64(len(uncompressed))
+		if len(buf) == 0 && w.bufferCB != nil {
+			res.ret = orgBuf
+		}
 		go func() {
 			race.ReadSlice(uncompressed)
@@ -922,7 +936,7 @@ func WriterBetterCompression() WriterOption {
 }

 // WriterBestCompression will enable better compression.
-// EncodeBetter compresses better than Encode but typically with a
+// EncodeBest compresses better than Encode but typically with a
 // big speed decrease on compression.
 func WriterBestCompression() WriterOption {
 	return func(w *Writer) error {
@@ -941,6 +955,17 @@ func WriterUncompressed() WriterOption {
 	}
 }

+// WriterBufferDone will perform a callback when EncodeBuffer has finished
+// writing a buffer to the output and the buffer can safely be reused.
+// If the buffer was split into several blocks, it will be sent after the last block.
+// Callbacks will not be done concurrently.
+func WriterBufferDone(fn func(b []byte)) WriterOption {
+	return func(w *Writer) error {
+		w.bufferCB = fn
+		return nil
+	}
+}
+
 // WriterBlockSize allows to override the default block size.
 // Blocks will be this size or smaller.
 // Minimum size is 4KB and maximum size is 4MB.
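The new WriterBufferDone option pairs with EncodeBuffer: the callback tells the caller when a concurrently encoded buffer may be reused. A minimal, hedged sketch of how that might be wired up (the channel-based buffer recycling is illustrative, not from this diff):

```go
package main

import (
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Reuse a small pool of input buffers; a buffer goes back into the
	// pool only after the writer reports it is done with it.
	free := make(chan []byte, 4)
	for i := 0; i < 4; i++ {
		free <- make([]byte, 1<<20)
	}

	w := s2.NewWriter(os.Stdout, s2.WriterBufferDone(func(b []byte) {
		// Called after the last block of b has been written; b is safe to reuse.
		free <- b
	}))
	defer w.Close()

	for i := 0; i < 16; i++ {
		buf := <-free
		// ... fill buf with data before handing it to the writer ...
		if err := w.EncodeBuffer(buf); err != nil {
			panic(err)
		}
	}
}
```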
vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored; 4 lines changed)

@@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 			printf("RLE set to 0x%x, code: %v", symb, v)
 		}
 	case compModeFSE:
-		println("Reading table for", tableIndex(i))
+		if debugDecoder {
+			println("Reading table for", tableIndex(i))
+		}
 		if seq.fse == nil || seq.fse.preDefined {
 			seq.fse = fseDecoderPool.Get().(*fseDecoder)
 		}
vendor/github.com/klauspost/compress/zstd/enc_better.go (generated, vendored; 32 lines changed)

@@ -179,9 +179,9 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)
@@ -210,12 +210,12 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
 			index0 := s + repOff
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -241,9 +241,9 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
-				lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+				length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

-				seq.matchLen = uint32(lenght - zstdMinMatch)
+				seq.matchLen = uint32(length - zstdMinMatch)
@@ -270,11 +270,11 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
 				blk.sequences = append(blk.sequences, seq)
-				s += lenght + repOff2
+				s += length + repOff2
 				nextEmit = s
 				if s >= sLimit {
 					if debugEncoder {
-						println("repeat ended", s, lenght)
+						println("repeat ended", s, length)
 					}
 					break encodeLoop
@@ -708,9 +708,9 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)
@@ -738,12 +738,12 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
 			// Index match start+1 (long) -> s - 1
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -772,9 +772,9 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
-				lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+				length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

-				seq.matchLen = uint32(lenght - zstdMinMatch)
+				seq.matchLen = uint32(length - zstdMinMatch)
@@ -801,11 +801,11 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
 				blk.sequences = append(blk.sequences, seq)
-				s += lenght + repOff2
+				s += length + repOff2
 				nextEmit = s
 				if s >= sLimit {
 					if debugEncoder {
-						println("repeat ended", s, lenght)
+						println("repeat ended", s, length)
 					}
 					break encodeLoop
vendor/github.com/klauspost/compress/zstd/enc_dfast.go (generated, vendored; 16 lines changed)

@@ -138,9 +138,9 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)
@@ -166,11 +166,11 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
@@ -798,9 +798,9 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
-			lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+			length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-			seq.matchLen = uint32(lenght - zstdMinMatch)
+			seq.matchLen = uint32(length - zstdMinMatch)
@@ -826,11 +826,11 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
 			blk.sequences = append(blk.sequences, seq)
-			s += lenght + repOff
+			s += length + repOff
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
-					println("repeat ended", s, lenght)
+					println("repeat ended", s, length)
 				}
 				break encodeLoop
vendor/github.com/klauspost/compress/zstd/encoder.go (generated, vendored; 45 lines changed)

@@ -6,6 +6,7 @@
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
 // and write CRC if requested.
 func (e *Encoder) Write(p []byte) (n int, err error) {
 	s := &e.state
+	if s.eofWritten {
+		return 0, ErrEncoderClosed
+	}
 	for len(p) > 0 {
 		if len(p)+len(s.filling) < e.o.blockSize {
 			if e.o.crc {
@@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
 		return nil
 	}
 	if final && len(s.filling) > 0 {
-		s.current = e.EncodeAll(s.filling, s.current[:0])
+		s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
 		var n2 int
 		n2, s.err = s.w.Write(s.current)
 		if s.err != nil {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
+	if final {
+		s.eofWritten = true
+	}
 	go func(src []byte) {
 		if debugEncoder {
 			println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
 		blk := enc.Block()
 		enc.Encode(blk, src)
 		blk.last = final
-		if final {
-			s.eofWritten = true
-		}
 		// Wait for pending writes.
 		s.wWg.Wait()
 		if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
 	if len(s.filling) > 0 {
 		err := e.nextBlock(false)
 		if err != nil {
+			// Ignore Flush after Close.
+			if errors.Is(s.err, ErrEncoderClosed) {
+				return nil
+			}
 			return err
 		}
 	}
 	s.wg.Wait()
 	s.wWg.Wait()
 	if s.err != nil {
+		// Ignore Flush after Close.
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return s.err
 	}
 	return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
 	}
 	err := e.nextBlock(true)
 	if err != nil {
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return err
 	}
 	if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
 		}
 		_, s.err = s.w.Write(frame)
 	}
+	if s.err == nil {
+		s.err = ErrEncoderClosed
+		return nil
+	}
+
 	return s.err
 }
@@ -469,6 +489,15 @@ func (e *Encoder) Close() error {
 // Data compressed with EncodeAll can be decoded with the Decoder,
 // using either a stream or DecodeAll.
 func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	e.init.Do(e.initialize)
+	enc := <-e.encoders
+	defer func() {
+		e.encoders <- enc
+	}()
+	return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
 	if len(src) == 0 {
 		if e.o.fullZero {
 			// Add frame header.
@@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 		}
 		return dst
 	}
-	e.init.Do(e.initialize)
-	enc := <-e.encoders
-	defer func() {
-		// Release encoder reference to last block.
-		// If a non-single block is needed the encoder will reset again.
-		e.encoders <- enc
-	}()
 	// Use single segments when above minimum window and below window size.
 	single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
 	if e.o.single != nil {
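With these changes a closed Encoder reports the new ErrEncoderClosed sentinel on further writes, while Flush after Close becomes a no-op. A small, hedged sketch of what a caller can now rely on (the data written is illustrative):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var out bytes.Buffer
	enc, err := zstd.NewWriter(&out)
	if err != nil {
		panic(err)
	}

	if _, err := enc.Write([]byte("hello gotosocial")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Writing after Close now fails with the dedicated sentinel error.
	if _, err := enc.Write([]byte("late data")); errors.Is(err, zstd.ErrEncoderClosed) {
		fmt.Println("encoder already closed")
	}

	// Flush after Close is ignored rather than returning an error.
	if err := enc.Flush(); err != nil {
		panic(err)
	}
}
```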
vendor/github.com/klauspost/compress/zstd/framedec.go (generated, vendored; 4 lines changed)

@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
 		}
 		return err
 	}
-	printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+	if debugDecoder {
+		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+	}
 	windowLog := 10 + (wd >> 3)
 	windowBase := uint64(1) << windowLog
 	windowAdd := (windowBase / 8) * uint64(wd&0x7)
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go (generated, vendored; 4 lines changed)

@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 			return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		default:
-			return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+			return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
 		}
 		s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 			return io.ErrUnexpectedEOF
 		}
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
 	}
 	if ctx.litRemain < 0 {
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (generated, vendored; 8 lines changed)

@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
 	MOVQ 40(SP), AX
 	ADDQ AX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)

 	// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
 	MOVQ 40(SP), CX
 	ADDQ CX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)

 	// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
 	MOVQ 40(SP), AX
 	ADDQ AX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)

 	// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
 	MOVQ 40(SP), CX
 	ADDQ CX, 48(SP)

-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)

 	// outBase += outPosition
vendor/github.com/klauspost/compress/zstd/zstd.go (generated, vendored; 4 lines changed)

@@ -88,6 +88,10 @@
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")

+	// ErrEncoderClosed will be returned if the Encoder was used after
+	// Close has been called.
+	ErrEncoderClosed = errors.New("encoder used after Close")
+
 	// ErrDecoderNilInput is returned when a nil Reader was provided
 	// and an operation other than Reset/DecodeAll/Close was attempted.
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
vendor/github.com/minio/minio-go/v7/Makefile (generated, vendored; 4 lines changed)

@@ -32,6 +32,10 @@ functional-test:
 	@GO111MODULE=on go build -race functional_tests.go
 	@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests

+functional-test-notls:
+	@GO111MODULE=on go build -race functional_tests.go
+	@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=0 MINT_MODE=full ./functional_tests
+
 clean:
 	@echo "Cleaning up all the generated files"
 	@find . -name '*.test' | xargs rm -fv
vendor/github.com/minio/minio-go/v7/api-put-object.go (generated, vendored; 2 lines changed)

@@ -45,6 +45,8 @@
 	ReplicationStatusFailed ReplicationStatus = "FAILED"
 	// ReplicationStatusReplica indicates object is a replica of a source
 	ReplicationStatusReplica ReplicationStatus = "REPLICA"
+	// ReplicationStatusReplicaEdge indicates object is a replica of a edge source
+	ReplicationStatusReplicaEdge ReplicationStatus = "REPLICA-EDGE"
 )

 // Empty returns true if no replication status set.
vendor/github.com/minio/minio-go/v7/api.go (generated, vendored; 2 lines changed)

@@ -128,7 +128,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.77"
+	libraryVersion = "v7.0.78"
 )

 // User Agent should always following the below style.
vendor/github.com/minio/minio-go/v7/functional_tests.go (generated, vendored; 14 lines changed)

@@ -3565,16 +3565,10 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje
 		}
 	}

-	hasFullObjectChecksum := true
-	if OA.Checksum.ChecksumCRC32 == "" {
-		if OA.Checksum.ChecksumCRC32C == "" {
-			if OA.Checksum.ChecksumSHA1 == "" {
-				if OA.Checksum.ChecksumSHA256 == "" {
-					hasFullObjectChecksum = false
-				}
-			}
-		}
-	}
+	hasFullObjectChecksum := (OA.Checksum.ChecksumCRC32 != "" ||
+		OA.Checksum.ChecksumCRC32C != "" ||
+		OA.Checksum.ChecksumSHA1 != "" ||
+		OA.Checksum.ChecksumSHA256 != "")

 	if test.HasFullChecksum {
 		if !hasFullObjectChecksum {
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go (generated, vendored; 44 lines changed)

@@ -25,6 +25,7 @@
 	"io"
 	"net/http"
 	"net/url"
+	"os"
 	"strconv"
 	"strings"
 	"time"
@@ -85,29 +86,59 @@ type STSWebIdentity struct {
 	// assuming.
 	RoleARN string

+	// Policy is the policy where the credentials should be limited too.
+	Policy string
+
 	// roleSessionName is the identifier for the assumed role session.
 	roleSessionName string
 }

 // NewSTSWebIdentity returns a pointer to a new
 // Credentials object wrapping the STSWebIdentity.
-func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) {
 	if stsEndpoint == "" {
 		return nil, errors.New("STS endpoint cannot be empty")
 	}
 	if getWebIDTokenExpiry == nil {
 		return nil, errors.New("Web ID token and expiry retrieval function should be defined")
 	}
-	return New(&STSWebIdentity{
+	i := &STSWebIdentity{
 		Client: &http.Client{
 			Transport: http.DefaultTransport,
 		},
 		STSEndpoint:         stsEndpoint,
 		GetWebIDTokenExpiry: getWebIDTokenExpiry,
-	}), nil
+	}
+	for _, o := range opts {
+		o(i)
+	}
+	return New(i), nil
 }

-func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
+// NewKubernetesIdentity returns a pointer to a new
+// Credentials object using the Kubernetes service account
+func NewKubernetesIdentity(stsEndpoint string, opts ...func(*STSWebIdentity)) (*Credentials, error) {
+	return NewSTSWebIdentity(stsEndpoint, func() (*WebIdentityToken, error) {
+		token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+		if err != nil {
+			return nil, err
+		}
+
+		return &WebIdentityToken{
+			Token: string(token),
+		}, nil
+	}, opts...)
+}
+
+// WithPolicy option will enforce that the returned credentials
+// will be scoped down to the specified policy
+func WithPolicy(policy string) func(*STSWebIdentity) {
+	return func(i *STSWebIdentity) {
+		i.Policy = policy
+	}
+}
+
+func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, policy string,
 	getWebIDTokenExpiry func() (*WebIdentityToken, error),
 ) (AssumeRoleWithWebIdentityResponse, error) {
 	idToken, err := getWebIDTokenExpiry()
@@ -133,6 +164,9 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 	if idToken.Expiry > 0 {
 		v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
 	}
+	if policy != "" {
+		v.Set("Policy", policy)
+	}
 	v.Set("Version", STSVersion)

 	u, err := url.Parse(endpoint)
@@ -183,7 +217,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 // Retrieve retrieves credentials from the MinIO service.
 // Error will be returned if the request fails.
 func (m *STSWebIdentity) Retrieve() (Value, error) {
-	a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
+	a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
 	if err != nil {
 		return Value{}, err
 	}
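The new NewKubernetesIdentity and WithPolicy helpers make it possible to obtain scoped-down STS credentials from inside a pod. A hedged sketch of how a client might be wired up (the endpoint and policy JSON below are placeholders, not values from this diff):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical values: your MinIO/STS endpoint and an inline session policy.
	const stsEndpoint = "https://minio.example.internal"
	const policy = `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetObject"],"Resource":["arn:aws:s3:::my-bucket/*"]}]}`

	// Reads the Kubernetes service-account token and exchanges it for
	// temporary credentials limited to the given policy.
	creds, err := credentials.NewKubernetesIdentity(stsEndpoint, credentials.WithPolicy(policy))
	if err != nil {
		log.Fatal(err)
	}

	client, err := minio.New("minio.example.internal:9000", &minio.Options{
		Creds:  creds,
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual
}
```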
vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go (generated, vendored; 2 lines changed)

@@ -69,7 +69,7 @@ func (err errTag) Error() string {
 // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
 // borrowed from this article and also testing various ASCII characters following regex
 // is supported by AWS S3 for both tags and values.
-var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ =]+$`)

 func checkKey(key string) error {
 	if len(key) == 0 {
vendor/github.com/yuin/goldmark/ast/block.go (generated, vendored; 10 lines changed)

@@ -1,6 +1,7 @@
 package ast

 import (
+	"bytes"
 	"fmt"
 	"strings"

@@ -47,6 +48,15 @@ func (b *BaseBlock) SetLines(v *textm.Segments) {
 	b.lines = v
 }

+// Text implements Node.Text.
+func (b *BaseBlock) Text(source []byte) []byte {
+	var buf bytes.Buffer
+	for _, line := range b.Lines().Sliced(0, b.Lines().Len()) {
+		buf.Write(line.Value(source))
+	}
+	return buf.Bytes()
+}
+
 // A Document struct is a root node of Markdown text.
 type Document struct {
 	BaseBlock
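BaseBlock now carries its own Text implementation, so block nodes return their raw source lines. A hedged sketch of reading a block's text from a parsed document (the markdown snippet is illustrative, not from this diff):

```go
package main

import (
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
)

func main() {
	source := []byte("# Title\n\nA paragraph that spans\ntwo lines.\n")

	md := goldmark.New()
	doc := md.Parser().Parse(text.NewReader(source))

	// Walk the AST and print the raw text of every paragraph block.
	_ = ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
		if entering {
			if p, ok := n.(*ast.Paragraph); ok {
				fmt.Printf("%q\n", p.Text(source))
			}
		}
		return ast.WalkContinue, nil
	})
}
```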
vendor/github.com/yuin/goldmark/extension/table.go (generated, vendored; 4 lines changed)

@@ -184,11 +184,11 @@ func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, reader text.
 func (b *tableParagraphTransformer) parseRow(segment text.Segment,
 	alignments []ast.Alignment, isHeader bool, reader text.Reader, pc parser.Context) *ast.TableRow {
 	source := reader.Source()
+	segment = segment.TrimLeftSpace(source)
+	segment = segment.TrimRightSpace(source)
 	line := segment.Value(source)
 	pos := 0
-	pos += util.TrimLeftSpaceLength(line)
 	limit := len(line)
-	limit -= util.TrimRightSpaceLength(line)
 	row := ast.NewTableRow(alignments)
 	if len(line) > 0 && line[pos] == '|' {
 		pos++
vendor/github.com/yuin/goldmark/parser/blockquote.go (generated, vendored; 11 lines changed)

@@ -28,12 +28,13 @@ func (b *blockquoteParser) process(reader text.Reader) bool {
 		reader.Advance(pos)
 		return true
 	}
-	if line[pos] == ' ' || line[pos] == '\t' {
-		pos++
-	}
 	reader.Advance(pos)
-	if line[pos-1] == '\t' {
-		reader.SetPadding(2)
+	if line[pos] == ' ' || line[pos] == '\t' {
+		padding := 0
+		if line[pos] == '\t' {
+			padding = util.TabWidth(reader.LineOffset()) - 1
+		}
+		reader.AdvanceAndSetPadding(1, padding)
 	}
 	return true
 }
vendor/github.com/yuin/goldmark/parser/list_item.go (generated, vendored; 2 lines changed)

@@ -58,7 +58,7 @@ func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context)
 	}

 	offset := lastOffset(node.Parent())
-	isEmpty := node.ChildCount() == 0
+	isEmpty := node.ChildCount() == 0 && pc.Get(emptyListItemWithBlankLines) != nil
 	indent, _ := util.IndentWidth(line, reader.LineOffset())
 	if (isEmpty || indent < offset) && indent < 4 {
 		_, typ := matchesListItem(line, true)
vendor/github.com/yuin/goldmark/parser/parser.go (generated, vendored; 8 lines changed)

@@ -878,10 +878,17 @@ func (p *parser) Parse(reader text.Reader, opts ...ParseOption) ast.Node {
 	blockReader := text.NewBlockReader(reader.Source(), nil)
 	p.walkBlock(root, func(node ast.Node) {
 		p.parseBlock(blockReader, node, pc)
+		lines := node.Lines()
+		if lines != nil && lines.Len() != 0 {
+			s := lines.At(lines.Len() - 1)
+			s.EOB = true
+			lines.Set(lines.Len()-1, s)
+		}
 	})
 	for _, at := range p.astTransformers {
 		at.Transform(root, reader, pc)
 	}

 	// root.Dump(reader.Source(), 0)
 	return root
 }
@@ -1256,4 +1263,5 @@ func (p *parser) parseBlock(block text.BlockReader, parent ast.Node, pc Context)
 	for _, ip := range p.closeBlockers {
 		ip.CloseBlock(parent, block, pc)
 	}
+
 }
vendor/github.com/yuin/goldmark/text/segment.go (generated, vendored; 18 lines changed)

@@ -2,6 +2,7 @@
 import (
 	"bytes"
+
 	"github.com/yuin/goldmark/util"
 )

@@ -18,6 +19,9 @@ type Segment struct {

 	// Padding is a padding length of the segment.
 	Padding int
+
+	// EOB is true if the segment is end of the block.
+	EOB bool
 }

 // NewSegment return a new Segment.
@@ -40,12 +44,18 @@ func NewSegmentPadding(start, stop, n int) Segment {

 // Value returns a value of the segment.
 func (t *Segment) Value(buffer []byte) []byte {
+	var result []byte
 	if t.Padding == 0 {
-		return buffer[t.Start:t.Stop]
+		result = buffer[t.Start:t.Stop]
+	} else {
+		result = make([]byte, 0, t.Padding+t.Stop-t.Start+1)
+		result = append(result, bytes.Repeat(space, t.Padding)...)
+		result = append(result, buffer[t.Start:t.Stop]...)
 	}
-	result := make([]byte, 0, t.Padding+t.Stop-t.Start+1)
-	result = append(result, bytes.Repeat(space, t.Padding)...)
-	return append(result, buffer[t.Start:t.Stop]...)
+	if t.EOB && len(result) > 0 && result[len(result)-1] != '\n' {
+		result = append(result, '\n')
+	}
+	return result
 }

 // Len returns a length of the segment.
vendor/github.com/yuin/goldmark/util/html5entities.go (generated, vendored; 4264 lines changed): file diff suppressed because it is too large.
vendor/github.com/yuin/goldmark/util/util.go (generated, vendored; 6 lines changed)

@@ -166,7 +166,13 @@ func IndentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, pad
 	w := 0
 	i := 0
 	l := len(bs)
+	p := paddingv
 	for ; i < l; i++ {
+		if p > 0 {
+			p--
+			w++
+			continue
+		}
 		if bs[i] == '\t' && w < width {
 			w += TabWidth(currentPos + w)
 		} else if bs[i] == ' ' && w < width {

@@ -1,5 +1,5 @@
-//go:build !appengine && !js
-// +build !appengine,!js
+//go:build !appengine && !js && !go1.21
+// +build !appengine,!js,!go1.21

 package util
vendor/github.com/yuin/goldmark/util/util_unsafe_go121.go (generated, vendored; new file, 18 lines)

@@ -0,0 +1,18 @@
+//go:build !appengine && !js && go1.21
+// +build !appengine,!js,go1.21
+
+package util
+
+import (
+	"unsafe"
+)
+
+// BytesToReadOnlyString returns a string converted from given bytes.
+func BytesToReadOnlyString(b []byte) string {
+	return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// StringToReadOnlyBytes returns bytes converted from given string.
+func StringToReadOnlyBytes(s string) []byte {
+	return unsafe.Slice(unsafe.StringData(s), len(s))
+}
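On Go 1.21 and later these helpers now use unsafe.String and unsafe.Slice for zero-copy conversion. A brief, hedged sketch of the caller-side contract (the converted values must be treated as read-only):

```go
package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	b := []byte("no-copy conversion")

	// Zero-copy view of b as a string; b must not be mutated afterwards,
	// since the string shares its backing array.
	s := util.BytesToReadOnlyString(b)
	fmt.Println(s)

	// The reverse direction carries the same read-only contract.
	rb := util.StringToReadOnlyBytes("read-only bytes")
	fmt.Println(len(rb))
}
```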
vendor/modules.txt (vendored; 10 lines changed)

@@ -441,8 +441,8 @@ github.com/json-iterator/go
 # github.com/k3a/html2text v1.2.1
 ## explicit; go 1.16
 github.com/k3a/html2text
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.11
+## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -486,8 +486,8 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.77
-## explicit; go 1.21
+# github.com/minio/minio-go/v7 v7.0.78
+## explicit; go 1.22
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/cors
 github.com/minio/minio-go/v7/pkg/credentials
@@ -954,7 +954,7 @@ github.com/vmihailenco/tagparser/v2/internal/parser
 # github.com/wagslane/go-password-validator v0.3.0
 ## explicit; go 1.16
 github.com/wagslane/go-password-validator
-# github.com/yuin/goldmark v1.7.4
+# github.com/yuin/goldmark v1.7.6
 ## explicit; go 1.19
 github.com/yuin/goldmark
 github.com/yuin/goldmark/ast
@@ -41,6 +41,12 @@ main {
 		text-decoration: none;
 	}
 
+	.boosted {
+		padding: 0 0.75rem 0.75rem;
+		color: var(--fg-reduced);
+		font-weight: bold;
+	}
+
 	.status-header > address {
 		/*
 			Avoid stretching so wide that user
@@ -59,17 +65,27 @@ main {
 			"avatar author-strap author-strap";
 		gap: 0 0.5rem;
 		font-style: normal;
 
 		.avatar {
 			grid-area: avatar;
 			height: 3.5rem;
 			width: 3.5rem;
 			object-fit: cover;
+			position: relative;
 
 			border: 0.15rem solid $avatar-border;
 			border-radius: $br;
 			overflow: hidden; /* hides corners from img overflowing */
 
+			.boosted-avatar {
+				height: 50%;
+				width: 50%;
+				z-index: 10;
+				position: absolute;
+				bottom: 0;
+				inset-inline-end: 0;
+			}
+
 			img {
 				height: 100%;
 				width: 100%;
@@ -77,7 +93,7 @@ main {
 				background: $bg;
 			}
 		}
 
 		.author-strap {
 			grid-area: author-strap;
 			display: grid;
@@ -87,7 +103,7 @@ main {
 				"display display"
 				"user user";
 			gap: 0 0.5rem;
 
 			.displayname, .username {
 				justify-self: start;
 				align-self: start;
@@ -95,12 +111,12 @@ main {
 				font-size: 1rem;
 				line-height: 1.3rem;
 			}
 
 			.displayname {
 				grid-area: display;
 				font-weight: bold;
 			}
 
 			.username {
 				grid-area: user;
 				color: $link-fg;
@@ -200,34 +216,34 @@ main {
 	.poll {
 		background-color: $gray2;
 		z-index: 2;
 
 		display: flex;
 		flex-direction: column;
 		border-radius: $br;
 		padding: 0.5rem;
 		margin: 0;
 		gap: 1rem;
 
 		.poll-options {
 			margin: 0;
 			padding: 0;
 			display: flex;
 			flex-direction: column;
 			gap: 1rem;
 
 			.poll-option {
 				display: flex;
 				flex-direction: column;
 				gap: 0.1rem;
 
 				label {
 					cursor: default;
 				}
 
 				meter {
 					width: 100%;
 				}
 
 				.poll-vote-summary {
 					display: flex;
 					flex-wrap: wrap;
@@ -236,7 +252,7 @@ main {
 			}
 		}
 	}
 
 	.poll-info {
 		background-color: $gray4;
 		display: flex;
@@ -245,7 +261,7 @@ main {
 		border-radius: $br-inner;
 		padding: 0.25rem;
 		gap: 0.25rem;
 
 		span {
 			justify-self: center;
 			white-space: nowrap;
@@ -301,12 +317,12 @@ main {
 		width: 100%;
 		z-index: 3;
 		overflow: hidden;
 
 		display: grid;
 		padding: 1rem;
 		grid-template-columns: 1fr auto 1fr;
 		grid-template-rows: 1fr 1fr;
 		grid-template-areas:
 			"eye sensitive ."
 			". sensitive .";
 
@@ -369,7 +385,7 @@ main {
 		height: 100%;
 		padding: 0.8rem;
 		border: 0.2rem dashed $white2;
 
 		display: flex;
 		flex-direction: column;
 		align-items: center;
@@ -518,4 +534,4 @@ main {
 	.plyr {
 		max-height: 100%;
 	}
 }
@@ -77,7 +77,7 @@ function UserProfileForm({ data: profile }) {
 			maxPinnedFields: instance?.configuration?.accounts?.max_profile_fields ?? 6
 		};
 	}, [instance]);
 
 	// Parse out available theme options into nice format.
 	const { data: themes } = useAccountThemesQuery();
 	const themeOptions = useMemo(() => {
@@ -114,6 +114,7 @@ function UserProfileForm({ data: profile }) {
 		locked: useBoolInput("locked", { source: profile }),
 		discoverable: useBoolInput("discoverable", { source: profile}),
 		enableRSS: useBoolInput("enable_rss", { source: profile }),
+		hideBoosts: useBoolInput("hide_boosts", { source: profile }),
 		hideCollections: useBoolInput("hide_collections", { source: profile }),
 		webVisibility: useTextInput("web_visibility", { source: profile, valueSelector: (p) => p.source?.web_visibility }),
 		fields: useFieldArrayInput("fields_attributes", {
@@ -158,7 +159,7 @@ function UserProfileForm({ data: profile }) {
 					autoCapitalize="sentences"
 				/>
 			</div>
 
 			<div className="file-input-with-image-description">
 				<FileInput
 					label="Avatar (1:1 images look best)"
@@ -257,6 +258,10 @@ function UserProfileForm({ data: profile }) {
 				field={form.enableRSS}
 				label="Enable RSS feed of posts."
 			/>
+			<Checkbox
+				field={form.hideBoosts}
+				label="Hide boosts from your public page"
+			/>
 			<Checkbox
 				field={form.hideCollections}
 				label="Hide who you follow / are followed by."
@@ -247,6 +247,16 @@
       class="status expanded"
       {{- includeAttr "status_attributes.tmpl" . | indentAttr 6 }}
     >
+      {{- if .ReblogAccount }}
+      <div class="boosted text-cutoff">
+        <i class="fa fa-retweet" aria-hidden="true"></i>&nbsp;
+        {{- if $.account.DisplayName }}
+        {{- emojify $.account.Emojis (escape $.account.DisplayName) }} boosted
+        {{- else }}
+        {{- $.account.Username }} boosted
+        {{- end }}
+      </div>
+      {{- end }}
       {{- include "status.tmpl" . | indent 6 }}
     </article>
     {{- end }}
@@ -264,4 +274,4 @@
     </div>
   </div>
 </main>
 {{- end }}
@@ -48,6 +48,16 @@
         alt="Avatar for {{ .Username -}}"
         title="Avatar for {{ .Username -}}"
       >
+      {{ if $.ReblogAccount }}
+      <img
+        class="boosted-avatar"
+        src="{{ $.ReblogAccount.Avatar }}"
+        alt="Avatar for {{ $.ReblogAccount.Username -}}"
+        title="Avatar for {{ $.ReblogAccount.Username -}}"
+      >
+      {{ end }}
+
+
     </picture>
     <div class="author-strap">
       <span class="displayname text-cutoff">
@@ -63,4 +73,4 @@
       <span class="sr-only">(open profile)</span>
     </a>
   </address>
 {{- end }}