Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2024-12-05 01:52:46 +00:00

Compare commits: 68f80e7ab7 ... 5af3b3a616 (13 commits)

- 5af3b3a616
- 58af95a1d5
- f3e2d36d64
- 82b9515a9d
- 53ee6aef08
- b0fbc327f0
- 4be1f780a1
- 8db3d6b700
- 666b8bc4f2
- 7c6c74243b
- 75d3fca08c
- bd4c4d79fe
- c1543c029b
.drone.yml

@@ -12,7 +12,7 @@ steps:
 # We use golangci-lint for linting.
 # See: https://golangci-lint.run/
 - name: lint
-image: golangci/golangci-lint:v1.57.2
+image: golangci/golangci-lint:v1.60.3
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build

@@ -28,7 +28,7 @@ steps:
 - pull_request

 - name: test
-image: golang:1.22-alpine
+image: golang:1.23.0-alpine
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build

@@ -94,7 +94,7 @@ steps:
 - pull_request

 - name: snapshot
-image: superseriousbusiness/gotosocial-drone-build:0.6.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
+image: superseriousbusiness/gotosocial-drone-build:0.7.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build

@@ -135,7 +135,7 @@ steps:
 - main

 - name: release
-image: superseriousbusiness/gotosocial-drone-build:0.6.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
+image: superseriousbusiness/gotosocial-drone-build:0.7.0 # https://github.com/superseriousbusiness/gotosocial-drone-build
 volumes:
 - name: go-build-cache
 path: /root/.cache/go-build

@@ -194,7 +194,7 @@ clone:

 steps:
 - name: mirror
-image: superseriousbusiness/gotosocial-drone-build:0.6.0
+image: superseriousbusiness/gotosocial-drone-build:0.7.0
 environment:
 ORIGIN_REPO: https://github.com/superseriousbusiness/gotosocial
 TARGET_REPO: https://codeberg.org/superseriousbusiness/gotosocial

@@ -207,6 +207,6 @@ steps:

 ---
 kind: signature
-hmac: f4008d87e4e5b67251eb89f255c1224e6ab5818828cab24fc319b8f829176058
+hmac: 9810bf692fb1029c13b0a1e2f556e2306d16f7d3eec9ca6163a0499c147280c1

 ...
GoReleaser configuration:

@@ -1,4 +1,5 @@
-# https://goreleaser.com
+# Version 2 of GoReleaser: https://goreleaser.com/errors/version/
+version: 2
 project_name: gotosocial
 before:
 # https://goreleaser.com/customization/hooks/

@@ -185,7 +186,7 @@ checksum:
 name_template: 'checksums.txt'
 snapshot:
 # https://goreleaser.com/customization/snapshots/
-name_template: "{{ incpatch .Version }}-SNAPSHOT"
+version_template: "{{ incpatch .Version }}-SNAPSHOT"
 source:
 # https://goreleaser.com/customization/source/
 enabled: true
Dockerfile:

@@ -2,7 +2,7 @@
 # Dockerfile reference: https://docs.docker.com/engine/reference/builder/

 # stage 1: generate up-to-date swagger.yaml to put in the final container
-FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS swagger
+FROM --platform=${BUILDPLATFORM} golang:1.23.0-alpine AS swagger

 RUN \
 ### Installs goswagger for building swagger definitions inside this container

@@ -28,7 +28,7 @@ RUN yarn --cwd ./web/source install && \
 rm -rf ./web/source

 # stage 3: build the executor container
-FROM --platform=${TARGETPLATFORM} alpine:3.19.1 as executor
+FROM --platform=${TARGETPLATFORM} alpine:3.20.2 as executor

 # switch to non-root user:group for GtS
 USER 1000:1000
go.mod

@@ -1,13 +1,13 @@
 module github.com/superseriousbusiness/gotosocial

-go 1.22.2
+go 1.23

 replace modernc.org/sqlite => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.29.9-concurrency-workaround

 require (
 codeberg.org/gruf/go-bytes v1.0.2
 codeberg.org/gruf/go-bytesize v1.0.3
-codeberg.org/gruf/go-byteutil v1.2.0
+codeberg.org/gruf/go-byteutil v1.3.0
 codeberg.org/gruf/go-cache/v3 v3.5.7
 codeberg.org/gruf/go-debug v1.3.0
 codeberg.org/gruf/go-errors/v2 v2.3.2

@@ -19,9 +19,9 @@ require (
 codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760
 codeberg.org/gruf/go-mimetypes v1.2.0
 codeberg.org/gruf/go-mutexes v1.5.1
-codeberg.org/gruf/go-runners v1.6.2
-codeberg.org/gruf/go-sched v1.2.3
-codeberg.org/gruf/go-storage v0.1.2
+codeberg.org/gruf/go-runners v1.6.3
+codeberg.org/gruf/go-sched v1.2.4
+codeberg.org/gruf/go-storage v0.2.0
 codeberg.org/gruf/go-structr v0.8.9
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0
 github.com/DmitriyVTitov/size v1.5.0

@@ -42,7 +42,7 @@ require (
 github.com/k3a/html2text v1.2.1
 github.com/microcosm-cc/bluemonday v1.0.27
 github.com/miekg/dns v1.1.62
-github.com/minio/minio-go/v7 v7.0.76
+github.com/minio/minio-go/v7 v7.0.77
 github.com/mitchellh/mapstructure v1.5.0
 github.com/ncruces/go-sqlite3 v0.18.3
 github.com/oklog/ulid v1.3.1

@@ -85,8 +85,6 @@ require (
 )

 require (
-codeberg.org/gruf/go-atomics v1.1.0 // indirect
-codeberg.org/gruf/go-bitutil v1.1.0 // indirect
 codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
 codeberg.org/gruf/go-mangler v1.4.1 // indirect
 codeberg.org/gruf/go-maps v1.0.3 // indirect
go.sum

@@ -30,22 +30,16 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-codeberg.org/gruf/go-atomics v1.1.0 h1:ni9QXYoRUFYQMXE3akWaUb1wMcPBDc05Md6Rgml7W58=
-codeberg.org/gruf/go-atomics v1.1.0/go.mod h1:a/4/y/LgvjxjQVnpoy1VVkOSzLS1W9i1g4SJ0nflAa4=
-codeberg.org/gruf/go-bitutil v1.0.0/go.mod h1:sb8IjlDnjVTz8zPK/8lmHesKxY0Yb3iqHWjUM/SkphA=
-codeberg.org/gruf/go-bitutil v1.1.0 h1:U1Q+A1mtnPk+npqYrlRBc9ar2C5hYiBd17l1Wrp2Bt8=
-codeberg.org/gruf/go-bitutil v1.1.0/go.mod h1:rGibFevYTQfYKcPv0Df5KpG8n5xC3AfD4d/UgYeoNy0=
 codeberg.org/gruf/go-bytes v1.0.2 h1:malqE42Ni+h1nnYWBUAJaDDtEzF4aeN4uPN8DfMNNvo=
 codeberg.org/gruf/go-bytes v1.0.2/go.mod h1:1v/ibfaosfXSZtRdW2rWaVrDXMc9E3bsi/M9Ekx39cg=
 codeberg.org/gruf/go-bytesize v1.0.3 h1:Tz8tCxhPLeyM5VryuBNjUHgKmLj4Bx9RbPaUSA3qg6g=
 codeberg.org/gruf/go-bytesize v1.0.3/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacpp0OHfkvLPs=
-codeberg.org/gruf/go-byteutil v1.2.0 h1:YoxkpUOoHS82BcPXfiIcWLe/YhS8QhpNUHdfuhN09QM=
-codeberg.org/gruf/go-byteutil v1.2.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
+codeberg.org/gruf/go-byteutil v1.3.0 h1:nRqJnCcRQ7xbfU6azw7zOzJrSMDIJHBqX6FL9vEMYmU=
+codeberg.org/gruf/go-byteutil v1.3.0/go.mod h1:chgnZz1LUcfaObaIFglxF5MRYQkJGjQf4WwVz95ccCM=
 codeberg.org/gruf/go-cache/v3 v3.5.7 h1:5hut49a8Wp3hdwrCEJYj6pHY2aRR1hyTmkK4+wHVYq4=
 codeberg.org/gruf/go-cache/v3 v3.5.7/go.mod h1:Thahfuf3PgHSv2+1zHpvhRdX97tx1WXurVNGWpZucAM=
 codeberg.org/gruf/go-debug v1.3.0 h1:PIRxQiWUFKtGOGZFdZ3Y0pqyfI0Xr87j224IYe2snZs=
 codeberg.org/gruf/go-debug v1.3.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
-codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
 codeberg.org/gruf/go-errors/v2 v2.3.2 h1:8ItWaOMfhDaqrJK1Pw8MO0Nu+o/tVcQtR5cJ58Vc4zo=
 codeberg.org/gruf/go-errors/v2 v2.3.2/go.mod h1:LfzD9nkAAJpEDbkUqOZQ2jdaQ8VrK0pnR36zLOMFq6Y=
 codeberg.org/gruf/go-fastcopy v1.1.3 h1:Jo9VTQjI6KYimlw25PPc7YLA3Xm+XMQhaHwKnM7xD1g=

@@ -72,12 +66,12 @@ codeberg.org/gruf/go-mimetypes v1.2.0 h1:3rZGXY/SkNYbamiddWXs2gETXIBkGIeWYnbWpp2
 codeberg.org/gruf/go-mimetypes v1.2.0/go.mod h1:YiUWRj/nAdJQc+UFRvcsL6xXZsbc6b6Ic739ycEO8Yg=
 codeberg.org/gruf/go-mutexes v1.5.1 h1:xICU0WXhWr6wf+Iror4eE3xT+xnXNPrO6o77D/G6QuY=
 codeberg.org/gruf/go-mutexes v1.5.1/go.mod h1:rPEqQ/y6CmGITaZ3GPTMQVsoZAOzbsAHyIaLsJcOqVE=
-codeberg.org/gruf/go-runners v1.6.2 h1:oQef9niahfHu/wch14xNxlRMP8i+ABXH1Cb9PzZ4oYo=
-codeberg.org/gruf/go-runners v1.6.2/go.mod h1:Tq5PrZ/m/rBXbLZz0u5if+yP3nG5Sf6S8O/GnyEePeQ=
-codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk=
-codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A=
-codeberg.org/gruf/go-storage v0.1.2 h1:dIOVOKq1CJpRmuhbB8Zok3mmo8V6VV/nX5GLIm6hywA=
-codeberg.org/gruf/go-storage v0.1.2/go.mod h1:LRDpFHqRJi0f+35c3ltBH2e/pGfwY5dGlNlgCJ/R1DA=
+codeberg.org/gruf/go-runners v1.6.3 h1:To/AX7eTrWuXrTkA3RA01YTP5zha1VZ68LQ+0D4RY7E=
+codeberg.org/gruf/go-runners v1.6.3/go.mod h1:oXAaUmG2VxoKttpCqZGv5nQBeSvZSR2BzIk7h1yTRlU=
+codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw=
+codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk=
+codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI=
+codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU=
 codeberg.org/gruf/go-structr v0.8.9 h1:OyiSspWYCeJOm356fFPd+bDRumPrard2VAUXAPqZiJ0=
 codeberg.org/gruf/go-structr v0.8.9/go.mod h1:zkoXVrAnKosh8VFAsbP/Hhs8FmLBjbVVy5w/Ngm8ApM=
 codeberg.org/superseriousbusiness/exif-terminator v0.9.0 h1:/EfyGI6HIrbkhFwgXGSjZ9o1kr/+k8v4mKdfXTH02Go=

@@ -419,8 +413,8 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
 github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng=
-github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
+github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -145,8 +145,8 @@ func validateCreateEmoji(form *apimodel.EmojiCreateRequest) error {
 return errors.New("no emoji given")
 }

-maxSize := config.GetMediaEmojiLocalMaxSize()
-if form.Image.Size > int64(maxSize) {
+maxSize := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
+if form.Image.Size > maxSize {
 return fmt.Errorf("emoji image too large: image is %dKB but size limit for custom emojis is %dKB", form.Image.Size/1024, maxSize/1024)
 }

@@ -208,8 +208,8 @@ func validateUpdateEmoji(form *apimodel.EmojiUpdateRequest) error {
 }

 if hasImage {
-maxSize := config.GetMediaEmojiLocalMaxSize()
-if form.Image.Size > int64(maxSize) {
+maxSize := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
+if form.Image.Size > maxSize {
 return fmt.Errorf("emoji image too large: image is %dKB but size limit for custom emojis is %dKB", form.Image.Size/1024, maxSize/1024)
 }
 }
@@ -160,7 +160,7 @@ type MediaDimensions struct {
 Duration float32 `json:"duration,omitempty"`
 // Bitrate of the media in bits per second.
 // example: 1000000
-Bitrate int `json:"bitrate,omitempty"`
+Bitrate uint64 `json:"bitrate,omitempty"`
 // Size of the media, in the format `[width]x[height]`.
 // Not set for audio.
 // example: 1920x1080
internal/cache/domain/domain.go

@@ -220,7 +220,7 @@ func (n *node) getChild(part string) *node {

 for i < j {
 // avoid overflow when computing h
-h := int(uint(i+j) >> 1)
+h := int(uint(i+j) >> 1) // #nosec G115
 // i ≤ h < j

 if n.child[h].part < part {
@@ -25,6 +25,7 @@
 "encoding/pem"
 "errors"
 "fmt"
+"math"
 "net/url"
 "os"
 "runtime"

@@ -407,13 +408,12 @@ func maxOpenConns() int {
 // deriveBunDBPGOptions takes an application config and returns either a ready-to-use set of options
 // with sensible defaults, or an error if it's not satisfied by the provided config.
 func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
-url := config.GetDbPostgresConnectionString()
-// if database URL is defined, ignore other DB related configuration fields
-if url != "" {
-cfg, err := pgx.ParseConfig(url)
-return cfg, err
+// If database URL is defined, ignore
+// other DB-related configuration fields.
+if url := config.GetDbPostgresConnectionString(); url != "" {
+return pgx.ParseConfig(url)
 }

 // these are all optional, the db adapter figures out defaults
 address := config.GetDbAddress()

@@ -477,7 +477,10 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
 cfg.Host = address
 }
 if port := config.GetDbPort(); port > 0 {
-cfg.Port = uint16(port)
+if port > math.MaxUint16 {
+return nil, errors.New("invalid port, must be in range 1-65535")
+}
+cfg.Port = uint16(port) // #nosec G115 -- Just validated above.
 }
 if u := config.GetDbUser(); u != "" {
 cfg.User = u
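The port change above is representative of the pattern this comparison applies wherever gosec's G115 check (possible integer overflow on conversion) now fires: prove the wider value fits before narrowing it, then mark the conversion with #nosec G115. A minimal standalone sketch of that pattern, with illustrative names that are not taken from the repository:

package main

import (
	"errors"
	"fmt"
	"math"
)

// toPort narrows an int to uint16 only after checking the range,
// which is exactly what the G115 lint is asking callers to prove.
func toPort(port int) (uint16, error) {
	if port < 1 || port > math.MaxUint16 {
		return 0, errors.New("invalid port, must be in range 1-65535")
	}
	return uint16(port), nil // #nosec G115 -- Just validated above.
}

func main() {
	p, err := toPort(5432)
	fmt.Println(p, err) // 5432 <nil>

	_, err = toPort(70000)
	fmt.Println(err) // invalid port, must be in range 1-65535
}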
@@ -97,11 +97,11 @@ func() (*media.ProcessingEmoji, error) {
 }

 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }

 // Create new emoji with prepared info.

@@ -189,11 +189,11 @@ func() (*media.ProcessingEmoji, error) {
 }

 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }

 // Update emoji with prepared info.

@@ -255,11 +255,11 @@ func() (*media.ProcessingEmoji, error) {
 }

 // Get maximum supported remote emoji size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Prepare data function to dereference remote emoji media.
 data := func(context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 }

 // Recache emoji with prepared info.
@@ -77,14 +77,14 @@ func() (*media.ProcessingMedia, error) {
 }

 // Get maximum supported remote media size.
-maxsz := config.GetMediaRemoteMaxSize()
+maxsz := int64(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Create media with prepared info.
 return d.mediaManager.CreateMedia(
 ctx,
 accountID,
 func(ctx context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 },
 info,
 )

@@ -168,14 +168,14 @@ func() (*media.ProcessingMedia, error) {
 }

 // Get maximum supported remote media size.
-maxsz := config.GetMediaRemoteMaxSize()
+maxsz := int64(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.

 // Recache media with prepared info,
 // this will also update media in db.
 return d.mediaManager.CacheMedia(
 attach,
 func(ctx context.Context) (io.ReadCloser, error) {
-return tsport.DereferenceMedia(ctx, url, int64(maxsz))
+return tsport.DereferenceMedia(ctx, url, maxsz)
 },
 ), nil
 },
@@ -340,14 +340,14 @@ func (c *Client) do(r *Request) (rsp *http.Response, retry bool, err error) {

 if u, _ := strconv.ParseUint(after, 10, 32); u != 0 {
 // An integer no. of backoff seconds was provided.
-r.backoff = time.Duration(u) * time.Second
+r.backoff = time.Duration(u) * time.Second // #nosec G115 -- We clamp backoff below.
 } else if at, _ := http.ParseTime(after); !at.Before(now) {
 // An HTTP formatted future date-time was provided.
 r.backoff = at.Sub(now)
 }

 // Don't let their provided backoff exceed our max.
-if max := baseBackoff * time.Duration(c.retries); //
+if max := baseBackoff * time.Duration(c.retries); // #nosec G115 -- We control c.retries.
 r.backoff > max {
 r.backoff = max
 }
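The Retry-After handling annotated above accepts both header forms (an integer number of seconds or an HTTP date) and then clamps the result to a maximum. A self-contained sketch of that idea, with an invented clamp value and function name rather than the client's real configuration:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// parseRetryAfter turns a Retry-After header value into a backoff duration,
// clamped to maxBackoff. Both the seconds form and the HTTP-date form are accepted.
func parseRetryAfter(value string, now time.Time, maxBackoff time.Duration) time.Duration {
	var backoff time.Duration

	if secs, err := strconv.ParseUint(value, 10, 32); err == nil && secs != 0 {
		// An integer number of backoff seconds was provided.
		backoff = time.Duration(secs) * time.Second // #nosec G115 -- clamped below.
	} else if at, err := http.ParseTime(value); err == nil && at.After(now) {
		// An HTTP-formatted future date-time was provided.
		backoff = at.Sub(now)
	}

	// Never exceed the configured maximum.
	if backoff > maxBackoff {
		backoff = maxBackoff
	}
	return backoff
}

func main() {
	now := time.Now()
	fmt.Println(parseRetryAfter("120", now, time.Minute)) // 1m0s (clamped)

	future := now.Add(30 * time.Second).UTC().Format(http.TimeFormat)
	fmt.Println(parseRetryAfter(future, now, time.Minute)) // roughly 30s
}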
@@ -21,6 +21,7 @@
 "context"
 "encoding/json"
 "errors"
+"math"
 "os"
 "path"
 "strconv"

@@ -548,10 +549,18 @@ func (res *ffprobeResult) Process() (*result, error) {
 if p := strings.SplitN(str, "/", 2); len(p) == 2 {
 n, _ := strconv.ParseUint(p[0], 10, 32)
 d, _ := strconv.ParseUint(p[1], 10, 32)
-num, den = uint32(n), uint32(d)
+if n > math.MaxUint32 || d > math.MaxUint32 {
+return nil, gtserror.Newf("overflowed numerator or denominator")
+}
+num, den = uint32(n), uint32(d) // #nosec G115 -- Just checked.
 } else {
 n, _ := strconv.ParseUint(p[0], 10, 32)
-num = uint32(n)
+if n > math.MaxUint32 {
+return nil, gtserror.Newf("overflowed numerator")
+}
+num = uint32(n) // #nosec G115 -- Just checked.
 }

 // Set final divised framerate.
@@ -399,9 +399,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 g16 := uint16(s[1])
 b16 := uint16(s[2])
 a16 := uint16(a)
-d[0] = uint8(r16 * 0xff / a16)
-d[1] = uint8(g16 * 0xff / a16)
-d[2] = uint8(b16 * 0xff / a16)
+d[0] = uint8(r16 * 0xff / a16) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g16 * 0xff / a16) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b16 * 0xff / a16) // #nosec G115 -- Overflow desired.
 d[3] = a
 }
 j += 4

@@ -431,9 +431,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 g32 := uint32(s[2])<<8 | uint32(s[3])
 b32 := uint32(s[4])<<8 | uint32(s[5])
 a32 := uint32(s[6])<<8 | uint32(s[7])
-d[0] = uint8((r32 * 0xffff / a32) >> 8)
-d[1] = uint8((g32 * 0xffff / a32) >> 8)
-d[2] = uint8((b32 * 0xffff / a32) >> 8)
+d[0] = uint8((r32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8((g32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8((b32 * 0xffff / a32) >> 8) // #nosec G115 -- Overflow desired.
 }
 d[3] = a
 j += 4

@@ -530,9 +530,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 }

 d := dst[j : j+4 : j+4]
-d[0] = uint8(r)
-d[1] = uint8(g)
-d[2] = uint8(b)
+d[0] = uint8(r) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b) // #nosec G115 -- Overflow desired.
 d[3] = 0xff

 iy++

@@ -569,9 +569,9 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 d := dst[j : j+4 : j+4]
 switch a16 {
 case 0xffff:
-d[0] = uint8(r16 >> 8)
-d[1] = uint8(g16 >> 8)
-d[2] = uint8(b16 >> 8)
+d[0] = uint8(r16 >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8(g16 >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8(b16 >> 8) // #nosec G115 -- Overflow desired.
 d[3] = 0xff
 case 0:
 d[0] = 0

@@ -579,10 +579,10 @@ func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
 d[2] = 0
 d[3] = 0
 default:
-d[0] = uint8(((r16 * 0xffff) / a16) >> 8)
-d[1] = uint8(((g16 * 0xffff) / a16) >> 8)
-d[2] = uint8(((b16 * 0xffff) / a16) >> 8)
-d[3] = uint8(a16 >> 8)
+d[0] = uint8(((r16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[1] = uint8(((g16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[2] = uint8(((b16 * 0xffff) / a16) >> 8) // #nosec G115 -- Overflow desired.
+d[3] = uint8(a16 >> 8) // #nosec G115 -- Overflow desired.
 }
 j += 4
 }

@@ -617,7 +617,7 @@ func clampFloat(x float64) uint8 {
 return 255
 }
 if v > 0 {
-return uint8(v)
+return uint8(v) // #nosec G115 -- Just checked.
 }
 return 0
 }
@@ -216,10 +216,18 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 "png",
 )

+// Get mimetype for the file container
+// type, falling back to generic data.
+p.emoji.ImageContentType = getMimeType(ext)
+
+// Set the known emoji static content type.
+p.emoji.ImageStaticContentType = "image/png"
+
 // Copy temporary file into storage at path.
 filesz, err := p.mgr.state.Storage.PutFile(ctx,
 p.emoji.ImagePath,
 temppath,
+p.emoji.ImageContentType,
 )
 if err != nil {
 return gtserror.Newf("error writing emoji to storage: %w", err)

@@ -229,6 +237,7 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 staticsz, err := p.mgr.state.Storage.PutFile(ctx,
 p.emoji.ImageStaticPath,
 staticpath,
+p.emoji.ImageStaticContentType,
 )
 if err != nil {
 return gtserror.Newf("error writing static to storage: %w", err)

@@ -256,13 +265,6 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
 "png",
 )

-// Get mimetype for the file container
-// type, falling back to generic data.
-p.emoji.ImageContentType = getMimeType(ext)
-
-// Set the known emoji static content type.
-p.emoji.ImageStaticContentType = "image/png"
-
 // We can now consider this cached.
 p.emoji.Cached = util.Ptr(true)
@@ -261,10 +261,15 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 ext,
 )

+// Get mimetype for the file container
+// type, falling back to generic data.
+p.media.File.ContentType = getMimeType(ext)
+
 // Copy temporary file into storage at path.
 filesz, err := p.mgr.state.Storage.PutFile(ctx,
 p.media.File.Path,
 temppath,
+p.media.File.ContentType,
 )
 if err != nil {
 return gtserror.Newf("error writing media to storage: %w", err)

@@ -286,10 +291,14 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 thumbExt,
 )

+// Determine thumbnail content-type from thumb ext.
+p.media.Thumbnail.ContentType = getMimeType(thumbExt)
+
 // Copy thumbnail file into storage at path.
 thumbsz, err := p.mgr.state.Storage.PutFile(ctx,
 p.media.Thumbnail.Path,
 thumbpath,
+p.media.Thumbnail.ContentType,
 )
 if err != nil {
 return gtserror.Newf("error writing thumb to storage: %w", err)

@@ -298,9 +307,6 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 // Set final determined thumbnail size.
 p.media.Thumbnail.FileSize = int(thumbsz)

-// Determine thumbnail content-type from thumb ext.
-p.media.Thumbnail.ContentType = getMimeType(thumbExt)
-
 // Generate a media attachment thumbnail URL.
 p.media.Thumbnail.URL = uris.URIForAttachment(
 p.media.AccountID,

@@ -320,10 +326,6 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
 ext,
 )

-// Get mimetype for the file container
-// type, falling back to generic data.
-p.media.File.ContentType = getMimeType(ext)
-
 // We can now consider this cached.
 p.media.Cached = util.Ptr(true)
@@ -49,9 +49,6 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 refetchIDs []string
 )

-// Get max supported remote emoji media size.
-maxsz := config.GetMediaEmojiRemoteMaxSize()
-
 // page through emojis 20 at a time, looking for those with missing images
 for {
 // Fetch next block of emojis from database

@@ -111,8 +108,10 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
 continue
 }

+// Get max supported remote emoji media size.
+maxsz := int64(config.GetMediaEmojiRemoteMaxSize()) // #nosec G115 -- Already validated.
 dataFunc := func(ctx context.Context) (reader io.ReadCloser, err error) {
-return dereferenceMedia(ctx, emojiImageIRI, int64(maxsz))
+return dereferenceMedia(ctx, emojiImageIRI, maxsz)
 }

 processingEmoji, err := m.UpdateEmoji(ctx, emoji, dataFunc, AdditionalEmojiInfo{
@ -462,11 +462,11 @@ func (p *Processor) UpdateAvatar(
|
||||||
gtserror.WithCode,
|
gtserror.WithCode,
|
||||||
) {
|
) {
|
||||||
// Get maximum supported local media size.
|
// Get maximum supported local media size.
|
||||||
maxsz := config.GetMediaLocalMaxSize()
|
maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.
|
||||||
|
|
||||||
// Ensure media within size bounds.
|
// Ensure media within size bounds.
|
||||||
if avatar.Size > int64(maxsz) {
|
if avatar.Size > maxsz {
|
||||||
text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
|
text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
|
||||||
return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
|
return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -478,7 +478,7 @@ func (p *Processor) UpdateAvatar(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wrap the multipart file reader to ensure is limited to max.
|
// Wrap the multipart file reader to ensure is limited to max.
|
||||||
rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
|
rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
|
||||||
|
|
||||||
// Write to instance storage.
|
// Write to instance storage.
|
||||||
return p.c.StoreLocalMedia(ctx,
|
return p.c.StoreLocalMedia(ctx,
|
||||||
|
@ -507,11 +507,11 @@ func (p *Processor) UpdateHeader(
|
||||||
gtserror.WithCode,
|
gtserror.WithCode,
|
||||||
) {
|
) {
|
||||||
// Get maximum supported local media size.
|
// Get maximum supported local media size.
|
||||||
maxsz := config.GetMediaLocalMaxSize()
|
maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.
|
||||||
|
|
||||||
// Ensure media within size bounds.
|
// Ensure media within size bounds.
|
||||||
if header.Size > int64(maxsz) {
|
if header.Size > maxsz {
|
||||||
text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
|
text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
|
||||||
return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
|
return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -523,7 +523,7 @@ func (p *Processor) UpdateHeader(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wrap the multipart file reader to ensure is limited to max.
|
// Wrap the multipart file reader to ensure is limited to max.
|
||||||
rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
|
rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
|
||||||
|
|
||||||
// Write to instance storage.
|
// Write to instance storage.
|
||||||
return p.c.StoreLocalMedia(ctx,
|
return p.c.StoreLocalMedia(ctx,
|
||||||
|
|
|
@@ -45,11 +45,11 @@ func (p *Processor) EmojiCreate(
 ) (*apimodel.Emoji, gtserror.WithCode) {

 // Get maximum supported local emoji size.
-maxsz := config.GetMediaEmojiLocalMaxSize()
+maxsz := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if form.Image.Size > int64(maxsz) {
-text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz)
+if form.Image.Size > maxsz {
+text := fmt.Sprintf("emoji exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -61,7 +61,7 @@ func (p *Processor) EmojiCreate(
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 data := func(context.Context) (io.ReadCloser, error) {
 return rc, nil
 }

@@ -441,11 +441,11 @@ func (p *Processor) emojiUpdateModify(
 // We can do both at the same time :)

 // Get maximum supported local emoji size.
-maxsz := config.GetMediaEmojiLocalMaxSize()
+maxsz := int64(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if image.Size > int64(maxsz) {
-text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz)
+if image.Size > maxsz {
+text := fmt.Sprintf("emoji exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -457,7 +457,7 @@ func (p *Processor) emojiUpdateModify(
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)
 data := func(context.Context) (io.ReadCloser, error) {
 return rc, nil
 }
@@ -35,11 +35,11 @@
 func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form *apimodel.AttachmentRequest) (*apimodel.Attachment, gtserror.WithCode) {

 // Get maximum supported local media size.
-maxsz := config.GetMediaLocalMaxSize()
+maxsz := int64(config.GetMediaLocalMaxSize()) // #nosec G115 -- Already validated.

 // Ensure media within size bounds.
-if form.File.Size > int64(maxsz) {
-text := fmt.Sprintf("media exceeds configured max size: %s", maxsz)
+if form.File.Size > maxsz {
+text := fmt.Sprintf("media exceeds configured max size: %d", maxsz)
 return nil, gtserror.NewErrorBadRequest(errors.New(text), text)
 }

@@ -58,7 +58,7 @@ func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form
 }

 // Wrap the multipart file reader to ensure is limited to max.
-rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz))
+rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, maxsz)

 // Create local media and write to instance storage.
 attachment, errWithCode := p.c.StoreLocalMedia(ctx,
@@ -97,23 +97,39 @@ func (d *Driver) Put(ctx context.Context, key string, value []byte) (int, error)
 return d.Storage.WriteBytes(ctx, key, value)
 }

-// PutStream writes the bytes from supplied reader at key in the storage
-func (d *Driver) PutStream(ctx context.Context, key string, r io.Reader) (int64, error) {
-return d.Storage.WriteStream(ctx, key, r)
-}
-
-// PutFile moves the contents of file at path, to storage.Driver{} under given key.
-func (d *Driver) PutFile(ctx context.Context, key string, filepath string) (int64, error) {
+// PutFile moves the contents of file at path, to storage.Driver{} under given key (with content-type if supported).
+func (d *Driver) PutFile(ctx context.Context, key, filepath, contentType string) (int64, error) {
 // Open file at path for reading.
 file, err := os.Open(filepath)
 if err != nil {
 return 0, gtserror.Newf("error opening file %s: %w", filepath, err)
 }

-// Write the file data to storage under key. Note
-// that for disk.DiskStorage{} this should end up
-// being a highly optimized Linux sendfile syscall.
-sz, err := d.Storage.WriteStream(ctx, key, file)
+var sz int64
+
+switch d := d.Storage.(type) {
+case *s3.S3Storage:
+var info minio.UploadInfo
+
+// For S3 storage, write the file but specifically pass in the
+// content-type as an extra option. This handles the case of media
+// being served via CDN redirect (where we don't handle content-type).
+info, err = d.PutObject(ctx, key, file, minio.PutObjectOptions{
+ContentType: contentType,
+})
+
+// Get size from
+// uploaded info.
+sz = info.Size
+
+default:
+// Write the file data to storage under key. Note
+// that for disk.DiskStorage{} this should end up
+// being a highly optimized Linux sendfile syscall.
+sz, err = d.WriteStream(ctx, key, file)
+}
+
+// Wrap write error.
 if err != nil {
 err = gtserror.Newf("error writing file %s: %w", key, err)
 }

@@ -305,11 +321,7 @@ func NewS3Storage() (*Driver, error) {
 Creds: credentials.NewStaticV4(access, secret, ""),
 Secure: secure,
 },
-GetOpts: minio.GetObjectOptions{},
-PutOpts: minio.PutObjectOptions{},
 PutChunkSize: 5 * 1024 * 1024, // 5MiB
-StatOpts: minio.StatObjectOptions{},
-RemoveOpts: minio.RemoveObjectOptions{},
 ListSize: 200,
 })
 if err != nil {
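The PutFile change above branches on the concrete storage backend so that a content-type can be attached where the backend supports it (S3) and simply ignored where it cannot be (local disk). A schematic of that design choice using made-up backend types — not the gruf/go-storage or minio types used in the real code:

package main

import "fmt"

// writer is the minimal operation every backend supports.
type writer interface {
	Write(key string, data []byte) (int64, error)
}

// diskBackend can only write bytes; it has nowhere to record a content-type.
type diskBackend struct{}

func (diskBackend) Write(key string, data []byte) (int64, error) {
	return int64(len(data)), nil
}

// s3Backend additionally accepts per-object options such as a content-type.
type s3Backend struct{ contentTypes map[string]string }

func (b *s3Backend) Write(key string, data []byte) (int64, error) {
	return int64(len(data)), nil
}

func (b *s3Backend) WriteWithContentType(key string, data []byte, contentType string) (int64, error) {
	b.contentTypes[key] = contentType
	return int64(len(data)), nil
}

// putFile mirrors the shape of the change: type-switch on the concrete
// backend and pass the content-type only where it is supported.
func putFile(w writer, key string, data []byte, contentType string) (int64, error) {
	switch b := w.(type) {
	case *s3Backend:
		return b.WriteWithContentType(key, data, contentType)
	default:
		return b.Write(key, data)
	}
}

func main() {
	s3 := &s3Backend{contentTypes: map[string]string{}}
	n, _ := putFile(s3, "emoji.png", []byte("fake image bytes"), "image/png")
	fmt.Println(n, s3.contentTypes["emoji.png"]) // 16 image/png

	n, _ = putFile(diskBackend{}, "emoji.png", []byte("fake image bytes"), "image/png")
	fmt.Println(n) // 16
}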
@@ -18,6 +18,7 @@
 package delivery_test

 import (
+"bytes"
 "fmt"
 "io"
 "math/rand"

@@ -27,7 +28,6 @@
 "strings"
 "testing"

-"codeberg.org/gruf/go-byteutil"
 "github.com/superseriousbusiness/gotosocial/internal/config"
 "github.com/superseriousbusiness/gotosocial/internal/httpclient"
 "github.com/superseriousbusiness/gotosocial/internal/queue"

@@ -176,9 +176,9 @@ func requiresBody(method string) bool {
 func (t *testrequest) Generate(addr string) *http.Request {
 var body io.ReadCloser
 if t.body != nil {
-var b byteutil.ReadNopCloser
+var b bytes.Reader
 b.Reset(t.body)
-body = &b
+body = io.NopCloser(&b)
 }
 req, err := http.NewRequest(t.method, addr+t.uri, body)
 if err != nil {
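The test change above swaps a third-party no-op ReadCloser for the standard-library equivalent: a bytes.Reader reset over the payload and wrapped in io.NopCloser. A minimal standalone sketch of the same construction, with a placeholder URL and payload:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	// Reset a bytes.Reader over the payload, then wrap it so it
	// satisfies the io.ReadCloser expected of a request body.
	var b bytes.Reader
	b.Reset(payload)
	body := io.NopCloser(&b)

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/inbox", body)
	if err != nil {
		panic(err)
	}

	// The body reads back exactly the payload bytes.
	got, _ := io.ReadAll(req.Body)
	fmt.Println(string(got) == string(payload)) // true
}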
@@ -647,7 +647,7 @@ func (c *Converter) AttachmentToAPIAttachment(ctx context.Context, media *gtsmod
 Size: toAPISize(media.FileMeta.Original.Width, media.FileMeta.Original.Height),
 FrameRate: toAPIFrameRate(media.FileMeta.Original.Framerate),
 Duration: util.PtrOrZero(media.FileMeta.Original.Duration),
-Bitrate: int(util.PtrOrZero(media.FileMeta.Original.Bitrate)),
+Bitrate: util.PtrOrZero(media.FileMeta.Original.Bitrate),
 }

 // Copy over local file URL.

@@ -1551,9 +1551,9 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL
 instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes
 instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes
-instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit
-instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit
 instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit
 instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions()

@@ -1563,7 +1563,7 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Accounts.AllowCustomCSS = config.GetAccountsAllowCustomCSS()
 instance.Configuration.Accounts.MaxFeaturedTags = instanceAccountsMaxFeaturedTags
 instance.Configuration.Accounts.MaxProfileFields = instanceAccountsMaxProfileFields
-instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize())
+instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.OIDCEnabled = config.GetOIDCEnabled()

 // URLs

@@ -1695,9 +1695,9 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL
 instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes
 instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes
-instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit
-instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize())
+instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit
 instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit
 instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions()

@@ -1707,7 +1707,7 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins
 instance.Configuration.Accounts.AllowCustomCSS = config.GetAccountsAllowCustomCSS()
 instance.Configuration.Accounts.MaxFeaturedTags = instanceAccountsMaxFeaturedTags
 instance.Configuration.Accounts.MaxProfileFields = instanceAccountsMaxProfileFields
-instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize())
+instance.Configuration.Emojis.EmojiSizeLimit = int(config.GetMediaEmojiLocalMaxSize()) // #nosec G115 -- Already validated.
 instance.Configuration.OIDCEnabled = config.GetOIDCEnabled()

 // registrations
@@ -87,7 +87,7 @@ func (w *Workers) Start() {
 w.Dereference.Start(n)
 log.Infof(nil, "started %d dereference workers", n)

-n = 4 * maxprocs
+n = maxprocs
 w.Processing.Start(n)
 log.Infof(nil, "started %d processing workers", n)
 }
vendor/codeberg.org/gruf/go-atomics/LICENSE (generated, vendored; deleted)

@@ -1,9 +0,0 @@
-MIT License
-
-Copyright (c) 2022 gruf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3
vendor/codeberg.org/gruf/go-atomics/README.md
generated
vendored
3
vendor/codeberg.org/gruf/go-atomics/README.md
generated
vendored
|
@ -1,3 +0,0 @@
|
||||||
# go-atomics
|
|
||||||
|
|
||||||
This library provides a variety of types for atomic operations on common Go types.
|
|
57
vendor/codeberg.org/gruf/go-atomics/atomic.tpl
generated
vendored
57
vendor/codeberg.org/gruf/go-atomics/atomic.tpl
generated
vendored
|
@ -1,57 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// {{ .Name }} provides user-friendly means of performing atomic operations on {{ .Type }} types.
|
|
||||||
type {{ .Name }} struct{ ptr unsafe.Pointer }
|
|
||||||
|
|
||||||
// New{{ .Name }} will return a new {{ .Name }} instance initialized with zero value.
|
|
||||||
func New{{ .Name }}() *{{ .Name }} {
|
|
||||||
var v {{ .Type }}
|
|
||||||
return &{{ .Name }}{
|
|
||||||
ptr: unsafe.Pointer(&v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store {{ .Type }} value in address contained within v.
|
|
||||||
func (v *{{ .Name }}) Store(val {{ .Type }}) {
|
|
||||||
atomic.StorePointer(&v.ptr, unsafe.Pointer(&val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load {{ .Type }} value at address contained within v.
|
|
||||||
func (v *{{ .Name }}) Load() {{ .Type }} {
|
|
||||||
return *(*{{ .Type }})(atomic.LoadPointer(&v.ptr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) {{ .Type }} value at address contained within v.
|
|
||||||
func (v *{{ .Name }}) CAS(cmp, swp {{ .Type }}) bool {
|
|
||||||
for {
|
|
||||||
// Load current value at address
|
|
||||||
ptr := atomic.LoadPointer(&v.ptr)
|
|
||||||
cur := *(*{{ .Type }})(ptr)
|
|
||||||
|
|
||||||
// Perform comparison against current
|
|
||||||
if !({{ call .Compare "cur" "cmp" }}) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to replace pointer
|
|
||||||
if atomic.CompareAndSwapPointer(
|
|
||||||
&v.ptr,
|
|
||||||
ptr,
|
|
||||||
unsafe.Pointer(&swp),
|
|
||||||
) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new {{ .Type }} value into address contained within v, and returns previous value.
|
|
||||||
func (v *{{ .Name }}) Swap(swp {{ .Type }}) {{ .Type }} {
|
|
||||||
ptr := unsafe.Pointer(&swp)
|
|
||||||
ptr = atomic.SwapPointer(&v.ptr, ptr)
|
|
||||||
return *(*{{ .Type }})(ptr)
|
|
||||||
}
|
|
60
vendor/codeberg.org/gruf/go-atomics/atomic_test.tpl
generated
vendored
60
vendor/codeberg.org/gruf/go-atomics/atomic_test.tpl
generated
vendored
|
@ -1,60 +0,0 @@
|
||||||
package atomics_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"atomic"
|
|
||||||
"unsafe"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"codeberg.org/gruf/go-atomics"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Test{{ .Name }}StoreLoad(t *testing.T) {
|
|
||||||
for _, test := range {{ .Name }}Tests {
|
|
||||||
val := atomics.New{{ .Name }}()
|
|
||||||
|
|
||||||
val.Store(test.V1)
|
|
||||||
|
|
||||||
if !({{ call .Compare "val.Load()" "test.V1" }}) {
|
|
||||||
t.Fatalf("failed testing .Store and .Load: expect=%v actual=%v", val.Load(), test.V1)
|
|
||||||
}
|
|
||||||
|
|
||||||
val.Store(test.V2)
|
|
||||||
|
|
||||||
if !({{ call .Compare "val.Load()" "test.V2" }}) {
|
|
||||||
t.Fatalf("failed testing .Store and .Load: expect=%v actual=%v", val.Load(), test.V2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test{{ .Name }}CAS(t *testing.T) {
|
|
||||||
for _, test := range {{ .Name }}Tests {
|
|
||||||
val := atomics.New{{ .Name }}()
|
|
||||||
|
|
||||||
val.Store(test.V1)
|
|
||||||
|
|
||||||
if val.CAS(test.V2, test.V1) {
|
|
||||||
t.Fatalf("failed testing negative .CAS: test=%+v state=%v", test, val.Load())
|
|
||||||
}
|
|
||||||
|
|
||||||
if !val.CAS(test.V1, test.V2) {
|
|
||||||
t.Fatalf("failed testing positive .CAS: test=%+v state=%v", test, val.Load())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test{{ .Name }}Swap(t *testing.T) {
|
|
||||||
for _, test := range {{ .Name }}Tests {
|
|
||||||
val := atomics.New{{ .Name }}()
|
|
||||||
|
|
||||||
val.Store(test.V1)
|
|
||||||
|
|
||||||
if !({{ call .Compare "val.Swap(test.V2)" "test.V1" }}) {
|
|
||||||
t.Fatal("failed testing .Swap")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !({{ call .Compare "val.Swap(test.V1)" "test.V2" }}) {
|
|
||||||
t.Fatal("failed testing .Swap")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
47
vendor/codeberg.org/gruf/go-atomics/bool.go
generated
vendored
47
vendor/codeberg.org/gruf/go-atomics/bool.go
generated
vendored
|
@ -1,47 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import "sync/atomic"
|
|
||||||
|
|
||||||
// Bool provides user-friendly means of performing atomic operations on bool types.
|
|
||||||
type Bool uint32
|
|
||||||
|
|
||||||
// NewBool will return a new Bool instance initialized with zero value.
|
|
||||||
func NewBool() *Bool {
|
|
||||||
return new(Bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store bool value in address contained within i.
|
|
||||||
func (b *Bool) Store(val bool) {
|
|
||||||
atomic.StoreUint32((*uint32)(b), fromBool(val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load bool value at address contained within i.
|
|
||||||
func (b *Bool) Load() bool {
|
|
||||||
return toBool(atomic.LoadUint32((*uint32)(b)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) bool value at address contained within i.
|
|
||||||
func (b *Bool) CAS(cmp, swp bool) bool {
|
|
||||||
return atomic.CompareAndSwapUint32((*uint32)(b), fromBool(cmp), fromBool(swp))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new bool value into address contained within i, and returns previous value.
|
|
||||||
func (b *Bool) Swap(swp bool) bool {
|
|
||||||
return toBool(atomic.SwapUint32((*uint32)(b), fromBool(swp)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// toBool converts uint32 value to bool.
|
|
||||||
func toBool(u uint32) bool {
|
|
||||||
if u == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// fromBool converts from bool to uint32 value.
|
|
||||||
func fromBool(b bool) uint32 {
|
|
||||||
if b {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
57
vendor/codeberg.org/gruf/go-atomics/bytes.go
generated
vendored
57
vendor/codeberg.org/gruf/go-atomics/bytes.go
generated
vendored
|
@ -1,57 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Bytes provides user-friendly means of performing atomic operations on []byte types.
|
|
||||||
type Bytes struct{ ptr unsafe.Pointer }
|
|
||||||
|
|
||||||
// NewBytes will return a new Bytes instance initialized with zero value.
|
|
||||||
func NewBytes() *Bytes {
|
|
||||||
var v []byte
|
|
||||||
return &Bytes{
|
|
||||||
ptr: unsafe.Pointer(&v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store []byte value in address contained within v.
|
|
||||||
func (v *Bytes) Store(val []byte) {
|
|
||||||
atomic.StorePointer(&v.ptr, unsafe.Pointer(&val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load []byte value at address contained within v.
|
|
||||||
func (v *Bytes) Load() []byte {
|
|
||||||
return *(*[]byte)(atomic.LoadPointer(&v.ptr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) []byte value at address contained within v.
|
|
||||||
func (v *Bytes) CAS(cmp, swp []byte) bool {
|
|
||||||
for {
|
|
||||||
// Load current value at address
|
|
||||||
ptr := atomic.LoadPointer(&v.ptr)
|
|
||||||
cur := *(*[]byte)(ptr)
|
|
||||||
|
|
||||||
// Perform comparison against current
|
|
||||||
if !(string(cur) == string(cmp)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to replace pointer
|
|
||||||
if atomic.CompareAndSwapPointer(
|
|
||||||
&v.ptr,
|
|
||||||
ptr,
|
|
||||||
unsafe.Pointer(&swp),
|
|
||||||
) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new []byte value into address contained within v, and returns previous value.
|
|
||||||
func (v *Bytes) Swap(swp []byte) []byte {
|
|
||||||
ptr := unsafe.Pointer(&swp)
|
|
||||||
ptr = atomic.SwapPointer(&v.ptr, ptr)
|
|
||||||
return *(*[]byte)(ptr)
|
|
||||||
}
|
|
57
vendor/codeberg.org/gruf/go-atomics/error.go
generated
vendored
57
vendor/codeberg.org/gruf/go-atomics/error.go
generated
vendored
|
@ -1,57 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error provides user-friendly means of performing atomic operations on error types.
|
|
||||||
type Error struct{ ptr unsafe.Pointer }
|
|
||||||
|
|
||||||
// NewError will return a new Error instance initialized with zero value.
|
|
||||||
func NewError() *Error {
|
|
||||||
var v error
|
|
||||||
return &Error{
|
|
||||||
ptr: unsafe.Pointer(&v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store error value in address contained within v.
|
|
||||||
func (v *Error) Store(val error) {
|
|
||||||
atomic.StorePointer(&v.ptr, unsafe.Pointer(&val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load error value at address contained within v.
|
|
||||||
func (v *Error) Load() error {
|
|
||||||
return *(*error)(atomic.LoadPointer(&v.ptr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) error value at address contained within v.
|
|
||||||
func (v *Error) CAS(cmp, swp error) bool {
|
|
||||||
for {
|
|
||||||
// Load current value at address
|
|
||||||
ptr := atomic.LoadPointer(&v.ptr)
|
|
||||||
cur := *(*error)(ptr)
|
|
||||||
|
|
||||||
// Perform comparison against current
|
|
||||||
if !(cur == cmp) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to replace pointer
|
|
||||||
if atomic.CompareAndSwapPointer(
|
|
||||||
&v.ptr,
|
|
||||||
ptr,
|
|
||||||
unsafe.Pointer(&swp),
|
|
||||||
) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new error value into address contained within v, and returns previous value.
|
|
||||||
func (v *Error) Swap(swp error) error {
|
|
||||||
ptr := unsafe.Pointer(&swp)
|
|
||||||
ptr = atomic.SwapPointer(&v.ptr, ptr)
|
|
||||||
return *(*error)(ptr)
|
|
||||||
}
|
|
97
vendor/codeberg.org/gruf/go-atomics/flags.go
generated
vendored
97
vendor/codeberg.org/gruf/go-atomics/flags.go
generated
vendored
|
@ -1,97 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"codeberg.org/gruf/go-bitutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Flags32 provides user-friendly means of performing atomic operations on bitutil.Flags32 types.
|
|
||||||
type Flags32 bitutil.Flags32
|
|
||||||
|
|
||||||
// NewFlags32 will return a new Flags32 instance initialized with zero value.
|
|
||||||
func NewFlags32() *Flags32 {
|
|
||||||
return new(Flags32)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get will atomically load a(n) bitutil.Flags32 value contained within f, and check if bit value is set.
|
|
||||||
func (f *Flags32) Get(bit uint8) bool {
|
|
||||||
return f.Load().Get(bit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set performs a compare-and-swap for a(n) bitutil.Flags32 with bit value set, at address contained within f.
|
|
||||||
func (f *Flags32) Set(bit uint8) bool {
|
|
||||||
cur := f.Load()
|
|
||||||
return f.CAS(cur, cur.Set(bit))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unset performs a compare-and-swap for a(n) bitutil.Flags32 with bit value unset, at address contained within f.
|
|
||||||
func (f *Flags32) Unset(bit uint8) bool {
|
|
||||||
cur := f.Load()
|
|
||||||
return f.CAS(cur, cur.Unset(bit))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store bitutil.Flags32 value in address contained within f.
|
|
||||||
func (f *Flags32) Store(val bitutil.Flags32) {
|
|
||||||
atomic.StoreUint32((*uint32)(f), uint32(val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load bitutil.Flags32 value at address contained within f.
|
|
||||||
func (f *Flags32) Load() bitutil.Flags32 {
|
|
||||||
return bitutil.Flags32(atomic.LoadUint32((*uint32)(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) bitutil.Flags32 value at address contained within f.
|
|
||||||
func (f *Flags32) CAS(cmp, swp bitutil.Flags32) bool {
|
|
||||||
return atomic.CompareAndSwapUint32((*uint32)(f), uint32(cmp), uint32(swp))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new bitutil.Flags32 value into address contained within f, and returns previous value.
|
|
||||||
func (f *Flags32) Swap(swp bitutil.Flags32) bitutil.Flags32 {
|
|
||||||
return bitutil.Flags32(atomic.SwapUint32((*uint32)(f), uint32(swp)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flags64 provides user-friendly means of performing atomic operations on bitutil.Flags64 types.
|
|
||||||
type Flags64 bitutil.Flags64
|
|
||||||
|
|
||||||
// NewFlags64 will return a new Flags64 instance initialized with zero value.
|
|
||||||
func NewFlags64() *Flags64 {
|
|
||||||
return new(Flags64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get will atomically load a(n) bitutil.Flags64 value contained within f, and check if bit value is set.
|
|
||||||
func (f *Flags64) Get(bit uint8) bool {
|
|
||||||
return f.Load().Get(bit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set performs a compare-and-swap for a(n) bitutil.Flags64 with bit value set, at address contained within f.
|
|
||||||
func (f *Flags64) Set(bit uint8) bool {
|
|
||||||
cur := f.Load()
|
|
||||||
return f.CAS(cur, cur.Set(bit))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unset performs a compare-and-swap for a(n) bitutil.Flags64 with bit value unset, at address contained within f.
|
|
||||||
func (f *Flags64) Unset(bit uint8) bool {
|
|
||||||
cur := f.Load()
|
|
||||||
return f.CAS(cur, cur.Unset(bit))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store bitutil.Flags64 value in address contained within f.
|
|
||||||
func (f *Flags64) Store(val bitutil.Flags64) {
|
|
||||||
atomic.StoreUint64((*uint64)(f), uint64(val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load bitutil.Flags64 value at address contained within f.
|
|
||||||
func (f *Flags64) Load() bitutil.Flags64 {
|
|
||||||
return bitutil.Flags64(atomic.LoadUint64((*uint64)(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) bitutil.Flags64 value at address contained within f.
|
|
||||||
func (f *Flags64) CAS(cmp, swp bitutil.Flags64) bool {
|
|
||||||
return atomic.CompareAndSwapUint64((*uint64)(f), uint64(cmp), uint64(swp))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new bitutil.Flags64 value into address contained within f, and returns previous value.
|
|
||||||
func (f *Flags64) Swap(swp bitutil.Flags64) bitutil.Flags64 {
|
|
||||||
return bitutil.Flags64(atomic.SwapUint64((*uint64)(f), uint64(swp)))
|
|
||||||
}
|
|
69
vendor/codeberg.org/gruf/go-atomics/int.go
generated
vendored
69
vendor/codeberg.org/gruf/go-atomics/int.go
generated
vendored
|
@ -1,69 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import "sync/atomic"
|
|
||||||
|
|
||||||
// Int32 provides user-friendly means of performing atomic operations on int32 types.
|
|
||||||
type Int32 int32
|
|
||||||
|
|
||||||
// NewInt32 will return a new Int32 instance initialized with zero value.
|
|
||||||
func NewInt32() *Int32 {
|
|
||||||
return new(Int32)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add will atomically add int32 delta to value in address contained within i, returning new value.
|
|
||||||
func (i *Int32) Add(delta int32) int32 {
|
|
||||||
return atomic.AddInt32((*int32)(i), delta)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store int32 value in address contained within i.
|
|
||||||
func (i *Int32) Store(val int32) {
|
|
||||||
atomic.StoreInt32((*int32)(i), val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load int32 value at address contained within i.
|
|
||||||
func (i *Int32) Load() int32 {
|
|
||||||
return atomic.LoadInt32((*int32)(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) int32 value at address contained within i.
|
|
||||||
func (i *Int32) CAS(cmp, swp int32) bool {
|
|
||||||
return atomic.CompareAndSwapInt32((*int32)(i), cmp, swp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new int32 value into address contained within i, and returns previous value.
|
|
||||||
func (i *Int32) Swap(swp int32) int32 {
|
|
||||||
return atomic.SwapInt32((*int32)(i), swp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64 provides user-friendly means of performing atomic operations on int64 types.
|
|
||||||
type Int64 int64
|
|
||||||
|
|
||||||
// NewInt64 will return a new Int64 instance initialized with zero value.
|
|
||||||
func NewInt64() *Int64 {
|
|
||||||
return new(Int64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add will atomically add int64 delta to value in address contained within i, returning new value.
|
|
||||||
func (i *Int64) Add(delta int64) int64 {
|
|
||||||
return atomic.AddInt64((*int64)(i), delta)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store int64 value in address contained within i.
|
|
||||||
func (i *Int64) Store(val int64) {
|
|
||||||
atomic.StoreInt64((*int64)(i), val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load int64 value at address contained within i.
|
|
||||||
func (i *Int64) Load() int64 {
|
|
||||||
return atomic.LoadInt64((*int64)(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) int64 value at address contained within i.
|
|
||||||
func (i *Int64) CAS(cmp, swp int64) bool {
|
|
||||||
return atomic.CompareAndSwapInt64((*int64)(i), cmp, swp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new int64 value into address contained within i, and returns previous value.
|
|
||||||
func (i *Int64) Swap(swp int64) int64 {
|
|
||||||
return atomic.SwapInt64((*int64)(i), swp)
|
|
||||||
}
|
|
57
vendor/codeberg.org/gruf/go-atomics/interface.go
generated
vendored
57
vendor/codeberg.org/gruf/go-atomics/interface.go
generated
vendored
|
@ -1,57 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Interface provides user-friendly means of performing atomic operations on interface{} types.
|
|
||||||
type Interface struct{ ptr unsafe.Pointer }
|
|
||||||
|
|
||||||
// NewInterface will return a new Interface instance initialized with zero value.
|
|
||||||
func NewInterface() *Interface {
|
|
||||||
var v interface{}
|
|
||||||
return &Interface{
|
|
||||||
ptr: unsafe.Pointer(&v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store interface{} value in address contained within v.
|
|
||||||
func (v *Interface) Store(val interface{}) {
|
|
||||||
atomic.StorePointer(&v.ptr, unsafe.Pointer(&val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load interface{} value at address contained within v.
|
|
||||||
func (v *Interface) Load() interface{} {
|
|
||||||
return *(*interface{})(atomic.LoadPointer(&v.ptr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) interface{} value at address contained within v.
|
|
||||||
func (v *Interface) CAS(cmp, swp interface{}) bool {
|
|
||||||
for {
|
|
||||||
// Load current value at address
|
|
||||||
ptr := atomic.LoadPointer(&v.ptr)
|
|
||||||
cur := *(*interface{})(ptr)
|
|
||||||
|
|
||||||
// Perform comparison against current
|
|
||||||
if !(cur == cmp) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to replace pointer
|
|
||||||
if atomic.CompareAndSwapPointer(
|
|
||||||
&v.ptr,
|
|
||||||
ptr,
|
|
||||||
unsafe.Pointer(&swp),
|
|
||||||
) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new interface{} value into address contained within v, and returns previous value.
|
|
||||||
func (v *Interface) Swap(swp interface{}) interface{} {
|
|
||||||
ptr := unsafe.Pointer(&swp)
|
|
||||||
ptr = atomic.SwapPointer(&v.ptr, ptr)
|
|
||||||
return *(*interface{})(ptr)
|
|
||||||
}
|
|
58
vendor/codeberg.org/gruf/go-atomics/state.go
generated
vendored
58
vendor/codeberg.org/gruf/go-atomics/state.go
generated
vendored
|
@ -1,58 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
// State provides user-friendly means of performing atomic-like
|
|
||||||
// operations on a uint32 state, and allowing callbacks on successful
|
|
||||||
// state change. This is a bit of a misnomer being where it is, as it
|
|
||||||
// actually uses a mutex under-the-hood.
|
|
||||||
type State struct {
|
|
||||||
mutex sync.Mutex
|
|
||||||
state uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will update State value safely within mutex lock.
|
|
||||||
func (st *State) Store(val uint32) {
|
|
||||||
st.mutex.Lock()
|
|
||||||
st.state = val
|
|
||||||
st.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will get value of State safely within mutex lock.
|
|
||||||
func (st *State) Load() uint32 {
|
|
||||||
st.mutex.Lock()
|
|
||||||
state := st.state
|
|
||||||
st.mutex.Unlock()
|
|
||||||
return state
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLock performs fn within State mutex lock, useful if you want
|
|
||||||
// to just use State's mutex for locking instead of creating another.
|
|
||||||
func (st *State) WithLock(fn func()) {
|
|
||||||
st.mutex.Lock()
|
|
||||||
defer st.mutex.Unlock()
|
|
||||||
fn()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update performs fn within State mutex lock, with the current state
|
|
||||||
// value provided as an argument, and return value used to update state.
|
|
||||||
func (st *State) Update(fn func(state uint32) uint32) {
|
|
||||||
st.mutex.Lock()
|
|
||||||
defer st.mutex.Unlock()
|
|
||||||
st.state = fn(st.state)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap on State, calling fn on success. Success value is also returned.
|
|
||||||
func (st *State) CAS(cmp, swp uint32, fn func()) (ok bool) {
|
|
||||||
// Acquire lock
|
|
||||||
st.mutex.Lock()
|
|
||||||
defer st.mutex.Unlock()
|
|
||||||
|
|
||||||
// Perform CAS operation, fn() on success
|
|
||||||
if ok = (st.state == cmp); ok {
|
|
||||||
st.state = swp
|
|
||||||
fn()
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
57
vendor/codeberg.org/gruf/go-atomics/string.go
generated
vendored
57
vendor/codeberg.org/gruf/go-atomics/string.go
generated
vendored
|
@ -1,57 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// String provides user-friendly means of performing atomic operations on string types.
|
|
||||||
type String struct{ ptr unsafe.Pointer }
|
|
||||||
|
|
||||||
// NewString will return a new String instance initialized with zero value.
|
|
||||||
func NewString() *String {
|
|
||||||
var v string
|
|
||||||
return &String{
|
|
||||||
ptr: unsafe.Pointer(&v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store string value in address contained within v.
|
|
||||||
func (v *String) Store(val string) {
|
|
||||||
atomic.StorePointer(&v.ptr, unsafe.Pointer(&val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load string value at address contained within v.
|
|
||||||
func (v *String) Load() string {
|
|
||||||
return *(*string)(atomic.LoadPointer(&v.ptr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) string value at address contained within v.
|
|
||||||
func (v *String) CAS(cmp, swp string) bool {
|
|
||||||
for {
|
|
||||||
// Load current value at address
|
|
||||||
ptr := atomic.LoadPointer(&v.ptr)
|
|
||||||
cur := *(*string)(ptr)
|
|
||||||
|
|
||||||
// Perform comparison against current
|
|
||||||
if !(cur == cmp) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to replace pointer
|
|
||||||
if atomic.CompareAndSwapPointer(
|
|
||||||
&v.ptr,
|
|
||||||
ptr,
|
|
||||||
unsafe.Pointer(&swp),
|
|
||||||
) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new string value into address contained within v, and returns previous value.
|
|
||||||
func (v *String) Swap(swp string) string {
|
|
||||||
ptr := unsafe.Pointer(&swp)
|
|
||||||
ptr = atomic.SwapPointer(&v.ptr, ptr)
|
|
||||||
return *(*string)(ptr)
|
|
||||||
}
|
|
58
vendor/codeberg.org/gruf/go-atomics/time.go
generated
vendored
58
vendor/codeberg.org/gruf/go-atomics/time.go
generated
vendored
|
@ -1,58 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Time provides user-friendly means of performing atomic operations on time.Time types.
|
|
||||||
type Time struct{ ptr unsafe.Pointer }
|
|
||||||
|
|
||||||
// NewTime will return a new Time instance initialized with zero value.
|
|
||||||
func NewTime() *Time {
|
|
||||||
var v time.Time
|
|
||||||
return &Time{
|
|
||||||
ptr: unsafe.Pointer(&v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store time.Time value in address contained within v.
|
|
||||||
func (v *Time) Store(val time.Time) {
|
|
||||||
atomic.StorePointer(&v.ptr, unsafe.Pointer(&val))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load time.Time value at address contained within v.
|
|
||||||
func (v *Time) Load() time.Time {
|
|
||||||
return *(*time.Time)(atomic.LoadPointer(&v.ptr))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) time.Time value at address contained within v.
|
|
||||||
func (v *Time) CAS(cmp, swp time.Time) bool {
|
|
||||||
for {
|
|
||||||
// Load current value at address
|
|
||||||
ptr := atomic.LoadPointer(&v.ptr)
|
|
||||||
cur := *(*time.Time)(ptr)
|
|
||||||
|
|
||||||
// Perform comparison against current
|
|
||||||
if !(cur.Equal(cmp)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to replace pointer
|
|
||||||
if atomic.CompareAndSwapPointer(
|
|
||||||
&v.ptr,
|
|
||||||
ptr,
|
|
||||||
unsafe.Pointer(&swp),
|
|
||||||
) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new time.Time value into address contained within v, and returns previous value.
|
|
||||||
func (v *Time) Swap(swp time.Time) time.Time {
|
|
||||||
ptr := unsafe.Pointer(&swp)
|
|
||||||
ptr = atomic.SwapPointer(&v.ptr, ptr)
|
|
||||||
return *(*time.Time)(ptr)
|
|
||||||
}
|
|
69
vendor/codeberg.org/gruf/go-atomics/uint.go
generated
vendored
69
vendor/codeberg.org/gruf/go-atomics/uint.go
generated
vendored
|
@ -1,69 +0,0 @@
|
||||||
package atomics
|
|
||||||
|
|
||||||
import "sync/atomic"
|
|
||||||
|
|
||||||
// Uint32 provides user-friendly means of performing atomic operations on uint32 types.
|
|
||||||
type Uint32 uint32
|
|
||||||
|
|
||||||
// NewUint32 will return a new Uint32 instance initialized with zero value.
|
|
||||||
func NewUint32() *Uint32 {
|
|
||||||
return new(Uint32)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add will atomically add uint32 delta to value in address contained within i, returning new value.
|
|
||||||
func (u *Uint32) Add(delta uint32) uint32 {
|
|
||||||
return atomic.AddUint32((*uint32)(u), delta)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store uint32 value in address contained within i.
|
|
||||||
func (u *Uint32) Store(val uint32) {
|
|
||||||
atomic.StoreUint32((*uint32)(u), val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load uint32 value at address contained within i.
|
|
||||||
func (u *Uint32) Load() uint32 {
|
|
||||||
return atomic.LoadUint32((*uint32)(u))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) uint32 value at address contained within i.
|
|
||||||
func (u *Uint32) CAS(cmp, swp uint32) bool {
|
|
||||||
return atomic.CompareAndSwapUint32((*uint32)(u), cmp, swp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new uint32 value into address contained within i, and returns previous value.
|
|
||||||
func (u *Uint32) Swap(swp uint32) uint32 {
|
|
||||||
return atomic.SwapUint32((*uint32)(u), swp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64 provides user-friendly means of performing atomic operations on uint64 types.
|
|
||||||
type Uint64 uint64
|
|
||||||
|
|
||||||
// NewUint64 will return a new Uint64 instance initialized with zero value.
|
|
||||||
func NewUint64() *Uint64 {
|
|
||||||
return new(Uint64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add will atomically add uint64 delta to value in address contained within i, returning new value.
|
|
||||||
func (u *Uint64) Add(delta uint64) uint64 {
|
|
||||||
return atomic.AddUint64((*uint64)(u), delta)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store will atomically store uint64 value in address contained within i.
|
|
||||||
func (u *Uint64) Store(val uint64) {
|
|
||||||
atomic.StoreUint64((*uint64)(u), val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load will atomically load uint64 value at address contained within i.
|
|
||||||
func (u *Uint64) Load() uint64 {
|
|
||||||
return atomic.LoadUint64((*uint64)(u))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAS performs a compare-and-swap for a(n) uint64 value at address contained within i.
|
|
||||||
func (u *Uint64) CAS(cmp, swp uint64) bool {
|
|
||||||
return atomic.CompareAndSwapUint64((*uint64)(u), cmp, swp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap atomically stores new uint64 value into address contained within i, and returns previous value.
|
|
||||||
func (u *Uint64) Swap(swp uint64) uint64 {
|
|
||||||
return atomic.SwapUint64((*uint64)(u), swp)
|
|
||||||
}
|
|
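A side note on the removal above, not part of the diff itself: since Go 1.19 the standard library's sync/atomic provides typed and generic atomics (atomic.Pointer[T], atomic.Int64, and so on) that cover most of what these wrappers offered, which is presumably why the vendored copy could be dropped; the go-sched change further down switches to sync/atomic primitives directly. A minimal sketch, assuming Go 1.19+:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var next atomic.Pointer[time.Time]

	t := time.Now().Add(time.Minute)
	next.Store(&t) // atomically publish a *time.Time

	if p := next.Load(); p != nil {
		fmt.Println("next run:", p.Format(time.StampMicro))
	}

	var counter atomic.Int64
	counter.Add(1)
	fmt.Println("count:", counter.Load())
}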
The following generated, vendored files of the go-bitutil dependency are deleted in full:

9	vendor/codeberg.org/gruf/go-bitutil/LICENSE: @@ -1,9 +0,0 @@ (MIT License, Copyright (c) 2022 gruf)
3	vendor/codeberg.org/gruf/go-bitutil/README.md: @@ -1,3 +0,0 @@ (bitwise helpers: packing/unpacking, bit flags)
29	vendor/codeberg.org/gruf/go-bitutil/abs.go: @@ -1,29 +0,0 @@ (branchless Abs8/Abs16/Abs32/Abs64)
3744	vendor/codeberg.org/gruf/go-bitutil/flag.go: file diff suppressed because it is too large (generated bit-flag helpers, see flag.tpl)
117	vendor/codeberg.org/gruf/go-bitutil/flag.tpl: @@ -1,117 +0,0 @@ (template for Flags{N} Get/Set/Unset plus String/GoString)
98	vendor/codeberg.org/gruf/go-bitutil/flag_test.tpl: @@ -1,98 +0,0 @@ (template for the Flags{N} Get/Set/Unset tests)
85	vendor/codeberg.org/gruf/go-bitutil/pack.go: @@ -1,85 +0,0 @@ (Pack/Unpack of two N-bit integers into one 2N-bit unsigned integer)
60	vendor/codeberg.org/gruf/go-bitutil/test.tpl: @@ -1,60 +0,0 @@ (duplicate of the go-atomics test template)
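For reference, a minimal, self-contained sketch of the packing trick implemented by the removed pack.go (two signed 32-bit values stored in a single uint64 and recovered again); the function names mirror the originals, lowercased for this standalone example:

package main

import "fmt"

// packInt32s packs two signed 32-bit integers into one unsigned 64-bit integer.
func packInt32s(i1, i2 int32) uint64 {
	const bits = 32
	const mask = (1 << bits) - 1
	return uint64(i1)<<bits | uint64(i2)&mask
}

// unpackInt32s recovers the two signed 32-bit integers.
func unpackInt32s(u uint64) (int32, int32) {
	const bits = 32
	const mask = (1 << bits) - 1
	return int32(u >> bits), int32(u & mask)
}

func main() {
	u := packInt32s(-7, 42)
	a, b := unpackInt32s(u)
	fmt.Println(a, b) // -7 42
}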
42	vendor/codeberg.org/gruf/go-byteutil/bytes.go (generated, vendored)

@@ -1,7 +1,6 @@
 package byteutil

 import (
-	"reflect"
 	"unsafe"
 )

@@ -16,44 +15,13 @@ func Copy(b []byte) []byte {
 }

 // B2S returns a string representation of []byte without allocation.
 //
-// According to the Go spec strings are immutable and byte slices are not. The way this gets implemented is strings under the hood are:
-//
-//	type StringHeader struct {
-//		Data uintptr
-//		Len  int
-//	}
-//
-// while slices are:
-//
-//	type SliceHeader struct {
-//		Data uintptr
-//		Len  int
-//		Cap  int
-//	}
-//
-// because being mutable, you can change the data, length etc, but the string has to promise to be read-only to all who get copies of it.
-//
-// So in practice when you do a conversion of `string(byteSlice)` it actually performs an allocation because it has to copy the contents of the byte slice into a safe read-only state.
-//
-// Being that the shared fields are in the same struct indices (no different offsets), means that if you have a byte slice you can "forcibly" cast it to a string. Which in a lot of situations can be risky, because then it means you have a string that is NOT immutable, as if someone changes the data in the originating byte slice then the string will reflect that change! Now while this does seem hacky, and it _kind_ of is, it is something that you see performed in the standard library. If you look at the definition for `strings.Builder{}.String()` you'll see this :)
+// Since Go strings are immutable, the bytes passed to String must
+// not be modified as long as the returned string value exists.
 func B2S(b []byte) string {
-	return *(*string)(unsafe.Pointer(&b))
+	return unsafe.String(unsafe.SliceData(b), len(b))
 }

-// S2B returns a []byte representation of string without allocation (minus slice header).
-// See B2S() code comment, and this function's implementation for a better understanding.
+// S2B returns a []byte representation of string without allocation.
 func S2B(s string) []byte {
-	var b []byte
-
-	// Get byte + string headers
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
-
-	// Manually set bytes to string
-	bh.Data = sh.Data
-	bh.Len = sh.Len
-	bh.Cap = sh.Len
-
-	return b
+	return unsafe.Slice(unsafe.StringData(s), len(s))
 }
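A short aside on the rewrite above (an illustration, not part of the diff): unsafe.String/unsafe.SliceData and unsafe.Slice/unsafe.StringData (Go 1.20+) replace the old reflect.SliceHeader casting trick. The same aliasing caveat applies: the bytes must not be modified while the converted string is in use, and bytes obtained from a string must be treated as read-only.

package main

import (
	"fmt"
	"unsafe"
)

func bytesToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

func stringToBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := []byte("hello")
	s := bytesToString(b) // no allocation; s aliases b's backing array
	fmt.Println(s)

	rb := stringToBytes("world") // read-only: writing to rb is undefined behaviour
	fmt.Println(len(rb))
}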
36	vendor/codeberg.org/gruf/go-byteutil/reader.go (generated, vendored)

@@ -1,36 +0,0 @@ entire file deleted (Reader wrapping bytes.Reader with Reset/Rewind, plus ReadNopCloser with a no-op Close)
32	vendor/codeberg.org/gruf/go-sched/job.go (generated, vendored)

@@ -4,9 +4,9 @@
 	"reflect"
 	"strconv"
 	"strings"
+	"sync/atomic"
 	"time"
-
-	"codeberg.org/gruf/go-atomics"
+	"unsafe"
 )

 // Job encapsulates logic for a scheduled job to be run according
@@ -14,7 +14,7 @@
 // holding onto a next execution time safely in a concurrent environment.
 type Job struct {
 	id     uint64
-	next   atomics.Time
+	next   unsafe.Pointer // *time.Time
 	timing Timing
 	call   func(time.Time)
 	panic  func(interface{})
@@ -33,9 +33,6 @@ func NewJob(fn func(now time.Time)) *Job {
 		panic: func(i interface{}) { panic(i) },
 	}

-	// Init next time ptr
-	j.next.Store(zerotime)
-
 	return j
 }

@@ -99,14 +96,20 @@ func (job *Job) OnPanic(fn func(interface{})) *Job {

 // Next returns the next time this Job is expected to run.
 func (job *Job) Next() time.Time {
-	return job.next.Load()
+	return loadTime(&job.next)
 }

 // Run will execute this Job and pass through given now time.
 func (job *Job) Run(now time.Time) {
 	defer func() {
-		if r := recover(); r != nil {
+		switch r := recover(); {
+		case r == nil:
+			// no panic
+		case job != nil &&
+			job.panic != nil:
 			job.panic(r)
+		default:
+			panic(r)
 		}
 	}()
 	job.call(now)
@@ -120,10 +123,21 @@ func (job *Job) String() string {
 	buf.WriteString(strconv.FormatUint(job.id, 10))
 	buf.WriteByte(' ')
 	buf.WriteString("next=")
-	buf.WriteString(job.next.Load().Format(time.StampMicro))
+	buf.WriteString(loadTime(&job.next).Format(time.StampMicro))
 	buf.WriteByte(' ')
 	buf.WriteString("timing=")
 	buf.WriteString(reflect.TypeOf(job.timing).String())
 	buf.WriteByte('}')
 	return buf.String()
 }
+
+func loadTime(p *unsafe.Pointer) time.Time {
+	if p := atomic.LoadPointer(p); p != nil {
+		return *(*time.Time)(p)
+	}
+	return zerotime
+}
+
+func storeTime(p *unsafe.Pointer, t time.Time) {
+	atomic.StorePointer(p, unsafe.Pointer(&t))
+}
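The reworked recover block in Run() above now re-raises a panic when no handler applies instead of swallowing it. For illustration only (names are hypothetical, this is not go-sched code), a minimal standalone sketch of that pattern:

package main

import "fmt"

// run executes call, handing any panic to onPanic when one is set,
// and re-panicking otherwise so the failure is not silently lost.
func run(call func(), onPanic func(interface{})) {
	defer func() {
		switch r := recover(); {
		case r == nil:
			// no panic
		case onPanic != nil:
			onPanic(r)
		default:
			panic(r)
		}
	}()
	call()
}

func main() {
	run(func() { panic("boom") }, func(r interface{}) {
		fmt.Println("recovered:", r)
	})
	fmt.Println("still running")
}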
4	vendor/codeberg.org/gruf/go-sched/scheduler.go (generated, vendored)

@@ -225,7 +225,7 @@ func (sch *Scheduler) handle(v interface{}) {

 		// Update the next call time
 		next := v.timing.Next(now)
-		v.next.Store(next)
+		storeTime(&v.next, next)

 		// Append this job to queued
 		sch.jobs = append(sch.jobs, v)
@@ -261,7 +261,7 @@ func (sch *Scheduler) schedule(now time.Time) {

 		// Update the next call time
 		next := job.timing.Next(now)
-		job.next.Store(next)
+		storeTime(&job.next, next)

 		if next.IsZero() {
 			// Zero time, this job is done and can be dropped

vendor/codeberg.org/gruf/go-storage/s3/errors.go (generated, vendored): 26 changes
@@ -3,35 +3,9 @@
 import (
 	"strings"

-	"codeberg.org/gruf/go-storage"
-	"codeberg.org/gruf/go-storage/internal"
 	"github.com/minio/minio-go/v7"
 )

-// transformS3Error transforms an error returned from S3Storage underlying
-// minio.Core client, by wrapping where necessary with our own error types.
-func transformS3Error(err error) error {
-	// Cast this to a minio error response
-	ersp, ok := err.(minio.ErrorResponse)
-	if ok {
-		switch ersp.Code {
-		case "NoSuchKey":
-			return internal.WrapErr(err, storage.ErrNotFound)
-		case "Conflict":
-			return internal.WrapErr(err, storage.ErrAlreadyExists)
-		default:
-			return err
-		}
-	}
-
-	// Check if error has an invalid object name prefix
-	if strings.HasPrefix(err.Error(), "Object name ") {
-		return internal.WrapErr(err, storage.ErrInvalidKey)
-	}
-
-	return err
-}
-
 func isNotFoundError(err error) bool {
 	errRsp, ok := err.(minio.ErrorResponse)
 	return ok && errRsp.Code == "NoSuchKey"
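
With transformS3Error gone, errors.go keeps only small predicates such as isNotFoundError, and the s3.go hunks below also call isConflictError and isObjectNameError, which are not shown in this view. The sketch below is a guess at their shape, inferred from the "Conflict" code and the "Object name " prefix handled by the removed function; it is not copied from the upstream file.

package s3

import (
	"strings"

	"github.com/minio/minio-go/v7"
)

// Hedged sketch only: plausible shapes for the predicates referenced by s3.go,
// mirroring the checks the removed transformS3Error used to perform.
func isConflictError(err error) bool {
	ersp, ok := err.(minio.ErrorResponse)
	return ok && ersp.Code == "Conflict"
}

func isObjectNameError(err error) bool {
	return strings.HasPrefix(err.Error(), "Object name ")
}
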

vendor/codeberg.org/gruf/go-storage/s3/s3.go (generated, vendored): 167 changes
@@ -5,6 +5,7 @@
 	"context"
 	"errors"
 	"io"
+	"net/http"

 	"codeberg.org/gruf/go-storage"
 	"codeberg.org/gruf/go-storage/internal"
@@ -34,12 +35,7 @@ func DefaultConfig() Config {
 // immutable default configuration.
 var defaultConfig = Config{
 	CoreOpts:     minio.Options{},
-	GetOpts:      minio.GetObjectOptions{},
-	PutOpts:      minio.PutObjectOptions{},
-	PutChunkOpts: minio.PutObjectPartOptions{},
 	PutChunkSize: 4 * 1024 * 1024, // 4MiB
-	StatOpts:     minio.StatObjectOptions{},
-	RemoveOpts:   minio.RemoveObjectOptions{},
 	ListSize:     200,
 }

@@ -50,31 +46,11 @@ type Config struct {
 	// passed during initialization.
 	CoreOpts minio.Options

-	// GetOpts are S3 client options
-	// passed during .Read___() calls.
-	GetOpts minio.GetObjectOptions
-
-	// PutOpts are S3 client options
-	// passed during .Write___() calls.
-	PutOpts minio.PutObjectOptions
-
 	// PutChunkSize is the chunk size (in bytes)
 	// to use when sending a byte stream reader
 	// of unknown size as a multi-part object.
 	PutChunkSize int64

-	// PutChunkOpts are S3 client options
-	// passed during chunked .Write___() calls.
-	PutChunkOpts minio.PutObjectPartOptions
-
-	// StatOpts are S3 client options
-	// passed during .Stat() calls.
-	StatOpts minio.StatObjectOptions
-
-	// RemoveOpts are S3 client options
-	// passed during .Remove() calls.
-	RemoveOpts minio.RemoveObjectOptions
-
 	// ListSize determines how many items
 	// to include in each list request, made
 	// during calls to .WalkKeys().
@@ -103,12 +79,8 @@ func getS3Config(cfg *Config) Config {

 	return Config{
 		CoreOpts:     cfg.CoreOpts,
-		GetOpts:      cfg.GetOpts,
-		PutOpts:      cfg.PutOpts,
 		PutChunkSize: cfg.PutChunkSize,
 		ListSize:     cfg.ListSize,
-		StatOpts:     cfg.StatOpts,
-		RemoveOpts:   cfg.RemoveOpts,
 	}
 }

@@ -183,36 +155,50 @@ func (st *S3Storage) ReadBytes(ctx context.Context, key string) ([]byte, error)

 // ReadStream: implements Storage.ReadStream().
 func (st *S3Storage) ReadStream(ctx context.Context, key string) (io.ReadCloser, error) {
-	// Fetch object reader from S3 bucket
-	rc, _, _, err := st.client.GetObject(
+	rc, _, _, err := st.GetObject(ctx, key, minio.GetObjectOptions{})
+	return rc, err
+}
+
+// GetObject wraps minio.Core{}.GetObject() to handle wrapping with our own storage library error types.
+func (st *S3Storage) GetObject(ctx context.Context, key string, opts minio.GetObjectOptions) (io.ReadCloser, minio.ObjectInfo, http.Header, error) {
+
+	// Query bucket for object data and info.
+	rc, info, hdr, err := st.client.GetObject(
 		ctx,
 		st.bucket,
 		key,
-		st.config.GetOpts,
+		opts,
 	)
 	if err != nil {

 		if isNotFoundError(err) {
 			// Wrap not found errors as our not found type.
 			err = internal.WrapErr(err, storage.ErrNotFound)
-		} else if !isObjectNameError(err) {
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}

-		return nil, transformS3Error(err)
 	}
-	return rc, nil
+
+	return rc, info, hdr, err
 }

 // WriteBytes: implements Storage.WriteBytes().
 func (st *S3Storage) WriteBytes(ctx context.Context, key string, value []byte) (int, error) {
-	n, err := st.WriteStream(ctx, key, bytes.NewReader(value))
-	return int(n), err
+	info, err := st.PutObject(ctx, key, bytes.NewReader(value), minio.PutObjectOptions{})
+	return int(info.Size), err
 }

 // WriteStream: implements Storage.WriteStream().
 func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (int64, error) {
+	info, err := st.PutObject(ctx, key, r, minio.PutObjectOptions{})
+	return info.Size, err
+}
+
+// PutObject wraps minio.Core{}.PutObject() to handle wrapping with our own storage library error types, and in the case of an io.Reader
+// that does not implement ReaderSize{}, it will instead handle upload by using minio.Core{}.NewMultipartUpload() in chunks of PutChunkSize.
+func (st *S3Storage) PutObject(ctx context.Context, key string, r io.Reader, opts minio.PutObjectOptions) (minio.UploadInfo, error) {
 	if rs, ok := r.(ReaderSize); ok {
 		// This reader supports providing us the size of
 		// the encompassed data, allowing us to perform
@@ -225,22 +211,21 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (
 			rs.Size(),
 			"",
 			"",
-			st.config.PutOpts,
+			opts,
 		)
 		if err != nil {

 			if isConflictError(err) {
 				// Wrap conflict errors as our already exists type.
 				err = internal.WrapErr(err, storage.ErrAlreadyExists)
-			} else if !isObjectNameError(err) {
+			} else if isObjectNameError(err) {
 				// Wrap object name errors as our invalid key type.
 				err = internal.WrapErr(err, storage.ErrInvalidKey)
 			}

-			return 0, err
 		}

-		return info.Size, nil
+		return info, err
 	}

 	// Start a new multipart upload to get ID.
@@ -248,24 +233,24 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (
 		ctx,
 		st.bucket,
 		key,
-		st.config.PutOpts,
+		opts,
 	)
 	if err != nil {

 		if isConflictError(err) {
 			// Wrap conflict errors as our already exists type.
 			err = internal.WrapErr(err, storage.ErrAlreadyExists)
-		} else if !isObjectNameError(err) {
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}

-		return 0, transformS3Error(err)
+		return minio.UploadInfo{}, err
 	}

 	var (
-		index = int(1) // parts index
 		total = int64(0)
+		index = int(1) // parts index
 		parts []minio.CompletePart
 		chunk = make([]byte, st.config.PutChunkSize)
 		rbuf  = bytes.NewReader(nil)
@@ -296,7 +281,7 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (

 		// All other errors.
 		default:
-			return 0, err
+			return minio.UploadInfo{}, err
 		}

 		// Reset byte reader.
@@ -311,10 +296,13 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (
 			index,
 			rbuf,
 			int64(n),
-			st.config.PutChunkOpts,
+			minio.PutObjectPartOptions{
+				SSE:                  opts.ServerSideEncryption,
+				DisableContentSha256: opts.DisableContentSha256,
+			},
 		)
 		if err != nil {
-			return 0, err
+			return minio.UploadInfo{}, err
 		}

 		// Append completed part to slice.
@@ -327,101 +315,104 @@ func (st *S3Storage) WriteStream(ctx context.Context, key string, r io.Reader) (
 			ChecksumSHA256: pt.ChecksumSHA256,
 		})

+		// Update total.
+		total += int64(n)
+
 		// Iterate.
 		index++
-
-		// Update total size.
-		total += pt.Size
 	}

 	// Complete this multi-part upload operation
-	_, err = st.client.CompleteMultipartUpload(
+	info, err := st.client.CompleteMultipartUpload(
 		ctx,
 		st.bucket,
 		key,
 		uploadID,
 		parts,
-		st.config.PutOpts,
+		opts,
 	)
 	if err != nil {
-		return 0, err
+		return minio.UploadInfo{}, err
 	}

-	return total, nil
+	// Set correct size.
+	info.Size = total
+	return info, nil
 }

 // Stat: implements Storage.Stat().
 func (st *S3Storage) Stat(ctx context.Context, key string) (*storage.Entry, error) {
-	// Query object in S3 bucket.
-	stat, err := st.client.StatObject(
+	info, err := st.StatObject(ctx, key, minio.StatObjectOptions{})
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			err = nil // mask not-found errors
+		}
+		return nil, err
+	}
+	return &storage.Entry{
+		Key:  key,
+		Size: info.Size,
+	}, nil
+}
+
+// StatObject wraps minio.Core{}.StatObject() to handle wrapping with our own storage library error types.
+func (st *S3Storage) StatObject(ctx context.Context, key string, opts minio.StatObjectOptions) (minio.ObjectInfo, error) {
+
+	// Query bucket for object info.
+	info, err := st.client.StatObject(
 		ctx,
 		st.bucket,
 		key,
-		st.config.StatOpts,
+		opts,
 	)
 	if err != nil {

 		if isNotFoundError(err) {
-			// Ignore err return
-			// for not-found.
-			err = nil
-		} else if !isObjectNameError(err) {
+			// Wrap not found errors as our not found type.
+			err = internal.WrapErr(err, storage.ErrNotFound)
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}

-		return nil, err
 	}

-	return &storage.Entry{
-		Key:  key,
-		Size: stat.Size,
-	}, nil
+	return info, err
 }

 // Remove: implements Storage.Remove().
 func (st *S3Storage) Remove(ctx context.Context, key string) error {
-	// Query object in S3 bucket.
-	_, err := st.client.StatObject(
-		ctx,
-		st.bucket,
-		key,
-		st.config.StatOpts,
-	)
+	_, err := st.StatObject(ctx, key, minio.StatObjectOptions{})
 	if err != nil {
-
-		if isNotFoundError(err) {
-			// Wrap not found errors as our not found type.
-			err = internal.WrapErr(err, storage.ErrNotFound)
-		} else if !isObjectNameError(err) {
-			// Wrap object name errors as our invalid key type.
-			err = internal.WrapErr(err, storage.ErrInvalidKey)
-		}
-
 		return err
 	}
+	return st.RemoveObject(ctx, key, minio.RemoveObjectOptions{})
+}
+
+// RemoveObject wraps minio.Core{}.RemoveObject() to handle wrapping with our own storage library error types.
+func (st *S3Storage) RemoveObject(ctx context.Context, key string, opts minio.RemoveObjectOptions) error {

 	// Remove object from S3 bucket
-	err = st.client.RemoveObject(
+	err := st.client.RemoveObject(
 		ctx,
 		st.bucket,
 		key,
-		st.config.RemoveOpts,
+		opts,
 	)
 	if err != nil {

 		if isNotFoundError(err) {
 			// Wrap not found errors as our not found type.
 			err = internal.WrapErr(err, storage.ErrNotFound)
-		} else if !isObjectNameError(err) {
+		} else if isObjectNameError(err) {
 			// Wrap object name errors as our invalid key type.
 			err = internal.WrapErr(err, storage.ErrInvalidKey)
 		}

-		return err
 	}

-	return nil
+	return err
 }

 // WalkKeys: implements Storage.WalkKeys().
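
Taken together, the s3.go changes drop the per-operation option fields from Config and instead let callers pass minio options on each call through the newly exported GetObject, PutObject, StatObject and RemoveObject wrappers. A rough caller-side sketch follows, assuming an already opened *s3.S3Storage; the key, content type and payload are placeholders, and constructing the storage (via the package's Open function) is elided.

package storageexample

import (
	"bytes"
	"context"
	"fmt"

	"codeberg.org/gruf/go-storage/s3"
	"github.com/minio/minio-go/v7"
)

// putStatRemove shows per-call minio options, which previously lived in s3.Config.
func putStatRemove(ctx context.Context, st *s3.S3Storage) error {
	data := []byte("hello world")

	// Write with caller-chosen put options (was Config.PutOpts).
	info, err := st.PutObject(ctx, "example/key.txt", bytes.NewReader(data), minio.PutObjectOptions{
		ContentType: "text/plain",
	})
	if err != nil {
		return err
	}
	fmt.Println("uploaded bytes:", info.Size)

	// Stat with per-call stat options (was Config.StatOpts).
	objInfo, err := st.StatObject(ctx, "example/key.txt", minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	fmt.Println("stored size:", objInfo.Size)

	// Remove with per-call remove options (was Config.RemoveOpts).
	return st.RemoveObject(ctx, "example/key.txt", minio.RemoveObjectOptions{})
}
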

vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go (generated, vendored): 49 changes
@@ -108,7 +108,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 	if err != nil {
 		return UploadInfo{}, err
 	}
+	if opts.Checksum.IsSet() {
+		opts.AutoChecksum = opts.Checksum
+	}
 	withChecksum := c.trailingHeaderSupport
 	if withChecksum {
 		if opts.UserMetadata == nil {
@@ -304,6 +306,11 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 		return UploadInfo{}, err
 	}

+	if opts.Checksum.IsSet() {
+		opts.AutoChecksum = opts.Checksum
+		opts.SendContentMd5 = false
+	}
+
 	if !opts.SendContentMd5 {
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
@@ -463,7 +470,10 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 	if err = s3utils.CheckValidObjectName(objectName); err != nil {
 		return UploadInfo{}, err
 	}
+	if opts.Checksum.IsSet() {
+		opts.SendContentMd5 = false
+		opts.AutoChecksum = opts.Checksum
+	}
 	if !opts.SendContentMd5 {
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
@@ -555,7 +565,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
 		// Calculate md5sum.
 		customHeader := make(http.Header)
 		if !opts.SendContentMd5 {
-			// Add CRC32C instead.
+			// Add Checksum instead.
 			crc.Reset()
 			crc.Write(buf[:length])
 			cSum := crc.Sum(nil)
@@ -677,6 +687,9 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r
 	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
 		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
 	}
+	if opts.Checksum.IsSet() {
+		opts.SendContentMd5 = false
+	}

 	var readSeeker io.Seeker
 	if size > 0 {
@@ -746,17 +759,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 	// Set headers.
 	customHeader := opts.Header()

-	// Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
-	addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
-
-	if addCrc {
-		// If user has added checksums, don't add them ourselves.
-		for k := range opts.UserMetadata {
-			if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
-				addCrc = false
-			}
-		}
-	}
 	// Populate request metadata.
 	reqMetadata := requestMetadata{
 		bucketName: bucketName,
@@ -768,10 +770,23 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
 		contentSHA256Hex: sha256Hex,
 		streamSha256:     !opts.DisableContentSha256,
 	}
-	if addCrc {
-		opts.AutoChecksum.SetDefault(ChecksumCRC32C)
-		reqMetadata.addCrc = &opts.AutoChecksum
+	// Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
+	addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
+	if opts.Checksum.IsSet() {
+		reqMetadata.addCrc = &opts.Checksum
+	} else if addCrc {
+		// If user has added checksums, don't add them ourselves.
+		for k := range opts.UserMetadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
+				addCrc = false
+			}
+		}
+		if addCrc {
+			opts.AutoChecksum.SetDefault(ChecksumCRC32C)
+			reqMetadata.addCrc = &opts.AutoChecksum
+		}
 	}

 	if opts.Internal.SourceVersionID != "" {
 		if opts.Internal.SourceVersionID != nullVersionID {
 			if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {

vendor/github.com/minio/minio-go/v7/api-put-object.go (generated, vendored): 28 changes
@@ -94,6 +94,13 @@ type PutObjectOptions struct {
 	// If none is specified CRC32C is used, since it is generally the fastest.
 	AutoChecksum ChecksumType

+	// Checksum will force a checksum of the specific type.
+	// This requires that the client was created with "TrailingHeaders:true" option,
+	// and that the destination server supports it.
+	// Unavailable with V2 signatures & Google endpoints.
+	// This will disable content MD5 checksums if set.
+	Checksum ChecksumType
+
 	// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
 	// fill them serially and upload them in parallel.
 	// This can be used for faster uploads on non-seekable or slow-to-seek input.
@@ -240,7 +247,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 }

 // validate() checks if the UserMetadata map has standard headers or and raises an error if so.
-func (opts PutObjectOptions) validate() (err error) {
+func (opts PutObjectOptions) validate(c *Client) (err error) {
 	for k, v := range opts.UserMetadata {
 		if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
 			return errInvalidArgument(k + " unsupported user defined metadata name")
@@ -255,6 +262,17 @@ func (opts PutObjectOptions) validate() (err error) {
 	if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
 		return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
 	}
+	if opts.Checksum.IsSet() {
+		switch {
+		case !c.trailingHeaderSupport:
+			return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled")
+		case c.overrideSignerType.IsV2():
+			return errInvalidArgument("Checksum cannot be used with v2 signatures")
+		case s3utils.IsGoogleEndpoint(*c.endpointURL):
+			return errInvalidArgument("Checksum cannot be used with GCS endpoints")
+		}
+	}
+
 	return nil
 }

@@ -291,7 +309,7 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r
 		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
 	}

-	err = opts.validate()
+	err = opts.validate(c)
 	if err != nil {
 		return UploadInfo{}, err
 	}
@@ -333,7 +351,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 		return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
 	}

-	if size < int64(partSize) || opts.DisableMultipart {
+	if size <= int64(partSize) || opts.DisableMultipart {
 		return c.putObject(ctx, bucketName, objectName, reader, size, opts)
 	}

@@ -362,6 +380,10 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 		return UploadInfo{}, err
 	}

+	if opts.Checksum.IsSet() {
+		opts.SendContentMd5 = false
+		opts.AutoChecksum = opts.Checksum
+	}
 	if !opts.SendContentMd5 {
 		if opts.UserMetadata == nil {
 			opts.UserMetadata = make(map[string]string, 1)
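
The new PutObjectOptions.Checksum field forces a specific trailing checksum and is now validated against the client itself (hence validate(c *Client)); it requires a client built with TrailingHeaders enabled and is rejected for V2 signatures and Google endpoints. A hedged usage sketch against minio-go v7.0.77 follows; the endpoint, credentials, bucket and object names are placeholders.

package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// TrailingHeaders must be enabled for PutObjectOptions.Checksum to pass validation.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:           credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:          true,
		TrailingHeaders: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	body := strings.NewReader("example payload")
	info, err := client.PutObject(context.Background(), "example-bucket", "example-object",
		body, int64(body.Len()), minio.PutObjectOptions{
			// Force a SHA-256 trailing checksum; content MD5 is disabled automatically.
			Checksum: minio.ChecksumSHA256,
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded, checksum:", info.ChecksumSHA256)
}
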

vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go (generated, vendored): 2 changes
@@ -107,7 +107,7 @@ type readSeekCloser interface {
 // Total size should be < 5TB.
 // This function blocks until 'objs' is closed and the content has been uploaded.
 func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
-	err = opts.Opts.validate()
+	err = opts.Opts.validate(&c)
 	if err != nil {
 		return err
 	}

vendor/github.com/minio/minio-go/v7/api.go (generated, vendored): 4 changes
@@ -128,7 +128,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.76"
+	libraryVersion = "v7.0.77"
 )

 // User Agent should always following the below style.
@@ -661,7 +661,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 		// Initiate the request.
 		res, err = c.do(req)
 		if err != nil {
-			if isRequestErrorRetryable(err) {
+			if isRequestErrorRetryable(ctx, err) {
 				// Retry the request
 				continue
 			}

vendor/github.com/minio/minio-go/v7/functional_tests.go (generated, vendored): 253 changes
@@ -83,7 +83,7 @@ func createHTTPTransport() (transport *http.Transport) {
 		return nil
 	}

-	if mustParseBool(os.Getenv(skipCERTValidation)) {
+	if mustParseBool(os.Getenv(enableHTTPS)) && mustParseBool(os.Getenv(skipCERTValidation)) {
 		transport.TLSClientConfig.InsecureSkipVerify = true
 	}

@@ -2334,7 +2334,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 }

 // Test PutObject with custom checksums.
-func testPutMultipartObjectWithChecksums() {
+func testPutObjectWithTrailingChecksums() {
 	// initialize logging params
 	startTime := time.Now()
 	testName := getFuncName()
@@ -2342,7 +2342,7 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 	args := map[string]interface{}{
 		"bucketName": "",
 		"objectName": "",
-		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}",
 	}

 	if !isFullMode() {
@@ -2356,9 +2356,201 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
 			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
 			Transport: createHTTPTransport(),
 			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: true,
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+	tests := []struct {
+		cs minio.ChecksumType
+	}{
+		{cs: minio.ChecksumCRC32C},
+		{cs: minio.ChecksumCRC32},
+		{cs: minio.ChecksumSHA1},
+		{cs: minio.ChecksumSHA256},
+	}
+
+	for _, test := range tests {
+		function := "PutObject(bucketName, objectName, reader,size, opts)"
+		bufSize := dataFileMap["datafile-10-kB"]
+
+		// Save the data
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		args["objectName"] = objectName
+
+		cmpChecksum := func(got, want string) {
+			if want != got {
+				logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+				return
+			}
+		}
+
+		meta := map[string]string{}
+		reader := getDataReader("datafile-10-kB")
+		b, err := io.ReadAll(reader)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+		h := test.cs.Hasher()
+		h.Reset()
+
+		// Test with Wrong CRC.
+		args["metadata"] = meta
+		args["range"] = "false"
+		args["checksum"] = test.cs.String()
+
+		resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+			DisableMultipart:     true,
+			DisableContentSha256: true,
+			UserMetadata:         meta,
+			Checksum:             test.cs,
+		})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+
+		h.Write(b)
+		meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+		cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+		// Read the data back
+		gopts := minio.GetObjectOptions{Checksum: true}
+
+		function = "GetObject(...)"
+		r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+
+		st, err := r.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+		cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+		if st.Size != int64(bufSize) {
+			logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
+			return
+		}
+
+		if err := r.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "Object Close failed", err)
+			return
+		}
+		if err := r.Close(); err == nil {
+			logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+			return
+		}
+
+		function = "GetObject( Range...)"
+		args["range"] = "true"
+		err = gopts.SetRange(100, 1000)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "SetRange failed", err)
+			return
+		}
+		r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject failed", err)
+			return
+		}
+
+		b, err = io.ReadAll(r)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+		st, err = r.Stat()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+
+		// Range requests should return empty checksums...
+		cmpChecksum(st.ChecksumSHA256, "")
+		cmpChecksum(st.ChecksumSHA1, "")
+		cmpChecksum(st.ChecksumCRC32, "")
+		cmpChecksum(st.ChecksumCRC32C, "")
+
+		function = "GetObjectAttributes(...)"
+		s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+			return
+		}
+		cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+		cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+		cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+		cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+		delete(args, "range")
+		delete(args, "metadata")
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutMultipartObjectWithChecksums(trailing bool) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader,size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing),
+	}
+
+	if !isFullMode() {
+		logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
+			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: trailing,
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2445,14 +2637,20 @@ function := "PutObject(bucketName, objectName, reader,size, opts)"
 		h.Reset()
 		want := hashMultiPart(b, partSize, test.cs.Hasher())
+
+		var cs minio.ChecksumType
+		rd := io.Reader(io.NopCloser(bytes.NewReader(b)))
+		if trailing {
+			cs = test.cs
+			rd = bytes.NewReader(b)
+		}
 		// Set correct CRC.
-		resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{
+		resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{
 			DisableContentSha256: true,
 			DisableMultipart:     false,
 			UserMetadata:         nil,
 			PartSize:             partSize,
 			AutoChecksum:         test.cs,
+			Checksum:             cs,
 		})
 		if err != nil {
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -2982,6 +3180,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
 		testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{
 			ContentType:    v.ContentType,
 			SendContentMd5: v.SendContentMd5,
+			Checksum:       minio.ChecksumCRC32C,
 		})
 		if err != nil {
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -3063,7 +3262,7 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
 		test: objectAttributesTestOptions{
 			TestFileName:    "file1",
 			StorageClass:    "STANDARD",
-			HasFullChecksum: false,
+			HasFullChecksum: true,
 		},
 	}

@@ -3152,9 +3351,10 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"

 	info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
 		ContentType:          "content/custom",
-		SendContentMd5:       true,
+		SendContentMd5:       false,
 		ServerSideEncryption: sse,
 		PartSize:             uint64(bufSize) / 2,
+		Checksum:             minio.ChecksumCRC32C,
 	})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -3174,9 +3374,9 @@ function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
 		ETag:             info.ETag,
 		NumberOfParts:    2,
 		ObjectSize:       int(info.Size),
-		HasFullChecksum:  false,
+		HasFullChecksum:  true,
 		HasParts:         true,
-		HasPartChecksums: false,
+		HasPartChecksums: true,
 	})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err)
@@ -5594,18 +5794,12 @@ function := "PresignedPostPolicy(policy)"
 	}
 	writer.Close()

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
-		return
-	}
-
 	httpClient := &http.Client{
 		// Setting a sensible time out of 30secs to wait for response
 		// headers. Request is pro-actively canceled after 30secs
 		// with no response.
 		Timeout:   30 * time.Second,
-		Transport: transport,
+		Transport: createHTTPTransport(),
 	}
 	args["url"] = presignedPostPolicyURL.String()

@@ -7519,7 +7713,7 @@ functionAll += ", " + function
 		return
 	}

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
+	transport := createHTTPTransport()
 	if err != nil {
 		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
 		return
@@ -12450,18 +12644,12 @@ functionAll += ", " + function
 		return
 	}

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
-		return
-	}
-
 	httpClient := &http.Client{
 		// Setting a sensible time out of 30secs to wait for response
 		// headers. Request is pro-actively canceled after 30secs
 		// with no response.
 		Timeout:   30 * time.Second,
-		Transport: transport,
+		Transport: createHTTPTransport(),
 	}

 	req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
@@ -13556,14 +13744,9 @@ function := "SetBucketCors(bucketName, cors)"
 	bucketURL := c.EndpointURL().String() + "/" + bucketName + "/"
 	objectURL := bucketURL + objectName

-	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
-		return
-	}
 	httpClient := &http.Client{
 		Timeout:   30 * time.Second,
-		Transport: transport,
+		Transport: createHTTPTransport(),
 	}

 	errStrAccessForbidden := `<Error><Code>AccessForbidden</Code><Message>CORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted`
@@ -14757,7 +14940,9 @@ func main() {
 		testCompose10KSourcesV2()
 		testUserMetadataCopyingV2()
 		testPutObjectWithChecksums()
-		testPutMultipartObjectWithChecksums()
+		testPutObjectWithTrailingChecksums()
+		testPutMultipartObjectWithChecksums(false)
+		testPutMultipartObjectWithChecksums(true)
 		testPutObject0ByteV2()
 		testPutObjectNoLengthV2()
 		testPutObjectsUnknownV2()

vendor/github.com/minio/minio-go/v7/post-policy.go (generated, vendored): 19 changes
@@ -301,6 +301,25 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error {
 	return nil
 }

+// SetUserMetadataStartsWith - Set how an user metadata should starts with.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-meta-%s", key)
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
 // SetChecksum sets the checksum of the request.
 func (p *PostPolicy) SetChecksum(c Checksum) {
 	if c.IsSet() {
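
SetUserMetadataStartsWith adds a starts-with policy condition for an x-amz-meta-* field, alongside the existing exact-match SetUserMetadata. A hedged sketch of building a POST policy with both follows; the bucket, key, expiry and metadata values are placeholders.

package main

import (
	"log"
	"time"

	"github.com/minio/minio-go/v7"
)

func main() {
	policy := minio.NewPostPolicy()
	if err := policy.SetBucket("example-bucket"); err != nil {
		log.Fatal(err)
	}
	if err := policy.SetKey("uploads/example.txt"); err != nil {
		log.Fatal(err)
	}
	if err := policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
		log.Fatal(err)
	}
	// Exact-match metadata condition (pre-existing API).
	if err := policy.SetUserMetadata("origin", "functional-test"); err != nil {
		log.Fatal(err)
	}
	// New in this change: accept any value with the given prefix for x-amz-meta-source.
	if err := policy.SetUserMetadataStartsWith("source", "browser-"); err != nil {
		log.Fatal(err)
	}
	// Print the assembled policy document (JSON) for inspection.
	log.Println(policy.String())
}
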

vendor/github.com/minio/minio-go/v7/retry.go (generated, vendored): 5 changes
@@ -129,9 +129,10 @@ func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
 }

 // For now, all http Do() requests are retriable except some well defined errors
-func isRequestErrorRetryable(err error) bool {
+func isRequestErrorRetryable(ctx context.Context, err error) bool {
 	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
-		return false
+		// Retry if internal timeout in the HTTP call.
+		return ctx.Err() == nil
 	}
 	if ue, ok := err.(*url.Error); ok {
 		e := ue.Unwrap()

vendor/modules.txt (vendored): 18 changes
@@ -1,17 +1,11 @@
-# codeberg.org/gruf/go-atomics v1.1.0
-## explicit; go 1.16
-codeberg.org/gruf/go-atomics
-# codeberg.org/gruf/go-bitutil v1.1.0
-## explicit; go 1.19
-codeberg.org/gruf/go-bitutil
 # codeberg.org/gruf/go-bytes v1.0.2
 ## explicit; go 1.14
 codeberg.org/gruf/go-bytes
 # codeberg.org/gruf/go-bytesize v1.0.3
 ## explicit; go 1.17
 codeberg.org/gruf/go-bytesize
-# codeberg.org/gruf/go-byteutil v1.2.0
-## explicit; go 1.16
+# codeberg.org/gruf/go-byteutil v1.3.0
+## explicit; go 1.20
 codeberg.org/gruf/go-byteutil
 # codeberg.org/gruf/go-cache/v3 v3.5.7
 ## explicit; go 1.19
@@ -59,13 +53,13 @@ codeberg.org/gruf/go-mimetypes
 # codeberg.org/gruf/go-mutexes v1.5.1
 ## explicit; go 1.22.2
 codeberg.org/gruf/go-mutexes
-# codeberg.org/gruf/go-runners v1.6.2
+# codeberg.org/gruf/go-runners v1.6.3
 ## explicit; go 1.19
 codeberg.org/gruf/go-runners
-# codeberg.org/gruf/go-sched v1.2.3
+# codeberg.org/gruf/go-sched v1.2.4
 ## explicit; go 1.19
 codeberg.org/gruf/go-sched
-# codeberg.org/gruf/go-storage v0.1.2
+# codeberg.org/gruf/go-storage v0.2.0
 ## explicit; go 1.22
 codeberg.org/gruf/go-storage
 codeberg.org/gruf/go-storage/disk
@@ -491,7 +485,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.76
+# github.com/minio/minio-go/v7 v7.0.77
 ## explicit; go 1.21
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/cors
@ -22,7 +22,7 @@
|
||||||
--blue3: var(--blurple3);
|
--blue3: var(--blurple3);
|
||||||
|
|
||||||
/* Basic page styling (background + foreground) */
|
/* Basic page styling (background + foreground) */
|
||||||
--bg: linear-gradient(var(--blurple7), black);
|
--bg: var(--blurple7);
|
||||||
--bg-accent: var(--blurple6);
|
--bg-accent: var(--blurple6);
|
||||||
--fg: var(--blurple1);
|
--fg: var(--blurple1);
|
||||||
--fg-reduced: var(--blurple2);
|
--fg-reduced: var(--blurple2);
|
||||||
|
@ -44,6 +44,11 @@
|
||||||
--boxshadow-border: 0.08rem solid black;
|
--boxshadow-border: 0.08rem solid black;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Main page background */
|
||||||
|
body {
|
||||||
|
background: linear-gradient(var(--blurple7), black);
|
||||||
|
}
|
||||||
|
|
||||||
/* Scroll bar */
|
/* Scroll bar */
|
||||||
html, body {
|
html, body {
|
||||||
scrollbar-color: var(--blurple4) var(--blurple7);
|
scrollbar-color: var(--blurple4) var(--blurple7);
|
||||||
|
|
|
@ -24,7 +24,7 @@
|
||||||
--blue3: var(--blurple6);
|
--blue3: var(--blurple6);
|
||||||
|
|
||||||
/* Basic page styling (background + foreground) */
|
/* Basic page styling (background + foreground) */
|
||||||
--bg: linear-gradient(var(--blurple1), white);
|
--bg: var(--blurple1);
|
||||||
--bg-accent: var(--white2);
|
--bg-accent: var(--white2);
|
||||||
--fg: var(--gray1);
|
--fg: var(--gray1);
|
||||||
--fg-reduced: var(--gray2);
|
--fg-reduced: var(--gray2);
|
||||||
|
@ -46,6 +46,11 @@
|
||||||
--boxshadow-border: 0.08rem solid var(--blurple6);
|
--boxshadow-border: 0.08rem solid var(--blurple6);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Main page background */
|
||||||
|
body {
|
||||||
|
background: linear-gradient(var(--blurple1), white);
|
||||||
|
}
|
||||||
|
|
||||||
/* Scroll bar */
|
/* Scroll bar */
|
||||||
html, body {
|
html, body {
|
||||||
scrollbar-color: var(--blurple5) var(--blurple2);
|
scrollbar-color: var(--blurple5) var(--blurple2);
|
||||||
|
|
|
@ -66,11 +66,7 @@
|
||||||
--blue3: var(--ecks-pee-white);
|
--blue3: var(--ecks-pee-white);
|
||||||
|
|
||||||
/* Basic page styling (background + foreground) */
|
/* Basic page styling (background + foreground) */
|
||||||
--bg: radial-gradient(
|
--bg: var(--ecks-pee-light-blue);
|
||||||
circle closest-corner at 20% 20%,
|
|
||||||
var(--ecks-pee-lighter-blue),
|
|
||||||
var(--ecks-pee-light-blue)
|
|
||||||
);
|
|
||||||
--bg-accent: var(--ecks-pee-blue);
|
--bg-accent: var(--ecks-pee-blue);
|
||||||
--fg: var(--ecks-pee-white);
|
--fg: var(--ecks-pee-white);
|
||||||
--fg-reduced: var(--ecks-pee-lightest-blue);
|
--fg-reduced: var(--ecks-pee-lightest-blue);
|
||||||
|
@ -122,6 +118,15 @@
|
||||||
src: url(/assets/fonts/Atkinson-Hyperlegible-BoldItalic-102.woff) format('woff');
|
src: url(/assets/fonts/Atkinson-Hyperlegible-BoldItalic-102.woff) format('woff');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Main page background */
|
||||||
|
body {
|
||||||
|
background: radial-gradient(
|
||||||
|
circle closest-corner at 20% 20%,
|
||||||
|
var(--ecks-pee-lighter-blue),
|
||||||
|
var(--ecks-pee-light-blue)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
/* Scroll bar */
|
/* Scroll bar */
|
||||||
html, body {
|
html, body {
|
||||||
/* Try Atkinson, fall back to default GtS fonts */
|
/* Try Atkinson, fall back to default GtS fonts */
|
||||||
|
|
|
@ -19,8 +19,14 @@
|
||||||
--blue3: var(--acid-green);
|
--blue3: var(--acid-green);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Main page background */
|
||||||
body {
|
body {
|
||||||
background: linear-gradient(90deg, var(--darkmagenta), black, var(--darkmagenta));
|
background: linear-gradient(
|
||||||
|
90deg,
|
||||||
|
var(--darkmagenta),
|
||||||
|
black,
|
||||||
|
var(--darkmagenta)
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
html, body {
|
html, body {
|
||||||
|
|
|
@@ -8,21 +8,21 @@
 v1.0 by xmgz at github */
 
 :root {
 /* color definitions */
 --dgreen1: #003333;
 --dgreen2: #196C41;
 --dgreen3: #027C68;
 --dgreen4: #009933;
 --dblue1: #141E46; /* very dark blue */
 --typecolor: #F8F4EC;
 --linkcolor: #c0f0c0; /* very soft lime green */
 --sunny: #FCDC2A;
 --lesssunny: #FF7431; /* papaya */
 /* wood/earth colors */
 --codebg: #3A2722; /* darker caoba */
 --quotebg: #800000; /* maroon */
 /* water, post's date and stats. User stats */
 --fg-reduced: #BBEBFF;
 
 /* Restyle basic colors */
 --blue1: var(--dgreen2);

@@ -43,12 +43,9 @@
 --button-bg: var(--lesssunny);
 --button-fg: var(--dblue1);
 
-
-
 /* Used around statuses + other items */
 --boxshadow: 0 0.4rem 0.7rem -0.1rem rgba(252,220,42,0.15); /* subtle status glow */
 --boxshadow-border: 0.07rem solid var(--lesssunny); /* thin papaya border */
 
 }
 
-
@@ -20,7 +20,7 @@
 --br-inner: 0.4rem;
 
 /* Basic page styling (background + foreground) */
---bg: linear-gradient(-90deg, var(--soft-blue), var(--soft-pink), white, var(--soft-pink), var(--soft-blue));
+--bg: var(--soft-pink);
 --bg-accent: var(--soft-pink-translucent);
 --fg: var(--gray1);
 --fg-reduced: var(--gray3);

@@ -41,6 +41,18 @@
 --boxshadow-border: 0.08rem solid var(--gray8);
 }
 
+/* Main page background */
+body {
+background: linear-gradient(
+-90deg,
+var(--soft-blue),
+var(--soft-pink),
+white,
+var(--soft-pink),
+var(--soft-blue)
+);
+}
+
 /* Scroll bar */
 html, body {
 scrollbar-color: var(--orange2) var(--soft-pink);
@@ -29,40 +29,29 @@
 
 :root {
 /* Define solarized palette */
---base3: #002b36;
---base2: #073642;
---base1: #586e75;
---base0: #657b83;
---base00: #839496;
---base01: #93a1a1;
---base02: #eee8d5;
---base03: #fdf6e3;
---yellow: #b58900;
---orange: #cb4b16;
---red: #dc322f;
---magenta: #d33682;
---violet: #6c71c4;
---blue: #268bd2;
---cyan: #2aa198;
---green: #859900;
+--base03: #002b36; /* Background. */
+--base02: #073642; /* Background highlights. */
+--base01: #586e75; /* Comments / secondary color. */
+--base0: #839496; /* Body text / default code / primary content. */
+--base1: #93a1a1; /* Optional emphasized content. */
+--red: #dc322f; /* Trim. */
 
 /* Override orange trim */
 --orange2: var(--red);
 
 /* Restyle basic colors to use Solarized */
---white1: var(--base3);
---white2: var(--base2);
---blue1: var(--cyan);
---blue2: var(--base03);
---blue3: var(--base02);
+--white1: var(--base02);
+--white2: var(--base03);
+--blue2: var(--base0);
+--blue3: var(--base1);
 
 /* Basic page styling (background + foreground) */
 --bg: var(--white1);
 --bg-accent: var(--white2);
---fg: var(--base02);
---fg-reduced: var(--base01);
+--fg-reduced: var(--base0);
+--fg: var(--base1);
 
-/* Profile page styling (light) */
+/* Profile page styling */
 --profile-bg: var(--white2);
 
 /* Solarize buttons */
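The renames in the hunk above bring the variables in line with the canonical Solarized naming, in which base03 is the darkest tone and base3 the lightest. For reference (these are the published upstream Solarized values, not part of this diff), the full palette is:

    :root {
      --base03: #002b36;
      --base02: #073642;
      --base01: #586e75;
      --base00: #657b83;
      --base0:  #839496;
      --base1:  #93a1a1;
      --base2:  #eee8d5;
      --base3:  #fdf6e3;
      --yellow: #b58900;
      --orange: #cb4b16;
      --red:    #dc322f;
      --magenta: #d33682;
      --violet: #6c71c4;
      --blue:   #268bd2;
      --cyan:   #2aa198;
      --green:  #859900;
    }

In the dark scheme base03/base02 serve as backgrounds and base0/base1 as body and emphasised text, while the light scheme mirrors this with base3/base2 backgrounds and base00/base01 text, which matches the --white and --fg overrides these hunks switch between.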
@@ -76,10 +65,10 @@
 --status-focus-info-bg: var(--white2);
 
 /* Used around statuses + other items */
---boxshadow-border: 0.1rem solid var(--base1);
+--boxshadow-border: 0.15rem solid var(--base01);
 
---plyr-video-control-color: var(--fg-reduced);
---plyr-video-control-color-hover: var(--fg);
+--plyr-video-control-color: var(--fg);
+--plyr-video-control-color-hover: var(--fg-reduced);
 }
 
 @font-face {

@@ -108,7 +97,7 @@ html, body {
 border: var(--boxshadow-border);
 }
 .status .media .media-wrapper details .unknown-attachment .placeholder {
-color: var(--blue2);
+color: var(--base01);
 }
 .status .media .media-wrapper details video.plyr-video {
 background: transparent;

@@ -126,22 +115,38 @@ html, body {
 pre, pre[class*="language-"],
 code, code[class*="language-"] {
 background-color: black;
-color: var(--base03);
+color: #93a1a1;
 }
 
 /* Block quotes */
 blockquote {
-background-color: var(--bg-accent);
-color: var(--fg);
+background-color: var(--base03);
+color: var(--base0);
 }
 
-button,
-.button,
+button, .button,
 .status .text-spoiler > summary .button {
 font-family: 'Noto Sans Mono', monospace;
 }
 
+.button {
+color: var(--base0);
+background: var(--base03);
+border: var(--boxshadow-border);
+}
+
+.button:hover {
+color: var(--base1);
+background: var(--base02);
+border: var(--boxshadow-border);
+}
+
 /* Ensure role badge readable */
 .profile .profile-header .basic-info .namerole .role.admin {
 color: var(--base03);
 }
+
+.col-header a {
+font-size: 1.2rem;
+font-weight: bold;
+}
@@ -1,6 +1,6 @@
 /*
-theme-title: Solarized (light)
-theme-description: Solarized sloth (light)
+theme-title: Solarized (dark)
+theme-description: Solarized sloth (dark)
 */
 
 /*
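The theme-title / theme-description pair edited above is the leading comment GoToSocial appears to read when listing selectable themes; the rest of each theme file simply overrides the base custom properties. A minimal skeleton following that convention, with illustrative names and colours only:

    /*
      theme-title: Example theme
      theme-description: Illustrative palette override
    */

    :root {
      --bg: #fdf6e3;      /* placeholder background */
      --fg: #586e75;      /* placeholder foreground */
      --orange2: #cb4b16; /* placeholder trim */
    }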
@@ -29,40 +29,29 @@
 
 :root {
 /* Define solarized palette */
---base03: #002b36;
---base02: #073642;
---base01: #586e75;
---base00: #657b83;
---base0: #839496;
---base1: #93a1a1;
---base2: #eee8d5;
---base3: #fdf6e3;
---yellow: #b58900;
---orange: #cb4b16;
---red: #dc322f;
---magenta: #d33682;
---violet: #6c71c4;
---blue: #268bd2;
---cyan: #2aa198;
---green: #859900;
+--base3: #fdf6e3; /* Background. */
+--base2: #eee8d5; /* Background highlights. */
+--base1: #93a1a1; /* Comments / secondary color. */
+--base00: #657b83; /* Body text / default code / primary content. */
+--base01: #586e75; /* Optional emphasized content. */
+--red: #cb4b16; /* Trim. */
 
 /* Override orange trim */
---orange2: var(--orange);
+--orange2: var(--red);
 
 /* Restyle basic colors to use Solarized */
 --white1: var(--base3);
 --white2: var(--base2);
---blue1: var(--cyan);
---blue2: var(--base03);
---blue3: var(--base02);
+--blue2: var(--base00);
+--blue3: var(--base01);
 
 /* Basic page styling (background + foreground) */
 --bg: var(--white1);
 --bg-accent: var(--white2);
---fg: var(--base02);
---fg-reduced: var(--base01);
+--fg-reduced: var(--base00);
+--fg: var(--base01);
 
-/* Profile page styling (light) */
+/* Profile page styling */
 --profile-bg: var(--white2);
 
 /* Solarize buttons */
@@ -76,10 +65,10 @@
 --status-focus-info-bg: var(--white2);
 
 /* Used around statuses + other items */
---boxshadow-border: 0.1rem solid var(--base1);
+--boxshadow-border: 0.15rem solid var(--base1);
 
---plyr-video-control-color: var(--fg-reduced);
---plyr-video-control-color-hover: var(--fg);
+--plyr-video-control-color: var(--fg);
+--plyr-video-control-color-hover: var(--fg-reduced);
 }
 
 @font-face {

@@ -108,7 +97,7 @@ html, body {
 border: var(--boxshadow-border);
 }
 .status .media .media-wrapper details .unknown-attachment .placeholder {
-color: var(--blue2);
+color: var(--base1);
 }
 .status .media .media-wrapper details video.plyr-video {
 background: transparent;

@@ -126,22 +115,38 @@ html, body {
 pre, pre[class*="language-"],
 code, code[class*="language-"] {
 background-color: black;
-color: var(--base3);
+color: #93a1a1;
 }
 
 /* Block quotes */
 blockquote {
-background-color: var(--bg-accent);
-color: var(--fg);
+background-color: var(--base3);
+color: var(--base00);
 }
 
-button,
-.button,
+button, .button,
 .status .text-spoiler > summary .button {
 font-family: 'Noto Sans Mono', monospace;
 }
 
+.button {
+color: var(--base01);
+background: var(--base2);
+border: var(--boxshadow-border);
+}
+
+.button:hover {
+color: var(--base00);
+background: var(--base3);
+border: var(--boxshadow-border);
+}
+
 /* Ensure role badge readable */
 .profile .profile-header .basic-info .namerole .role.admin {
-color: var(--base03);
+color: var(--base3);
+}
+
+.col-header a {
+font-size: 1.2rem;
+font-weight: bold;
 }
@@ -23,7 +23,7 @@
 --orange2: var(--pink);
 
 /* Basic page styling (background + foreground) */
---bg: linear-gradient(var(--eggplant1), var(--pink), var(--orange), var(--yellow), var(--eggshell));
+--bg: var(--eggshell);
 --bg-accent: var(--white2);
 --fg: var(--eggplant4);
 --fg-reduced: var(--eggplant3);

@@ -45,6 +45,17 @@
 --boxshadow-border: 0.08rem solid var(--orange);
 }
 
+/* Main page background */
+body {
+background: linear-gradient(
+var(--eggplant1),
+var(--pink),
+var(--orange),
+var(--yellow),
+var(--eggshell)
+);
+}
+
 /* Scroll bar */
 html, body {
 scrollbar-color: var(--pink) var(--eggshell);

@@ -55,12 +66,6 @@ html, body {
 color: var(--eggshell);
 }
 
-/* Role and bot badge backgrounds */
-.profile .profile-header .basic-info .namerole .role,
-.profile .profile-header .basic-info .namerole .bot-username-wrapper .bot-legend-wrapper {
-background: var(--eggshell);
-}
-
 /* Profile fields */
 .profile .about-user .fields .field {
 border-bottom: 0.1rem solid var(--orange);