Compare commits

...

7 commits

Author SHA1 Message Date
Rafael Caricio 120510ccbf
No need for published field to be strictly present 2023-08-07 19:05:21 +02:00
kim 9a291dea84
[performance] add caching of status fave, boost of, in reply to ID lists (#2060) 2023-08-04 12:28:33 +01:00
kim 00adf18c24
[feature] simpler cache size configuration (#2051)
* add automatic cache max size generation based on ratios of a singular fixed memory target

Signed-off-by: kim <grufwub@gmail.com>

* remove now-unused cache max-size config variables

Signed-off-by: kim <grufwub@gmail.com>

* slight ratio tweak

Signed-off-by: kim <grufwub@gmail.com>

* remove unused visibility config var

Signed-off-by: kim <grufwub@gmail.com>

* add secret little ratio config trick

Signed-off-by: kim <grufwub@gmail.com>

* fixed a word

Signed-off-by: kim <grufwub@gmail.com>

* update cache library to remove use of TTL in result caches + slice cache

Signed-off-by: kim <grufwub@gmail.com>

* update other cache usages to use correct interface

Signed-off-by: kim <grufwub@gmail.com>

* update example config to explain the cache memory target

Signed-off-by: kim <grufwub@gmail.com>

* update env parsing test with new config values

Signed-off-by: kim <grufwub@gmail.com>

* do some ratio twiddling

Signed-off-by: kim <grufwub@gmail.com>

* add missing header

* update envparsing with latest defaults

Signed-off-by: kim <grufwub@gmail.com>

* update size calculations to take into account result cache, simple cache and extra map overheads

Signed-off-by: kim <grufwub@gmail.com>

* tweak the ratios some more

Signed-off-by: kim <grufwub@gmail.com>

* more nan rampaging

Signed-off-by: kim <grufwub@gmail.com>

* fix envparsing script

Signed-off-by: kim <grufwub@gmail.com>

* update cache library, add sweep function to keep caches trim

Signed-off-by: kim <grufwub@gmail.com>

* sweep caches once a minute

Signed-off-by: kim <grufwub@gmail.com>

* add a regular job to sweep caches and keep under 80% utilisation

Signed-off-by: kim <grufwub@gmail.com>

* remove dead code

Signed-off-by: kim <grufwub@gmail.com>

* add new size library used to libraries section of readme

Signed-off-by: kim <grufwub@gmail.com>

* add better explanations for the mem-ratio numbers

Signed-off-by: kim <grufwub@gmail.com>

* update go-cache

Signed-off-by: kim <grufwub@gmail.com>

* library version bump

Signed-off-by: kim <grufwub@gmail.com>

* update cache.result{} size model estimation

Signed-off-by: kim <grufwub@gmail.com>

---------

Signed-off-by: kim <grufwub@gmail.com>
2023-08-03 11:34:35 +02:00
tobi e8a20f587c
[bugfix] Rework MultiError to wrap + unwrap errors properly (#2057)
* rework multierror a bit

* test multierror
2023-08-02 17:21:46 +02:00
kim 2cee8f2dd8
[bugfix] fix slow accounts / statuses using emojis lookups (#2056)
* update DeleteEmoji to use faster relational tables for status / account finding

Signed-off-by: kim <grufwub@gmail.com>

* update Get{Accounts,Statuses}UsingEmoji() to also use relational tables

Signed-off-by: kim <grufwub@gmail.com>

* remove the now unneeded tags relation from newStatusQ()

Signed-off-by: kim <grufwub@gmail.com>

* fix table names

Signed-off-by: kim <grufwub@gmail.com>

* fix account and status selects using emojis

Signed-off-by: kim <grufwub@gmail.com>

---------

Signed-off-by: kim <grufwub@gmail.com>
2023-08-02 16:11:23 +02:00
kim 24516b84c2
[bugfix] handle HEAD requests more elegantly (#2055)
Signed-off-by: kim <grufwub@gmail.com>
2023-08-02 10:28:20 +02:00
tobi cec29e2a8d
[bugfix] Allow instance accounts to be shown in search results in certain circumstances (#2053) 2023-08-02 08:31:09 +01:00
73 changed files with 3132 additions and 2889 deletions

View file

@ -224,6 +224,7 @@ The following open source libraries, frameworks, and tools are used by GoToSocia
- [buckket/go-blurhash](https://github.com/buckket/go-blurhash); used for generating image blurhashes. [GPL-3.0 License](https://spdx.org/licenses/GPL-3.0-only.html). - [buckket/go-blurhash](https://github.com/buckket/go-blurhash); used for generating image blurhashes. [GPL-3.0 License](https://spdx.org/licenses/GPL-3.0-only.html).
- [coreos/go-oidc](https://github.com/coreos/go-oidc); OIDC client library. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html). - [coreos/go-oidc](https://github.com/coreos/go-oidc); OIDC client library. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
- [disintegration/imaging](https://github.com/disintegration/imaging); image resizing. [MIT License](https://spdx.org/licenses/MIT.html). - [disintegration/imaging](https://github.com/disintegration/imaging); image resizing. [MIT License](https://spdx.org/licenses/MIT.html).
- [DmitriyVTitov/size](https://github.com/DmitriyVTitov/size); runtime model memory size calculations. [MIT License](https://spdx.org/licenses/MIT.html).
- Gin: - Gin:
- [gin-contrib/cors](https://github.com/gin-contrib/cors); Gin CORS middleware. [MIT License](https://spdx.org/licenses/MIT.html). - [gin-contrib/cors](https://github.com/gin-contrib/cors); Gin CORS middleware. [MIT License](https://spdx.org/licenses/MIT.html).
- [gin-contrib/gzip](https://github.com/gin-contrib/gzip); Gin gzip middleware. [MIT License](https://spdx.org/licenses/MIT.html). - [gin-contrib/gzip](https://github.com/gin-contrib/gzip); Gin gzip middleware. [MIT License](https://spdx.org/licenses/MIT.html).

View file

@ -75,14 +75,14 @@ func setupPrune(ctx context.Context) (*prune, error) {
} }
func (p *prune) shutdown(ctx context.Context) error { func (p *prune) shutdown(ctx context.Context) error {
var errs gtserror.MultiError errs := gtserror.NewMultiError(2)
if err := p.storage.Close(); err != nil { if err := p.storage.Close(); err != nil {
errs.Appendf("error closing storage backend: %v", err) errs.Appendf("error closing storage backend: %w", err)
} }
if err := p.dbService.Stop(ctx); err != nil { if err := p.dbService.Stop(ctx); err != nil {
errs.Appendf("error stopping database: %v", err) errs.Appendf("error stopping database: %w", err)
} }
p.state.Workers.Stop() p.state.Workers.Stop()

View file

@ -25,7 +25,9 @@ import (
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
"time"
"codeberg.org/gruf/go-sched"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action" "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/api" "github.com/superseriousbusiness/gotosocial/internal/api"
@ -117,6 +119,13 @@ var Start action.GTSAction = func(ctx context.Context) error {
state.Workers.Start() state.Workers.Start()
defer state.Workers.Stop() defer state.Workers.Stop()
// Add a task to the scheduler to sweep caches.
// Frequency = 1 * minute
// Threshold = 80% capacity
sweep := func(time.Time) { state.Caches.Sweep(80) }
job := sched.NewJob(sweep).Every(time.Minute)
_ = state.Workers.Scheduler.Schedule(job)
// Build handlers used in later initializations. // Build handlers used in later initializations.
mediaManager := media.NewManager(&state) mediaManager := media.NewManager(&state)
oauthServer := oauth.New(ctx, dbService) oauthServer := oauth.New(ctx, dbService)

View file

@ -231,111 +231,13 @@ db-sqlite-cache-size: "8MiB"
db-sqlite-busy-timeout: "30m" db-sqlite-busy-timeout: "30m"
cache: cache:
# Cache configuration options: # cache.memory-target sets a target limit that
# # the application will try to keep its caches
# max-size = maximum cached objects count # within. This is based on estimated sizes of
# ttl = cached object lifetime # in-memory objects, and so NOT AT ALL EXACT.
# sweep-freq = frequency to look for stale cache objects # Examples: ["100MiB", "200MiB", "500MiB", "1GiB"]
# (zero will disable cache sweeping) # Default: "200MiB"
memory-target: "200MiB"
#############################
#### VISIBILITY CACHES ######
#############################
#
# Configure Status and account
# visibility cache.
visibility-max-size: 2000
visibility-ttl: "30m"
visibility-sweep-freq: "1m"
gts:
###########################
#### DATABASE CACHES ######
###########################
#
# Configure GTS database
# model caches.
account-max-size: 2000
account-ttl: "30m"
account-sweep-freq: "1m"
block-max-size: 1000
block-ttl: "30m"
block-sweep-freq: "1m"
domain-block-max-size: 2000
domain-block-ttl: "24h"
domain-block-sweep-freq: "1m"
emoji-max-size: 2000
emoji-ttl: "30m"
emoji-sweep-freq: "1m"
emoji-category-max-size: 100
emoji-category-ttl: "30m"
emoji-category-sweep-freq: "1m"
follow-max-size: 2000
follow-ttl: "30m"
follow-sweep-freq: "1m"
follow-request-max-size: 2000
follow-request-ttl: "30m"
follow-request-sweep-freq: "1m"
instance-max-size: 2000
instance-ttl: "30m"
instance-sweep-freq: "1m"
list-max-size: 2000
list-ttl: "30m"
list-sweep-freq: "1m"
list-entry-max-size: 2000
list-entry-ttl: "30m"
list-entry-sweep-freq: "1m"
media-max-size: 1000
media-ttl: "30m"
media-sweep-freq: "1m"
mention-max-size: 2000
mention-ttl: "30m"
mention-sweep-freq: "1m"
notification-max-size: 1000
notification-ttl: "30m"
notification-sweep-freq: "1m"
report-max-size: 100
report-ttl: "30m"
report-sweep-freq: "1m"
status-max-size: 2000
status-ttl: "30m"
status-sweep-freq: "1m"
status-fave-max-size: 2000
status-fave-ttl: "30m"
status-fave-sweep-freq: "1m"
tag-max-size: 2000
tag-ttl: "30m"
tag-sweep-freq: "1m"
tombstone-max-size: 500
tombstone-ttl: "30m"
tombstone-sweep-freq: "1m"
user-max-size: 500
user-ttl: "30m"
user-sweep-freq: "1m"
webfinger-max-size: 250
webfinger-ttl: "24h"
webfinger-sweep-freq: "15m"
###################### ######################
##### WEB CONFIG ##### ##### WEB CONFIG #####

3
go.mod
View file

@ -5,7 +5,7 @@ go 1.20
require ( require (
codeberg.org/gruf/go-bytesize v1.0.2 codeberg.org/gruf/go-bytesize v1.0.2
codeberg.org/gruf/go-byteutil v1.1.2 codeberg.org/gruf/go-byteutil v1.1.2
codeberg.org/gruf/go-cache/v3 v3.4.4 codeberg.org/gruf/go-cache/v3 v3.5.5
codeberg.org/gruf/go-debug v1.3.0 codeberg.org/gruf/go-debug v1.3.0
codeberg.org/gruf/go-errors/v2 v2.2.0 codeberg.org/gruf/go-errors/v2 v2.2.0
codeberg.org/gruf/go-fastcopy v1.1.2 codeberg.org/gruf/go-fastcopy v1.1.2
@ -16,6 +16,7 @@ require (
codeberg.org/gruf/go-runners v1.6.1 codeberg.org/gruf/go-runners v1.6.1
codeberg.org/gruf/go-sched v1.2.3 codeberg.org/gruf/go-sched v1.2.3
codeberg.org/gruf/go-store/v2 v2.2.2 codeberg.org/gruf/go-store/v2 v2.2.2
github.com/DmitriyVTitov/size v1.5.0
github.com/KimMachineGun/automemlimit v0.2.6 github.com/KimMachineGun/automemlimit v0.2.6
github.com/abema/go-mp4 v0.11.0 github.com/abema/go-mp4 v0.11.0
github.com/buckket/go-blurhash v1.1.0 github.com/buckket/go-blurhash v1.1.0

8
go.sum
View file

@ -48,8 +48,8 @@ codeberg.org/gruf/go-bytesize v1.0.2/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacp
codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU= codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
codeberg.org/gruf/go-byteutil v1.1.2 h1:TQLZtTxTNca9xEfDIndmo7nBYxeS94nrv/9DS3Nk5Tw= codeberg.org/gruf/go-byteutil v1.1.2 h1:TQLZtTxTNca9xEfDIndmo7nBYxeS94nrv/9DS3Nk5Tw=
codeberg.org/gruf/go-byteutil v1.1.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU= codeberg.org/gruf/go-byteutil v1.1.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
codeberg.org/gruf/go-cache/v3 v3.4.4 h1:V0A3EzjhzhULOydD16pwa2DRDwF67OuuP4ORnm//7p8= codeberg.org/gruf/go-cache/v3 v3.5.5 h1:Ce7odyvr8oF6h49LSjPL7AZs2QGyKMN9BPkgKcfR0BA=
codeberg.org/gruf/go-cache/v3 v3.4.4/go.mod h1:pTeVPEb9DshXUkd8Dg76UcsLpU6EC/tXQ2qb+JrmxEc= codeberg.org/gruf/go-cache/v3 v3.5.5/go.mod h1:NbsGQUgEdNFd631WSasvCHIVAaY9ovuiSeoBwtsIeDc=
codeberg.org/gruf/go-debug v1.3.0 h1:PIRxQiWUFKtGOGZFdZ3Y0pqyfI0Xr87j224IYe2snZs= codeberg.org/gruf/go-debug v1.3.0 h1:PIRxQiWUFKtGOGZFdZ3Y0pqyfI0Xr87j224IYe2snZs=
codeberg.org/gruf/go-debug v1.3.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg= codeberg.org/gruf/go-debug v1.3.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4= codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
@ -87,6 +87,8 @@ codeberg.org/gruf/go-store/v2 v2.2.2/go.mod h1:QRM3LUAfYyoGMWLTqA1WzohxQgYqPFiVv
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
github.com/KimMachineGun/automemlimit v0.2.6 h1:tQFriVTcIteUkV5EgU9iz03eDY36T8JU5RAjP2r6Kt0= github.com/KimMachineGun/automemlimit v0.2.6 h1:tQFriVTcIteUkV5EgU9iz03eDY36T8JU5RAjP2r6Kt0=
github.com/KimMachineGun/automemlimit v0.2.6/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0= github.com/KimMachineGun/automemlimit v0.2.6/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@ -256,6 +258,8 @@ github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=

View file

@ -22,7 +22,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"io" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -105,16 +104,16 @@ func (suite *InboxPostTestSuite) inboxPost(
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// Check expected code + body. // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// If we got an expected body, return early. // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if err := errs.Combine(); err != nil { if err := errs.Combine(); err != nil {

View file

@ -90,16 +90,16 @@ func (suite *AccountUpdateTestSuite) updateAccount(
return nil, err return nil, err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// Check expected code + body. // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// If we got an expected body, return early. // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if err := errs.Combine(); err != nil { if err := errs.Combine(); err != nil {

View file

@ -19,7 +19,6 @@ package accounts_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -63,16 +62,16 @@ func (suite *ListsTestSuite) getLists(targetAccountID string, expectedHTTPStatus
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// Check expected code + body. // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// If we got an expected body, return early. // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if err := errs.Combine(); err != nil { if err := errs.Combine(); err != nil {

View file

@ -19,7 +19,6 @@ package accounts_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -99,16 +98,16 @@ func (suite *AccountSearchTestSuite) getSearch(
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// Check expected code + body. // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// If we got an expected body, return early. // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if err := errs.Combine(); err != nil { if err := errs.Combine(); err != nil {

View file

@ -19,7 +19,6 @@ package admin_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -84,16 +83,16 @@ func (suite *ReportResolveTestSuite) resolveReport(
return nil, err return nil, err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // if we got an expected body, return early
if expectedBody != "" { if expectedBody != "" {
if string(b) != expectedBody { if string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
return nil, errs.Combine() return nil, errs.Combine()
} }

View file

@ -19,7 +19,6 @@ package admin_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -101,16 +100,16 @@ func (suite *ReportsGetTestSuite) getReports(
return nil, "", err return nil, "", err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // if we got an expected body, return early
if expectedBody != "" { if expectedBody != "" {
if string(b) != expectedBody { if string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
return nil, "", errs.Combine() return nil, "", errs.Combine()
} }

View file

@ -19,7 +19,6 @@ package lists_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -103,17 +102,17 @@ func (suite *ListAccountsTestSuite) getListAccounts(
return nil, "", err return nil, "", err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// check code + body // check code + body
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // if we got an expected body, return early
if expectedBody != "" { if expectedBody != "" {
if string(b) != expectedBody { if string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
return nil, "", errs.Combine() return nil, "", errs.Combine()
} }

View file

@ -19,7 +19,6 @@ package reports_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -77,17 +76,17 @@ func (suite *ReportCreateTestSuite) createReport(expectedHTTPStatus int, expecte
return nil, err return nil, err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// check code + body // check code + body
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // if we got an expected body, return early
if expectedBody != "" { if expectedBody != "" {
if string(b) != expectedBody { if string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
return nil, errs.Combine() return nil, errs.Combine()
} }

View file

@ -19,7 +19,6 @@ package reports_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -64,17 +63,17 @@ func (suite *ReportGetTestSuite) getReport(expectedHTTPStatus int, expectedBody
return nil, err return nil, err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// check code + body // check code + body
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // if we got an expected body, return early
if expectedBody != "" { if expectedBody != "" {
if string(b) != expectedBody { if string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
return nil, errs.Combine() return nil, errs.Combine()
} }

View file

@ -19,8 +19,9 @@ package search_test
import ( import (
"context" "context"
"crypto/rand"
"crypto/rsa"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -30,6 +31,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/api/client/search" "github.com/superseriousbusiness/gotosocial/internal/api/client/search"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util" apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
@ -119,16 +121,16 @@ func (suite *SearchGetTestSuite) getSearch(
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// Check expected code + body. // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d: %v", expectedHTTPStatus, resultCode, ctx.Errors.JSON())) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// If we got an expected body, return early. // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if err := errs.Combine(); err != nil { if err := errs.Combine(); err != nil {
@ -1001,7 +1003,7 @@ func (suite *SearchGetTestSuite) TestSearchAAccounts() {
suite.Len(searchResult.Hashtags, 0) suite.Len(searchResult.Hashtags, 0)
} }
func (suite *SearchGetTestSuite) TestSearchAAccountsLimit1() { func (suite *SearchGetTestSuite) TestSearchAccountsLimit1() {
var ( var (
requestingAccount = suite.testAccounts["local_account_1"] requestingAccount = suite.testAccounts["local_account_1"]
token = suite.testTokens["local_account_1"] token = suite.testTokens["local_account_1"]
@ -1078,12 +1080,14 @@ func (suite *SearchGetTestSuite) TestSearchLocalInstanceAccountByURI() {
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
suite.Len(searchResult.Accounts, 0) // Should be able to get instance
// account by exact URI.
suite.Len(searchResult.Accounts, 1)
suite.Len(searchResult.Statuses, 0) suite.Len(searchResult.Statuses, 0)
suite.Len(searchResult.Hashtags, 0) suite.Len(searchResult.Hashtags, 0)
} }
func (suite *SearchGetTestSuite) TestSearchInstanceAccountFull() { func (suite *SearchGetTestSuite) TestSearchLocalInstanceAccountFull() {
// Namestring excludes ':' in usernames, so we // Namestring excludes ':' in usernames, so we
// need to fiddle with the instance account a // need to fiddle with the instance account a
// bit to get it to look like a different domain. // bit to get it to look like a different domain.
@ -1125,12 +1129,14 @@ func (suite *SearchGetTestSuite) TestSearchInstanceAccountFull() {
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
suite.Len(searchResult.Accounts, 0) // Should be able to get instance
// account by full namestring.
suite.Len(searchResult.Accounts, 1)
suite.Len(searchResult.Statuses, 0) suite.Len(searchResult.Statuses, 0)
suite.Len(searchResult.Hashtags, 0) suite.Len(searchResult.Hashtags, 0)
} }
func (suite *SearchGetTestSuite) TestSearchInstanceAccountPartial() { func (suite *SearchGetTestSuite) TestSearchLocalInstanceAccountPartial() {
// Namestring excludes ':' in usernames, so we // Namestring excludes ':' in usernames, so we
// need to fiddle with the instance account a // need to fiddle with the instance account a
// bit to get it to look like a different domain. // bit to get it to look like a different domain.
@ -1172,6 +1178,131 @@ func (suite *SearchGetTestSuite) TestSearchInstanceAccountPartial() {
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
// Query was a partial namestring from our
// instance, so will return the instance account.
suite.Len(searchResult.Accounts, 1)
suite.Len(searchResult.Statuses, 0)
suite.Len(searchResult.Hashtags, 0)
}
func (suite *SearchGetTestSuite) TestSearchLocalInstanceAccountEvenMorePartial() {
// Namestring excludes ':' in usernames, so we
// need to fiddle with the instance account a
// bit to get it to look like a different domain.
newDomain := "example.org"
suite.bodgeLocalInstance(newDomain)
var (
requestingAccount = suite.testAccounts["local_account_1"]
token = suite.testTokens["local_account_1"]
user = suite.testUsers["local_account_1"]
maxID *string = nil
minID *string = nil
limit *int = nil
offset *int = nil
resolve *bool = nil
query = newDomain
queryType *string = nil
following *bool = nil
expectedHTTPStatus = http.StatusOK
expectedBody = ""
)
searchResult, err := suite.getSearch(
requestingAccount,
token,
apiutil.APIv2,
user,
maxID,
minID,
limit,
offset,
query,
queryType,
resolve,
following,
expectedHTTPStatus,
expectedBody)
if err != nil {
suite.FailNow(err.Error())
}
// Query was just 'example.org' which doesn't
// look like a namestring, so search should
// fall back to text search and therefore give
// 0 results back.
suite.Len(searchResult.Accounts, 0)
suite.Len(searchResult.Statuses, 0)
suite.Len(searchResult.Hashtags, 0)
}
func (suite *SearchGetTestSuite) TestSearchRemoteInstanceAccountPartial() {
// Insert an instance account that's not
// from our instance, and try to search
// for it with a partial namestring.
theirDomain := "example.org"
key, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
suite.FailNow(err.Error())
}
if err := suite.db.PutAccount(context.Background(), &gtsmodel.Account{
ID: "01H6RWPG8T6DNW6VNXPBCJBH5S",
Username: theirDomain,
Domain: theirDomain,
URI: "http://" + theirDomain + "/users/" + theirDomain,
URL: "http://" + theirDomain + "/@" + theirDomain,
PublicKeyURI: "http://" + theirDomain + "/users/" + theirDomain + "#main-key",
InboxURI: "http://" + theirDomain + "/users/" + theirDomain + "/inbox",
OutboxURI: "http://" + theirDomain + "/users/" + theirDomain + "/outbox",
FollowersURI: "http://" + theirDomain + "/users/" + theirDomain + "/followers",
FollowingURI: "http://" + theirDomain + "/users/" + theirDomain + "/following",
FeaturedCollectionURI: "http://" + theirDomain + "/users/" + theirDomain + "/collections/featured",
ActorType: ap.ActorPerson,
PrivateKey: key,
PublicKey: &key.PublicKey,
}); err != nil {
suite.FailNow(err.Error())
}
var (
requestingAccount = suite.testAccounts["local_account_1"]
token = suite.testTokens["local_account_1"]
user = suite.testUsers["local_account_1"]
maxID *string = nil
minID *string = nil
limit *int = nil
offset *int = nil
resolve *bool = nil
query = "@" + theirDomain
queryType *string = nil
following *bool = nil
expectedHTTPStatus = http.StatusOK
expectedBody = ""
)
searchResult, err := suite.getSearch(
requestingAccount,
token,
apiutil.APIv2,
user,
maxID,
minID,
limit,
offset,
query,
queryType,
resolve,
following,
expectedHTTPStatus,
expectedBody)
if err != nil {
suite.FailNow(err.Error())
}
// Search for instance account from
// another domain should return 0 results.
suite.Len(searchResult.Accounts, 0) suite.Len(searchResult.Accounts, 0)
suite.Len(searchResult.Statuses, 0) suite.Len(searchResult.Statuses, 0)
suite.Len(searchResult.Hashtags, 0) suite.Len(searchResult.Hashtags, 0)

View file

@ -20,7 +20,6 @@ package statuses_test
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -74,20 +73,20 @@ func (suite *StatusPinTestSuite) createPin(
return nil, err return nil, err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// check code + body // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if len(errs) > 0 { if err := errs.Combine(); err != nil {
return nil, errs.Combine() suite.FailNow("", "%v (body %s)", err, string(b))
} }
resp := &apimodel.Status{} resp := &apimodel.Status{}

View file

@ -19,7 +19,6 @@ package statuses_test
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -68,20 +67,20 @@ func (suite *StatusUnpinTestSuite) createUnpin(
return nil, err return nil, err
} }
errs := gtserror.MultiError{} errs := gtserror.NewMultiError(2)
// check code + body // Check expected code + body.
if resultCode := recorder.Code; expectedHTTPStatus != resultCode { if resultCode := recorder.Code; expectedHTTPStatus != resultCode {
errs = append(errs, fmt.Sprintf("expected %d got %d", expectedHTTPStatus, resultCode)) errs.Appendf("expected %d got %d", expectedHTTPStatus, resultCode)
} }
// if we got an expected body, return early // If we got an expected body, return early.
if expectedBody != "" && string(b) != expectedBody { if expectedBody != "" && string(b) != expectedBody {
errs = append(errs, fmt.Sprintf("expected %s got %s", expectedBody, string(b))) errs.Appendf("expected %s got %s", expectedBody, string(b))
} }
if len(errs) > 0 { if err := errs.Combine(); err != nil {
return nil, errs.Combine() suite.FailNow("", "%v (body %s)", err, string(b))
} }
resp := &apimodel.Status{} resp := &apimodel.Status{}

View file

@ -196,6 +196,21 @@ func (c *Caches) setuphooks() {
// c.GTS.Media().Invalidate("StatusID") will not work. // c.GTS.Media().Invalidate("StatusID") will not work.
c.GTS.Media().Invalidate("ID", id) c.GTS.Media().Invalidate("ID", id)
} }
if status.BoostOfID != "" {
// Invalidate boost ID list of the original status.
c.GTS.BoostOfIDs().Invalidate(status.BoostOfID)
}
if status.InReplyToID != "" {
// Invalidate in reply to ID list of original status.
c.GTS.InReplyToIDs().Invalidate(status.InReplyToID)
}
})
c.GTS.StatusFave().SetInvalidateCallback(func(fave *gtsmodel.StatusFave) {
// Invalidate status fave ID list for this status.
c.GTS.StatusFaveIDs().Invalidate(fave.StatusID)
}) })
c.GTS.User().SetInvalidateCallback(func(user *gtsmodel.User) { c.GTS.User().SetInvalidateCallback(func(user *gtsmodel.User) {
@ -204,3 +219,36 @@ func (c *Caches) setuphooks() {
c.Visibility.Invalidate("RequesterID", user.AccountID) c.Visibility.Invalidate("RequesterID", user.AccountID)
}) })
} }
// Sweep will sweep all the available caches to ensure none
// are above threshold percent full to their total capacity.
//
// This helps with cache performance, as a full cache will
// require an eviction on every single write, which adds
// significant overhead to all cache writes.
func (c *Caches) Sweep(threshold float64) {
c.GTS.Account().Trim(threshold)
c.GTS.AccountNote().Trim(threshold)
c.GTS.Block().Trim(threshold)
c.GTS.BlockIDs().Trim(threshold)
c.GTS.Emoji().Trim(threshold)
c.GTS.EmojiCategory().Trim(threshold)
c.GTS.Follow().Trim(threshold)
c.GTS.FollowIDs().Trim(threshold)
c.GTS.FollowRequest().Trim(threshold)
c.GTS.FollowRequestIDs().Trim(threshold)
c.GTS.Instance().Trim(threshold)
c.GTS.List().Trim(threshold)
c.GTS.ListEntry().Trim(threshold)
c.GTS.Marker().Trim(threshold)
c.GTS.Media().Trim(threshold)
c.GTS.Mention().Trim(threshold)
c.GTS.Notification().Trim(threshold)
c.GTS.Report().Trim(threshold)
c.GTS.Status().Trim(threshold)
c.GTS.StatusFave().Trim(threshold)
c.GTS.Tag().Trim(threshold)
c.GTS.Tombstone().Trim(threshold)
c.GTS.User().Trim(threshold)
c.Visibility.Trim(threshold)
}

458
internal/cache/gts.go vendored
View file

@ -18,11 +18,15 @@
package cache package cache
import ( import (
"time"
"codeberg.org/gruf/go-cache/v3/result" "codeberg.org/gruf/go-cache/v3/result"
"codeberg.org/gruf/go-cache/v3/simple"
"codeberg.org/gruf/go-cache/v3/ttl" "codeberg.org/gruf/go-cache/v3/ttl"
"github.com/superseriousbusiness/gotosocial/internal/cache/domain" "github.com/superseriousbusiness/gotosocial/internal/cache/domain"
"github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/log"
) )
type GTSCaches struct { type GTSCaches struct {
@ -30,6 +34,7 @@ type GTSCaches struct {
accountNote *result.Cache[*gtsmodel.AccountNote] accountNote *result.Cache[*gtsmodel.AccountNote]
block *result.Cache[*gtsmodel.Block] block *result.Cache[*gtsmodel.Block]
blockIDs *SliceCache[string] blockIDs *SliceCache[string]
boostOfIDs *SliceCache[string]
domainBlock *domain.BlockCache domainBlock *domain.BlockCache
emoji *result.Cache[*gtsmodel.Emoji] emoji *result.Cache[*gtsmodel.Emoji]
emojiCategory *result.Cache[*gtsmodel.EmojiCategory] emojiCategory *result.Cache[*gtsmodel.EmojiCategory]
@ -38,6 +43,7 @@ type GTSCaches struct {
followRequest *result.Cache[*gtsmodel.FollowRequest] followRequest *result.Cache[*gtsmodel.FollowRequest]
followRequestIDs *SliceCache[string] followRequestIDs *SliceCache[string]
instance *result.Cache[*gtsmodel.Instance] instance *result.Cache[*gtsmodel.Instance]
inReplyToIDs *SliceCache[string]
list *result.Cache[*gtsmodel.List] list *result.Cache[*gtsmodel.List]
listEntry *result.Cache[*gtsmodel.ListEntry] listEntry *result.Cache[*gtsmodel.ListEntry]
marker *result.Cache[*gtsmodel.Marker] marker *result.Cache[*gtsmodel.Marker]
@ -47,12 +53,13 @@ type GTSCaches struct {
report *result.Cache[*gtsmodel.Report] report *result.Cache[*gtsmodel.Report]
status *result.Cache[*gtsmodel.Status] status *result.Cache[*gtsmodel.Status]
statusFave *result.Cache[*gtsmodel.StatusFave] statusFave *result.Cache[*gtsmodel.StatusFave]
statusFaveIDs *SliceCache[string]
tag *result.Cache[*gtsmodel.Tag] tag *result.Cache[*gtsmodel.Tag]
tombstone *result.Cache[*gtsmodel.Tombstone] tombstone *result.Cache[*gtsmodel.Tombstone]
user *result.Cache[*gtsmodel.User] user *result.Cache[*gtsmodel.User]
// TODO: move out of GTS caches since unrelated to DB. // TODO: move out of GTS caches since unrelated to DB.
webfinger *ttl.Cache[string, string] webfinger *ttl.Cache[string, string] // TTL=24hr, sweep=5min
} }
// Init will initialize all the gtsmodel caches in this collection. // Init will initialize all the gtsmodel caches in this collection.
@ -62,6 +69,7 @@ func (c *GTSCaches) Init() {
c.initAccountNote() c.initAccountNote()
c.initBlock() c.initBlock()
c.initBlockIDs() c.initBlockIDs()
c.initBoostOfIDs()
c.initDomainBlock() c.initDomainBlock()
c.initEmoji() c.initEmoji()
c.initEmojiCategory() c.initEmojiCategory()
@ -69,6 +77,7 @@ func (c *GTSCaches) Init() {
c.initFollowIDs() c.initFollowIDs()
c.initFollowRequest() c.initFollowRequest()
c.initFollowRequestIDs() c.initFollowRequestIDs()
c.initInReplyToIDs()
c.initInstance() c.initInstance()
c.initList() c.initList()
c.initListEntry() c.initListEntry()
@ -80,6 +89,7 @@ func (c *GTSCaches) Init() {
c.initStatus() c.initStatus()
c.initStatusFave() c.initStatusFave()
c.initTag() c.initTag()
c.initStatusFaveIDs()
c.initTombstone() c.initTombstone()
c.initUser() c.initUser()
c.initWebfinger() c.initWebfinger()
@ -87,98 +97,14 @@ func (c *GTSCaches) Init() {
// Start will attempt to start all of the gtsmodel caches, or panic. // Start will attempt to start all of the gtsmodel caches, or panic.
func (c *GTSCaches) Start() { func (c *GTSCaches) Start() {
tryStart(c.account, config.GetCacheGTSAccountSweepFreq())
tryStart(c.accountNote, config.GetCacheGTSAccountNoteSweepFreq())
tryStart(c.block, config.GetCacheGTSBlockSweepFreq())
tryUntil("starting block IDs cache", 5, func() bool {
if sweep := config.GetCacheGTSBlockIDsSweepFreq(); sweep > 0 {
return c.blockIDs.Start(sweep)
}
return true
})
tryStart(c.emoji, config.GetCacheGTSEmojiSweepFreq())
tryStart(c.emojiCategory, config.GetCacheGTSEmojiCategorySweepFreq())
tryStart(c.follow, config.GetCacheGTSFollowSweepFreq())
tryUntil("starting follow IDs cache", 5, func() bool {
if sweep := config.GetCacheGTSFollowIDsSweepFreq(); sweep > 0 {
return c.followIDs.Start(sweep)
}
return true
})
tryStart(c.followRequest, config.GetCacheGTSFollowRequestSweepFreq())
tryUntil("starting follow request IDs cache", 5, func() bool {
if sweep := config.GetCacheGTSFollowRequestIDsSweepFreq(); sweep > 0 {
return c.followRequestIDs.Start(sweep)
}
return true
})
tryStart(c.instance, config.GetCacheGTSInstanceSweepFreq())
tryStart(c.list, config.GetCacheGTSListSweepFreq())
tryStart(c.listEntry, config.GetCacheGTSListEntrySweepFreq())
tryStart(c.marker, config.GetCacheGTSMarkerSweepFreq())
tryStart(c.media, config.GetCacheGTSMediaSweepFreq())
tryStart(c.mention, config.GetCacheGTSMentionSweepFreq())
tryStart(c.notification, config.GetCacheGTSNotificationSweepFreq())
tryStart(c.report, config.GetCacheGTSReportSweepFreq())
tryStart(c.status, config.GetCacheGTSStatusSweepFreq())
tryStart(c.statusFave, config.GetCacheGTSStatusFaveSweepFreq())
tryStart(c.tag, config.GetCacheGTSTagSweepFreq())
tryStart(c.tombstone, config.GetCacheGTSTombstoneSweepFreq())
tryStart(c.user, config.GetCacheGTSUserSweepFreq())
tryUntil("starting *gtsmodel.Webfinger cache", 5, func() bool { tryUntil("starting *gtsmodel.Webfinger cache", 5, func() bool {
if sweep := config.GetCacheGTSWebfingerSweepFreq(); sweep > 0 { return c.webfinger.Start(5 * time.Minute)
return c.webfinger.Start(sweep)
}
return true
}) })
} }
// Stop will attempt to stop all of the gtsmodel caches, or panic. // Stop will attempt to stop all of the gtsmodel caches, or panic.
func (c *GTSCaches) Stop() { func (c *GTSCaches) Stop() {
tryStop(c.account, config.GetCacheGTSAccountSweepFreq()) tryUntil("stopping *gtsmodel.Webfinger cache", 5, c.webfinger.Stop)
tryStop(c.accountNote, config.GetCacheGTSAccountNoteSweepFreq())
tryStop(c.block, config.GetCacheGTSBlockSweepFreq())
tryUntil("stopping block IDs cache", 5, func() bool {
if config.GetCacheGTSBlockIDsSweepFreq() > 0 {
return c.blockIDs.Stop()
}
return true
})
tryStop(c.emoji, config.GetCacheGTSEmojiSweepFreq())
tryStop(c.emojiCategory, config.GetCacheGTSEmojiCategorySweepFreq())
tryStop(c.follow, config.GetCacheGTSFollowSweepFreq())
tryUntil("stopping follow IDs cache", 5, func() bool {
if config.GetCacheGTSFollowIDsSweepFreq() > 0 {
return c.followIDs.Stop()
}
return true
})
tryStop(c.followRequest, config.GetCacheGTSFollowRequestSweepFreq())
tryUntil("stopping follow request IDs cache", 5, func() bool {
if config.GetCacheGTSFollowRequestIDsSweepFreq() > 0 {
return c.followRequestIDs.Stop()
}
return true
})
tryStop(c.instance, config.GetCacheGTSInstanceSweepFreq())
tryStop(c.list, config.GetCacheGTSListSweepFreq())
tryStop(c.listEntry, config.GetCacheGTSListEntrySweepFreq())
tryStop(c.marker, config.GetCacheGTSMarkerSweepFreq())
tryStop(c.media, config.GetCacheGTSMediaSweepFreq())
tryStop(c.mention, config.GetCacheGTSNotificationSweepFreq())
tryStop(c.notification, config.GetCacheGTSNotificationSweepFreq())
tryStop(c.report, config.GetCacheGTSReportSweepFreq())
tryStop(c.status, config.GetCacheGTSStatusSweepFreq())
tryStop(c.statusFave, config.GetCacheGTSStatusFaveSweepFreq())
tryStop(c.tag, config.GetCacheGTSTagSweepFreq())
tryStop(c.tombstone, config.GetCacheGTSTombstoneSweepFreq())
tryStop(c.user, config.GetCacheGTSUserSweepFreq())
tryUntil("stopping *gtsmodel.Webfinger cache", 5, func() bool {
if config.GetCacheGTSWebfingerSweepFreq() > 0 {
return c.webfinger.Stop()
}
return true
})
} }
// Account provides access to the gtsmodel Account database cache. // Account provides access to the gtsmodel Account database cache.
@ -201,6 +127,11 @@ func (c *GTSCaches) BlockIDs() *SliceCache[string] {
return c.blockIDs return c.blockIDs
} }
// BoostOfIDs provides access to the boost of IDs list database cache.
func (c *GTSCaches) BoostOfIDs() *SliceCache[string] {
return c.boostOfIDs
}
// DomainBlock provides access to the domain block database cache. // DomainBlock provides access to the domain block database cache.
func (c *GTSCaches) DomainBlock() *domain.BlockCache { func (c *GTSCaches) DomainBlock() *domain.BlockCache {
return c.domainBlock return c.domainBlock
@ -249,6 +180,11 @@ func (c *GTSCaches) Instance() *result.Cache[*gtsmodel.Instance] {
return c.instance return c.instance
} }
// InReplyToIDs provides access to the status in reply to IDs list database cache.
func (c *GTSCaches) InReplyToIDs() *SliceCache[string] {
return c.inReplyToIDs
}
// List provides access to the gtsmodel List database cache. // List provides access to the gtsmodel List database cache.
func (c *GTSCaches) List() *result.Cache[*gtsmodel.List] { func (c *GTSCaches) List() *result.Cache[*gtsmodel.List] {
return c.list return c.list
@ -299,6 +235,11 @@ func (c *GTSCaches) Tag() *result.Cache[*gtsmodel.Tag] {
return c.tag return c.tag
} }
// StatusFaveIDs provides access to the status fave IDs list database cache.
func (c *GTSCaches) StatusFaveIDs() *SliceCache[string] {
return c.statusFaveIDs
}
// Tombstone provides access to the gtsmodel Tombstone database cache. // Tombstone provides access to the gtsmodel Tombstone database cache.
func (c *GTSCaches) Tombstone() *result.Cache[*gtsmodel.Tombstone] { func (c *GTSCaches) Tombstone() *result.Cache[*gtsmodel.Tombstone] {
return c.tombstone return c.tombstone
@ -315,11 +256,19 @@ func (c *GTSCaches) Webfinger() *ttl.Cache[string, string] {
} }
func (c *GTSCaches) initAccount() { func (c *GTSCaches) initAccount() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofAccount(), // model in-mem size.
config.GetCacheAccountMemRatio(),
)
log.Infof(nil, "Account cache size = %d", cap)
c.account = result.New([]result.Lookup{ c.account = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
{Name: "URL"}, {Name: "URL"},
{Name: "Username.Domain"}, {Name: "Username.Domain", AllowZero: true /* domain can be zero i.e. "" */},
{Name: "PublicKeyURI"}, {Name: "PublicKeyURI"},
{Name: "InboxURI"}, {Name: "InboxURI"},
{Name: "OutboxURI"}, {Name: "OutboxURI"},
@ -329,12 +278,19 @@ func (c *GTSCaches) initAccount() {
a2 := new(gtsmodel.Account) a2 := new(gtsmodel.Account)
*a2 = *a1 *a2 = *a1
return a2 return a2
}, config.GetCacheGTSAccountMaxSize()) }, cap)
c.account.SetTTL(config.GetCacheGTSAccountTTL(), true)
c.account.IgnoreErrors(ignoreErrors) c.account.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initAccountNote() { func (c *GTSCaches) initAccountNote() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofAccountNote(), // model in-mem size.
config.GetCacheAccountNoteMemRatio(),
)
log.Infof(nil, "AccountNote cache size = %d", cap)
c.accountNote = result.New([]result.Lookup{ c.accountNote = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "AccountID.TargetAccountID"}, {Name: "AccountID.TargetAccountID"},
@ -342,12 +298,20 @@ func (c *GTSCaches) initAccountNote() {
n2 := new(gtsmodel.AccountNote) n2 := new(gtsmodel.AccountNote)
*n2 = *n1 *n2 = *n1
return n2 return n2
}, config.GetCacheGTSAccountNoteMaxSize()) }, cap)
c.accountNote.SetTTL(config.GetCacheGTSAccountNoteTTL(), true)
c.accountNote.IgnoreErrors(ignoreErrors) c.accountNote.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initBlock() { func (c *GTSCaches) initBlock() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofBlock(), // model in-mem size.
config.GetCacheBlockMemRatio(),
)
log.Infof(nil, "Block cache size = %d", cap)
c.block = result.New([]result.Lookup{ c.block = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
@ -358,16 +322,36 @@ func (c *GTSCaches) initBlock() {
b2 := new(gtsmodel.Block) b2 := new(gtsmodel.Block)
*b2 = *b1 *b2 = *b1
return b2 return b2
}, config.GetCacheGTSBlockMaxSize()) }, cap)
c.block.SetTTL(config.GetCacheGTSBlockTTL(), true)
c.block.IgnoreErrors(ignoreErrors) c.block.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initBlockIDs() { func (c *GTSCaches) initBlockIDs() {
c.blockIDs = &SliceCache[string]{Cache: ttl.New[string, []string]( // Calculate maximum cache size.
cap := calculateSliceCacheMax(
config.GetCacheBlockIDsMemRatio(),
)
log.Infof(nil, "Block IDs cache size = %d", cap)
c.blockIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0, 0,
config.GetCacheGTSBlockIDsMaxSize(), cap,
config.GetCacheGTSBlockIDsTTL(), )}
}
func (c *GTSCaches) initBoostOfIDs() {
// Calculate maximum cache size.
cap := calculateSliceCacheMax(
config.GetCacheBoostOfIDsMemRatio(),
)
log.Infof(nil, "BoostofIDs cache size = %d", cap)
c.boostOfIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0,
cap,
)} )}
} }
@ -376,22 +360,38 @@ func (c *GTSCaches) initDomainBlock() {
} }
func (c *GTSCaches) initEmoji() { func (c *GTSCaches) initEmoji() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofEmoji(), // model in-mem size.
config.GetCacheEmojiMemRatio(),
)
log.Infof(nil, "Emoji cache size = %d", cap)
c.emoji = result.New([]result.Lookup{ c.emoji = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
{Name: "Shortcode.Domain"}, {Name: "Shortcode.Domain", AllowZero: true /* domain can be zero i.e. "" */},
{Name: "ImageStaticURL"}, {Name: "ImageStaticURL"},
{Name: "CategoryID", Multi: true}, {Name: "CategoryID", Multi: true},
}, func(e1 *gtsmodel.Emoji) *gtsmodel.Emoji { }, func(e1 *gtsmodel.Emoji) *gtsmodel.Emoji {
e2 := new(gtsmodel.Emoji) e2 := new(gtsmodel.Emoji)
*e2 = *e1 *e2 = *e1
return e2 return e2
}, config.GetCacheGTSEmojiMaxSize()) }, cap)
c.emoji.SetTTL(config.GetCacheGTSEmojiTTL(), true)
c.emoji.IgnoreErrors(ignoreErrors) c.emoji.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initEmojiCategory() { func (c *GTSCaches) initEmojiCategory() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofEmojiCategory(), // model in-mem size.
config.GetCacheEmojiCategoryMemRatio(),
)
log.Infof(nil, "EmojiCategory cache size = %d", cap)
c.emojiCategory = result.New([]result.Lookup{ c.emojiCategory = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "Name"}, {Name: "Name"},
@ -399,12 +399,20 @@ func (c *GTSCaches) initEmojiCategory() {
c2 := new(gtsmodel.EmojiCategory) c2 := new(gtsmodel.EmojiCategory)
*c2 = *c1 *c2 = *c1
return c2 return c2
}, config.GetCacheGTSEmojiCategoryMaxSize()) }, cap)
c.emojiCategory.SetTTL(config.GetCacheGTSEmojiCategoryTTL(), true)
c.emojiCategory.IgnoreErrors(ignoreErrors) c.emojiCategory.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initFollow() { func (c *GTSCaches) initFollow() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofFollow(), // model in-mem size.
config.GetCacheFollowMemRatio(),
)
log.Infof(nil, "Follow cache size = %d", cap)
c.follow = result.New([]result.Lookup{ c.follow = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
@ -415,19 +423,34 @@ func (c *GTSCaches) initFollow() {
f2 := new(gtsmodel.Follow) f2 := new(gtsmodel.Follow)
*f2 = *f1 *f2 = *f1
return f2 return f2
}, config.GetCacheGTSFollowMaxSize()) }, cap)
c.follow.SetTTL(config.GetCacheGTSFollowTTL(), true)
c.follow.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initFollowIDs() { func (c *GTSCaches) initFollowIDs() {
c.followIDs = &SliceCache[string]{Cache: ttl.New[string, []string]( // Calculate maximum cache size.
cap := calculateSliceCacheMax(
config.GetCacheFollowIDsMemRatio(),
)
log.Infof(nil, "Follow IDs cache size = %d", cap)
c.followIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0, 0,
config.GetCacheGTSFollowIDsMaxSize(), cap,
config.GetCacheGTSFollowIDsTTL(),
)} )}
} }
func (c *GTSCaches) initFollowRequest() { func (c *GTSCaches) initFollowRequest() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofFollowRequest(), // model in-mem size.
config.GetCacheFollowRequestMemRatio(),
)
log.Infof(nil, "FollowRequest cache size = %d", cap)
c.followRequest = result.New([]result.Lookup{ c.followRequest = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
@ -438,19 +461,48 @@ func (c *GTSCaches) initFollowRequest() {
f2 := new(gtsmodel.FollowRequest) f2 := new(gtsmodel.FollowRequest)
*f2 = *f1 *f2 = *f1
return f2 return f2
}, config.GetCacheGTSFollowRequestMaxSize()) }, cap)
c.followRequest.SetTTL(config.GetCacheGTSFollowRequestTTL(), true)
c.followRequest.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initFollowRequestIDs() { func (c *GTSCaches) initFollowRequestIDs() {
c.followRequestIDs = &SliceCache[string]{Cache: ttl.New[string, []string]( // Calculate maximum cache size.
cap := calculateSliceCacheMax(
config.GetCacheFollowRequestIDsMemRatio(),
)
log.Infof(nil, "Follow Request IDs cache size = %d", cap)
c.followRequestIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0, 0,
config.GetCacheGTSFollowRequestIDsMaxSize(), cap,
config.GetCacheGTSFollowRequestIDsTTL(), )}
}
func (c *GTSCaches) initInReplyToIDs() {
// Calculate maximum cache size.
cap := calculateSliceCacheMax(
config.GetCacheInReplyToIDsMemRatio(),
)
log.Infof(nil, "InReplyTo IDs cache size = %d", cap)
c.inReplyToIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0,
cap,
)} )}
} }
func (c *GTSCaches) initInstance() { func (c *GTSCaches) initInstance() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofInstance(), // model in-mem size.
config.GetCacheInstanceMemRatio(),
)
log.Infof(nil, "Instance cache size = %d", cap)
c.instance = result.New([]result.Lookup{ c.instance = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "Domain"}, {Name: "Domain"},
@ -458,24 +510,40 @@ func (c *GTSCaches) initInstance() {
i2 := new(gtsmodel.Instance) i2 := new(gtsmodel.Instance)
*i2 = *i1 *i2 = *i1
return i1 return i1
}, config.GetCacheGTSInstanceMaxSize()) }, cap)
c.instance.SetTTL(config.GetCacheGTSInstanceTTL(), true)
c.emojiCategory.IgnoreErrors(ignoreErrors) c.instance.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initList() { func (c *GTSCaches) initList() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofList(), // model in-mem size.
config.GetCacheListMemRatio(),
)
log.Infof(nil, "List cache size = %d", cap)
c.list = result.New([]result.Lookup{ c.list = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
}, func(l1 *gtsmodel.List) *gtsmodel.List { }, func(l1 *gtsmodel.List) *gtsmodel.List {
l2 := new(gtsmodel.List) l2 := new(gtsmodel.List)
*l2 = *l1 *l2 = *l1
return l2 return l2
}, config.GetCacheGTSListMaxSize()) }, cap)
c.list.SetTTL(config.GetCacheGTSListTTL(), true)
c.list.IgnoreErrors(ignoreErrors) c.list.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initListEntry() { func (c *GTSCaches) initListEntry() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofListEntry(), // model in-mem size.
config.GetCacheListEntryMemRatio(),
)
log.Infof(nil, "ListEntry cache size = %d", cap)
c.listEntry = result.New([]result.Lookup{ c.listEntry = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "ListID", Multi: true}, {Name: "ListID", Multi: true},
@ -484,48 +552,80 @@ func (c *GTSCaches) initListEntry() {
l2 := new(gtsmodel.ListEntry) l2 := new(gtsmodel.ListEntry)
*l2 = *l1 *l2 = *l1
return l2 return l2
}, config.GetCacheGTSListEntryMaxSize()) }, cap)
c.list.SetTTL(config.GetCacheGTSListEntryTTL(), true)
c.list.IgnoreErrors(ignoreErrors) c.listEntry.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initMarker() { func (c *GTSCaches) initMarker() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofMarker(), // model in-mem size.
config.GetCacheMarkerMemRatio(),
)
log.Infof(nil, "Marker cache size = %d", cap)
c.marker = result.New([]result.Lookup{ c.marker = result.New([]result.Lookup{
{Name: "AccountID.Name"}, {Name: "AccountID.Name"},
}, func(m1 *gtsmodel.Marker) *gtsmodel.Marker { }, func(m1 *gtsmodel.Marker) *gtsmodel.Marker {
m2 := new(gtsmodel.Marker) m2 := new(gtsmodel.Marker)
*m2 = *m1 *m2 = *m1
return m2 return m2
}, config.GetCacheGTSMarkerMaxSize()) }, cap)
c.marker.SetTTL(config.GetCacheGTSMarkerTTL(), true)
c.marker.IgnoreErrors(ignoreErrors) c.marker.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initMedia() { func (c *GTSCaches) initMedia() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofMedia(), // model in-mem size.
config.GetCacheMediaMemRatio(),
)
log.Infof(nil, "Media cache size = %d", cap)
c.media = result.New([]result.Lookup{ c.media = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
}, func(m1 *gtsmodel.MediaAttachment) *gtsmodel.MediaAttachment { }, func(m1 *gtsmodel.MediaAttachment) *gtsmodel.MediaAttachment {
m2 := new(gtsmodel.MediaAttachment) m2 := new(gtsmodel.MediaAttachment)
*m2 = *m1 *m2 = *m1
return m2 return m2
}, config.GetCacheGTSMediaMaxSize()) }, cap)
c.media.SetTTL(config.GetCacheGTSMediaTTL(), true)
c.media.IgnoreErrors(ignoreErrors) c.media.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initMention() { func (c *GTSCaches) initMention() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofMention(), // model in-mem size.
config.GetCacheMentionMemRatio(),
)
log.Infof(nil, "Mention cache size = %d", cap)
c.mention = result.New([]result.Lookup{ c.mention = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
}, func(m1 *gtsmodel.Mention) *gtsmodel.Mention { }, func(m1 *gtsmodel.Mention) *gtsmodel.Mention {
m2 := new(gtsmodel.Mention) m2 := new(gtsmodel.Mention)
*m2 = *m1 *m2 = *m1
return m2 return m2
}, config.GetCacheGTSMentionMaxSize()) }, cap)
c.mention.SetTTL(config.GetCacheGTSMentionTTL(), true)
c.mention.IgnoreErrors(ignoreErrors) c.mention.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initNotification() { func (c *GTSCaches) initNotification() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofNotification(), // model in-mem size.
config.GetCacheNotificationMemRatio(),
)
log.Infof(nil, "Notification cache size = %d", cap)
c.notification = result.New([]result.Lookup{ c.notification = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "NotificationType.TargetAccountID.OriginAccountID.StatusID"}, {Name: "NotificationType.TargetAccountID.OriginAccountID.StatusID"},
@ -533,51 +633,99 @@ func (c *GTSCaches) initNotification() {
n2 := new(gtsmodel.Notification) n2 := new(gtsmodel.Notification)
*n2 = *n1 *n2 = *n1
return n2 return n2
}, config.GetCacheGTSNotificationMaxSize()) }, cap)
c.notification.SetTTL(config.GetCacheGTSNotificationTTL(), true)
c.notification.IgnoreErrors(ignoreErrors) c.notification.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initReport() { func (c *GTSCaches) initReport() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofReport(), // model in-mem size.
config.GetCacheReportMemRatio(),
)
log.Infof(nil, "Report cache size = %d", cap)
c.report = result.New([]result.Lookup{ c.report = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
}, func(r1 *gtsmodel.Report) *gtsmodel.Report { }, func(r1 *gtsmodel.Report) *gtsmodel.Report {
r2 := new(gtsmodel.Report) r2 := new(gtsmodel.Report)
*r2 = *r1 *r2 = *r1
return r2 return r2
}, config.GetCacheGTSReportMaxSize()) }, cap)
c.report.SetTTL(config.GetCacheGTSReportTTL(), true)
c.report.IgnoreErrors(ignoreErrors) c.report.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initStatus() { func (c *GTSCaches) initStatus() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofStatus(), // model in-mem size.
config.GetCacheStatusMemRatio(),
)
log.Infof(nil, "Status cache size = %d", cap)
c.status = result.New([]result.Lookup{ c.status = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
{Name: "URL"}, {Name: "URL"},
{Name: "BoostOfID.AccountID"},
}, func(s1 *gtsmodel.Status) *gtsmodel.Status { }, func(s1 *gtsmodel.Status) *gtsmodel.Status {
s2 := new(gtsmodel.Status) s2 := new(gtsmodel.Status)
*s2 = *s1 *s2 = *s1
return s2 return s2
}, config.GetCacheGTSStatusMaxSize()) }, cap)
c.status.SetTTL(config.GetCacheGTSStatusTTL(), true)
c.status.IgnoreErrors(ignoreErrors) c.status.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initStatusFave() { func (c *GTSCaches) initStatusFave() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofStatusFave(), // model in-mem size.
config.GetCacheStatusFaveMemRatio(),
)
log.Infof(nil, "StatusFave cache size = %d", cap)
c.statusFave = result.New([]result.Lookup{ c.statusFave = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "AccountID.StatusID"}, {Name: "AccountID.StatusID"},
{Name: "StatusID", Multi: true},
}, func(f1 *gtsmodel.StatusFave) *gtsmodel.StatusFave { }, func(f1 *gtsmodel.StatusFave) *gtsmodel.StatusFave {
f2 := new(gtsmodel.StatusFave) f2 := new(gtsmodel.StatusFave)
*f2 = *f1 *f2 = *f1
return f2 return f2
}, config.GetCacheGTSStatusFaveMaxSize()) }, cap)
c.status.SetTTL(config.GetCacheGTSStatusFaveTTL(), true)
c.status.IgnoreErrors(ignoreErrors) c.statusFave.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initStatusFaveIDs() {
// Calculate maximum cache size.
cap := calculateSliceCacheMax(
config.GetCacheStatusFaveIDsMemRatio(),
)
log.Infof(nil, "StatusFave IDs cache size = %d", cap)
c.statusFaveIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0,
cap,
)}
} }
func (c *GTSCaches) initTag() { func (c *GTSCaches) initTag() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofTag(), // model in-mem size.
config.GetCacheTagMemRatio(),
)
log.Infof(nil, "Tag cache size = %d", cap)
c.tag = result.New([]result.Lookup{ c.tag = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "Name"}, {Name: "Name"},
@ -585,12 +733,20 @@ func (c *GTSCaches) initTag() {
m2 := new(gtsmodel.Tag) m2 := new(gtsmodel.Tag)
*m2 = *m1 *m2 = *m1
return m2 return m2
}, config.GetCacheGTSTagMaxSize()) }, cap)
c.tag.SetTTL(config.GetCacheGTSTagTTL(), true)
c.tag.IgnoreErrors(ignoreErrors) c.tag.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initTombstone() { func (c *GTSCaches) initTombstone() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofTombstone(), // model in-mem size.
config.GetCacheTombstoneMemRatio(),
)
log.Infof(nil, "Tombstone cache size = %d", cap)
c.tombstone = result.New([]result.Lookup{ c.tombstone = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "URI"}, {Name: "URI"},
@ -598,12 +754,20 @@ func (c *GTSCaches) initTombstone() {
t2 := new(gtsmodel.Tombstone) t2 := new(gtsmodel.Tombstone)
*t2 = *t1 *t2 = *t1
return t2 return t2
}, config.GetCacheGTSTombstoneMaxSize()) }, cap)
c.tombstone.SetTTL(config.GetCacheGTSTombstoneTTL(), true)
c.tombstone.IgnoreErrors(ignoreErrors) c.tombstone.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initUser() { func (c *GTSCaches) initUser() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofUser(), // model in-mem size.
config.GetCacheUserMemRatio(),
)
log.Infof(nil, "User cache size = %d", cap)
c.user = result.New([]result.Lookup{ c.user = result.New([]result.Lookup{
{Name: "ID"}, {Name: "ID"},
{Name: "AccountID"}, {Name: "AccountID"},
@ -614,15 +778,23 @@ func (c *GTSCaches) initUser() {
u2 := new(gtsmodel.User) u2 := new(gtsmodel.User)
*u2 = *u1 *u2 = *u1
return u2 return u2
}, config.GetCacheGTSUserMaxSize()) }, cap)
c.user.SetTTL(config.GetCacheGTSUserTTL(), true)
c.user.IgnoreErrors(ignoreErrors) c.user.IgnoreErrors(ignoreErrors)
} }
func (c *GTSCaches) initWebfinger() { func (c *GTSCaches) initWebfinger() {
// Calculate maximum cache size.
cap := calculateCacheMax(
sizeofURIStr, sizeofURIStr,
config.GetCacheWebfingerMemRatio(),
)
log.Infof(nil, "Webfinger cache size = %d", cap)
c.webfinger = ttl.New[string, string]( c.webfinger = ttl.New[string, string](
0, 0,
config.GetCacheGTSWebfingerMaxSize(), cap,
config.GetCacheGTSWebfingerTTL(), 24*time.Hour,
) )
} }

501
internal/cache/size.go vendored Normal file
View file

@ -0,0 +1,501 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cache
import (
"crypto/rsa"
"time"
"unsafe"
"codeberg.org/gruf/go-cache/v3/simple"
"github.com/DmitriyVTitov/size"
"github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
)
// Example data values and derived size constants used when estimating
// per-model memory footprints for cache capacity calculation.
const (
	// example data values.
	exampleID        = id.Highest
	exampleURI       = "https://social.bbc/users/ItsMePrinceCharlesInit"
	exampleText      = `
oh no me nan's gone and done it :shocked:
she fuckin killed the king :regicide:
nan what have you done :shocked:
no nan put down the knife, don't go after the landlords next! :knife:
you'll make society more equitable for all if you're not careful! :hammer_sickle:
#JustNanProblems #WhatWillSheDoNext #MaybeItWasntSuchABadThingAfterAll
`
	exampleTextSmall = "Small problem lads, me nan's gone on a bit of a rampage"
	exampleUsername  = "@SexHaver1969"

	// ID string size in memory (is always 26 char ULID).
	// NOTE(review): unsafe.Sizeof on a string constant yields only the
	// 16-byte string header, not the 26 bytes of character data, so this
	// (and sizeofURIStr below) under-counts string payloads — confirm
	// this is intended, since the default mem ratios were tuned around it.
	sizeofIDStr = unsafe.Sizeof(exampleID)

	// URI string size in memory (use some random example URI).
	sizeofURIStr = unsafe.Sizeof(exampleURI)

	// ID slice size in memory (using some estimate of length = 250).
	sizeofIDSlice = unsafe.Sizeof([]string{}) + 250*sizeofIDStr

	// result cache key size estimate which is tricky. it can
	// be a serialized string of almost any type, so we pick a
	// nice serialized key size on the upper end of normal.
	sizeofResultKey = 2 * sizeofIDStr
)
// calculateSliceCacheMax calculates the maximum capacity for a slice cache with given individual ratio.
// The per-entry cost is modelled as one ID-string key mapping to a
// slice of ~250 ID strings (see sizeofIDSlice above).
func calculateSliceCacheMax(ratio float64) int {
	return calculateCacheMax(sizeofIDStr, sizeofIDSlice, ratio)
}
// calculateResultCacheMax calculates the maximum cache capacity for a result
// cache's individual ratio number, and the size of the struct model in memory.
func calculateResultCacheMax(structSz uintptr, ratio float64) int {
	// Assume a worst-case scenario of extra lookup hash maps,
	// where 'lookups' is the number of "keys" each cached result
	// may be found under.
	const lookups = 10

	// Overheads added by the extra per-result lookup map entries.
	lookupKeySz := uintptr(lookups) * sizeofResultKey
	lookupValSz := uintptr(lookups) * unsafe.Sizeof(uint64(0))

	// Primary cache key / value sizes.
	primaryKeySz := unsafe.Sizeof(uint64(0))
	primaryValSz := structSz

	// The result cache wraps each struct result in a wrapping
	// struct with further information, and possible error. This
	// also needs to be taken into account when calculating value.
	const resultValueOverhead = unsafe.Sizeof(&struct {
		_ int64
		_ []any
		_ any
		_ error
	}{})

	return calculateCacheMax(
		primaryKeySz+lookupKeySz,
		primaryValSz+lookupValSz+resultValueOverhead,
		ratio,
	)
}
// calculateCacheMax calculates the maximum cache capacity for a cache's
// individual ratio number, and key + value object sizes in memory.
func calculateCacheMax(keySz, valSz uintptr, ratio float64) int {
	if ratio < 0 {
		// Negative ratios are a secret little trick
		// to manually set the cache capacity sizes.
		return int(-ratio)
	}

	// Rough per-entry overhead of the runtime hashmap buckets,
	// see: https://golang.org/src/runtime/map.go
	const emptyBucketOverhead = 10.79

	// This takes into account (roughly) that the underlying simple cache library wraps
	// elements within a simple.Entry{}, and the ordered map wraps each in a linked list elem.
	const cacheElemOverhead = unsafe.Sizeof(simple.Entry{}) + unsafe.Sizeof(struct {
		key, value interface{}
		next, prev uintptr
	}{})

	// The inputted memory ratio does not take into account the
	// total of all ratios, so divide it here to get perc. ratio.
	share := ratio / totalOfRatios()

	// TODO: we should also further weight this ratio depending
	// on the combined keySz + valSz as a ratio of all available
	// cache model memories. otherwise you can end up with a
	// low-ratio cache of tiny models with larger capacity than
	// a high-ratio cache of large models.

	// This cache's slice of the total target memory, derived
	// by multiplying the target by this cache's perc. ratio.
	memShare := float64(config.GetCacheMemoryTarget()) * share

	// Estimated memory footprint per cached element, calculated
	// from the internal cache map size:
	// (($keysz + $valsz) * $len) + ($len * $allOverheads) = $memSz
	perElem := float64(keySz) + float64(valSz) + emptyBucketOverhead + float64(cacheElemOverhead)

	return int(memShare / perElem)
}
// totalOfRatios returns the total of all cache ratios added together.
// The result is used to normalize each cache's individual ratio into a
// percentage share of the configured total cache memory target.
func totalOfRatios() float64 {
	// NOTE: this is not performant calculating
	// this every damn time (mainly the mutex unlocks
	// required to access each config var). fortunately
	// we only do this on init so fuck it :D
	return 0 +
		config.GetCacheAccountMemRatio() +
		config.GetCacheAccountNoteMemRatio() +
		config.GetCacheBlockMemRatio() +
		config.GetCacheBlockIDsMemRatio() +
		config.GetCacheEmojiMemRatio() +
		config.GetCacheEmojiCategoryMemRatio() +
		config.GetCacheFollowMemRatio() +
		config.GetCacheFollowIDsMemRatio() +
		config.GetCacheFollowRequestMemRatio() +
		config.GetCacheFollowRequestIDsMemRatio() +
		config.GetCacheInstanceMemRatio() +
		config.GetCacheListMemRatio() +
		config.GetCacheListEntryMemRatio() +
		config.GetCacheMarkerMemRatio() +
		config.GetCacheMediaMemRatio() +
		config.GetCacheMentionMemRatio() +
		config.GetCacheNotificationMemRatio() +
		config.GetCacheReportMemRatio() +
		config.GetCacheStatusMemRatio() +
		config.GetCacheStatusFaveMemRatio() +
		config.GetCacheTagMemRatio() +
		config.GetCacheTombstoneMemRatio() +
		config.GetCacheUserMemRatio() +
		config.GetCacheWebfingerMemRatio() +
		config.GetCacheVisibilityMemRatio()
}
func sizeofAccount() uintptr {
return uintptr(size.Of(&gtsmodel.Account{
ID: exampleID,
Username: exampleUsername,
AvatarMediaAttachmentID: exampleID,
HeaderMediaAttachmentID: exampleID,
DisplayName: exampleUsername,
Note: exampleText,
NoteRaw: exampleText,
Memorial: func() *bool { ok := false; return &ok }(),
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
FetchedAt: time.Now(),
Bot: func() *bool { ok := true; return &ok }(),
Locked: func() *bool { ok := true; return &ok }(),
Discoverable: func() *bool { ok := false; return &ok }(),
Privacy: gtsmodel.VisibilityFollowersOnly,
Sensitive: func() *bool { ok := true; return &ok }(),
Language: "fr",
URI: exampleURI,
URL: exampleURI,
InboxURI: exampleURI,
OutboxURI: exampleURI,
FollowersURI: exampleURI,
FollowingURI: exampleURI,
FeaturedCollectionURI: exampleURI,
ActorType: ap.ActorPerson,
PrivateKey: &rsa.PrivateKey{},
PublicKey: &rsa.PublicKey{},
PublicKeyURI: exampleURI,
SensitizedAt: time.Time{},
SilencedAt: time.Now(),
SuspendedAt: time.Now(),
HideCollections: func() *bool { ok := true; return &ok }(),
SuspensionOrigin: "",
EnableRSS: func() *bool { ok := true; return &ok }(),
}))
}
func sizeofAccountNote() uintptr {
return uintptr(size.Of(&gtsmodel.AccountNote{
ID: exampleID,
AccountID: exampleID,
TargetAccountID: exampleID,
Comment: exampleTextSmall,
}))
}
func sizeofBlock() uintptr {
return uintptr(size.Of(&gtsmodel.Block{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
URI: exampleURI,
AccountID: exampleID,
TargetAccountID: exampleID,
}))
}
func sizeofEmoji() uintptr {
return uintptr(size.Of(&gtsmodel.Emoji{
ID: exampleID,
Shortcode: exampleTextSmall,
Domain: exampleURI,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
ImageRemoteURL: exampleURI,
ImageStaticRemoteURL: exampleURI,
ImageURL: exampleURI,
ImagePath: exampleURI,
ImageStaticURL: exampleURI,
ImageStaticPath: exampleURI,
ImageContentType: "image/png",
ImageStaticContentType: "image/png",
ImageUpdatedAt: time.Now(),
Disabled: func() *bool { ok := false; return &ok }(),
URI: "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ",
VisibleInPicker: func() *bool { ok := true; return &ok }(),
CategoryID: "01GGQ8V4993XK67B2JB396YFB7",
Cached: func() *bool { ok := true; return &ok }(),
}))
}
func sizeofEmojiCategory() uintptr {
return uintptr(size.Of(&gtsmodel.EmojiCategory{
ID: exampleID,
Name: exampleUsername,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}))
}
func sizeofFollow() uintptr {
return uintptr(size.Of(&gtsmodel.Follow{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
AccountID: exampleID,
TargetAccountID: exampleID,
ShowReblogs: func() *bool { ok := true; return &ok }(),
URI: exampleURI,
Notify: func() *bool { ok := false; return &ok }(),
}))
}
func sizeofFollowRequest() uintptr {
return uintptr(size.Of(&gtsmodel.FollowRequest{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
AccountID: exampleID,
TargetAccountID: exampleID,
ShowReblogs: func() *bool { ok := true; return &ok }(),
URI: exampleURI,
Notify: func() *bool { ok := false; return &ok }(),
}))
}
func sizeofInstance() uintptr {
return uintptr(size.Of(&gtsmodel.Instance{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Domain: exampleURI,
URI: exampleURI,
Title: exampleTextSmall,
ShortDescription: exampleText,
Description: exampleText,
ContactEmail: exampleUsername,
ContactAccountUsername: exampleUsername,
ContactAccountID: exampleID,
}))
}
func sizeofList() uintptr {
return uintptr(size.Of(&gtsmodel.List{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: exampleTextSmall,
AccountID: exampleID,
RepliesPolicy: gtsmodel.RepliesPolicyFollowed,
}))
}
func sizeofListEntry() uintptr {
return uintptr(size.Of(&gtsmodel.ListEntry{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
ListID: exampleID,
FollowID: exampleID,
}))
}
func sizeofMarker() uintptr {
return uintptr(size.Of(&gtsmodel.Marker{
AccountID: exampleID,
Name: gtsmodel.MarkerNameHome,
UpdatedAt: time.Now(),
Version: 0,
LastReadID: exampleID,
}))
}
func sizeofMedia() uintptr {
return uintptr(size.Of(&gtsmodel.MediaAttachment{
ID: exampleID,
StatusID: exampleID,
URL: exampleURI,
RemoteURL: exampleURI,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Type: gtsmodel.FileTypeImage,
AccountID: exampleID,
Description: exampleText,
ScheduledStatusID: exampleID,
Blurhash: exampleTextSmall,
File: gtsmodel.File{
Path: exampleURI,
ContentType: "image/jpeg",
UpdatedAt: time.Now(),
},
Thumbnail: gtsmodel.Thumbnail{
Path: exampleURI,
ContentType: "image/jpeg",
UpdatedAt: time.Now(),
URL: exampleURI,
RemoteURL: exampleURI,
},
Avatar: func() *bool { ok := false; return &ok }(),
Header: func() *bool { ok := false; return &ok }(),
Cached: func() *bool { ok := true; return &ok }(),
}))
}
func sizeofMention() uintptr {
return uintptr(size.Of(&gtsmodel.Mention{
ID: exampleURI,
StatusID: exampleURI,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
OriginAccountID: exampleURI,
OriginAccountURI: exampleURI,
TargetAccountID: exampleID,
NameString: exampleUsername,
TargetAccountURI: exampleURI,
TargetAccountURL: exampleURI,
}))
}
func sizeofNotification() uintptr {
return uintptr(size.Of(&gtsmodel.Notification{
ID: exampleID,
NotificationType: gtsmodel.NotificationFave,
CreatedAt: time.Now(),
TargetAccountID: exampleID,
OriginAccountID: exampleID,
StatusID: exampleID,
Read: func() *bool { ok := false; return &ok }(),
}))
}
func sizeofReport() uintptr {
return uintptr(size.Of(&gtsmodel.Report{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
URI: exampleURI,
AccountID: exampleID,
TargetAccountID: exampleID,
Comment: exampleText,
StatusIDs: []string{exampleID, exampleID, exampleID},
Forwarded: func() *bool { ok := true; return &ok }(),
ActionTaken: exampleText,
ActionTakenAt: time.Now(),
ActionTakenByAccountID: exampleID,
}))
}
func sizeofStatus() uintptr {
return uintptr(size.Of(&gtsmodel.Status{
ID: exampleURI,
URI: exampleURI,
URL: exampleURI,
Content: exampleText,
Text: exampleText,
AttachmentIDs: []string{exampleID, exampleID, exampleID},
TagIDs: []string{exampleID, exampleID, exampleID},
MentionIDs: []string{},
EmojiIDs: []string{exampleID, exampleID, exampleID},
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
FetchedAt: time.Now(),
Local: func() *bool { ok := false; return &ok }(),
AccountURI: exampleURI,
AccountID: exampleID,
InReplyToID: exampleID,
InReplyToURI: exampleURI,
InReplyToAccountID: exampleID,
BoostOfID: exampleID,
BoostOfAccountID: exampleID,
ContentWarning: exampleUsername, // similar length
Visibility: gtsmodel.VisibilityPublic,
Sensitive: func() *bool { ok := false; return &ok }(),
Language: "en",
CreatedWithApplicationID: exampleID,
Federated: func() *bool { ok := true; return &ok }(),
Boostable: func() *bool { ok := true; return &ok }(),
Replyable: func() *bool { ok := true; return &ok }(),
Likeable: func() *bool { ok := true; return &ok }(),
ActivityStreamsType: ap.ObjectNote,
}))
}
func sizeofStatusFave() uintptr {
return uintptr(size.Of(&gtsmodel.StatusFave{
ID: exampleID,
CreatedAt: time.Now(),
AccountID: exampleID,
TargetAccountID: exampleID,
StatusID: exampleID,
URI: exampleURI,
}))
}
func sizeofTag() uintptr {
return uintptr(size.Of(&gtsmodel.Tag{
ID: exampleID,
Name: exampleUsername,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Useable: func() *bool { ok := true; return &ok }(),
Listable: func() *bool { ok := true; return &ok }(),
}))
}
func sizeofTombstone() uintptr {
return uintptr(size.Of(&gtsmodel.Tombstone{
ID: exampleID,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Domain: exampleUsername,
URI: exampleURI,
}))
}
func sizeofVisibility() uintptr {
return uintptr(size.Of(&CachedVisibility{
ItemID: exampleID,
RequesterID: exampleID,
Type: VisibilityTypeAccount,
Value: false,
}))
}
// sizeofUser returns a rough in-memory size estimate of a User model.
// A zero-value model is used here, unlike the other sizeof functions;
// NOTE(review): presumably intentional, but confirm — string fields
// contribute only their headers to the estimate.
func sizeofUser() uintptr {
	return uintptr(size.Of(&gtsmodel.User{}))
}

View file

@ -18,14 +18,14 @@
package cache package cache
import ( import (
"codeberg.org/gruf/go-cache/v3/ttl" "codeberg.org/gruf/go-cache/v3/simple"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
// SliceCache wraps a ttl.Cache to provide simple loader-callback // SliceCache wraps a ttl.Cache to provide simple loader-callback
// functions for fetching + caching slices of objects (e.g. IDs). // functions for fetching + caching slices of objects (e.g. IDs).
type SliceCache[T any] struct { type SliceCache[T any] struct {
*ttl.Cache[string, []T] *simple.Cache[string, []T]
} }
// Load will attempt to load an existing slice from the cache for the given key, else calling the provided load function and caching the result. // Load will attempt to load an existing slice from the cache for the given key, else calling the provided load function and caching the result.

View file

@ -20,10 +20,8 @@ package cache
import ( import (
"database/sql" "database/sql"
"errors" "errors"
"fmt"
"time" "time"
"codeberg.org/gruf/go-cache/v3/result"
errorsv2 "codeberg.org/gruf/go-errors/v2" errorsv2 "codeberg.org/gruf/go-errors/v2"
"github.com/superseriousbusiness/gotosocial/internal/db" "github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/log" "github.com/superseriousbusiness/gotosocial/internal/log"
@ -56,26 +54,6 @@ func (*nocopy) Lock() {}
func (*nocopy) Unlock() {} func (*nocopy) Unlock() {}
// tryStart will attempt to start the given cache only if sweep duration > 0 (sweeping is enabled).
func tryStart[ValueType any](cache *result.Cache[ValueType], sweep time.Duration) {
if sweep > 0 {
var z ValueType
msg := fmt.Sprintf("starting %T cache", z)
tryUntil(msg, 5, func() bool {
return cache.Start(sweep)
})
}
}
// tryStop will attempt to stop the given cache only if sweep duration > 0 (sweeping is enabled).
func tryStop[ValueType any](cache *result.Cache[ValueType], sweep time.Duration) {
if sweep > 0 {
var z ValueType
msg := fmt.Sprintf("stopping %T cache", z)
tryUntil(msg, 5, cache.Stop)
}
}
// tryUntil will attempt to call 'do' for 'count' attempts, before panicking with 'msg'. // tryUntil will attempt to call 'do' for 'count' attempts, before panicking with 'msg'.
func tryUntil(msg string, count int, do func() bool) { func tryUntil(msg string, count int, do func() bool) {
for i := 0; i < count; i++ { for i := 0; i < count; i++ {

View file

@ -20,6 +20,7 @@ package cache
import ( import (
"codeberg.org/gruf/go-cache/v3/result" "codeberg.org/gruf/go-cache/v3/result"
"github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/log"
) )
type VisibilityCache struct { type VisibilityCache struct {
@ -29,6 +30,14 @@ type VisibilityCache struct {
// Init will initialize the visibility cache in this collection. // Init will initialize the visibility cache in this collection.
// NOTE: the cache MUST NOT be in use anywhere, this is not thread-safe. // NOTE: the cache MUST NOT be in use anywhere, this is not thread-safe.
func (c *VisibilityCache) Init() { func (c *VisibilityCache) Init() {
// Calculate maximum cache size.
cap := calculateResultCacheMax(
sizeofVisibility(), // model in-mem size.
config.GetCacheVisibilityMemRatio(),
)
log.Infof(nil, "Visibility cache size = %d", cap)
c.Cache = result.New([]result.Lookup{ c.Cache = result.New([]result.Lookup{
{Name: "ItemID", Multi: true}, {Name: "ItemID", Multi: true},
{Name: "RequesterID", Multi: true}, {Name: "RequesterID", Multi: true},
@ -37,19 +46,17 @@ func (c *VisibilityCache) Init() {
v2 := new(CachedVisibility) v2 := new(CachedVisibility)
*v2 = *v1 *v2 = *v1
return v2 return v2
}, config.GetCacheVisibilityMaxSize()) }, cap)
c.Cache.SetTTL(config.GetCacheVisibilityTTL(), true)
c.Cache.IgnoreErrors(ignoreErrors) c.Cache.IgnoreErrors(ignoreErrors)
} }
// Start will attempt to start the visibility cache, or panic. // Start will attempt to start the visibility cache, or panic.
func (c *VisibilityCache) Start() { func (c *VisibilityCache) Start() {
tryStart(c.Cache, config.GetCacheVisibilitySweepFreq())
} }
// Stop will attempt to stop the visibility cache, or panic. // Stop will attempt to stop the visibility cache, or panic.
func (c *VisibilityCache) Stop() { func (c *VisibilityCache) Stop() {
tryStop(c.Cache, config.GetCacheVisibilitySweepFreq())
} }
// VisibilityType represents a visibility lookup type. // VisibilityType represents a visibility lookup type.

View file

@ -83,19 +83,23 @@ func (c *Cleaner) removeFiles(ctx context.Context, files ...string) (int, error)
return len(files), nil return len(files), nil
} }
var errs gtserror.MultiError var (
errs gtserror.MultiError
errCount int
)
for _, path := range files { for _, path := range files {
// Remove each provided storage path. // Remove each provided storage path.
log.Debugf(ctx, "removing file: %s", path) log.Debugf(ctx, "removing file: %s", path)
err := c.state.Storage.Delete(ctx, path) err := c.state.Storage.Delete(ctx, path)
if err != nil && !errors.Is(err, storage.ErrNotFound) { if err != nil && !errors.Is(err, storage.ErrNotFound) {
errs.Appendf("error removing %s: %v", path, err) errs.Appendf("error removing %s: %w", path, err)
errCount++
} }
} }
// Calculate no. files removed. // Calculate no. files removed.
diff := len(files) - len(errs) diff := len(files) - errCount
// Wrap the combined error slice. // Wrap the combined error slice.
if err := errs.Combine(); err != nil { if err := errs.Combine(); err != nil {

View file

@ -175,113 +175,35 @@ type HTTPClientConfiguration struct {
} }
type CacheConfiguration struct { type CacheConfiguration struct {
GTS GTSCacheConfiguration `name:"gts"` MemoryTarget bytesize.Size `name:"memory-target"`
AccountMemRatio float64 `name:"account-mem-ratio"`
VisibilityMaxSize int `name:"visibility-max-size"` AccountNoteMemRatio float64 `name:"account-note-mem-ratio"`
VisibilityTTL time.Duration `name:"visibility-ttl"` BlockMemRatio float64 `name:"block-mem-ratio"`
VisibilitySweepFreq time.Duration `name:"visibility-sweep-freq"` BlockIDsMemRatio float64 `name:"block-mem-ratio"`
} BoostOfIDsMemRatio float64 `name:"boost-of-ids-mem-ratio"`
EmojiMemRatio float64 `name:"emoji-mem-ratio"`
type GTSCacheConfiguration struct { EmojiCategoryMemRatio float64 `name:"emoji-category-mem-ratio"`
AccountMaxSize int `name:"account-max-size"` FollowMemRatio float64 `name:"follow-mem-ratio"`
AccountTTL time.Duration `name:"account-ttl"` FollowIDsMemRatio float64 `name:"follow-ids-mem-ratio"`
AccountSweepFreq time.Duration `name:"account-sweep-freq"` FollowRequestMemRatio float64 `name:"follow-request-mem-ratio"`
FollowRequestIDsMemRatio float64 `name:"follow-request-ids-mem-ratio"`
AccountNoteMaxSize int `name:"account-note-max-size"` InReplyToIDsMemRatio float64 `name:"in-reply-to-ids-mem-ratio"`
AccountNoteTTL time.Duration `name:"account-note-ttl"` InstanceMemRatio float64 `name:"instance-mem-ratio"`
AccountNoteSweepFreq time.Duration `name:"account-note-sweep-freq"` ListMemRatio float64 `name:"list-mem-ratio"`
ListEntryMemRatio float64 `name:"list-entry-mem-ratio"`
BlockMaxSize int `name:"block-max-size"` MarkerMemRatio float64 `name:"marker-mem-ratio"`
BlockTTL time.Duration `name:"block-ttl"` MediaMemRatio float64 `name:"media-mem-ratio"`
BlockSweepFreq time.Duration `name:"block-sweep-freq"` MentionMemRatio float64 `name:"mention-mem-ratio"`
NotificationMemRatio float64 `name:"notification-mem-ratio"`
BlockIDsMaxSize int `name:"block-ids-max-size"` ReportMemRatio float64 `name:"report-mem-ratio"`
BlockIDsTTL time.Duration `name:"block-ids-ttl"` StatusMemRatio float64 `name:"status-mem-ratio"`
BlockIDsSweepFreq time.Duration `name:"block-ids-sweep-freq"` StatusFaveMemRatio float64 `name:"status-fave-mem-ratio"`
StatusFaveIDsMemRatio float64 `name:"status-fave-ids-mem-ratio"`
DomainBlockMaxSize int `name:"domain-block-max-size"` TagMemRatio float64 `name:"tag-mem-ratio"`
DomainBlockTTL time.Duration `name:"domain-block-ttl"` TombstoneMemRatio float64 `name:"tombstone-mem-ratio"`
DomainBlockSweepFreq time.Duration `name:"domain-block-sweep-freq"` UserMemRatio float64 `name:"user-mem-ratio"`
WebfingerMemRatio float64 `name:"webfinger-mem-ratio"`
EmojiMaxSize int `name:"emoji-max-size"` VisibilityMemRatio float64 `name:"visibility-mem-ratio"`
EmojiTTL time.Duration `name:"emoji-ttl"`
EmojiSweepFreq time.Duration `name:"emoji-sweep-freq"`
EmojiCategoryMaxSize int `name:"emoji-category-max-size"`
EmojiCategoryTTL time.Duration `name:"emoji-category-ttl"`
EmojiCategorySweepFreq time.Duration `name:"emoji-category-sweep-freq"`
FollowMaxSize int `name:"follow-max-size"`
FollowTTL time.Duration `name:"follow-ttl"`
FollowSweepFreq time.Duration `name:"follow-sweep-freq"`
FollowIDsMaxSize int `name:"follow-ids-max-size"`
FollowIDsTTL time.Duration `name:"follow-ids-ttl"`
FollowIDsSweepFreq time.Duration `name:"follow-ids-sweep-freq"`
FollowRequestMaxSize int `name:"follow-request-max-size"`
FollowRequestTTL time.Duration `name:"follow-request-ttl"`
FollowRequestSweepFreq time.Duration `name:"follow-request-sweep-freq"`
FollowRequestIDsMaxSize int `name:"follow-request-ids-max-size"`
FollowRequestIDsTTL time.Duration `name:"follow-request-ids-ttl"`
FollowRequestIDsSweepFreq time.Duration `name:"follow-request-ids-sweep-freq"`
InstanceMaxSize int `name:"instance-max-size"`
InstanceTTL time.Duration `name:"instance-ttl"`
InstanceSweepFreq time.Duration `name:"instance-sweep-freq"`
ListMaxSize int `name:"list-max-size"`
ListTTL time.Duration `name:"list-ttl"`
ListSweepFreq time.Duration `name:"list-sweep-freq"`
ListEntryMaxSize int `name:"list-entry-max-size"`
ListEntryTTL time.Duration `name:"list-entry-ttl"`
ListEntrySweepFreq time.Duration `name:"list-entry-sweep-freq"`
MarkerMaxSize int `name:"marker-max-size"`
MarkerTTL time.Duration `name:"marker-ttl"`
MarkerSweepFreq time.Duration `name:"marker-sweep-freq"`
MediaMaxSize int `name:"media-max-size"`
MediaTTL time.Duration `name:"media-ttl"`
MediaSweepFreq time.Duration `name:"media-sweep-freq"`
MentionMaxSize int `name:"mention-max-size"`
MentionTTL time.Duration `name:"mention-ttl"`
MentionSweepFreq time.Duration `name:"mention-sweep-freq"`
NotificationMaxSize int `name:"notification-max-size"`
NotificationTTL time.Duration `name:"notification-ttl"`
NotificationSweepFreq time.Duration `name:"notification-sweep-freq"`
ReportMaxSize int `name:"report-max-size"`
ReportTTL time.Duration `name:"report-ttl"`
ReportSweepFreq time.Duration `name:"report-sweep-freq"`
StatusMaxSize int `name:"status-max-size"`
StatusTTL time.Duration `name:"status-ttl"`
StatusSweepFreq time.Duration `name:"status-sweep-freq"`
StatusFaveMaxSize int `name:"status-fave-max-size"`
StatusFaveTTL time.Duration `name:"status-fave-ttl"`
StatusFaveSweepFreq time.Duration `name:"status-fave-sweep-freq"`
TagMaxSize int `name:"tag-max-size"`
TagTTL time.Duration `name:"tag-ttl"`
TagSweepFreq time.Duration `name:"tag-sweep-freq"`
TombstoneMaxSize int `name:"tombstone-max-size"`
TombstoneTTL time.Duration `name:"tombstone-ttl"`
TombstoneSweepFreq time.Duration `name:"tombstone-sweep-freq"`
UserMaxSize int `name:"user-max-size"`
UserTTL time.Duration `name:"user-ttl"`
UserSweepFreq time.Duration `name:"user-sweep-freq"`
WebfingerMaxSize int `name:"webfinger-max-size"`
WebfingerTTL time.Duration `name:"webfinger-ttl"`
WebfingerSweepFreq time.Duration `name:"webfinger-sweep-freq"`
} }
// MarshalMap will marshal current Configuration into a map structure (useful for JSON/TOML/YAML). // MarshalMap will marshal current Configuration into a map structure (useful for JSON/TOML/YAML).

View file

@ -126,111 +126,53 @@ var Defaults = Configuration{
AdvancedSenderMultiplier: 2, // 2 senders per CPU AdvancedSenderMultiplier: 2, // 2 senders per CPU
Cache: CacheConfiguration{ Cache: CacheConfiguration{
GTS: GTSCacheConfiguration{ // Rough memory target that the total
AccountMaxSize: 2000, // size of all State.Caches will attempt
AccountTTL: time.Minute * 30, // to remain with. Emphasis on *rough*.
AccountSweepFreq: time.Minute, MemoryTarget: 200 * bytesize.MiB,
AccountNoteMaxSize: 1000, // These ratios signal what percentage
AccountNoteTTL: time.Minute * 30, // of the available cache target memory
AccountNoteSweepFreq: time.Minute, // is allocated to each object type's
// cache.
BlockMaxSize: 1000, //
BlockTTL: time.Minute * 30, // These are weighted by a totally
BlockSweepFreq: time.Minute, // assorted mixture of priority, and
// manual twiddling to get the generated
BlockIDsMaxSize: 500, // cache capacity ratios within normal
BlockIDsTTL: time.Minute * 30, // amounts dependent size of the models.
BlockIDsSweepFreq: time.Minute, //
// when TODO items in the size.go source
DomainBlockMaxSize: 2000, // file have been addressed, these should
DomainBlockTTL: time.Hour * 24, // be able to make some more sense :D
DomainBlockSweepFreq: time.Minute, AccountMemRatio: 18,
AccountNoteMemRatio: 0.1,
EmojiMaxSize: 2000, BlockMemRatio: 3,
EmojiTTL: time.Minute * 30, BlockIDsMemRatio: 3,
EmojiSweepFreq: time.Minute, BoostOfIDsMemRatio: 3,
EmojiMemRatio: 3,
EmojiCategoryMaxSize: 100, EmojiCategoryMemRatio: 0.1,
EmojiCategoryTTL: time.Minute * 30, FollowMemRatio: 4,
EmojiCategorySweepFreq: time.Minute, FollowIDsMemRatio: 4,
FollowRequestMemRatio: 2,
FollowMaxSize: 2000, FollowRequestIDsMemRatio: 2,
FollowTTL: time.Minute * 30, InReplyToIDsMemRatio: 3,
FollowSweepFreq: time.Minute, InstanceMemRatio: 1,
ListMemRatio: 3,
FollowIDsMaxSize: 500, ListEntryMemRatio: 3,
FollowIDsTTL: time.Minute * 30, MarkerMemRatio: 0.5,
FollowIDsSweepFreq: time.Minute, MediaMemRatio: 4,
MentionMemRatio: 5,
FollowRequestMaxSize: 2000, NotificationMemRatio: 5,
FollowRequestTTL: time.Minute * 30, ReportMemRatio: 1,
FollowRequestSweepFreq: time.Minute, StatusMemRatio: 18,
StatusFaveMemRatio: 5,
FollowRequestIDsMaxSize: 500, StatusFaveIDsMemRatio: 3,
FollowRequestIDsTTL: time.Minute * 30, TagMemRatio: 3,
FollowRequestIDsSweepFreq: time.Minute, TombstoneMemRatio: 2,
UserMemRatio: 0.1,
InstanceMaxSize: 2000, WebfingerMemRatio: 0.1,
InstanceTTL: time.Minute * 30, VisibilityMemRatio: 2,
InstanceSweepFreq: time.Minute,
ListMaxSize: 2000,
ListTTL: time.Minute * 30,
ListSweepFreq: time.Minute,
ListEntryMaxSize: 2000,
ListEntryTTL: time.Minute * 30,
ListEntrySweepFreq: time.Minute,
MarkerMaxSize: 2000,
MarkerTTL: time.Hour * 6,
MarkerSweepFreq: time.Minute,
MediaMaxSize: 1000,
MediaTTL: time.Minute * 30,
MediaSweepFreq: time.Minute,
MentionMaxSize: 2000,
MentionTTL: time.Minute * 30,
MentionSweepFreq: time.Minute,
NotificationMaxSize: 1000,
NotificationTTL: time.Minute * 30,
NotificationSweepFreq: time.Minute,
ReportMaxSize: 100,
ReportTTL: time.Minute * 30,
ReportSweepFreq: time.Minute,
StatusMaxSize: 2000,
StatusTTL: time.Minute * 30,
StatusSweepFreq: time.Minute,
StatusFaveMaxSize: 2000,
StatusFaveTTL: time.Minute * 30,
StatusFaveSweepFreq: time.Minute,
TagMaxSize: 2000,
TagTTL: time.Minute * 30,
TagSweepFreq: time.Minute,
TombstoneMaxSize: 500,
TombstoneTTL: time.Minute * 30,
TombstoneSweepFreq: time.Minute,
UserMaxSize: 500,
UserTTL: time.Minute * 30,
UserSweepFreq: time.Minute,
WebfingerMaxSize: 250,
WebfingerTTL: time.Hour * 24,
WebfingerSweepFreq: time.Minute * 15,
},
VisibilityMaxSize: 2000,
VisibilityTTL: time.Minute * 30,
VisibilitySweepFreq: time.Minute,
}, },
HTTPClient: HTTPClientConfiguration{ HTTPClient: HTTPClientConfiguration{

File diff suppressed because it is too large Load diff

View file

@ -20,7 +20,6 @@ package bundb
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"strings" "strings"
"time" "time"
@ -255,7 +254,7 @@ func (a *accountDB) getAccount(ctx context.Context, lookup string, dbQuery func(
func (a *accountDB) PopulateAccount(ctx context.Context, account *gtsmodel.Account) error { func (a *accountDB) PopulateAccount(ctx context.Context, account *gtsmodel.Account) error {
var ( var (
err error err error
errs = make(gtserror.MultiError, 0, 3) errs = gtserror.NewMultiError(3)
) )
if account.AvatarMediaAttachment == nil && account.AvatarMediaAttachmentID != "" { if account.AvatarMediaAttachment == nil && account.AvatarMediaAttachmentID != "" {
@ -265,7 +264,7 @@ func (a *accountDB) PopulateAccount(ctx context.Context, account *gtsmodel.Accou
account.AvatarMediaAttachmentID, account.AvatarMediaAttachmentID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating account avatar: %w", err)) errs.Appendf("error populating account avatar: %w", err)
} }
} }
@ -276,7 +275,7 @@ func (a *accountDB) PopulateAccount(ctx context.Context, account *gtsmodel.Accou
account.HeaderMediaAttachmentID, account.HeaderMediaAttachmentID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating account header: %w", err)) errs.Appendf("error populating account header: %w", err)
} }
} }
@ -287,11 +286,15 @@ func (a *accountDB) PopulateAccount(ctx context.Context, account *gtsmodel.Accou
account.EmojiIDs, account.EmojiIDs,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating account emojis: %w", err)) errs.Appendf("error populating account emojis: %w", err)
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
func (a *accountDB) PutAccount(ctx context.Context, account *gtsmodel.Account) error { func (a *accountDB) PutAccount(ctx context.Context, account *gtsmodel.Account) error {
@ -468,24 +471,13 @@ func (a *accountDB) GetAccountCustomCSSByUsername(ctx context.Context, username
func (a *accountDB) GetAccountsUsingEmoji(ctx context.Context, emojiID string) ([]*gtsmodel.Account, error) { func (a *accountDB) GetAccountsUsingEmoji(ctx context.Context, emojiID string) ([]*gtsmodel.Account, error) {
var accountIDs []string var accountIDs []string
// Create SELECT account query. // SELECT all accounts using this emoji,
q := a.db.NewSelect(). // using a relational table for improved perf.
Table("accounts"). if _, err := a.db.NewSelect().
Column("id") Table("account_to_emojis").
Column("account_id").
// Append a WHERE LIKE clause to the query Where("? = ?", bun.Ident("emoji_id"), emojiID).
// that checks the `emoji` column for any Exec(ctx, &accountIDs); err != nil {
// text containing this specific emoji ID.
//
// The reason we do this instead of doing a
// `WHERE ? IN (emojis)` is that the latter
// ends up being much MUCH slower, and the
// database stores this ID-array-column as
// text anyways, allowing a simple LIKE query.
q = whereLike(q, "emojis", emojiID)
// Execute the query, scanning destination into accountIDs.
if _, err := q.Exec(ctx, &accountIDs); err != nil {
return nil, a.db.ProcessError(err) return nil, a.db.ProcessError(err)
} }

View file

@ -106,76 +106,25 @@ func (e *emojiDB) DeleteEmojiByID(ctx context.Context, id string) error {
} }
return e.db.RunInTx(ctx, func(tx bun.Tx) error { return e.db.RunInTx(ctx, func(tx bun.Tx) error {
// delete links between this emoji and any statuses that use it // Delete relational links between this emoji
// TODO: remove when we delete this table // and any statuses using it, returning the
if _, err := tx. // status IDs so we can later update them.
NewDelete(). if _, err := tx.NewDelete().
TableExpr("? AS ?", bun.Ident("status_to_emojis"), bun.Ident("status_to_emoji")). Table("status_to_emojis").
Where("? = ?", bun.Ident("status_to_emoji.emoji_id"), id). Where("? = ?", bun.Ident("emoji_id"), id).
Exec(ctx); err != nil { Returning("status_id").
Exec(ctx, &statusIDs); err != nil {
return err return err
} }
// delete links between this emoji and any accounts that use it // Delete relational links between this emoji
// TODO: remove when we delete this table // and any accounts using it, returning the
if _, err := tx. // account IDs so we can later update them.
NewDelete(). if _, err := tx.NewDelete().
TableExpr("? AS ?", bun.Ident("account_to_emojis"), bun.Ident("account_to_emoji")). Table("account_to_emojis").
Where("? = ?", bun.Ident("account_to_emoji.emoji_id"), id). Where("? = ?", bun.Ident("emoji_id"), id).
Exec(ctx); err != nil { Returning("account_id").
return err Exec(ctx, &accountIDs); err != nil {
}
// Prepare a SELECT query with a WHERE LIKE
// that checks the `emoji` column for any
// text containing this specific emoji ID.
//
// (see GetStatusesUsingEmoji() for details.)
aq := tx.NewSelect().Table("accounts").Column("id")
aq = whereLike(aq, "emojis", id)
// Select all accounts using this emoji into accountIDss.
if _, err := aq.Exec(ctx, &accountIDs); err != nil {
return err
}
for _, id := range accountIDs {
var emojiIDs []string
// Select account with ID.
if _, err := tx.NewSelect().
Table("accounts").
Column("emojis").
Where("id = ?", id).
Exec(ctx); err != nil &&
err != sql.ErrNoRows {
return err
}
// Drop ID from account emojis.
emojiIDs = dropID(emojiIDs, id)
// Update account emoji IDs.
if _, err := tx.NewUpdate().
Table("accounts").
Where("id = ?", id).
Set("emojis = ?", emojiIDs).
Exec(ctx); err != nil &&
err != sql.ErrNoRows {
return err
}
}
// Prepare a SELECT query with a WHERE LIKE
// that checks the `emoji` column for any
// text containing this specific emoji ID.
//
// (see GetStatusesUsingEmoji() for details.)
sq := tx.NewSelect().Table("statuses").Column("id")
sq = whereLike(sq, "emojis", id)
// Select all statuses using this emoji into statusIDs.
if _, err := sq.Exec(ctx, &statusIDs); err != nil {
return err return err
} }
@ -186,7 +135,7 @@ func (e *emojiDB) DeleteEmojiByID(ctx context.Context, id string) error {
if _, err := tx.NewSelect(). if _, err := tx.NewSelect().
Table("statuses"). Table("statuses").
Column("emojis"). Column("emojis").
Where("id = ?", id). Where("? = ?", bun.Ident("id"), id).
Exec(ctx); err != nil && Exec(ctx); err != nil &&
err != sql.ErrNoRows { err != sql.ErrNoRows {
return err return err
@ -198,7 +147,34 @@ func (e *emojiDB) DeleteEmojiByID(ctx context.Context, id string) error {
// Update status emoji IDs. // Update status emoji IDs.
if _, err := tx.NewUpdate(). if _, err := tx.NewUpdate().
Table("statuses"). Table("statuses").
Where("id = ?", id). Where("? = ?", bun.Ident("id"), id).
Set("emojis = ?", emojiIDs).
Exec(ctx); err != nil &&
err != sql.ErrNoRows {
return err
}
}
for _, id := range accountIDs {
var emojiIDs []string
// Select account with ID.
if _, err := tx.NewSelect().
Table("accounts").
Column("emojis").
Where("? = ?", bun.Ident("id"), id).
Exec(ctx); err != nil &&
err != sql.ErrNoRows {
return err
}
// Drop ID from account emojis.
emojiIDs = dropID(emojiIDs, id)
// Update account emoji IDs.
if _, err := tx.NewUpdate().
Table("accounts").
Where("? = ?", bun.Ident("id"), id).
Set("emojis = ?", emojiIDs). Set("emojis = ?", emojiIDs).
Exec(ctx); err != nil && Exec(ctx); err != nil &&
err != sql.ErrNoRows { err != sql.ErrNoRows {
@ -209,7 +185,7 @@ func (e *emojiDB) DeleteEmojiByID(ctx context.Context, id string) error {
// Delete emoji from database. // Delete emoji from database.
if _, err := tx.NewDelete(). if _, err := tx.NewDelete().
Table("emojis"). Table("emojis").
Where("id = ?", id). Where("? = ?", bun.Ident("id"), id).
Exec(ctx); err != nil { Exec(ctx); err != nil {
return err return err
} }

View file

@ -173,7 +173,7 @@ func (i *instanceDB) getInstance(ctx context.Context, lookup string, dbQuery fun
func (i *instanceDB) populateInstance(ctx context.Context, instance *gtsmodel.Instance) error { func (i *instanceDB) populateInstance(ctx context.Context, instance *gtsmodel.Instance) error {
var ( var (
err error err error
errs = make(gtserror.MultiError, 0, 2) errs = gtserror.NewMultiError(2)
) )
if instance.DomainBlockID != "" && instance.DomainBlock == nil { if instance.DomainBlockID != "" && instance.DomainBlock == nil {
@ -183,7 +183,7 @@ func (i *instanceDB) populateInstance(ctx context.Context, instance *gtsmodel.In
instance.Domain, instance.Domain,
) )
if err != nil { if err != nil {
errs.Append(gtserror.Newf("error populating instance domain block: %w", err)) errs.Appendf("error populating instance domain block: %w", err)
} }
} }
@ -194,11 +194,15 @@ func (i *instanceDB) populateInstance(ctx context.Context, instance *gtsmodel.In
instance.ContactAccountID, instance.ContactAccountID,
) )
if err != nil { if err != nil {
errs.Append(gtserror.Newf("error populating instance contact account: %w", err)) errs.Appendf("error populating instance contact account: %w", err)
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
func (i *instanceDB) PutInstance(ctx context.Context, instance *gtsmodel.Instance) error { func (i *instanceDB) PutInstance(ctx context.Context, instance *gtsmodel.Instance) error {

View file

@ -117,7 +117,7 @@ func (l *listDB) GetListsForAccountID(ctx context.Context, accountID string) ([]
func (l *listDB) PopulateList(ctx context.Context, list *gtsmodel.List) error { func (l *listDB) PopulateList(ctx context.Context, list *gtsmodel.List) error {
var ( var (
err error err error
errs = make(gtserror.MultiError, 0, 2) errs = gtserror.NewMultiError(2)
) )
if list.Account == nil { if list.Account == nil {
@ -127,7 +127,7 @@ func (l *listDB) PopulateList(ctx context.Context, list *gtsmodel.List) error {
list.AccountID, list.AccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating list account: %w", err)) errs.Appendf("error populating list account: %w", err)
} }
} }
@ -139,11 +139,15 @@ func (l *listDB) PopulateList(ctx context.Context, list *gtsmodel.List) error {
"", "", "", 0, "", "", "", 0,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating list entries: %w", err)) errs.Appendf("error populating list entries: %w", err)
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
func (l *listDB) PutList(ctx context.Context, list *gtsmodel.List) error { func (l *listDB) PutList(ctx context.Context, list *gtsmodel.List) error {

View file

@ -160,7 +160,7 @@ func (r *relationshipDB) getFollow(ctx context.Context, lookup string, dbQuery f
func (r *relationshipDB) PopulateFollow(ctx context.Context, follow *gtsmodel.Follow) error { func (r *relationshipDB) PopulateFollow(ctx context.Context, follow *gtsmodel.Follow) error {
var ( var (
err error err error
errs = make(gtserror.MultiError, 0, 2) errs = gtserror.NewMultiError(2)
) )
if follow.Account == nil { if follow.Account == nil {
@ -170,7 +170,7 @@ func (r *relationshipDB) PopulateFollow(ctx context.Context, follow *gtsmodel.Fo
follow.AccountID, follow.AccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating follow account: %w", err)) errs.Appendf("error populating follow account: %w", err)
} }
} }
@ -181,11 +181,15 @@ func (r *relationshipDB) PopulateFollow(ctx context.Context, follow *gtsmodel.Fo
follow.TargetAccountID, follow.TargetAccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating follow target account: %w", err)) errs.Appendf("error populating follow target account: %w", err)
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
func (r *relationshipDB) PutFollow(ctx context.Context, follow *gtsmodel.Follow) error { func (r *relationshipDB) PutFollow(ctx context.Context, follow *gtsmodel.Follow) error {

View file

@ -20,9 +20,7 @@ package bundb
import ( import (
"container/list" "container/list"
"context" "context"
"database/sql"
"errors" "errors"
"fmt"
"time" "time"
"github.com/superseriousbusiness/gotosocial/internal/db" "github.com/superseriousbusiness/gotosocial/internal/db"
@ -43,7 +41,6 @@ func (s *statusDB) newStatusQ(status interface{}) *bun.SelectQuery {
return s.db. return s.db.
NewSelect(). NewSelect().
Model(status). Model(status).
Relation("Tags").
Relation("CreatedWithApplication") Relation("CreatedWithApplication")
} }
@ -98,6 +95,26 @@ func (s *statusDB) GetStatusByURL(ctx context.Context, url string) (*gtsmodel.St
) )
} }
func (s *statusDB) GetStatusBoost(ctx context.Context, boostOfID string, byAccountID string) (*gtsmodel.Status, error) {
return s.getStatus(
ctx,
"BoostOfID.AccountID",
func(status *gtsmodel.Status) error {
return s.newStatusQ(status).
Where("status.boost_of_id = ?", boostOfID).
Where("status.account_id = ?", byAccountID).
// Our old code actually allowed a status to
// be boosted multiple times by the same author,
// so limit our query + order to fetch latest.
Order("status.id DESC"). // our IDs are timestamped
Limit(1).
Scan(ctx)
},
boostOfID, byAccountID,
)
}
func (s *statusDB) getStatus(ctx context.Context, lookup string, dbQuery func(*gtsmodel.Status) error, keyParts ...any) (*gtsmodel.Status, error) { func (s *statusDB) getStatus(ctx context.Context, lookup string, dbQuery func(*gtsmodel.Status) error, keyParts ...any) (*gtsmodel.Status, error) {
// Fetch status from database cache with loader callback // Fetch status from database cache with loader callback
status, err := s.state.Caches.GTS.Status().Load(lookup, func() (*gtsmodel.Status, error) { status, err := s.state.Caches.GTS.Status().Load(lookup, func() (*gtsmodel.Status, error) {
@ -130,7 +147,7 @@ func (s *statusDB) getStatus(ctx context.Context, lookup string, dbQuery func(*g
func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status) error { func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status) error {
var ( var (
err error err error
errs = make(gtserror.MultiError, 0, 9) errs = gtserror.NewMultiError(9)
) )
if status.Account == nil { if status.Account == nil {
@ -140,7 +157,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.AccountID, status.AccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status author: %w", err)) errs.Appendf("error populating status author: %w", err)
} }
} }
@ -151,7 +168,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.InReplyToID, status.InReplyToID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status parent: %w", err)) errs.Appendf("error populating status parent: %w", err)
} }
} }
@ -163,7 +180,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.InReplyToID, status.InReplyToID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status parent: %w", err)) errs.Appendf("error populating status parent: %w", err)
} }
} }
@ -174,7 +191,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.InReplyToAccountID, status.InReplyToAccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status parent author: %w", err)) errs.Appendf("error populating status parent author: %w", err)
} }
} }
} }
@ -187,7 +204,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.BoostOfID, status.BoostOfID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status boost: %w", err)) errs.Appendf("error populating status boost: %w", err)
} }
} }
@ -198,7 +215,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.BoostOfAccountID, status.BoostOfAccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status boost author: %w", err)) errs.Appendf("error populating status boost author: %w", err)
} }
} }
} }
@ -210,7 +227,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.AttachmentIDs, status.AttachmentIDs,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status attachments: %w", err)) errs.Appendf("error populating status attachments: %w", err)
} }
} }
@ -221,7 +238,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.TagIDs, status.TagIDs,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status tags: %w", err)) errs.Appendf("error populating status tags: %w", err)
} }
} }
@ -232,7 +249,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.MentionIDs, status.MentionIDs,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status mentions: %w", err)) errs.Appendf("error populating status mentions: %w", err)
} }
} }
@ -243,7 +260,7 @@ func (s *statusDB) PopulateStatus(ctx context.Context, status *gtsmodel.Status)
status.EmojiIDs, status.EmojiIDs,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status emojis: %w", err)) errs.Appendf("error populating status emojis: %w", err)
} }
} }
@ -440,24 +457,13 @@ func (s *statusDB) DeleteStatusByID(ctx context.Context, id string) error {
func (s *statusDB) GetStatusesUsingEmoji(ctx context.Context, emojiID string) ([]*gtsmodel.Status, error) { func (s *statusDB) GetStatusesUsingEmoji(ctx context.Context, emojiID string) ([]*gtsmodel.Status, error) {
var statusIDs []string var statusIDs []string
// Create SELECT status query. // SELECT all statuses using this emoji,
q := s.db.NewSelect(). // using a relational table for improved perf.
Table("statuses"). if _, err := s.db.NewSelect().
Column("id") Table("status_to_emojis").
Column("status_id").
// Append a WHERE LIKE clause to the query Where("? = ?", bun.Ident("emoji_id"), emojiID).
// that checks the `emoji` column for any Exec(ctx, &statusIDs); err != nil {
// text containing this specific emoji ID.
//
// The reason we do this instead of doing a
// `WHERE ? IN (emojis)` is that the latter
// ends up being much MUCH slower, and the
// database stores this ID-array-column as
// text anyways, allowing a simple LIKE query.
q = whereLike(q, "emojis", emojiID)
// Execute the query, scanning destination into statusIDs.
if _, err := q.Exec(ctx, &statusIDs); err != nil {
return nil, s.db.ProcessError(err) return nil, s.db.ProcessError(err)
} }
@ -515,25 +521,17 @@ func (s *statusDB) GetStatusChildren(ctx context.Context, status *gtsmodel.Statu
} }
func (s *statusDB) statusChildren(ctx context.Context, status *gtsmodel.Status, foundStatuses *list.List, onlyDirect bool, minID string) { func (s *statusDB) statusChildren(ctx context.Context, status *gtsmodel.Status, foundStatuses *list.List, onlyDirect bool, minID string) {
var childIDs []string childIDs, err := s.getStatusReplyIDs(ctx, status.ID)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
q := s.db. log.Errorf(ctx, "error getting status %s children: %v", status.ID, err)
NewSelect().
TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
Column("status.id").
Where("? = ?", bun.Ident("status.in_reply_to_id"), status.ID)
if minID != "" {
q = q.Where("? > ?", bun.Ident("status.id"), minID)
}
if err := q.Scan(ctx, &childIDs); err != nil {
if err != sql.ErrNoRows {
log.Errorf(ctx, "error getting children for %q: %v", status.ID, err)
}
return return
} }
for _, id := range childIDs { for _, id := range childIDs {
if id <= minID {
continue
}
// Fetch child with ID from database // Fetch child with ID from database
child, err := s.GetStatusByID(ctx, id) child, err := s.GetStatusByID(ctx, id)
if err != nil { if err != nil {
@ -562,48 +560,80 @@ func (s *statusDB) statusChildren(ctx context.Context, status *gtsmodel.Status,
} }
} }
func (s *statusDB) CountStatusReplies(ctx context.Context, status *gtsmodel.Status) (int, error) { func (s *statusDB) GetStatusReplies(ctx context.Context, statusID string) ([]*gtsmodel.Status, error) {
return s.db. statusIDs, err := s.getStatusReplyIDs(ctx, statusID)
NewSelect(). if err != nil {
TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")). return nil, err
Where("? = ?", bun.Ident("status.in_reply_to_id"), status.ID). }
Count(ctx) return s.GetStatusesByIDs(ctx, statusIDs)
} }
func (s *statusDB) CountStatusReblogs(ctx context.Context, status *gtsmodel.Status) (int, error) { func (s *statusDB) CountStatusReplies(ctx context.Context, statusID string) (int, error) {
return s.db. statusIDs, err := s.getStatusReplyIDs(ctx, statusID)
NewSelect(). return len(statusIDs), err
TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
Where("? = ?", bun.Ident("status.boost_of_id"), status.ID).
Count(ctx)
} }
func (s *statusDB) CountStatusFaves(ctx context.Context, status *gtsmodel.Status) (int, error) { func (s *statusDB) getStatusReplyIDs(ctx context.Context, statusID string) ([]string, error) {
return s.db. return s.state.Caches.GTS.InReplyToIDs().Load(statusID, func() ([]string, error) {
NewSelect(). var statusIDs []string
TableExpr("? AS ?", bun.Ident("status_faves"), bun.Ident("status_fave")).
Where("? = ?", bun.Ident("status_fave.status_id"), status.ID). // Status reply IDs not in cache, perform DB query!
Count(ctx) if err := s.db.
NewSelect().
Table("statuses").
Column("id").
Where("? = ?", bun.Ident("in_reply_to_id"), statusID).
Order("id DESC").
Scan(ctx, &statusIDs); err != nil {
return nil, s.db.ProcessError(err)
}
return statusIDs, nil
})
} }
func (s *statusDB) IsStatusFavedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error) { func (s *statusDB) GetStatusBoosts(ctx context.Context, statusID string) ([]*gtsmodel.Status, error) {
q := s.db. statusIDs, err := s.getStatusBoostIDs(ctx, statusID)
NewSelect(). if err != nil {
TableExpr("? AS ?", bun.Ident("status_faves"), bun.Ident("status_fave")). return nil, err
Where("? = ?", bun.Ident("status_fave.status_id"), status.ID). }
Where("? = ?", bun.Ident("status_fave.account_id"), accountID) return s.GetStatusesByIDs(ctx, statusIDs)
return s.db.Exists(ctx, q)
} }
func (s *statusDB) IsStatusRebloggedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error) { func (s *statusDB) IsStatusBoostedBy(ctx context.Context, statusID string, accountID string) (bool, error) {
q := s.db. boost, err := s.GetStatusBoost(
NewSelect(). gtscontext.SetBarebones(ctx),
TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")). statusID,
Where("? = ?", bun.Ident("status.boost_of_id"), status.ID). accountID,
Where("? = ?", bun.Ident("status.account_id"), accountID) )
if err != nil && !errors.Is(err, db.ErrNoEntries) {
return false, err
}
return (boost != nil), nil
}
return s.db.Exists(ctx, q) func (s *statusDB) CountStatusBoosts(ctx context.Context, statusID string) (int, error) {
statusIDs, err := s.getStatusBoostIDs(ctx, statusID)
return len(statusIDs), err
}
func (s *statusDB) getStatusBoostIDs(ctx context.Context, statusID string) ([]string, error) {
return s.state.Caches.GTS.BoostOfIDs().Load(statusID, func() ([]string, error) {
var statusIDs []string
// Status boost IDs not in cache, perform DB query!
if err := s.db.
NewSelect().
Table("statuses").
Column("id").
Where("? = ?", bun.Ident("boost_of_id"), statusID).
Order("id DESC").
Scan(ctx, &statusIDs); err != nil {
return nil, s.db.ProcessError(err)
}
return statusIDs, nil
})
} }
func (s *statusDB) IsStatusMutedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error) { func (s *statusDB) IsStatusMutedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error) {
@ -625,16 +655,3 @@ func (s *statusDB) IsStatusBookmarkedBy(ctx context.Context, status *gtsmodel.St
return s.db.Exists(ctx, q) return s.db.Exists(ctx, q)
} }
func (s *statusDB) GetStatusReblogs(ctx context.Context, status *gtsmodel.Status) ([]*gtsmodel.Status, error) {
reblogs := []*gtsmodel.Status{}
q := s.
newStatusQ(&reblogs).
Where("? = ?", bun.Ident("status.boost_of_id"), status.ID)
if err := q.Scan(ctx); err != nil {
return nil, s.db.ProcessError(err)
}
return reblogs, nil
}

View file

@ -19,6 +19,7 @@ package bundb
import ( import (
"context" "context"
"database/sql"
"errors" "errors"
"fmt" "fmt"
@ -44,8 +45,14 @@ func (s *statusFaveDB) GetStatusFave(ctx context.Context, accountID string, stat
return s.db. return s.db.
NewSelect(). NewSelect().
Model(fave). Model(fave).
Where("? = ?", bun.Ident("account_id"), accountID). Where("status_fave.account_id = ?", accountID).
Where("? = ?", bun.Ident("status_id"), statusID). Where("status_fave.status_id = ?", statusID).
// Our old code actually allowed a status to
// be faved multiple times by the same author,
// so limit our query + order to fetch latest.
Order("status_fave.id DESC"). // our IDs are timestamped
Limit(1).
Scan(ctx) Scan(ctx)
}, },
accountID, accountID,
@ -89,67 +96,72 @@ func (s *statusFaveDB) getStatusFave(ctx context.Context, lookup string, dbQuery
return fave, nil return fave, nil
} }
// Fetch the status fave author account. // Populate the status favourite model.
fave.Account, err = s.state.DB.GetAccountByID( if err := s.PopulateStatusFave(ctx, fave); err != nil {
gtscontext.SetBarebones(ctx), return nil, fmt.Errorf("error(s) populating status fave: %w", err)
fave.AccountID,
)
if err != nil {
return nil, fmt.Errorf("error getting status fave account %q: %w", fave.AccountID, err)
}
// Fetch the status fave target account.
fave.TargetAccount, err = s.state.DB.GetAccountByID(
gtscontext.SetBarebones(ctx),
fave.TargetAccountID,
)
if err != nil {
return nil, fmt.Errorf("error getting status fave target account %q: %w", fave.TargetAccountID, err)
}
// Fetch the status fave target status.
fave.Status, err = s.state.DB.GetStatusByID(
gtscontext.SetBarebones(ctx),
fave.StatusID,
)
if err != nil {
return nil, fmt.Errorf("error getting status fave status %q: %w", fave.StatusID, err)
} }
return fave, nil return fave, nil
} }
func (s *statusFaveDB) GetStatusFavesForStatus(ctx context.Context, statusID string) ([]*gtsmodel.StatusFave, error) { func (s *statusFaveDB) GetStatusFaves(ctx context.Context, statusID string) ([]*gtsmodel.StatusFave, error) {
ids := []string{} // Fetch the status fave IDs for status.
faveIDs, err := s.getStatusFaveIDs(ctx, statusID)
if err := s.db. if err != nil {
NewSelect(). return nil, err
Table("status_faves").
Column("id").
Where("? = ?", bun.Ident("status_id"), statusID).
Scan(ctx, &ids); err != nil {
return nil, s.db.ProcessError(err)
} }
faves := make([]*gtsmodel.StatusFave, 0, len(ids)) // Preallocate a slice of expected status fave capacity.
faves := make([]*gtsmodel.StatusFave, 0, len(faveIDs))
for _, id := range ids { for _, id := range faveIDs {
// Fetch status fave model for each ID.
fave, err := s.GetStatusFaveByID(ctx, id) fave, err := s.GetStatusFaveByID(ctx, id)
if err != nil { if err != nil {
log.Errorf(ctx, "error getting status fave %q: %v", id, err) log.Errorf(ctx, "error getting status fave %q: %v", id, err)
continue continue
} }
faves = append(faves, fave) faves = append(faves, fave)
} }
return faves, nil return faves, nil
} }
func (s *statusFaveDB) IsStatusFavedBy(ctx context.Context, statusID string, accountID string) (bool, error) {
fave, err := s.GetStatusFave(ctx, accountID, statusID)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
return false, err
}
return (fave != nil), nil
}
func (s *statusFaveDB) CountStatusFaves(ctx context.Context, statusID string) (int, error) {
faveIDs, err := s.getStatusFaveIDs(ctx, statusID)
return len(faveIDs), err
}
func (s *statusFaveDB) getStatusFaveIDs(ctx context.Context, statusID string) ([]string, error) {
return s.state.Caches.GTS.StatusFaveIDs().Load(statusID, func() ([]string, error) {
var faveIDs []string
// Status fave IDs not in cache, perform DB query!
if err := s.db.
NewSelect().
Table("status_faves").
Column("id").
Where("? = ?", bun.Ident("status_id"), statusID).
Scan(ctx, &faveIDs); err != nil {
return nil, s.db.ProcessError(err)
}
return faveIDs, nil
})
}
func (s *statusFaveDB) PopulateStatusFave(ctx context.Context, statusFave *gtsmodel.StatusFave) error { func (s *statusFaveDB) PopulateStatusFave(ctx context.Context, statusFave *gtsmodel.StatusFave) error {
var ( var (
err error err error
errs = make(gtserror.MultiError, 0, 3) errs = gtserror.NewMultiError(3)
) )
if statusFave.Account == nil { if statusFave.Account == nil {
@ -159,7 +171,7 @@ func (s *statusFaveDB) PopulateStatusFave(ctx context.Context, statusFave *gtsmo
statusFave.AccountID, statusFave.AccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status fave author: %w", err)) errs.Appendf("error populating status fave author: %w", err)
} }
} }
@ -170,7 +182,7 @@ func (s *statusFaveDB) PopulateStatusFave(ctx context.Context, statusFave *gtsmo
statusFave.TargetAccountID, statusFave.TargetAccountID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status fave target account: %w", err)) errs.Appendf("error populating status fave target account: %w", err)
} }
} }
@ -181,11 +193,15 @@ func (s *statusFaveDB) PopulateStatusFave(ctx context.Context, statusFave *gtsmo
statusFave.StatusID, statusFave.StatusID,
) )
if err != nil { if err != nil {
errs.Append(fmt.Errorf("error populating status fave status: %w", err)) errs.Appendf("error populating status fave status: %w", err)
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
func (s *statusFaveDB) PutStatusFave(ctx context.Context, fave *gtsmodel.StatusFave) error { func (s *statusFaveDB) PutStatusFave(ctx context.Context, fave *gtsmodel.StatusFave) error {
@ -199,26 +215,32 @@ func (s *statusFaveDB) PutStatusFave(ctx context.Context, fave *gtsmodel.StatusF
} }
func (s *statusFaveDB) DeleteStatusFaveByID(ctx context.Context, id string) error { func (s *statusFaveDB) DeleteStatusFaveByID(ctx context.Context, id string) error {
defer s.state.Caches.GTS.StatusFave().Invalidate("ID", id) var statusID string
// Load fave into cache before attempting a delete, // Perform DELETE on status fave,
// as we need it cached in order to trigger the invalidate // returning the status ID it was for.
// callback. This in turn invalidates others. if _, err := s.db.NewDelete().
_, err := s.GetStatusFaveByID(gtscontext.SetBarebones(ctx), id) Table("status_faves").
if err != nil { Where("id = ?", id).
if errors.Is(err, db.ErrNoEntries) { Returning("status_id").
// not an issue. Exec(ctx, &statusID); err != nil {
if err == sql.ErrNoRows {
// Not an issue, only due
// to us doing a RETURNING.
err = nil err = nil
} }
return err return s.db.ProcessError(err)
} }
// Finally delete fave from DB. if statusID != "" {
_, err = s.db.NewDelete(). // Invalidate any cached status faves for this status.
Table("status_faves"). s.state.Caches.GTS.StatusFave().Invalidate("ID", id)
Where("? = ?", bun.Ident("id"), id).
Exec(ctx) // Invalidate any cached status fave IDs for this status.
return s.db.ProcessError(err) s.state.Caches.GTS.StatusFaveIDs().Invalidate(statusID)
}
return nil
} }
func (s *statusFaveDB) DeleteStatusFaves(ctx context.Context, targetAccountID string, originAccountID string) error { func (s *statusFaveDB) DeleteStatusFaves(ctx context.Context, targetAccountID string, originAccountID string) error {
@ -226,12 +248,13 @@ func (s *statusFaveDB) DeleteStatusFaves(ctx context.Context, targetAccountID st
return errors.New("DeleteStatusFaves: one of targetAccountID or originAccountID must be set") return errors.New("DeleteStatusFaves: one of targetAccountID or originAccountID must be set")
} }
var faveIDs []string var statusIDs []string
q := s.db. // Prepare DELETE query returning
NewSelect(). // the deleted faves for status IDs.
Column("id"). q := s.db.NewDelete().
Table("status_faves") Table("status_faves").
Returning("status_id")
if targetAccountID != "" { if targetAccountID != "" {
q = q.Where("? = ?", bun.Ident("target_account_id"), targetAccountID) q = q.Where("? = ?", bun.Ident("target_account_id"), targetAccountID)
@ -241,69 +264,46 @@ func (s *statusFaveDB) DeleteStatusFaves(ctx context.Context, targetAccountID st
q = q.Where("? = ?", bun.Ident("account_id"), originAccountID) q = q.Where("? = ?", bun.Ident("account_id"), originAccountID)
} }
if _, err := q.Exec(ctx, &faveIDs); err != nil { // Execute query, store favourited status IDs.
if _, err := q.Exec(ctx, &statusIDs); err != nil {
if err == sql.ErrNoRows {
// Not an issue, only due
// to us doing a RETURNING.
err = nil
}
return s.db.ProcessError(err) return s.db.ProcessError(err)
} }
defer func() { // Collate (deduplicating) status IDs.
// Invalidate all IDs on return. statusIDs = collate(func(i int) string {
for _, id := range faveIDs { return statusIDs[i]
s.state.Caches.GTS.StatusFave().Invalidate("ID", id) }, len(statusIDs))
}
}()
// Load all faves into cache, this *really* isn't great for _, id := range statusIDs {
// but it is the only way we can ensure we invalidate all // Invalidate any cached status faves for this status.
// related caches correctly (e.g. visibility). s.state.Caches.GTS.StatusFave().Invalidate("ID", id)
for _, id := range faveIDs {
_, err := s.GetStatusFaveByID(ctx, id) // Invalidate any cached status fave IDs for this status.
if err != nil && !errors.Is(err, db.ErrNoEntries) { s.state.Caches.GTS.StatusFaveIDs().Invalidate(id)
return err
}
} }
// Finally delete all from DB. return nil
_, err := s.db.NewDelete().
Table("status_faves").
Where("? IN (?)", bun.Ident("id"), bun.In(faveIDs)).
Exec(ctx)
return s.db.ProcessError(err)
} }
func (s *statusFaveDB) DeleteStatusFavesForStatus(ctx context.Context, statusID string) error { func (s *statusFaveDB) DeleteStatusFavesForStatus(ctx context.Context, statusID string) error {
// Capture fave IDs in a RETURNING statement. // Delete all status faves for status.
var faveIDs []string if _, err := s.db.NewDelete().
q := s.db.
NewSelect().
Column("id").
Table("status_faves"). Table("status_faves").
Where("? = ?", bun.Ident("status_id"), statusID) Where("status_id = ?", statusID).
if _, err := q.Exec(ctx, &faveIDs); err != nil { Exec(ctx); err != nil {
return s.db.ProcessError(err) return s.db.ProcessError(err)
} }
defer func() { // Invalidate any cached status faves for this status.
// Invalidate all IDs on return. s.state.Caches.GTS.StatusFave().Invalidate("ID", statusID)
for _, id := range faveIDs {
s.state.Caches.GTS.StatusFave().Invalidate("ID", id)
}
}()
// Load all faves into cache, this *really* isn't great // Invalidate any cached status fave IDs for this status.
// but it is the only way we can ensure we invalidate all s.state.Caches.GTS.StatusFaveIDs().Invalidate(statusID)
// related caches correctly (e.g. visibility).
for _, id := range faveIDs {
_, err := s.GetStatusFaveByID(ctx, id)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
return err
}
}
// Finally delete all from DB. return nil
_, err := s.db.NewDelete().
Table("status_faves").
Where("? IN (?)", bun.Ident("id"), bun.In(faveIDs)).
Exec(ctx)
return s.db.ProcessError(err)
} }

View file

@ -35,7 +35,7 @@ type StatusFaveTestSuite struct {
func (suite *StatusFaveTestSuite) TestGetStatusFaves() { func (suite *StatusFaveTestSuite) TestGetStatusFaves() {
testStatus := suite.testStatuses["admin_account_status_1"] testStatus := suite.testStatuses["admin_account_status_1"]
faves, err := suite.db.GetStatusFavesForStatus(context.Background(), testStatus.ID) faves, err := suite.db.GetStatusFaves(context.Background(), testStatus.ID)
if err != nil { if err != nil {
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }
@ -51,7 +51,7 @@ func (suite *StatusFaveTestSuite) TestGetStatusFaves() {
func (suite *StatusFaveTestSuite) TestGetStatusFavesNone() { func (suite *StatusFaveTestSuite) TestGetStatusFavesNone() {
testStatus := suite.testStatuses["admin_account_status_4"] testStatus := suite.testStatuses["admin_account_status_4"]
faves, err := suite.db.GetStatusFavesForStatus(context.Background(), testStatus.ID) faves, err := suite.db.GetStatusFaves(context.Background(), testStatus.ID)
if err != nil { if err != nil {
suite.FailNow(err.Error()) suite.FailNow(err.Error())
} }

View file

@ -41,10 +41,10 @@ type Media interface {
// DeleteAttachment deletes the attachment with given ID from the database. // DeleteAttachment deletes the attachment with given ID from the database.
DeleteAttachment(ctx context.Context, id string) error DeleteAttachment(ctx context.Context, id string) error
// GetAttachments ... // GetAttachments fetches media attachments up to a given max ID, and at most limit.
GetAttachments(ctx context.Context, maxID string, limit int) ([]*gtsmodel.MediaAttachment, error) GetAttachments(ctx context.Context, maxID string, limit int) ([]*gtsmodel.MediaAttachment, error)
// GetRemoteAttachments ... // GetRemoteAttachments fetches media attachments with a non-empty domain, up to a given max ID, and at most limit.
GetRemoteAttachments(ctx context.Context, maxID string, limit int) ([]*gtsmodel.MediaAttachment, error) GetRemoteAttachments(ctx context.Context, maxID string, limit int) ([]*gtsmodel.MediaAttachment, error)
// GetCachedAttachmentsOlderThan gets limit n remote attachments (including avatars and headers) older than // GetCachedAttachmentsOlderThan gets limit n remote attachments (including avatars and headers) older than

View file

@ -34,6 +34,9 @@ type Status interface {
// GetStatusByURL returns one status from the database, with no rel fields populated, only their linking ID / URIs // GetStatusByURL returns one status from the database, with no rel fields populated, only their linking ID / URIs
GetStatusByURL(ctx context.Context, uri string) (*gtsmodel.Status, error) GetStatusByURL(ctx context.Context, uri string) (*gtsmodel.Status, error)
// GetStatusBoost fetches the status whose boost_of_id column refers to boostOfID, authored by given account ID.
GetStatusBoost(ctx context.Context, boostOfID string, byAccountID string) (*gtsmodel.Status, error)
// PopulateStatus ensures that all sub-models of a status are populated (e.g. mentions, attachments, etc). // PopulateStatus ensures that all sub-models of a status are populated (e.g. mentions, attachments, etc).
PopulateStatus(ctx context.Context, status *gtsmodel.Status) error PopulateStatus(ctx context.Context, status *gtsmodel.Status) error
@ -46,21 +49,27 @@ type Status interface {
// DeleteStatusByID deletes one status from the database. // DeleteStatusByID deletes one status from the database.
DeleteStatusByID(ctx context.Context, id string) error DeleteStatusByID(ctx context.Context, id string) error
// CountStatusReplies returns the amount of replies recorded for a status, or an error if something goes wrong
CountStatusReplies(ctx context.Context, status *gtsmodel.Status) (int, error)
// CountStatusReblogs returns the amount of reblogs/boosts recorded for a status, or an error if something goes wrong
CountStatusReblogs(ctx context.Context, status *gtsmodel.Status) (int, error)
// CountStatusFaves returns the amount of faves/likes recorded for a status, or an error if something goes wrong
CountStatusFaves(ctx context.Context, status *gtsmodel.Status) (int, error)
// GetStatuses gets a slice of statuses corresponding to the given status IDs. // GetStatuses gets a slice of statuses corresponding to the given status IDs.
GetStatusesByIDs(ctx context.Context, ids []string) ([]*gtsmodel.Status, error) GetStatusesByIDs(ctx context.Context, ids []string) ([]*gtsmodel.Status, error)
// GetStatusesUsingEmoji fetches all status models using emoji with given ID stored in their 'emojis' column. // GetStatusesUsingEmoji fetches all status models using emoji with given ID stored in their 'emojis' column.
GetStatusesUsingEmoji(ctx context.Context, emojiID string) ([]*gtsmodel.Status, error) GetStatusesUsingEmoji(ctx context.Context, emojiID string) ([]*gtsmodel.Status, error)
// GetStatusReplies returns the *direct* (i.e. in_reply_to_id column) replies to this status ID.
GetStatusReplies(ctx context.Context, statusID string) ([]*gtsmodel.Status, error)
// CountStatusReplies returns the number of stored *direct* (i.e. in_reply_to_id column) replies to this status ID.
CountStatusReplies(ctx context.Context, statusID string) (int, error)
// GetStatusBoosts returns all statuses whose boost_of_id column refer to given status ID.
GetStatusBoosts(ctx context.Context, statusID string) ([]*gtsmodel.Status, error)
// CountStatusBoosts returns the number of stored boosts for status ID.
CountStatusBoosts(ctx context.Context, statusID string) (int, error)
// IsStatusBoostedBy checks whether the given status ID is boosted by account ID.
IsStatusBoostedBy(ctx context.Context, statusID string, accountID string) (bool, error)
// GetStatusParents gets the parent statuses of a given status. // GetStatusParents gets the parent statuses of a given status.
// //
// If onlyDirect is true, only the immediate parent will be returned. // If onlyDirect is true, only the immediate parent will be returned.
@ -71,19 +80,9 @@ type Status interface {
// If onlyDirect is true, only the immediate children will be returned. // If onlyDirect is true, only the immediate children will be returned.
GetStatusChildren(ctx context.Context, status *gtsmodel.Status, onlyDirect bool, minID string) ([]*gtsmodel.Status, error) GetStatusChildren(ctx context.Context, status *gtsmodel.Status, onlyDirect bool, minID string) ([]*gtsmodel.Status, error)
// IsStatusFavedBy checks if a given status has been faved by a given account ID
IsStatusFavedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error)
// IsStatusRebloggedBy checks if a given status has been reblogged/boosted by a given account ID
IsStatusRebloggedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error)
// IsStatusMutedBy checks if a given status has been muted by a given account ID // IsStatusMutedBy checks if a given status has been muted by a given account ID
IsStatusMutedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error) IsStatusMutedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error)
// IsStatusBookmarkedBy checks if a given status has been bookmarked by a given account ID // IsStatusBookmarkedBy checks if a given status has been bookmarked by a given account ID
IsStatusBookmarkedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error) IsStatusBookmarkedBy(ctx context.Context, status *gtsmodel.Status, accountID string) (bool, error)
// GetStatusReblogs returns a slice of statuses that are a boost/reblog of the given status.
// This slice will be unfiltered, not taking account of blocks and whatnot, so filter it before serving it back to a user.
GetStatusReblogs(ctx context.Context, status *gtsmodel.Status) ([]*gtsmodel.Status, error)
} }

View file

@ -24,16 +24,15 @@ import (
) )
type StatusFave interface { type StatusFave interface {
// GetStatusFaveByAccountID gets one status fave created by the given // GetStatusFaveByAccountID gets one status fave created by the given accountID, targeting the given statusID.
// accountID, targeting the given statusID.
GetStatusFave(ctx context.Context, accountID string, statusID string) (*gtsmodel.StatusFave, error) GetStatusFave(ctx context.Context, accountID string, statusID string) (*gtsmodel.StatusFave, error)
// GetStatusFave returns one status fave with the given id. // GetStatusFave returns one status fave with the given id.
GetStatusFaveByID(ctx context.Context, id string) (*gtsmodel.StatusFave, error) GetStatusFaveByID(ctx context.Context, id string) (*gtsmodel.StatusFave, error)
// GetStatusFaves returns a slice of faves/likes of the given status. // GetStatusFaves returns a slice of faves/likes of the status with given ID.
// This slice will be unfiltered, not taking account of blocks and whatnot, so filter it before serving it back to a user. // This slice will be unfiltered, not taking account of blocks and whatnot, so filter it before serving it back to a user.
GetStatusFavesForStatus(ctx context.Context, statusID string) ([]*gtsmodel.StatusFave, error) GetStatusFaves(ctx context.Context, statusID string) ([]*gtsmodel.StatusFave, error)
// PopulateStatusFave ensures that all sub-models of a fave are populated (account, status, etc). // PopulateStatusFave ensures that all sub-models of a fave are populated (account, status, etc).
PopulateStatusFave(ctx context.Context, statusFave *gtsmodel.StatusFave) error PopulateStatusFave(ctx context.Context, statusFave *gtsmodel.StatusFave) error
@ -59,8 +58,13 @@ type StatusFave interface {
// At least one parameter must not be an empty string. // At least one parameter must not be an empty string.
DeleteStatusFaves(ctx context.Context, targetAccountID string, originAccountID string) error DeleteStatusFaves(ctx context.Context, targetAccountID string, originAccountID string) error
// DeleteStatusFavesForStatus deletes all status faves that target the // DeleteStatusFavesForStatus deletes all status faves that target the given status ID.
// given status ID. This is useful when a status has been deleted, and you need // This is useful when a status has been deleted, and you need to clean up after it.
// to clean up after it.
DeleteStatusFavesForStatus(ctx context.Context, statusID string) error DeleteStatusFavesForStatus(ctx context.Context, statusID string) error
// CountStatusFaves returns the number of status favourites registered for status with ID.
CountStatusFaves(ctx context.Context, statusID string) (int, error)
// IsStatusFavedBy returns whether the status with ID has been favourited by account with ID.
IsStatusFavedBy(ctx context.Context, statusID string, accountID string) (bool, error)
} }

View file

@ -19,26 +19,45 @@ package gtserror
import ( import (
"errors" "errors"
"fmt"
"strings"
) )
// MultiError allows encapsulating multiple errors under a singular instance, // MultiError allows encapsulating multiple
// which is useful when you only want to log on errors, not return early / bubble up. // errors under a singular instance, which
type MultiError []string // is useful when you only want to log on
// errors, not return early / bubble up.
type MultiError []error
func (e *MultiError) Append(err error) { // NewMultiError returns a *MultiError with
*e = append(*e, err.Error()) // the capacity of its underlying error slice
// set to the provided value.
//
// This capacity can be exceeded if necessary,
// but it saves a teeny tiny bit of memory if
// callers set it correctly.
//
// If you don't know in advance what the capacity
// must be, just use new(MultiError) instead.
func NewMultiError(capacity int) MultiError {
return make([]error, 0, capacity)
} }
func (e *MultiError) Appendf(format string, args ...any) { // Append the given error to the MultiError.
*e = append(*e, fmt.Sprintf(format, args...)) func (m *MultiError) Append(err error) {
(*m) = append((*m), err)
} }
// Combine converts this multiError to a singular error instance, returning nil if empty. // Append the given format string to the MultiError.
func (e MultiError) Combine() error { //
if len(e) == 0 { // It is valid to use %w in the format string
return nil // to wrap any other errors.
} func (m *MultiError) Appendf(format string, args ...any) {
return errors.New(`"` + strings.Join(e, `","`) + `"`) err := newfAt(3, format, args...)
(*m) = append((*m), err)
}
// Combine the MultiError into a single error.
//
// Unwrap will work on the returned error as expected.
func (m MultiError) Combine() error {
return errors.Join(m...)
} }

View file

@ -0,0 +1,64 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package gtserror_test
import (
"errors"
"testing"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
)
func TestMultiError(t *testing.T) {
errs := gtserror.MultiError([]error{
db.ErrNoEntries,
errors.New("oopsie woopsie we did a fucky wucky etc"),
})
errs.Appendf("appended + wrapped error: %w", db.ErrAlreadyExists)
err := errs.Combine()
if !errors.Is(err, db.ErrNoEntries) {
t.Error("should be db.ErrNoEntries")
}
if !errors.Is(err, db.ErrAlreadyExists) {
t.Error("should be db.ErrAlreadyExists")
}
if errors.Is(err, db.ErrBusyTimeout) {
t.Error("should not be db.ErrBusyTimeout")
}
errString := err.Error()
expected := `sql: no rows in result set
oopsie woopsie we did a fucky wucky etc
TestMultiError: appended + wrapped error: already exists`
if errString != expected {
t.Errorf("errString '%s' should be '%s'", errString, expected)
}
}
func TestMultiErrorEmpty(t *testing.T) {
err := new(gtserror.MultiError).Combine()
if err != nil {
t.Errorf("should be nil")
}
}

View file

@ -108,7 +108,7 @@ type Config struct {
// - request logging // - request logging
type Client struct { type Client struct {
client http.Client client http.Client
badHosts cache.Cache[string, struct{}] badHosts cache.TTLCache[string, struct{}]
bodyMax int64 bodyMax int64
} }
@ -178,7 +178,7 @@ func New(cfg Config) *Client {
} }
// Initiate outgoing bad hosts lookup cache. // Initiate outgoing bad hosts lookup cache.
c.badHosts = cache.New[string, struct{}](0, 1000, 0) c.badHosts = cache.NewTTL[string, struct{}](0, 1000, 0)
c.badHosts.SetTTL(time.Hour, false) c.badHosts.SetTTL(time.Hour, false)
if !c.badHosts.Start(time.Minute) { if !c.badHosts.Start(time.Minute) {
log.Panic(nil, "failed to start transport controller cache") log.Panic(nil, "failed to start transport controller cache")

View file

@ -330,7 +330,7 @@ statusLoop:
}) })
// Look for any boosts of this status in DB. // Look for any boosts of this status in DB.
boosts, err := p.state.DB.GetStatusReblogs(ctx, status) boosts, err := p.state.DB.GetStatusBoosts(ctx, status.ID)
if err != nil && !errors.Is(err, db.ErrNoEntries) { if err != nil && !errors.Is(err, db.ErrNoEntries) {
return gtserror.Newf("error fetching status reblogs for %s: %w", status.ID, err) return gtserror.Newf("error fetching status reblogs for %s: %w", status.ID, err)
} }

View file

@ -20,7 +20,6 @@ package processing
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/db" "github.com/superseriousbusiness/gotosocial/internal/db"
@ -42,13 +41,13 @@ import (
func (p *Processor) timelineAndNotifyStatus(ctx context.Context, status *gtsmodel.Status) error { func (p *Processor) timelineAndNotifyStatus(ctx context.Context, status *gtsmodel.Status) error {
// Ensure status fully populated; including account, mentions, etc. // Ensure status fully populated; including account, mentions, etc.
if err := p.state.DB.PopulateStatus(ctx, status); err != nil { if err := p.state.DB.PopulateStatus(ctx, status); err != nil {
return fmt.Errorf("timelineAndNotifyStatus: error populating status with id %s: %w", status.ID, err) return gtserror.Newf("error populating status with id %s: %w", status.ID, err)
} }
// Get local followers of the account that posted the status. // Get local followers of the account that posted the status.
follows, err := p.state.DB.GetAccountLocalFollowers(ctx, status.AccountID) follows, err := p.state.DB.GetAccountLocalFollowers(ctx, status.AccountID)
if err != nil { if err != nil {
return fmt.Errorf("timelineAndNotifyStatus: error getting local followers for account id %s: %w", status.AccountID, err) return gtserror.Newf("error getting local followers for account id %s: %w", status.AccountID, err)
} }
// If the poster is also local, add a fake entry for them // If the poster is also local, add a fake entry for them
@ -66,12 +65,12 @@ func (p *Processor) timelineAndNotifyStatus(ctx context.Context, status *gtsmode
// This will also handle notifying any followers with notify // This will also handle notifying any followers with notify
// set to true on their follow. // set to true on their follow.
if err := p.timelineAndNotifyStatusForFollowers(ctx, status, follows); err != nil { if err := p.timelineAndNotifyStatusForFollowers(ctx, status, follows); err != nil {
return fmt.Errorf("timelineAndNotifyStatus: error timelining status %s for followers: %w", status.ID, err) return gtserror.Newf("error timelining status %s for followers: %w", status.ID, err)
} }
// Notify each local account that's mentioned by this status. // Notify each local account that's mentioned by this status.
if err := p.notifyStatusMentions(ctx, status); err != nil { if err := p.notifyStatusMentions(ctx, status); err != nil {
return fmt.Errorf("timelineAndNotifyStatus: error notifying status mentions for status %s: %w", status.ID, err) return gtserror.Newf("error notifying status mentions for status %s: %w", status.ID, err)
} }
return nil return nil
@ -79,7 +78,7 @@ func (p *Processor) timelineAndNotifyStatus(ctx context.Context, status *gtsmode
func (p *Processor) timelineAndNotifyStatusForFollowers(ctx context.Context, status *gtsmodel.Status, follows []*gtsmodel.Follow) error { func (p *Processor) timelineAndNotifyStatusForFollowers(ctx context.Context, status *gtsmodel.Status, follows []*gtsmodel.Follow) error {
var ( var (
errs = make(gtserror.MultiError, 0, len(follows)) errs = gtserror.NewMultiError(len(follows))
boost = status.BoostOfID != "" boost = status.BoostOfID != ""
reply = status.InReplyToURI != "" reply = status.InReplyToURI != ""
) )
@ -100,7 +99,7 @@ func (p *Processor) timelineAndNotifyStatusForFollowers(ctx context.Context, sta
follow.ID, follow.ID,
) )
if err != nil && !errors.Is(err, db.ErrNoEntries) { if err != nil && !errors.Is(err, db.ErrNoEntries) {
errs.Append(fmt.Errorf("timelineAndNotifyStatusForFollowers: error list timelining status: %w", err)) errs.Appendf("error list timelining status: %w", err)
continue continue
} }
@ -113,7 +112,7 @@ func (p *Processor) timelineAndNotifyStatusForFollowers(ctx context.Context, sta
status, status,
stream.TimelineList+":"+listEntry.ListID, // key streamType to this specific list stream.TimelineList+":"+listEntry.ListID, // key streamType to this specific list
); err != nil { ); err != nil {
errs.Append(fmt.Errorf("timelineAndNotifyStatusForFollowers: error list timelining status: %w", err)) errs.Appendf("error list timelining status: %w", err)
continue continue
} }
} }
@ -128,7 +127,7 @@ func (p *Processor) timelineAndNotifyStatusForFollowers(ctx context.Context, sta
status, status,
stream.TimelineHome, stream.TimelineHome,
); err != nil { ); err != nil {
errs.Append(fmt.Errorf("timelineAndNotifyStatusForFollowers: error home timelining status: %w", err)) errs.Appendf("error home timelining status: %w", err)
continue continue
} else if !timelined { } else if !timelined {
// Status wasn't added to home tomeline, // Status wasn't added to home tomeline,
@ -162,11 +161,15 @@ func (p *Processor) timelineAndNotifyStatusForFollowers(ctx context.Context, sta
status.AccountID, status.AccountID,
status.ID, status.ID,
); err != nil { ); err != nil {
errs.Append(fmt.Errorf("timelineAndNotifyStatusForFollowers: error notifying account %s about new status: %w", follow.AccountID, err)) errs.Appendf("error notifying account %s about new status: %w", follow.AccountID, err)
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
// timelineStatus uses the provided ingest function to put the given // timelineStatus uses the provided ingest function to put the given
@ -185,7 +188,7 @@ func (p *Processor) timelineStatus(
// Make sure the status is timelineable. // Make sure the status is timelineable.
// This works for both home and list timelines. // This works for both home and list timelines.
if timelineable, err := p.filter.StatusHomeTimelineable(ctx, account, status); err != nil { if timelineable, err := p.filter.StatusHomeTimelineable(ctx, account, status); err != nil {
err = fmt.Errorf("timelineStatusForAccount: error getting timelineability for status for timeline with id %s: %w", account.ID, err) err = gtserror.Newf("error getting timelineability for status for timeline with id %s: %w", account.ID, err)
return false, err return false, err
} else if !timelineable { } else if !timelineable {
// Nothing to do. // Nothing to do.
@ -194,7 +197,7 @@ func (p *Processor) timelineStatus(
// Ingest status into given timeline using provided function. // Ingest status into given timeline using provided function.
if inserted, err := ingest(ctx, timelineID, status); err != nil { if inserted, err := ingest(ctx, timelineID, status); err != nil {
err = fmt.Errorf("timelineStatusForAccount: error ingesting status %s: %w", status.ID, err) err = gtserror.Newf("error ingesting status %s: %w", status.ID, err)
return false, err return false, err
} else if !inserted { } else if !inserted {
// Nothing more to do. // Nothing more to do.
@ -204,12 +207,12 @@ func (p *Processor) timelineStatus(
// The status was inserted so stream it to the user. // The status was inserted so stream it to the user.
apiStatus, err := p.tc.StatusToAPIStatus(ctx, status, account) apiStatus, err := p.tc.StatusToAPIStatus(ctx, status, account)
if err != nil { if err != nil {
err = fmt.Errorf("timelineStatusForAccount: error converting status %s to frontend representation: %w", status.ID, err) err = gtserror.Newf("error converting status %s to frontend representation: %w", status.ID, err)
return true, err return true, err
} }
if err := p.stream.Update(apiStatus, account, []string{streamType}); err != nil { if err := p.stream.Update(apiStatus, account, []string{streamType}); err != nil {
err = fmt.Errorf("timelineStatusForAccount: error streaming update for status %s: %w", status.ID, err) err = gtserror.Newf("error streaming update for status %s: %w", status.ID, err)
return true, err return true, err
} }
@ -217,7 +220,7 @@ func (p *Processor) timelineStatus(
} }
func (p *Processor) notifyStatusMentions(ctx context.Context, status *gtsmodel.Status) error { func (p *Processor) notifyStatusMentions(ctx context.Context, status *gtsmodel.Status) error {
errs := make(gtserror.MultiError, 0, len(status.Mentions)) errs := gtserror.NewMultiError(len(status.Mentions))
for _, m := range status.Mentions { for _, m := range status.Mentions {
if err := p.notify( if err := p.notify(
@ -231,7 +234,11 @@ func (p *Processor) notifyStatusMentions(ctx context.Context, status *gtsmodel.S
} }
} }
return errs.Combine() if err := errs.Combine(); err != nil {
return gtserror.Newf("%w", err)
}
return nil
} }
func (p *Processor) notifyFollowRequest(ctx context.Context, followRequest *gtsmodel.FollowRequest) error { func (p *Processor) notifyFollowRequest(ctx context.Context, followRequest *gtsmodel.FollowRequest) error {
@ -255,13 +262,13 @@ func (p *Processor) notifyFollow(ctx context.Context, follow *gtsmodel.Follow, t
) )
if err != nil && !errors.Is(err, db.ErrNoEntries) { if err != nil && !errors.Is(err, db.ErrNoEntries) {
// Proper error while checking. // Proper error while checking.
return fmt.Errorf("notifyFollow: db error checking for previous follow request notification: %w", err) return gtserror.Newf("db error checking for previous follow request notification: %w", err)
} }
if prevNotif != nil { if prevNotif != nil {
// Previous notification existed, delete. // Previous notification existed, delete.
if err := p.state.DB.DeleteNotificationByID(ctx, prevNotif.ID); err != nil { if err := p.state.DB.DeleteNotificationByID(ctx, prevNotif.ID); err != nil {
return fmt.Errorf("notifyFollow: db error removing previous follow request notification %s: %w", prevNotif.ID, err) return gtserror.Newf("db error removing previous follow request notification %s: %w", prevNotif.ID, err)
} }
} }
@ -319,7 +326,7 @@ func (p *Processor) notify(
) error { ) error {
targetAccount, err := p.state.DB.GetAccountByID(ctx, targetAccountID) targetAccount, err := p.state.DB.GetAccountByID(ctx, targetAccountID)
if err != nil { if err != nil {
return fmt.Errorf("notify: error getting target account %s: %w", targetAccountID, err) return gtserror.Newf("error getting target account %s: %w", targetAccountID, err)
} }
if !targetAccount.IsLocal() { if !targetAccount.IsLocal() {
@ -340,7 +347,7 @@ func (p *Processor) notify(
return nil return nil
} else if !errors.Is(err, db.ErrNoEntries) { } else if !errors.Is(err, db.ErrNoEntries) {
// Real error. // Real error.
return fmt.Errorf("notify: error checking existence of notification: %w", err) return gtserror.Newf("error checking existence of notification: %w", err)
} }
// Notification doesn't yet exist, so // Notification doesn't yet exist, so
@ -354,17 +361,17 @@ func (p *Processor) notify(
} }
if err := p.state.DB.PutNotification(ctx, notif); err != nil { if err := p.state.DB.PutNotification(ctx, notif); err != nil {
return fmt.Errorf("notify: error putting notification in database: %w", err) return gtserror.Newf("error putting notification in database: %w", err)
} }
// Stream notification to the user. // Stream notification to the user.
apiNotif, err := p.tc.NotificationToAPINotification(ctx, notif) apiNotif, err := p.tc.NotificationToAPINotification(ctx, notif)
if err != nil { if err != nil {
return fmt.Errorf("notify: error converting notification to api representation: %w", err) return gtserror.Newf("error converting notification to api representation: %w", err)
} }
if err := p.stream.Notify(apiNotif, targetAccount); err != nil { if err := p.stream.Notify(apiNotif, targetAccount); err != nil {
return fmt.Errorf("notify: error streaming notification to account: %w", err) return gtserror.Newf("error streaming notification to account: %w", err)
} }
return nil return nil
@ -373,6 +380,8 @@ func (p *Processor) notify(
// wipeStatus contains common logic used to totally delete a status // wipeStatus contains common logic used to totally delete a status
// + all its attachments, notifications, boosts, and timeline entries. // + all its attachments, notifications, boosts, and timeline entries.
func (p *Processor) wipeStatus(ctx context.Context, statusToDelete *gtsmodel.Status, deleteAttachments bool) error { func (p *Processor) wipeStatus(ctx context.Context, statusToDelete *gtsmodel.Status, deleteAttachments bool) error {
var errs gtserror.MultiError
// either delete all attachments for this status, or simply // either delete all attachments for this status, or simply
// unattach all attachments for this status, so they'll be // unattach all attachments for this status, so they'll be
// cleaned later by a separate process; reason to unattach rather // cleaned later by a separate process; reason to unattach rather
@ -382,14 +391,14 @@ func (p *Processor) wipeStatus(ctx context.Context, statusToDelete *gtsmodel.Sta
// todo: p.state.DB.DeleteAttachmentsForStatus // todo: p.state.DB.DeleteAttachmentsForStatus
for _, a := range statusToDelete.AttachmentIDs { for _, a := range statusToDelete.AttachmentIDs {
if err := p.media.Delete(ctx, a); err != nil { if err := p.media.Delete(ctx, a); err != nil {
return err errs.Appendf("error deleting media: %w", err)
} }
} }
} else { } else {
// todo: p.state.DB.UnattachAttachmentsForStatus // todo: p.state.DB.UnattachAttachmentsForStatus
for _, a := range statusToDelete.AttachmentIDs { for _, a := range statusToDelete.AttachmentIDs {
if _, err := p.media.Unattach(ctx, statusToDelete.Account, a); err != nil { if _, err := p.media.Unattach(ctx, statusToDelete.Account, a); err != nil {
return err errs.Appendf("error unattaching media: %w", err)
} }
} }
} }
@ -398,44 +407,55 @@ func (p *Processor) wipeStatus(ctx context.Context, statusToDelete *gtsmodel.Sta
// todo: p.state.DB.DeleteMentionsForStatus // todo: p.state.DB.DeleteMentionsForStatus
for _, id := range statusToDelete.MentionIDs { for _, id := range statusToDelete.MentionIDs {
if err := p.state.DB.DeleteMentionByID(ctx, id); err != nil { if err := p.state.DB.DeleteMentionByID(ctx, id); err != nil {
return err errs.Appendf("error deleting status mention: %w", err)
} }
} }
// delete all notification entries generated by this status // delete all notification entries generated by this status
if err := p.state.DB.DeleteNotificationsForStatus(ctx, statusToDelete.ID); err != nil { if err := p.state.DB.DeleteNotificationsForStatus(ctx, statusToDelete.ID); err != nil {
return err errs.Appendf("error deleting status notifications: %w", err)
} }
// delete all bookmarks that point to this status // delete all bookmarks that point to this status
if err := p.state.DB.DeleteStatusBookmarksForStatus(ctx, statusToDelete.ID); err != nil { if err := p.state.DB.DeleteStatusBookmarksForStatus(ctx, statusToDelete.ID); err != nil {
return err errs.Appendf("error deleting status bookmarks: %w", err)
} }
// delete all faves of this status // delete all faves of this status
if err := p.state.DB.DeleteStatusFavesForStatus(ctx, statusToDelete.ID); err != nil { if err := p.state.DB.DeleteStatusFavesForStatus(ctx, statusToDelete.ID); err != nil {
return err errs.Appendf("error deleting status faves: %w", err)
} }
// delete all boosts for this status + remove them from timelines // delete all boosts for this status + remove them from timelines
if boosts, err := p.state.DB.GetStatusReblogs(ctx, statusToDelete); err == nil { boosts, err := p.state.DB.GetStatusBoosts(
for _, b := range boosts { // we MUST set a barebones context here,
if err := p.deleteStatusFromTimelines(ctx, b.ID); err != nil { // as depending on where it came from the
return err // original BoostOf may already be gone.
} gtscontext.SetBarebones(ctx),
if err := p.state.DB.DeleteStatusByID(ctx, b.ID); err != nil { statusToDelete.ID)
return err if err != nil {
} errs.Appendf("error fetching status boosts: %w", err)
}
for _, b := range boosts {
if err := p.deleteStatusFromTimelines(ctx, b.ID); err != nil {
errs.Appendf("error deleting boost from timelines: %w", err)
}
if err := p.state.DB.DeleteStatusByID(ctx, b.ID); err != nil {
errs.Appendf("error deleting boost: %w", err)
} }
} }
// delete this status from any and all timelines // delete this status from any and all timelines
if err := p.deleteStatusFromTimelines(ctx, statusToDelete.ID); err != nil { if err := p.deleteStatusFromTimelines(ctx, statusToDelete.ID); err != nil {
return err errs.Appendf("error deleting status from timelines: %w", err)
} }
// delete the status itself // finally, delete the status itself
return p.state.DB.DeleteStatusByID(ctx, statusToDelete.ID) if err := p.state.DB.DeleteStatusByID(ctx, statusToDelete.ID); err != nil {
errs.Appendf("error deleting status: %w", err)
}
return errs.Combine()
} }
// deleteStatusFromTimelines completely removes the given status from all timelines. // deleteStatusFromTimelines completely removes the given status from all timelines.
@ -479,7 +499,7 @@ func (p *Processor) invalidateStatusFromTimelines(ctx context.Context, statusID
func (p *Processor) emailReport(ctx context.Context, report *gtsmodel.Report) error { func (p *Processor) emailReport(ctx context.Context, report *gtsmodel.Report) error {
instance, err := p.state.DB.GetInstance(ctx, config.GetHost()) instance, err := p.state.DB.GetInstance(ctx, config.GetHost())
if err != nil { if err != nil {
return fmt.Errorf("emailReport: error getting instance: %w", err) return gtserror.Newf("error getting instance: %w", err)
} }
toAddresses, err := p.state.DB.GetInstanceModeratorAddresses(ctx) toAddresses, err := p.state.DB.GetInstanceModeratorAddresses(ctx)
@ -488,20 +508,20 @@ func (p *Processor) emailReport(ctx context.Context, report *gtsmodel.Report) er
// No registered moderator addresses. // No registered moderator addresses.
return nil return nil
} }
return fmt.Errorf("emailReport: error getting instance moderator addresses: %w", err) return gtserror.Newf("error getting instance moderator addresses: %w", err)
} }
if report.Account == nil { if report.Account == nil {
report.Account, err = p.state.DB.GetAccountByID(ctx, report.AccountID) report.Account, err = p.state.DB.GetAccountByID(ctx, report.AccountID)
if err != nil { if err != nil {
return fmt.Errorf("emailReport: error getting report account: %w", err) return gtserror.Newf("error getting report account: %w", err)
} }
} }
if report.TargetAccount == nil { if report.TargetAccount == nil {
report.TargetAccount, err = p.state.DB.GetAccountByID(ctx, report.TargetAccountID) report.TargetAccount, err = p.state.DB.GetAccountByID(ctx, report.TargetAccountID)
if err != nil { if err != nil {
return fmt.Errorf("emailReport: error getting report target account: %w", err) return gtserror.Newf("error getting report target account: %w", err)
} }
} }
@ -514,7 +534,7 @@ func (p *Processor) emailReport(ctx context.Context, report *gtsmodel.Report) er
} }
if err := p.emailSender.SendNewReportEmail(toAddresses, reportData); err != nil { if err := p.emailSender.SendNewReportEmail(toAddresses, reportData); err != nil {
return fmt.Errorf("emailReport: error emailing instance moderators: %w", err) return gtserror.Newf("error emailing instance moderators: %w", err)
} }
return nil return nil
@ -523,7 +543,7 @@ func (p *Processor) emailReport(ctx context.Context, report *gtsmodel.Report) er
func (p *Processor) emailReportClosed(ctx context.Context, report *gtsmodel.Report) error { func (p *Processor) emailReportClosed(ctx context.Context, report *gtsmodel.Report) error {
user, err := p.state.DB.GetUserByAccountID(ctx, report.Account.ID) user, err := p.state.DB.GetUserByAccountID(ctx, report.Account.ID)
if err != nil { if err != nil {
return fmt.Errorf("emailReportClosed: db error getting user: %w", err) return gtserror.Newf("db error getting user: %w", err)
} }
if user.ConfirmedAt.IsZero() || !*user.Approved || *user.Disabled || user.Email == "" { if user.ConfirmedAt.IsZero() || !*user.Approved || *user.Disabled || user.Email == "" {
@ -537,20 +557,20 @@ func (p *Processor) emailReportClosed(ctx context.Context, report *gtsmodel.Repo
instance, err := p.state.DB.GetInstance(ctx, config.GetHost()) instance, err := p.state.DB.GetInstance(ctx, config.GetHost())
if err != nil { if err != nil {
return fmt.Errorf("emailReportClosed: db error getting instance: %w", err) return gtserror.Newf("db error getting instance: %w", err)
} }
if report.Account == nil { if report.Account == nil {
report.Account, err = p.state.DB.GetAccountByID(ctx, report.AccountID) report.Account, err = p.state.DB.GetAccountByID(ctx, report.AccountID)
if err != nil { if err != nil {
return fmt.Errorf("emailReportClosed: error getting report account: %w", err) return gtserror.Newf("error getting report account: %w", err)
} }
} }
if report.TargetAccount == nil { if report.TargetAccount == nil {
report.TargetAccount, err = p.state.DB.GetAccountByID(ctx, report.TargetAccountID) report.TargetAccount, err = p.state.DB.GetAccountByID(ctx, report.TargetAccountID)
if err != nil { if err != nil {
return fmt.Errorf("emailReportClosed: error getting report target account: %w", err) return gtserror.Newf("error getting report target account: %w", err)
} }
} }

View file

@ -49,6 +49,13 @@ func (p *Processor) Accounts(
resolve bool, resolve bool,
following bool, following bool,
) ([]*apimodel.Account, gtserror.WithCode) { ) ([]*apimodel.Account, gtserror.WithCode) {
// Don't include instance accounts in this search.
//
// We don't want someone to start typing '@mastodon'
// and then get a million instance service accounts
// in their search results.
const includeInstanceAccounts = false
var ( var (
foundAccounts = make([]*gtsmodel.Account, 0, limit) foundAccounts = make([]*gtsmodel.Account, 0, limit)
appendAccount = func(foundAccount *gtsmodel.Account) { foundAccounts = append(foundAccounts, foundAccount) } appendAccount = func(foundAccount *gtsmodel.Account) { foundAccounts = append(foundAccounts, foundAccount) }
@ -83,7 +90,12 @@ func (p *Processor) Accounts(
// if caller supplied an offset greater than 0, return // if caller supplied an offset greater than 0, return
// nothing as though there were no additional results. // nothing as though there were no additional results.
if offset > 0 { if offset > 0 {
return p.packageAccounts(ctx, requestingAccount, foundAccounts) return p.packageAccounts(
ctx,
requestingAccount,
foundAccounts,
includeInstanceAccounts,
)
} }
// Return all accounts we can find that match the // Return all accounts we can find that match the
@ -106,5 +118,10 @@ func (p *Processor) Accounts(
} }
// Return whatever we got (if anything). // Return whatever we got (if anything).
return p.packageAccounts(ctx, requestingAccount, foundAccounts) return p.packageAccounts(
ctx,
requestingAccount,
foundAccounts,
includeInstanceAccounts,
)
} }

View file

@ -70,6 +70,13 @@ func (p *Processor) Get(
queryType = strings.TrimSpace(strings.ToLower(req.QueryType)) // Trim trailing/leading whitespace; convert to lowercase. queryType = strings.TrimSpace(strings.ToLower(req.QueryType)) // Trim trailing/leading whitespace; convert to lowercase.
resolve = req.Resolve resolve = req.Resolve
following = req.Following following = req.Following
// Include instance accounts in the first
// parts of this search. This will be
// changed to 'false' when doing text
// search in the database in the latter
// parts of this function.
includeInstanceAccounts = true
) )
// Validate query. // Validate query.
@ -109,7 +116,12 @@ func (p *Processor) Get(
// supply an offset greater than 0, return nothing as // supply an offset greater than 0, return nothing as
// though there were no additional results. // though there were no additional results.
if req.Offset > 0 { if req.Offset > 0 {
return p.packageSearchResult(ctx, account, nil, nil, nil, req.APIv1) return p.packageSearchResult(
ctx,
account,
nil, nil, nil, // No results.
req.APIv1, includeInstanceAccounts,
)
} }
var ( var (
@ -167,6 +179,7 @@ func (p *Processor) Get(
foundStatuses, foundStatuses,
foundTags, foundTags,
req.APIv1, req.APIv1,
includeInstanceAccounts,
) )
} }
} }
@ -196,6 +209,7 @@ func (p *Processor) Get(
foundStatuses, foundStatuses,
foundTags, foundTags,
req.APIv1, req.APIv1,
includeInstanceAccounts,
) )
} }
@ -236,11 +250,20 @@ func (p *Processor) Get(
foundStatuses, foundStatuses,
foundTags, foundTags,
req.APIv1, req.APIv1,
includeInstanceAccounts,
) )
} }
// As a last resort, search for accounts and // As a last resort, search for accounts and
// statuses using the query as arbitrary text. // statuses using the query as arbitrary text.
//
// At this point we no longer want to include
// instance accounts in the results, since searching
// for something like 'mastodon', for example, will
// include a million instance/service accounts that
// have 'mastodon' in the domain, and therefore in
// the username, making the search results useless.
includeInstanceAccounts = false
if err := p.byText( if err := p.byText(
ctx, ctx,
account, account,
@ -267,6 +290,7 @@ func (p *Processor) Get(
foundStatuses, foundStatuses,
foundTags, foundTags,
req.APIv1, req.APIv1,
includeInstanceAccounts,
) )
} }

View file

@ -44,6 +44,13 @@ func (p *Processor) Lookup(
requestingAccount *gtsmodel.Account, requestingAccount *gtsmodel.Account,
query string, query string,
) (*apimodel.Account, gtserror.WithCode) { ) (*apimodel.Account, gtserror.WithCode) {
// Include instance accounts in this search.
//
// Lookup is for one specific account so we
// can't return loads of instance accounts by
// accident.
const includeInstanceAccounts = true
// Validate query. // Validate query.
query = strings.TrimSpace(query) query = strings.TrimSpace(query)
if query == "" { if query == "" {
@ -96,7 +103,12 @@ func (p *Processor) Lookup(
// using the packageAccounts function to return it. This // using the packageAccounts function to return it. This
// may cause the account to be filtered out if it's not // may cause the account to be filtered out if it's not
// visible to the caller, so anticipate this. // visible to the caller, so anticipate this.
accounts, errWithCode := p.packageAccounts(ctx, requestingAccount, []*gtsmodel.Account{account}) accounts, errWithCode := p.packageAccounts(
ctx,
requestingAccount,
[]*gtsmodel.Account{account},
includeInstanceAccounts,
)
if errWithCode != nil { if errWithCode != nil {
return nil, errWithCode return nil, errWithCode
} }

View file

@ -48,11 +48,12 @@ func (p *Processor) packageAccounts(
ctx context.Context, ctx context.Context,
requestingAccount *gtsmodel.Account, requestingAccount *gtsmodel.Account,
accounts []*gtsmodel.Account, accounts []*gtsmodel.Account,
includeInstanceAccounts bool,
) ([]*apimodel.Account, gtserror.WithCode) { ) ([]*apimodel.Account, gtserror.WithCode) {
apiAccounts := make([]*apimodel.Account, 0, len(accounts)) apiAccounts := make([]*apimodel.Account, 0, len(accounts))
for _, account := range accounts { for _, account := range accounts {
if account.IsInstance() { if !includeInstanceAccounts && account.IsInstance() {
// No need to show instance accounts. // No need to show instance accounts.
continue continue
} }
@ -169,8 +170,9 @@ func (p *Processor) packageSearchResult(
statuses []*gtsmodel.Status, statuses []*gtsmodel.Status,
tags []*gtsmodel.Tag, tags []*gtsmodel.Tag,
v1 bool, v1 bool,
includeInstanceAccounts bool,
) (*apimodel.SearchResult, gtserror.WithCode) { ) (*apimodel.SearchResult, gtserror.WithCode) {
apiAccounts, errWithCode := p.packageAccounts(ctx, requestingAccount, accounts) apiAccounts, errWithCode := p.packageAccounts(ctx, requestingAccount, accounts, includeInstanceAccounts)
if errWithCode != nil { if errWithCode != nil {
return nil, errWithCode return nil, errWithCode
} }

View file

@ -106,47 +106,24 @@ func (p *Processor) BoostRemove(ctx context.Context, requestingAccount *gtsmodel
return nil, gtserror.NewErrorNotFound(errors.New("status is not visible")) return nil, gtserror.NewErrorNotFound(errors.New("status is not visible"))
} }
// check if we actually have a boost for this status // Check whether the requesting account has boosted the given status ID.
var toUnboost bool boost, err := p.state.DB.GetStatusBoost(ctx, targetStatusID, requestingAccount.ID)
gtsBoost := &gtsmodel.Status{}
where := []db.Where{
{
Key: "boost_of_id",
Value: targetStatusID,
},
{
Key: "account_id",
Value: requestingAccount.ID,
},
}
err = p.state.DB.GetWhere(ctx, where, gtsBoost)
if err == nil {
// we have a boost
toUnboost = true
}
if err != nil { if err != nil {
// something went wrong in the db finding the boost return nil, gtserror.NewErrorNotFound(fmt.Errorf("error checking status boost %s: %w", targetStatusID, err))
if err != db.ErrNoEntries {
return nil, gtserror.NewErrorInternalError(fmt.Errorf("error fetching existing boost from database: %s", err))
}
// we just don't have a boost
toUnboost = false
} }
if toUnboost { if boost != nil {
// pin some stuff onto the boost while we have it out of the db // pin some stuff onto the boost while we have it out of the db
gtsBoost.Account = requestingAccount boost.Account = requestingAccount
gtsBoost.BoostOf = targetStatus boost.BoostOf = targetStatus
gtsBoost.BoostOfAccount = targetStatus.Account boost.BoostOfAccount = targetStatus.Account
gtsBoost.BoostOf.Account = targetStatus.Account boost.BoostOf.Account = targetStatus.Account
// send it back to the processor for async processing // send it back to the processor for async processing
p.state.Workers.EnqueueClientAPI(ctx, messages.FromClientAPI{ p.state.Workers.EnqueueClientAPI(ctx, messages.FromClientAPI{
APObjectType: ap.ActivityAnnounce, APObjectType: ap.ActivityAnnounce,
APActivityType: ap.ActivityUndo, APActivityType: ap.ActivityUndo,
GTSModel: gtsBoost, GTSModel: boost,
OriginAccount: requestingAccount, OriginAccount: requestingAccount,
TargetAccount: targetStatus.Account, TargetAccount: targetStatus.Account,
}) })
@ -189,15 +166,15 @@ func (p *Processor) StatusBoostedBy(ctx context.Context, requestingAccount *gtsm
return nil, gtserror.NewErrorNotFound(err) return nil, gtserror.NewErrorNotFound(err)
} }
statusReblogs, err := p.state.DB.GetStatusReblogs(ctx, targetStatus) statusBoosts, err := p.state.DB.GetStatusBoosts(ctx, targetStatus.ID)
if err != nil { if err != nil {
err = fmt.Errorf("BoostedBy: error seeing who boosted status: %s", err) err = fmt.Errorf("BoostedBy: error seeing who boosted status: %s", err)
return nil, gtserror.NewErrorNotFound(err) return nil, gtserror.NewErrorNotFound(err)
} }
// filter account IDs so the user doesn't see accounts they blocked or which blocked them // filter account IDs so the user doesn't see accounts they blocked or which blocked them
accountIDs := make([]string, 0, len(statusReblogs)) accountIDs := make([]string, 0, len(statusBoosts))
for _, s := range statusReblogs { for _, s := range statusBoosts {
blocked, err := p.state.DB.IsEitherBlocked(ctx, requestingAccount.ID, s.AccountID) blocked, err := p.state.DB.IsEitherBlocked(ctx, requestingAccount.ID, s.AccountID)
if err != nil { if err != nil {
err = fmt.Errorf("BoostedBy: error checking blocks: %s", err) err = fmt.Errorf("BoostedBy: error checking blocks: %s", err)

View file

@ -112,7 +112,7 @@ func (p *Processor) FavedBy(ctx context.Context, requestingAccount *gtsmodel.Acc
return nil, errWithCode return nil, errWithCode
} }
statusFaves, err := p.state.DB.GetStatusFavesForStatus(ctx, targetStatus.ID) statusFaves, err := p.state.DB.GetStatusFaves(ctx, targetStatus.ID)
if err != nil { if err != nil {
return nil, gtserror.NewErrorNotFound(fmt.Errorf("FavedBy: error seeing who faved status: %s", err)) return nil, gtserror.NewErrorNotFound(fmt.Errorf("FavedBy: error seeing who faved status: %s", err))
} }

View file

@ -181,6 +181,7 @@ func New(ctx context.Context) (Router, error) {
// create the actual engine here -- this is the core request routing handler for gts // create the actual engine here -- this is the core request routing handler for gts
engine := gin.New() engine := gin.New()
engine.MaxMultipartMemory = maxMultipartMemory engine.MaxMultipartMemory = maxMultipartMemory
engine.HandleMethodNotAllowed = true
// set up IP forwarding via x-forward-* headers. // set up IP forwarding via x-forward-* headers.
trustedProxies := config.GetTrustedProxies() trustedProxies := config.GetTrustedProxies()

View file

@ -190,18 +190,18 @@ func (m *manager) GetOldestIndexedID(ctx context.Context, timelineID string) str
} }
func (m *manager) WipeItemFromAllTimelines(ctx context.Context, itemID string) error { func (m *manager) WipeItemFromAllTimelines(ctx context.Context, itemID string) error {
errors := gtserror.MultiError{} errs := new(gtserror.MultiError)
m.timelines.Range(func(_ any, v any) bool { m.timelines.Range(func(_ any, v any) bool {
if _, err := v.(Timeline).Remove(ctx, itemID); err != nil { if _, err := v.(Timeline).Remove(ctx, itemID); err != nil {
errors.Append(err) errs.Append(err)
} }
return true // always continue range return true // always continue range
}) })
if len(errors) > 0 { if err := errs.Combine(); err != nil {
return gtserror.Newf("error(s) wiping status %s: %w", itemID, errors.Combine()) return gtserror.Newf("error(s) wiping status %s: %w", itemID, errs.Combine())
} }
return nil return nil
@ -213,21 +213,21 @@ func (m *manager) WipeItemsFromAccountID(ctx context.Context, timelineID string,
} }
func (m *manager) UnprepareItemFromAllTimelines(ctx context.Context, itemID string) error { func (m *manager) UnprepareItemFromAllTimelines(ctx context.Context, itemID string) error {
errors := gtserror.MultiError{} errs := new(gtserror.MultiError)
// Work through all timelines held by this // Work through all timelines held by this
// manager, and call Unprepare for each. // manager, and call Unprepare for each.
m.timelines.Range(func(_ any, v any) bool { m.timelines.Range(func(_ any, v any) bool {
// nolint:forcetypeassert // nolint:forcetypeassert
if err := v.(Timeline).Unprepare(ctx, itemID); err != nil { if err := v.(Timeline).Unprepare(ctx, itemID); err != nil {
errors.Append(err) errs.Append(err)
} }
return true // always continue range return true // always continue range
}) })
if len(errors) > 0 { if err := errs.Combine(); err != nil {
return gtserror.Newf("error(s) unpreparing status %s: %w", itemID, errors.Combine()) return gtserror.Newf("error(s) unpreparing status %s: %w", itemID, errs.Combine())
} }
return nil return nil

View file

@ -50,7 +50,7 @@ type controller struct {
fedDB federatingdb.DB fedDB federatingdb.DB
clock pub.Clock clock pub.Clock
client httpclient.SigningClient client httpclient.SigningClient
trspCache cache.Cache[string, *transport] trspCache cache.TTLCache[string, *transport]
userAgent string userAgent string
senders int // no. concurrent batch delivery routines. senders int // no. concurrent batch delivery routines.
} }
@ -76,7 +76,7 @@ func NewController(state *state.State, federatingDB federatingdb.DB, clock pub.C
fedDB: federatingDB, fedDB: federatingDB,
clock: clock, clock: clock,
client: client, client: client,
trspCache: cache.New[string, *transport](0, 100, 0), trspCache: cache.NewTTL[string, *transport](0, 100, 0),
userAgent: fmt.Sprintf("%s (+%s://%s) gotosocial/%s", applicationName, proto, host, version), userAgent: fmt.Sprintf("%s (+%s://%s) gotosocial/%s", applicationName, proto, host, version),
senders: senders, senders: senders,
} }

View file

@ -22,6 +22,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/url" "net/url"
"time"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/superseriousbusiness/gotosocial/internal/ap" "github.com/superseriousbusiness/gotosocial/internal/ap"
@ -630,8 +631,10 @@ func (c *converter) ASAnnounceToStatus(ctx context.Context, announceable ap.Anno
// Extract published time for the boost. // Extract published time for the boost.
published, err := ap.ExtractPublished(announceable) published, err := ap.ExtractPublished(announceable)
if err != nil { if err != nil {
err = gtserror.Newf("error extracting published: %w", err) // If not available, use the current time.
return nil, isNew, err published = time.Now().UTC()
//err = gtserror.Newf("error extracting published: %w", err)
//return nil, isNew, err
} }
status.CreatedAt = published status.CreatedAt = published
status.UpdatedAt = published status.UpdatedAt = published

View file

@ -600,17 +600,17 @@ func (c *converter) StatusToAPIStatus(ctx context.Context, s *gtsmodel.Status, r
return nil, fmt.Errorf("error converting status author: %w", err) return nil, fmt.Errorf("error converting status author: %w", err)
} }
repliesCount, err := c.db.CountStatusReplies(ctx, s) repliesCount, err := c.db.CountStatusReplies(ctx, s.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("error counting replies: %w", err) return nil, fmt.Errorf("error counting replies: %w", err)
} }
reblogsCount, err := c.db.CountStatusReblogs(ctx, s) reblogsCount, err := c.db.CountStatusBoosts(ctx, s.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("error counting reblogs: %w", err) return nil, fmt.Errorf("error counting reblogs: %w", err)
} }
favesCount, err := c.db.CountStatusFaves(ctx, s) favesCount, err := c.db.CountStatusFaves(ctx, s.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("error counting faves: %w", err) return nil, fmt.Errorf("error counting faves: %w", err)
} }

View file

@ -40,13 +40,13 @@ func (c *converter) interactionsWithStatusForAccount(ctx context.Context, s *gts
si := &statusInteractions{} si := &statusInteractions{}
if requestingAccount != nil { if requestingAccount != nil {
faved, err := c.db.IsStatusFavedBy(ctx, s, requestingAccount.ID) faved, err := c.db.IsStatusFavedBy(ctx, s.ID, requestingAccount.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("error checking if requesting account has faved status: %s", err) return nil, fmt.Errorf("error checking if requesting account has faved status: %s", err)
} }
si.Faved = faved si.Faved = faved
reblogged, err := c.db.IsStatusRebloggedBy(ctx, s, requestingAccount.ID) reblogged, err := c.db.IsStatusBoostedBy(ctx, s.ID, requestingAccount.ID)
if err != nil { if err != nil {
return nil, fmt.Errorf("error checking if requesting account has reblogged status: %s", err) return nil, fmt.Errorf("error checking if requesting account has reblogged status: %s", err)
} }

View file

@ -19,10 +19,10 @@ package visibility
import ( import (
"context" "context"
"fmt"
"github.com/superseriousbusiness/gotosocial/internal/cache" "github.com/superseriousbusiness/gotosocial/internal/cache"
"github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/log" "github.com/superseriousbusiness/gotosocial/internal/log"
) )
@ -66,7 +66,7 @@ func (f *Filter) isAccountVisibleTo(ctx context.Context, requester *gtsmodel.Acc
// Check whether target account is visible to anyone. // Check whether target account is visible to anyone.
visible, err := f.isAccountVisible(ctx, account) visible, err := f.isAccountVisible(ctx, account)
if err != nil { if err != nil {
return false, fmt.Errorf("isAccountVisibleTo: error checking account %s visibility: %w", account.ID, err) return false, gtserror.Newf("error checking account %s visibility: %w", account.ID, err)
} }
if !visible { if !visible {
@ -83,7 +83,7 @@ func (f *Filter) isAccountVisibleTo(ctx context.Context, requester *gtsmodel.Acc
// If requester is not visible, they cannot *see* either. // If requester is not visible, they cannot *see* either.
visible, err = f.isAccountVisible(ctx, requester) visible, err = f.isAccountVisible(ctx, requester)
if err != nil { if err != nil {
return false, fmt.Errorf("isAccountVisibleTo: error checking account %s visibility: %w", account.ID, err) return false, gtserror.Newf("error checking account %s visibility: %w", account.ID, err)
} }
if !visible { if !visible {
@ -97,7 +97,7 @@ func (f *Filter) isAccountVisibleTo(ctx context.Context, requester *gtsmodel.Acc
account.ID, account.ID,
) )
if err != nil { if err != nil {
return false, fmt.Errorf("isAccountVisibleTo: error checking account blocks: %w", err) return false, gtserror.Newf("error checking account blocks: %w", err)
} }
if blocked { if blocked {
@ -121,6 +121,7 @@ func (f *Filter) isAccountVisible(ctx context.Context, account *gtsmodel.Account
// Fetch the local user model for this account. // Fetch the local user model for this account.
user, err := f.state.DB.GetUserByAccountID(ctx, account.ID) user, err := f.state.DB.GetUserByAccountID(ctx, account.ID)
if err != nil { if err != nil {
err := gtserror.Newf("db error getting user for account %s: %w", account.ID, err)
return false, err return false, err
} }

View file

@ -29,8 +29,8 @@ import (
"codeberg.org/gruf/go-cache/v3" "codeberg.org/gruf/go-cache/v3"
) )
func newETagCache() cache.Cache[string, eTagCacheEntry] { func newETagCache() cache.TTLCache[string, eTagCacheEntry] {
eTagCache := cache.New[string, eTagCacheEntry](0, 1000, 0) eTagCache := cache.NewTTL[string, eTagCacheEntry](0, 1000, 0)
eTagCache.SetTTL(time.Hour, false) eTagCache.SetTTL(time.Hour, false)
if !eTagCache.Start(time.Minute) { if !eTagCache.Start(time.Minute) {
log.Panic(nil, "could not start eTagCache") log.Panic(nil, "could not start eTagCache")

View file

@ -2,7 +2,7 @@
set -eu set -eu
EXPECT=$(cat <<"EOF" EXPECT=$(cat << "EOF"
{ {
"account-domain": "peepee", "account-domain": "peepee",
"accounts-allow-custom-css": true, "accounts-allow-custom-css": true,
@ -18,86 +18,34 @@ EXPECT=$(cat <<"EOF"
"application-name": "gts", "application-name": "gts",
"bind-address": "127.0.0.1", "bind-address": "127.0.0.1",
"cache": { "cache": {
"gts": { "account-mem-ratio": 18,
"account-max-size": 99, "account-note-mem-ratio": 0.1,
"account-note-max-size": 1000, "block-mem-ratio": 3,
"account-note-sweep-freq": 60000000000, "boost-of-ids-mem-ratio": 3,
"account-note-ttl": 1800000000000, "emoji-category-mem-ratio": 0.1,
"account-sweep-freq": 1000000000, "emoji-mem-ratio": 3,
"account-ttl": 10800000000000, "follow-ids-mem-ratio": 4,
"block-ids-max-size": 500, "follow-mem-ratio": 4,
"block-ids-sweep-freq": 60000000000, "follow-request-ids-mem-ratio": 2,
"block-ids-ttl": 1800000000000, "follow-request-mem-ratio": 2,
"block-max-size": 1000, "in-reply-to-ids-mem-ratio": 3,
"block-sweep-freq": 60000000000, "instance-mem-ratio": 1,
"block-ttl": 1800000000000, "list-entry-mem-ratio": 3,
"domain-block-max-size": 2000, "list-mem-ratio": 3,
"domain-block-sweep-freq": 60000000000, "marker-mem-ratio": 0.5,
"domain-block-ttl": 86400000000000, "media-mem-ratio": 4,
"emoji-category-max-size": 100, "memory-target": 209715200,
"emoji-category-sweep-freq": 60000000000, "mention-mem-ratio": 5,
"emoji-category-ttl": 1800000000000, "notification-mem-ratio": 5,
"emoji-max-size": 2000, "report-mem-ratio": 1,
"emoji-sweep-freq": 60000000000, "status-fave-ids-mem-ratio": 3,
"emoji-ttl": 1800000000000, "status-fave-mem-ratio": 5,
"follow-ids-max-size": 500, "status-mem-ratio": 18,
"follow-ids-sweep-freq": 60000000000, "tag-mem-ratio": 3,
"follow-ids-ttl": 1800000000000, "tombstone-mem-ratio": 2,
"follow-max-size": 2000, "user-mem-ratio": 0.1,
"follow-request-ids-max-size": 500, "visibility-mem-ratio": 2,
"follow-request-ids-sweep-freq": 60000000000, "webfinger-mem-ratio": 0.1
"follow-request-ids-ttl": 1800000000000,
"follow-request-max-size": 2000,
"follow-request-sweep-freq": 60000000000,
"follow-request-ttl": 1800000000000,
"follow-sweep-freq": 60000000000,
"follow-ttl": 1800000000000,
"instance-max-size": 2000,
"instance-sweep-freq": 60000000000,
"instance-ttl": 1800000000000,
"list-entry-max-size": 2000,
"list-entry-sweep-freq": 60000000000,
"list-entry-ttl": 1800000000000,
"list-max-size": 2000,
"list-sweep-freq": 60000000000,
"list-ttl": 1800000000000,
"marker-max-size": 2000,
"marker-sweep-freq": 60000000000,
"marker-ttl": 21600000000000,
"media-max-size": 1000,
"media-sweep-freq": 60000000000,
"media-ttl": 1800000000000,
"mention-max-size": 2000,
"mention-sweep-freq": 60000000000,
"mention-ttl": 1800000000000,
"notification-max-size": 1000,
"notification-sweep-freq": 60000000000,
"notification-ttl": 1800000000000,
"report-max-size": 100,
"report-sweep-freq": 60000000000,
"report-ttl": 1800000000000,
"status-fave-max-size": 2000,
"status-fave-sweep-freq": 60000000000,
"status-fave-ttl": 1800000000000,
"status-max-size": 2000,
"status-sweep-freq": 60000000000,
"status-ttl": 1800000000000,
"tag-max-size": 2000,
"tag-sweep-freq": 60000000000,
"tag-ttl": 1800000000000,
"tombstone-max-size": 500,
"tombstone-sweep-freq": 60000000000,
"tombstone-ttl": 1800000000000,
"user-max-size": 500,
"user-sweep-freq": 60000000000,
"user-ttl": 1800000000000,
"webfinger-max-size": 250,
"webfinger-sweep-freq": 900000000000,
"webfinger-ttl": 86400000000000
},
"visibility-max-size": 2000,
"visibility-sweep-freq": 60000000000,
"visibility-ttl": 1800000000000
}, },
"config-path": "internal/config/testdata/test.yaml", "config-path": "internal/config/testdata/test.yaml",
"db-address": ":memory:", "db-address": ":memory:",

View file

@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2022 gruf Copyright (c) gruf
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

View file

@ -1,14 +1,14 @@
# go-cache # go-cache
Provides access to a simple yet flexible, performant TTL cache via the `Cache{}` interface and `cache.New()`. Under the hood this is returning a `ttl.Cache{}`. Provides access to simple, yet flexible, and performant caches (with TTL if required) via the `cache.Cache{}` and `cache.TTLCache{}` interfaces.
## simple
A `cache.Cache{}` implementation with much more of the inner workings exposed. Designed to be used as a base for your own customizations, or used as-is.
## ttl ## ttl
A TTL cache implementation with much of the inner workings exposed, designed to be used as a base for your own customizations, or used as-is. Access via the base package `cache.New()` is recommended in the latter case, to prevent accidental use of unsafe methods. A `cache.TTLCache{}` implementation with much more of the inner workings exposed. Designed to be used as a base for your own customizations, or used as-is.
## lookup
`lookup.Cache` is an example of a more complex cache implementation using `ttl.Cache{}` as its underpinning. It provides caching of items under multiple keys.
## result ## result

View file

@ -3,26 +3,33 @@ package cache
import ( import (
"time" "time"
ttlcache "codeberg.org/gruf/go-cache/v3/ttl" "codeberg.org/gruf/go-cache/v3/simple"
"codeberg.org/gruf/go-cache/v3/ttl"
) )
// Cache represents a TTL cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop ttl.Cache{}. // TTLCache represents a TTL cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop ttl.Cache{}.
type Cache[Key comparable, Value any] interface { type TTLCache[Key comparable, Value any] interface {
// Start will start the cache background eviction routine with given sweep frequency. If already running or a freq <= 0 provided, this is a no-op. This will block until the eviction routine has started. // Start will start the cache background eviction routine with given sweep frequency. If already running or a freq <= 0 provided, this is a no-op. This will block until the eviction routine has started.
Start(freq time.Duration) bool Start(freq time.Duration) bool
// Stop will stop cache background eviction routine. If not running this is a no-op. This will block until the eviction routine has stopped. // Stop will stop cache background eviction routine. If not running this is a no-op. This will block until the eviction routine has stopped.
Stop() bool Stop() bool
// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items in the cache, this will simply add the change in TTL to their current expiry time.
SetTTL(ttl time.Duration, update bool)
// implements base cache.
Cache[Key, Value]
}
// Cache represents a cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop simple.Cache{}.
type Cache[Key comparable, Value any] interface {
// SetEvictionCallback sets the eviction callback to the provided hook. // SetEvictionCallback sets the eviction callback to the provided hook.
SetEvictionCallback(hook func(Key, Value)) SetEvictionCallback(hook func(Key, Value))
// SetInvalidateCallback sets the invalidate callback to the provided hook. // SetInvalidateCallback sets the invalidate callback to the provided hook.
SetInvalidateCallback(hook func(Key, Value)) SetInvalidateCallback(hook func(Key, Value))
// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items in the cache, this will simply add the change in TTL to their current expiry time.
SetTTL(ttl time.Duration, update bool)
// Get fetches the value with key from the cache, extending its TTL. // Get fetches the value with key from the cache, extending its TTL.
Get(key Key) (value Value, ok bool) Get(key Key) (value Value, ok bool)
@ -57,7 +64,12 @@ type Cache[Key comparable, Value any] interface {
Cap() int Cap() int
} }
// New returns a new initialized Cache with given initial length, maximum capacity and item TTL. // New returns a new initialized Cache with given initial length, maximum capacity.
func New[K comparable, V any](len, cap int, ttl time.Duration) Cache[K, V] { func New[K comparable, V any](len, cap int) Cache[K, V] {
return ttlcache.New[K, V](len, cap, ttl) return simple.New[K, V](len, cap)
}
// NewTTL returns a new initialized TTLCache with given initial length, maximum capacity and TTL duration.
func NewTTL[K comparable, V any](len, cap int, _ttl time.Duration) TTLCache[K, V] {
return ttl.New[K, V](len, cap, _ttl)
} }

View file

@ -2,14 +2,17 @@ package result
import ( import (
"context" "context"
"fmt"
"os"
"reflect" "reflect"
"time"
_ "unsafe" _ "unsafe"
"codeberg.org/gruf/go-cache/v3/ttl" "codeberg.org/gruf/go-cache/v3/simple"
"codeberg.org/gruf/go-errors/v2" "codeberg.org/gruf/go-errors/v2"
) )
var ErrUnsupportedZero = errors.New("")
// Lookup represents a struct object lookup method in the cache. // Lookup represents a struct object lookup method in the cache.
type Lookup struct { type Lookup struct {
// Name is a period ('.') separated string // Name is a period ('.') separated string
@ -23,26 +26,23 @@ type Lookup struct {
// Multi allows specifying a key capable of storing // Multi allows specifying a key capable of storing
// multiple results. Note this only supports invalidate. // multiple results. Note this only supports invalidate.
Multi bool Multi bool
// TODO: support toggling case sensitive lookups.
// CaseSensitive bool
} }
// Cache provides a means of caching value structures, along with // Cache provides a means of caching value structures, along with
// the results of attempting to load them. An example usecase of this // the results of attempting to load them. An example usecase of this
// cache would be in wrapping a database, allowing caching of sql.ErrNoRows. // cache would be in wrapping a database, allowing caching of sql.ErrNoRows.
type Cache[Value any] struct { type Cache[T any] struct {
cache ttl.Cache[int64, result[Value]] // underlying result cache cache simple.Cache[int64, *result] // underlying result cache
invalid func(Value) // store unwrapped invalidate callback. lookups structKeys // pre-determined struct lookups
lookups structKeys // pre-determined struct lookups invalid func(T) // store unwrapped invalidate callback.
ignore func(error) bool // determines cacheable errors ignore func(error) bool // determines cacheable errors
copy func(Value) Value // copies a Value type copy func(T) T // copies a Value type
next int64 // update key counter next int64 // update key counter
} }
// New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity. // New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] { func New[T any](lookups []Lookup, copy func(T) T, cap int) *Cache[T] {
var z Value var z T
// Determine generic type // Determine generic type
t := reflect.TypeOf(z) t := reflect.TypeOf(z)
@ -58,7 +58,7 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va
} }
// Allocate new cache object // Allocate new cache object
c := &Cache[Value]{copy: copy} c := &Cache[T]{copy: copy}
c.lookups = make([]structKey, len(lookups)) c.lookups = make([]structKey, len(lookups))
for i, lookup := range lookups { for i, lookup := range lookups {
@ -67,38 +67,20 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va
} }
// Create and initialize underlying cache // Create and initialize underlying cache
c.cache.Init(0, cap, 0) c.cache.Init(0, cap)
c.SetEvictionCallback(nil) c.SetEvictionCallback(nil)
c.SetInvalidateCallback(nil) c.SetInvalidateCallback(nil)
c.IgnoreErrors(nil) c.IgnoreErrors(nil)
return c return c
} }
// Start will start the cache background eviction routine with given sweep frequency. If already
// running or a freq <= 0 provided, this is a no-op. This will block until eviction routine started.
func (c *Cache[Value]) Start(freq time.Duration) bool {
return c.cache.Start(freq)
}
// Stop will stop cache background eviction routine. If not running this
// is a no-op. This will block until the eviction routine has stopped.
func (c *Cache[Value]) Stop() bool {
return c.cache.Stop()
}
// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items
// in the cache, this will simply add the change in TTL to their current expiry time.
func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) {
c.cache.SetTTL(ttl, update)
}
// SetEvictionCallback sets the eviction callback to the provided hook. // SetEvictionCallback sets the eviction callback to the provided hook.
func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) { func (c *Cache[T]) SetEvictionCallback(hook func(T)) {
if hook == nil { if hook == nil {
// Ensure non-nil hook. // Ensure non-nil hook.
hook = func(Value) {} hook = func(T) {}
} }
c.cache.SetEvictionCallback(func(pkey int64, res result[Value]) { c.cache.SetEvictionCallback(func(pkey int64, res *result) {
c.cache.Lock() c.cache.Lock()
for _, key := range res.Keys { for _, key := range res.Keys {
// Delete key->pkey lookup // Delete key->pkey lookup
@ -108,23 +90,25 @@ func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
c.cache.Unlock() c.cache.Unlock()
if res.Error != nil { if res.Error != nil {
// Skip error hooks // Skip value hooks
return return
} }
// Call user hook. // Free result and call hook.
hook(res.Value) v := getResultValue[T](res)
putResult(res)
hook(v)
}) })
} }
// SetInvalidateCallback sets the invalidate callback to the provided hook. // SetInvalidateCallback sets the invalidate callback to the provided hook.
func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) { func (c *Cache[T]) SetInvalidateCallback(hook func(T)) {
if hook == nil { if hook == nil {
// Ensure non-nil hook. // Ensure non-nil hook.
hook = func(Value) {} hook = func(T) {}
} // store hook. } // store hook.
c.invalid = hook c.invalid = hook
c.cache.SetInvalidateCallback(func(pkey int64, res result[Value]) { c.cache.SetInvalidateCallback(func(pkey int64, res *result) {
c.cache.Lock() c.cache.Lock()
for _, key := range res.Keys { for _, key := range res.Keys {
// Delete key->pkey lookup // Delete key->pkey lookup
@ -134,17 +118,19 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
c.cache.Unlock() c.cache.Unlock()
if res.Error != nil { if res.Error != nil {
// Skip error hooks // Skip value hooks
return return
} }
// Call user hook. // Free result and call hook.
hook(res.Value) v := getResultValue[T](res)
putResult(res)
hook(v)
}) })
} }
// IgnoreErrors allows setting a function hook to determine which error types should / not be cached. // IgnoreErrors allows setting a function hook to determine which error types should / not be cached.
func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) { func (c *Cache[T]) IgnoreErrors(ignore func(error) bool) {
if ignore == nil { if ignore == nil {
ignore = func(err error) bool { ignore = func(err error) bool {
return errors.Comparable( return errors.Comparable(
@ -160,11 +146,10 @@ func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
} }
// Load will attempt to load an existing result from the cache for the given lookup and key parts, else calling the provided load function and caching the result. // Load will attempt to load an existing result from the cache for the given lookup and key parts, else calling the provided load function and caching the result.
func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) { func (c *Cache[T]) Load(lookup string, load func() (T, error), keyParts ...any) (T, error) {
var ( var (
zero Value zero T
res result[Value] res *result
ok bool
) )
// Get lookup key info by name. // Get lookup key info by name.
@ -182,24 +167,22 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
// Look for primary cache key // Look for primary cache key
pkeys := keyInfo.pkeys[ckey] pkeys := keyInfo.pkeys[ckey]
if ok = (len(pkeys) > 0); ok { if len(pkeys) > 0 {
var entry *ttl.Entry[int64, result[Value]]
// Fetch the result for primary key // Fetch the result for primary key
entry, ok = c.cache.Cache.Get(pkeys[0]) entry, ok := c.cache.Cache.Get(pkeys[0])
if ok { if ok {
// Since the invalidation / eviction hooks acquire a mutex // Since the invalidation / eviction hooks acquire a mutex
// lock separately, and only at this point are the pkeys // lock separately, and only at this point are the pkeys
// updated, there is a chance that a primary key may return // updated, there is a chance that a primary key may return
// no matching entry. Hence we have to check for it here. // no matching entry. Hence we have to check for it here.
res = entry.Value res = entry.Value.(*result)
} }
} }
// Done with lock // Done with lock
c.cache.Unlock() c.cache.Unlock()
if !ok { if res == nil {
// Generate fresh result. // Generate fresh result.
value, err := load() value, err := load()
@ -209,6 +192,9 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
return zero, err return zero, err
} }
// Alloc result.
res = getResult()
// Store error result. // Store error result.
res.Error = err res.Error = err
@ -219,6 +205,9 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
key: ckey, key: ckey,
}} }}
} else { } else {
// Alloc result.
res = getResult()
// Store value result. // Store value result.
res.Value = value res.Value = value
@ -245,28 +234,29 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
evict = c.store(res) evict = c.store(res)
} }
// Catch and return error // Catch and return cached error
if res.Error != nil { if err := res.Error; err != nil {
return zero, res.Error return zero, err
} }
// Return a copy of value from cache // Copy value from cached result.
return c.copy(res.Value), nil v := c.copy(getResultValue[T](res))
return v, nil
} }
// Store will call the given store function, and on success store the value in the cache as a positive result. // Store will call the given store function, and on success store the value in the cache as a positive result.
func (c *Cache[Value]) Store(value Value, store func() error) error { func (c *Cache[T]) Store(value T, store func() error) error {
// Attempt to store this value. // Attempt to store this value.
if err := store(); err != nil { if err := store(); err != nil {
return err return err
} }
// Prepare cached result. // Prepare cached result.
result := result[Value]{ result := getResult()
Keys: c.lookups.generate(value), result.Keys = c.lookups.generate(value)
Value: c.copy(value), result.Value = c.copy(value)
Error: nil, result.Error = nil
}
var evict func() var evict func()
@ -293,9 +283,8 @@ func (c *Cache[Value]) Store(value Value, store func() error) error {
} }
// Has checks the cache for a positive result under the given lookup and key parts. // Has checks the cache for a positive result under the given lookup and key parts.
func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool { func (c *Cache[T]) Has(lookup string, keyParts ...any) bool {
var res result[Value] var res *result
var ok bool
// Get lookup key info by name. // Get lookup key info by name.
keyInfo := c.lookups.get(lookup) keyInfo := c.lookups.get(lookup)
@ -312,29 +301,29 @@ func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
// Look for primary key for cache key // Look for primary key for cache key
pkeys := keyInfo.pkeys[ckey] pkeys := keyInfo.pkeys[ckey]
if ok = (len(pkeys) > 0); ok { if len(pkeys) > 0 {
var entry *ttl.Entry[int64, result[Value]]
// Fetch the result for primary key // Fetch the result for primary key
entry, ok = c.cache.Cache.Get(pkeys[0]) entry, ok := c.cache.Cache.Get(pkeys[0])
if ok { if ok {
// Since the invalidation / eviction hooks acquire a mutex // Since the invalidation / eviction hooks acquire a mutex
// lock separately, and only at this point are the pkeys // lock separately, and only at this point are the pkeys
// updated, there is a chance that a primary key may return // updated, there is a chance that a primary key may return
// no matching entry. Hence we have to check for it here. // no matching entry. Hence we have to check for it here.
res = entry.Value res = entry.Value.(*result)
} }
} }
// Check for result AND non-error result.
ok := (res != nil && res.Error == nil)
// Done with lock // Done with lock
c.cache.Unlock() c.cache.Unlock()
// Check for non-error result. return ok
return ok && (res.Error == nil)
} }
// Invalidate will invalidate any result from the cache found under given lookup and key parts. // Invalidate will invalidate any result from the cache found under given lookup and key parts.
func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) { func (c *Cache[T]) Invalidate(lookup string, keyParts ...any) {
// Get lookup key info by name. // Get lookup key info by name.
keyInfo := c.lookups.get(lookup) keyInfo := c.lookups.get(lookup)
@ -351,15 +340,20 @@ func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
c.cache.InvalidateAll(pkeys...) c.cache.InvalidateAll(pkeys...)
} }
// Clear empties the cache, calling the invalidate callback. // Clear empties the cache, calling the invalidate callback where necessary.
func (c *Cache[Value]) Clear() { c.cache.Clear() } func (c *Cache[T]) Clear() { c.Trim(100) }
// Trim ensures the cache stays within percentage of total capacity, truncating where necessary.
func (c *Cache[T]) Trim(perc float64) { c.cache.Trim(perc) }
// store will cache this result under all of its required cache keys. // store will cache this result under all of its required cache keys.
func (c *Cache[Value]) store(res result[Value]) (evict func()) { func (c *Cache[T]) store(res *result) (evict func()) {
var toEvict []*result
// Get primary key // Get primary key
pnext := c.next res.PKey = c.next
c.next++ c.next++
if pnext > c.next { if res.PKey > c.next {
panic("cache primary key overflow") panic("cache primary key overflow")
} }
@ -371,15 +365,19 @@ func (c *Cache[Value]) store(res result[Value]) (evict func()) {
for _, conflict := range pkeys { for _, conflict := range pkeys {
// Get the overlapping result with this key. // Get the overlapping result with this key.
entry, _ := c.cache.Cache.Get(conflict) entry, _ := c.cache.Cache.Get(conflict)
confRes := entry.Value.(*result)
// From conflicting entry, drop this key, this // From conflicting entry, drop this key, this
// will prevent eviction cleanup key confusion. // will prevent eviction cleanup key confusion.
entry.Value.Keys.drop(key.info.name) confRes.Keys.drop(key.info.name)
if len(entry.Value.Keys) == 0 { if len(res.Keys) == 0 {
// We just over-wrote the only lookup key for // We just over-wrote the only lookup key for
// this value, so we drop its primary key too. // this value, so we drop its primary key too.
c.cache.Cache.Delete(conflict) c.cache.Cache.Delete(conflict)
// Add finished result to evict queue.
toEvict = append(toEvict, confRes)
} }
} }
@ -388,42 +386,58 @@ func (c *Cache[Value]) store(res result[Value]) (evict func()) {
} }
// Store primary key lookup. // Store primary key lookup.
pkeys = append(pkeys, pnext) pkeys = append(pkeys, res.PKey)
key.info.pkeys[key.key] = pkeys key.info.pkeys[key.key] = pkeys
} }
// Store main entry under primary key, using evict hook if needed // Acquire new cache entry.
c.cache.Cache.SetWithHook(pnext, &ttl.Entry[int64, result[Value]]{ entry := simple.GetEntry()
Expiry: c.expiry(), entry.Key = res.PKey
Key: pnext, entry.Value = res
Value: res,
}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
evict = func() { c.cache.Evict(item.Key, item.Value) }
})
return evict evictFn := func(_ int64, entry *simple.Entry) {
} // on evict during set, store evicted result.
toEvict = append(toEvict, entry.Value.(*result))
//go:linkname runtime_nanotime runtime.nanotime }
func runtime_nanotime() uint64
// Store main entry under primary key, catch evicted.
// expiry returns an the next expiry time to use for an entry, c.cache.Cache.SetWithHook(res.PKey, entry, evictFn)
// which is equivalent to time.Now().Add(ttl), or zero if disabled.
func (c *Cache[Value]) expiry() uint64 { if len(toEvict) == 0 {
if ttl := c.cache.TTL; ttl > 0 { // none evicted.
return runtime_nanotime() + return nil
uint64(c.cache.TTL) }
return func() {
for i := range toEvict {
// Rescope result.
res := toEvict[i]
// Call evict hook on each entry.
c.cache.Evict(res.PKey, res)
}
} }
return 0
} }
type result[Value any] struct { type result struct {
// Result primary key
PKey int64
// keys accessible under // keys accessible under
Keys cacheKeys Keys cacheKeys
// cached value // cached value
Value Value Value any
// cached error // cached error
Error error Error error
} }
// getResultValue is a safe way of casting and fetching result value.
func getResultValue[T any](res *result) T {
v, ok := res.Value.(T)
if !ok {
fmt.Fprintf(os.Stderr, "!! BUG: unexpected value type in result: %T\n", res.Value)
}
return v
}

View file

@ -47,27 +47,32 @@ func (sk structKeys) generate(a any) []cacheKey {
buf := getBuf() buf := getBuf()
defer putBuf(buf) defer putBuf(buf)
outer:
for i := range sk { for i := range sk {
// Reset buffer // Reset buffer
buf.B = buf.B[:0] buf.Reset()
// Append each field value to buffer. // Append each field value to buffer.
for _, field := range sk[i].fields { for _, field := range sk[i].fields {
fv := v.Field(field.index) fv := v.Field(field.index)
fi := fv.Interface() fi := fv.Interface()
buf.B = field.mangle(buf.B, fi)
// Mangle this key part into buffer.
ok := field.manglePart(buf, fi)
if !ok {
// don't generate keys
// for zero value parts.
continue outer
}
// Append part separator.
buf.B = append(buf.B, '.') buf.B = append(buf.B, '.')
} }
// Drop last '.' // Drop last '.'
buf.Truncate(1) buf.Truncate(1)
// Don't generate keys for zero values
if allowZero := sk[i].zero == ""; // nocollapse
!allowZero && buf.String() == sk[i].zero {
continue
}
// Append new cached key to slice // Append new cached key to slice
keys = append(keys, cacheKey{ keys = append(keys, cacheKey{
info: &sk[i], info: &sk[i],
@ -114,14 +119,6 @@ type structKey struct {
// period ('.') separated struct field names. // period ('.') separated struct field names.
name string name string
// zero is the possible zero value for this key.
// if set, this will _always_ be non-empty, as
// the mangled cache key will never be empty.
//
// i.e. zero = "" --> allow zero value keys
// zero != "" --> don't allow zero value keys
zero string
// unique determines whether this structKey supports // unique determines whether this structKey supports
// multiple or just the singular unique result. // multiple or just the singular unique result.
unique bool unique bool
@ -135,47 +132,10 @@ type structKey struct {
pkeys map[string][]int64 pkeys map[string][]int64
} }
type structField struct {
// index is the reflect index of this struct field.
index int
// mangle is the mangler function for
// serializing values of this struct field.
mangle mangler.Mangler
}
// genKey generates a cache key string for given key parts (i.e. serializes them using "go-mangler").
func (sk structKey) genKey(parts []any) string {
// Check this expected no. key parts.
if len(parts) != len(sk.fields) {
panic(fmt.Sprintf("incorrect no. key parts provided: want=%d received=%d", len(parts), len(sk.fields)))
}
// Acquire byte buffer
buf := getBuf()
defer putBuf(buf)
buf.Reset()
// Encode each key part
for i, part := range parts {
buf.B = sk.fields[i].mangle(buf.B, part)
buf.B = append(buf.B, '.')
}
// Drop last '.'
buf.Truncate(1)
// Return string copy
return string(buf.B)
}
// newStructKey will generate a structKey{} information object for user-given lookup // newStructKey will generate a structKey{} information object for user-given lookup
// key information, and the receiving generic paramter's type information. Panics on error. // key information, and the receiving generic paramter's type information. Panics on error.
func newStructKey(lk Lookup, t reflect.Type) structKey { func newStructKey(lk Lookup, t reflect.Type) structKey {
var ( var sk structKey
sk structKey
zeros []any
)
// Set the lookup name // Set the lookup name
sk.name = lk.Name sk.name = lk.Name
@ -183,9 +143,6 @@ func newStructKey(lk Lookup, t reflect.Type) structKey {
// Split dot-separated lookup to get // Split dot-separated lookup to get
// the individual struct field names // the individual struct field names
names := strings.Split(lk.Name, ".") names := strings.Split(lk.Name, ".")
if len(names) == 0 {
panic("no key fields specified")
}
// Allocate the mangler and field indices slice. // Allocate the mangler and field indices slice.
sk.fields = make([]structField, len(names)) sk.fields = make([]structField, len(names))
@ -213,16 +170,12 @@ func newStructKey(lk Lookup, t reflect.Type) structKey {
sk.fields[i].mangle = mangler.Get(ft.Type) sk.fields[i].mangle = mangler.Get(ft.Type)
if !lk.AllowZero { if !lk.AllowZero {
// Append the zero value interface // Append the mangled zero value interface
zeros = append(zeros, v.Interface()) zero := sk.fields[i].mangle(nil, v.Interface())
sk.fields[i].zero = string(zero)
} }
} }
if len(zeros) > 0 {
// Generate zero value string
sk.zero = sk.genKey(zeros)
}
// Set unique lookup flag. // Set unique lookup flag.
sk.unique = !lk.Multi sk.unique = !lk.Multi
@ -232,6 +185,68 @@ func newStructKey(lk Lookup, t reflect.Type) structKey {
return sk return sk
} }
// genKey generates a cache key string for given key parts (i.e. serializes them using "go-mangler").
// It panics if the number of provided parts does not match this structKey's field count.
func (sk *structKey) genKey(parts []any) string {
	// Check this expected no. key parts.
	// NOTE: 'want' is the expected field count and 'received' the
	// provided part count (previously these args were swapped).
	if len(parts) != len(sk.fields) {
		panic(fmt.Sprintf("incorrect no. key parts provided: want=%d received=%d", len(sk.fields), len(parts)))
	}

	// Acquire byte buffer
	buf := getBuf()
	defer putBuf(buf)
	buf.Reset()

	for i, part := range parts {
		// Mangle this key part into buffer,
		// specifically ignoring whether this
		// is returning a zero value key part.
		_ = sk.fields[i].manglePart(buf, part)

		// Append part separator.
		buf.B = append(buf.B, '.')
	}

	// Drop last '.'
	buf.Truncate(1)

	// Return string copy
	return string(buf.B)
}
// structField holds the per-field information needed to
// serialize one struct field into a cache key part.
type structField struct {
	// index is the reflect index of this struct field.
	index int

	// zero is the possible zero value for this
	// key part. if set, this will _always_ be
	// non-empty due to how the mangler works.
	//
	// i.e. zero = ""  --> allow zero value keys
	//      zero != "" --> don't allow zero value keys
	zero string

	// mangle is the mangler function for
	// serializing values of this struct field.
	mangle mangler.Mangler
}
// manglePart mangles the given key part into buf using this
// field's mangler function, returning whether the resulting
// part is usable for key generation. It returns false only
// when a zero value string is configured for this field
// (i.e. zero values disallowed) AND the freshly mangled
// bytes equal that zero value string.
func (field *structField) manglePart(buf *byteutil.Buffer, part any) bool {
	// Start of part bytes.
	start := len(buf.B)

	// Mangle this key part into buffer.
	buf.B = field.mangle(buf.B, part)

	// End of part bytes.
	end := len(buf.B)

	// Return whether this is zero value.
	return (field.zero == "" ||
		string(buf.B[start:end]) != field.zero)
}
// isExported checks whether function name is exported. // isExported checks whether function name is exported.
func isExported(fnName string) bool { func isExported(fnName string) bool {
r, _ := utf8.DecodeRuneInString(fnName) r, _ := utf8.DecodeRuneInString(fnName)
@ -246,10 +261,12 @@ var bufPool = sync.Pool{
}, },
} }
// getBuf acquires a byte buffer from memory pool.
func getBuf() *byteutil.Buffer { func getBuf() *byteutil.Buffer {
return bufPool.Get().(*byteutil.Buffer) return bufPool.Get().(*byteutil.Buffer)
} }
// putBuf replaces a byte buffer back in memory pool.
func putBuf(buf *byteutil.Buffer) { func putBuf(buf *byteutil.Buffer) {
if buf.Cap() > int(^uint16(0)) { if buf.Cap() > int(^uint16(0)) {
return // drop large bufs return // drop large bufs

24
vendor/codeberg.org/gruf/go-cache/v3/result/pool.go generated vendored Normal file
View file

@ -0,0 +1,24 @@
package result
import "sync"
// resultPool is a global pool for result
// objects, regardless of cache type.
var resultPool sync.Pool
// getResult fetches a result object from the pool,
// allocating a fresh one when the pool is empty.
func getResult() *result {
	if v := resultPool.Get(); v != nil {
		return v.(*result)
	}
	return new(result)
}
// putResult zeroes the given result's fields (so no
// references are retained while pooled) and returns
// it to the pool for reuse.
func putResult(r *result) {
	r.Keys, r.Value, r.Error = nil, nil, nil
	resultPool.Put(r)
}

454
vendor/codeberg.org/gruf/go-cache/v3/simple/cache.go generated vendored Normal file
View file

@ -0,0 +1,454 @@
package simple
import (
"sync"
"codeberg.org/gruf/go-maps"
)
// Entry represents an item in the cache. Entries are
// reused via GetEntry / PutEntry within this package,
// hence the untyped 'any' fields.
type Entry struct {
	// Key is the cache key this entry is stored under.
	Key any

	// Value is the cached value associated with Key.
	Value any
}
// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own.
type Cache[Key comparable, Value any] struct {
	// Evict is the hook that is called when an item is evicted from the cache.
	Evict func(Key, Value)

	// Invalid is the hook that is called when an item's data in the cache is invalidated, includes Add/Set.
	Invalid func(Key, Value)

	// Cache is the underlying hashmap used for this cache.
	Cache maps.LRUMap[Key, *Entry]

	// Embedded mutex; guards access to all of the above fields.
	sync.Mutex
}
// New returns a new initialized Cache with given initial length and maximum capacity.
func New[K comparable, V any](len, cap int) *Cache[K, V] {
	c := new(Cache[K, V])
	c.Init(len, cap)
	return c
}
// Init will initialize this cache with given initial length and maximum
// capacity, resetting both callback hooks to their nil defaults.
func (c *Cache[K, V]) Init(len, cap int) {
	c.SetEvictionCallback(nil)
	c.SetInvalidateCallback(nil)
	c.Cache.Init(len, cap)
}
// SetEvictionCallback: implements cache.Cache's SetEvictionCallback().
// A nil hook disables eviction callbacks (callers of the hook nil-check it).
func (c *Cache[K, V]) SetEvictionCallback(hook func(K, V)) {
	c.locked(func() { c.Evict = hook })
}
// SetInvalidateCallback: implements cache.Cache's SetInvalidateCallback().
// A nil hook disables invalidate callbacks (callers of the hook nil-check it).
func (c *Cache[K, V]) SetInvalidateCallback(hook func(K, V)) {
	c.locked(func() { c.Invalid = hook })
}
// Get: implements cache.Cache's Get().
// Looks up the entry stored under key, returning its typed
// value and whether it was present in the cache.
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
	c.locked(func() {
		var entry *Entry

		// Look up cache entry under key.
		if entry, ok = c.Cache.Get(key); ok {
			// Extract typed value from entry.
			value = entry.Value.(V)
		}
	})
	return
}
// Add: implements cache.Cache's Add().
// Stores value under key only if key is not already present,
// returning whether the value was added. Any entry evicted to
// make room is passed to the eviction hook outside the lock.
func (c *Cache[K, V]) Add(key K, value V) bool {
	var (
		// key already present?
		exists bool

		// was an entry evicted?
		evicted bool

		// evicted key + value.
		evictK K
		evictV V

		// eviction hook func ptr.
		evict func(K, V)
	)

	c.locked(func() {
		// No-op if key already cached.
		if exists = c.Cache.Has(key); exists {
			return
		}

		// Acquire pooled entry and fill it.
		entry := GetEntry()
		entry.Key = key
		entry.Value = value

		// Insert entry, catching any item evicted to make room.
		c.Cache.SetWithHook(key, entry, func(_ K, old *Entry) {
			evictK = old.Key.(K)
			evictV = old.Value.(V)
			evicted = true
			PutEntry(old)
		})

		// Snapshot hook func ptr.
		evict = c.Evict
	})

	if evicted && evict != nil {
		// Pass to eviction hook.
		evict(evictK, evictV)
	}

	return !exists
}
// Set: implements cache.Cache's Set().
// Stores value under key, overwriting any existing entry. On
// overwrite the previous value is passed to the invalidate hook;
// any entry evicted to make room is passed to the eviction hook.
// Both hooks run outside the lock.
func (c *Cache[K, V]) Set(key K, value V) {
	var (
		// did key already exist?
		existed bool

		// was an entry evicted?
		evicted bool

		// previous (overwritten) value.
		prev V

		// evicted key + value.
		evictK K
		evictV V

		// hook func ptrs.
		invalid func(K, V)
		evict   func(K, V)
	)

	c.locked(func() {
		if entry, ok := c.Cache.Get(key); ok {
			// Existing entry: stash previous
			// value and update it in place.
			existed = true
			prev = entry.Value.(V)
			entry.Value = value
		} else {
			// Acquire pooled entry and fill it.
			fresh := GetEntry()
			fresh.Key = key
			fresh.Value = value

			// Insert entry, catching any item evicted to make room.
			c.Cache.SetWithHook(key, fresh, func(_ K, old *Entry) {
				evictK = old.Key.(K)
				evictV = old.Value.(V)
				evicted = true
				PutEntry(old)
			})
		}

		// Snapshot hook func ptrs.
		invalid = c.Invalid
		evict = c.Evict
	})

	if existed && invalid != nil {
		// Pass to invalidate hook.
		invalid(key, prev)
	}

	if evicted && evict != nil {
		// Pass to eviction hook.
		evict(evictK, evictV)
	}
}
// CAS: implements cache.Cache's CAS().
//
// The stored value is replaced with 'new' only when the key exists AND
// cmp(old, current) returns true. The return value reports whether the
// swap actually happened.
func (c *Cache[K, V]) CAS(key K, old V, new V, cmp func(V, V) bool) bool {
	var (
		// did the swap occur?
		ok bool

		// swapped-out value.
		oldV V

		// hook func ptr.
		invalid func(K, V)
	)
	c.locked(func() {
		// Check for item in cache.
		item, exists := c.Cache.Get(key)
		if !exists {
			return
		}

		// Set old value.
		oldV = item.Value.(V)

		// Perform the comparison.
		if !cmp(old, oldV) {
			// BUGFIX: previously 'ok' tracked only key existence, so CAS
			// reported true here even though no swap occurred. Zero the
			// captured value and report failure instead.
			var zero V
			oldV = zero
			return
		}

		// Update value and mark swapped.
		item.Value = new
		ok = true

		// Set hook func ptr.
		invalid = c.Invalid
	})
	if ok && invalid != nil {
		// Pass the replaced value to invalidate hook (outside lock).
		invalid(key, oldV)
	}
	return ok
}
// Swap: implements cache.Cache's Swap().
func (c *Cache[K, V]) Swap(key K, swp V) V {
	var (
		// previous value stored under key (zero if absent).
		prev V

		// whether key was present.
		found bool

		// invalidate hook, captured under lock.
		invalid func(K, V)
	)
	c.locked(func() {
		entry, ok := c.Cache.Get(key)
		if !ok {
			// Nothing stored under key: return zero value.
			return
		}
		found = true

		// Exchange the stored value for the replacement.
		prev = entry.Value.(V)
		entry.Value = swp

		// Capture hook func ptr while still locked.
		invalid = c.Invalid
	})
	if found && invalid != nil {
		// The old value was replaced: notify invalidate hook.
		invalid(key, prev)
	}
	return prev
}
// Has: implements cache.Cache's Has().
func (c *Cache[K, V]) Has(key K) bool {
	var exists bool
	c.locked(func() {
		// Membership check under lock.
		exists = c.Cache.Has(key)
	})
	return exists
}
// Invalidate: implements cache.Cache's Invalidate().
func (c *Cache[K, V]) Invalidate(key K) bool {
	var (
		// whether key was present.
		found bool

		// value removed from under key.
		prev V

		// invalidate hook, captured under lock.
		invalid func(K, V)
	)
	c.locked(func() {
		entry, ok := c.Cache.Get(key)
		if !ok {
			return
		}
		found = true

		// Capture value, drop the entry from the
		// map, then release the entry to the pool.
		prev = entry.Value.(V)
		_ = c.Cache.Delete(key)
		PutEntry(entry)

		// Capture hook func ptr while still locked.
		invalid = c.Invalid
	})
	if found && invalid != nil {
		// Notify invalidate hook outside the lock.
		invalid(key, prev)
	}
	return found
}
// InvalidateAll: implements cache.Cache's InvalidateAll().
//
// NOTE(review): 'ok' reports only the result of the FINAL key lookup
// (existing behavior, preserved here) — it does not mean "all keys
// were found".
func (c *Cache[K, V]) InvalidateAll(keys ...K) (ok bool) {
	var (
		// deleted items.
		items []*Entry

		// hook func ptr.
		invalid func(K, V)
	)

	// Allocate a slice for invalidated.
	items = make([]*Entry, 0, len(keys))

	c.locked(func() {
		for x := range keys {
			var item *Entry

			// Check for item in cache.
			item, ok = c.Cache.Get(keys[x])
			if !ok {
				continue
			}

			// Append this old entry.
			items = append(items, item)

			// Remove from cache map.
			_ = c.Cache.Delete(keys[x])
		}

		// Set hook func ptr while still locked.
		invalid = c.Invalid
	})

	for x := range items {
		if invalid != nil {
			// Pass to invalidate hook.
			k := items[x].Key.(K)
			v := items[x].Value.(V)
			invalid(k, v)
		}

		// BUGFIX: always return deleted entries to the pool; previously
		// they were only released when an invalidate hook was set, so
		// with no hook the entries were never recycled.
		PutEntry(items[x])
	}

	return
}
// Clear: implements cache.Cache's Clear().
//
// BUGFIX: this previously called c.Trim(100). Trim(perc) truncates the
// cache down to perc% of total capacity, so Trim(100) is a no-op for any
// cache that is not over-full. Clearing means dropping every entry,
// which is trimming to 0% of capacity.
func (c *Cache[K, V]) Clear() { c.Trim(0) }
// Trim will truncate the cache to ensure it stays within given percentage of total capacity.
func (c *Cache[K, V]) Trim(perc float64) {
	var (
		// entries removed from the cache.
		dropped []*Entry

		// invalidate hook, captured under lock.
		invalid func(K, V)
	)
	c.locked(func() {
		// Number of entries the cache may keep at this percentage,
		// and how many it currently holds beyond that.
		keep := int((perc / 100) * float64(c.Cache.Cap()))
		over := c.Cache.Len() - keep
		if over <= 0 {
			// Already within bounds.
			return
		}

		// Capture hook func ptr while still locked.
		invalid = c.Invalid

		// Drop the excess entries.
		dropped = c.truncate(over, invalid)
	})
	if invalid == nil {
		return
	}
	for _, entry := range dropped {
		// Notify invalidate hook, then release entry to pool.
		invalid(entry.Key.(K), entry.Value.(V))
		PutEntry(entry)
	}
}
// Len: implements cache.Cache's Len().
func (c *Cache[K, V]) Len() int {
	var n int
	c.locked(func() {
		// Read length under lock.
		n = c.Cache.Len()
	})
	return n
}
// Cap: implements cache.Cache's Cap().
func (c *Cache[K, V]) Cap() int {
	var n int
	c.locked(func() {
		// Read capacity under lock.
		n = c.Cache.Cap()
	})
	return n
}
// locked performs given function within mutex lock (NOTE: UNLOCK IS NOT DEFERRED).
// Because the unlock is not deferred, a panic inside fn would leave the
// mutex held; every fn passed in by this file is panic-free straight-line
// code, which keeps the hot path cheap.
func (c *Cache[K, V]) locked(fn func()) {
	c.Lock()
	fn()
	c.Unlock()
}
// truncate will truncate the cache by given size, returning deleted items.
func (c *Cache[K, V]) truncate(sz int, hook func(K, V)) []*Entry {
	if hook == nil {
		// No invalidate hook to run: release truncated entries
		// straight back to the pool, nothing to hand to the caller.
		c.Cache.Truncate(sz, func(_ K, e *Entry) {
			PutEntry(e)
		})
		return nil
	}

	// Collect the truncated entries so the caller can
	// invoke the hook on them after releasing the lock.
	dropped := make([]*Entry, 0, sz)
	c.Cache.Truncate(sz, func(_ K, e *Entry) {
		dropped = append(dropped, e)
	})
	return dropped
}

23
vendor/codeberg.org/gruf/go-cache/v3/simple/pool.go generated vendored Normal file
View file

@ -0,0 +1,23 @@
package simple
import "sync"
// entryPool is a global pool for Entry
// objects, regardless of cache type.
var entryPool sync.Pool
// GetEntry fetches an Entry from pool, or allocates new.
func GetEntry() *Entry {
	// A failed assertion covers the nil-from-empty-pool case.
	if e, ok := entryPool.Get().(*Entry); ok {
		return e
	}
	return new(Entry)
}
// PutEntry replaces an Entry in the pool.
func PutEntry(e *Entry) {
	// Clear fields so a pooled entry does not pin
	// its previous key/value in memory.
	e.Key, e.Value = nil, nil
	entryPool.Put(e)
}

View file

@ -15,7 +15,7 @@ type Entry[Key comparable, Value any] struct {
Expiry uint64 Expiry uint64
} }
// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own. // Cache is the underlying TTLCache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own.
type Cache[Key comparable, Value any] struct { type Cache[Key comparable, Value any] struct {
// TTL is the cache item TTL. // TTL is the cache item TTL.
TTL time.Duration TTL time.Duration

19
vendor/github.com/DmitriyVTitov/size/.gitignore generated vendored Normal file
View file

@ -0,0 +1,19 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
example
.idea
go.sum

21
vendor/github.com/DmitriyVTitov/size/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Dmitriy Titov (Дмитрий Титов)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

48
vendor/github.com/DmitriyVTitov/size/README.md generated vendored Normal file
View file

@ -0,0 +1,48 @@
# size - calculates variable's memory consumption at runtime
### Part of the [Transflow Project](http://transflow.ru/)
Sometimes you may need a tool to measure the size of an object in your Go program at runtime. This package makes an attempt to do so. The package is based on `binary.Size()` from the Go standard library.
Features:
- supports non-fixed size variables and struct fields: `struct`, `int`, `slice`, `string`, `map`;
- supports complex types including structs with non-fixed size fields;
- supports all basic types (numbers, bool);
- supports `chan` and `interface`;
- supports pointers;
- implements infinite recursion detection (i.e. pointer inside struct field references to parent struct).
### Usage example
```
package main
import (
"fmt"
// Use latest tag.
"github.com/DmitriyVTitov/size"
)
func main() {
a := struct {
a int
b string
c bool
d int32
e []byte
f [3]int64
}{
a: 10, // 8 bytes
b: "Text", // 16 (string itself) + 4 = 20 bytes
c: true, // 1 byte
d: 25, // 4 bytes
e: []byte{'c', 'd', 'e'}, // 24 (slice itself) + 3 = 27 bytes
f: [3]int64{1, 2, 3}, // 3 * 8 = 24 bytes
} // 84 + 3 (padding) = 87 bytes
fmt.Println(size.Of(a))
}
// Output: 87
```

142
vendor/github.com/DmitriyVTitov/size/size.go generated vendored Normal file
View file

@ -0,0 +1,142 @@
// Package size implements run-time calculation of size of the variable.
// Source code is based on "binary.Size()" function from Go standard library.
// size.Of() omits the size of the slice, array and map containers themselves (24, 24 and 8 bytes).
// When counting maps separate calculations are done for keys and values.
package size
import (
"reflect"
"unsafe"
)
// Of returns the size of 'v' in bytes.
// If there is an error during calculation, Of returns -1.
func Of(v interface{}) int {
	// Track visited pointers so memory shared between
	// fields is only ever counted a single time.
	seen := make(map[uintptr]bool)
	return sizeOf(reflect.Indirect(reflect.ValueOf(v)), seen)
}
// sizeOf returns the number of bytes the actual data represented by v occupies in memory.
// If there is an error, sizeOf returns -1.
//
// 'cache' records pointers (and string data addresses) that were already
// visited, so shared memory is counted once and pointer cycles terminate.
func sizeOf(v reflect.Value, cache map[uintptr]bool) int {
	switch v.Kind() {
	case reflect.Array:
		// Deep size of every element.
		sum := 0
		for i := 0; i < v.Len(); i++ {
			s := sizeOf(v.Index(i), cache)
			if s < 0 {
				return -1
			}
			sum += s
		}
		// NOTE(review): for arrays Cap() == Len(), so this extra term is
		// always zero; it mirrors the slice case below.
		return sum + (v.Cap()-v.Len())*int(v.Type().Elem().Size())
	case reflect.Slice:
		// return 0 if this node has been visited already
		if cache[v.Pointer()] {
			return 0
		}
		cache[v.Pointer()] = true
		// Deep size of the used elements...
		sum := 0
		for i := 0; i < v.Len(); i++ {
			s := sizeOf(v.Index(i), cache)
			if s < 0 {
				return -1
			}
			sum += s
		}
		// ...plus shallow size of the unused capacity...
		sum += (v.Cap() - v.Len()) * int(v.Type().Elem().Size())
		// ...plus the slice header itself.
		return sum + int(v.Type().Size())
	case reflect.Struct:
		// Deep size of every field.
		sum := 0
		for i, n := 0, v.NumField(); i < n; i++ {
			s := sizeOf(v.Field(i), cache)
			if s < 0 {
				return -1
			}
			sum += s
		}
		// Look for struct padding: total type size minus
		// the sum of the individual field sizes.
		padding := int(v.Type().Size())
		for i, n := 0, v.NumField(); i < n; i++ {
			padding -= int(v.Field(i).Type().Size())
		}
		return sum + padding
	case reflect.String:
		s := v.String()
		// NOTE(review): reflect.StringHeader is deprecated in modern Go;
		// kept here because this vendored module targets older releases.
		hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
		// Shared string data is counted once; repeat
		// visits cost only the string header.
		if cache[hdr.Data] {
			return int(v.Type().Size())
		}
		cache[hdr.Data] = true
		return len(s) + int(v.Type().Size())
	case reflect.Ptr:
		// return Ptr size if this node has been visited already (infinite recursion)
		if cache[v.Pointer()] {
			return int(v.Type().Size())
		}
		cache[v.Pointer()] = true
		if v.IsNil() {
			// Nil pointer: just the pointer word itself.
			return int(reflect.New(v.Type()).Type().Size())
		}
		s := sizeOf(reflect.Indirect(v), cache)
		if s < 0 {
			return -1
		}
		// Pointee size plus the pointer word.
		return s + int(v.Type().Size())
	case reflect.Bool,
		reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Int, reflect.Uint,
		reflect.Chan,
		reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128,
		reflect.Func:
		// Fixed-size kinds: the type's shallow size is the whole story.
		return int(v.Type().Size())
	case reflect.Map:
		// return 0 if this node has been visited already (infinite recursion)
		if cache[v.Pointer()] {
			return 0
		}
		cache[v.Pointer()] = true
		sum := 0
		keys := v.MapKeys()
		for i := range keys {
			val := v.MapIndex(keys[i])
			// calculate size of key and value separately
			sv := sizeOf(val, cache)
			if sv < 0 {
				return -1
			}
			sum += sv
			sk := sizeOf(keys[i], cache)
			if sk < 0 {
				return -1
			}
			sum += sk
		}
		// Include overhead due to unused map buckets. 10.79 comes
		// from https://golang.org/src/runtime/map.go.
		return sum + int(v.Type().Size()) + int(float64(len(keys))*10.79)
	case reflect.Interface:
		// Dynamic value plus the interface header.
		return sizeOf(v.Elem(), cache) + int(v.Type().Size())
	}
	// Unsupported kind (e.g. UnsafePointer): signal error.
	return -1
}

6
vendor/modules.txt vendored
View file

@ -13,10 +13,11 @@ codeberg.org/gruf/go-bytesize
# codeberg.org/gruf/go-byteutil v1.1.2 # codeberg.org/gruf/go-byteutil v1.1.2
## explicit; go 1.16 ## explicit; go 1.16
codeberg.org/gruf/go-byteutil codeberg.org/gruf/go-byteutil
# codeberg.org/gruf/go-cache/v3 v3.4.4 # codeberg.org/gruf/go-cache/v3 v3.5.5
## explicit; go 1.19 ## explicit; go 1.19
codeberg.org/gruf/go-cache/v3 codeberg.org/gruf/go-cache/v3
codeberg.org/gruf/go-cache/v3/result codeberg.org/gruf/go-cache/v3/result
codeberg.org/gruf/go-cache/v3/simple
codeberg.org/gruf/go-cache/v3/ttl codeberg.org/gruf/go-cache/v3/ttl
# codeberg.org/gruf/go-debug v1.3.0 # codeberg.org/gruf/go-debug v1.3.0
## explicit; go 1.16 ## explicit; go 1.16
@ -69,6 +70,9 @@ codeberg.org/gruf/go-sched
codeberg.org/gruf/go-store/v2/kv codeberg.org/gruf/go-store/v2/kv
codeberg.org/gruf/go-store/v2/storage codeberg.org/gruf/go-store/v2/storage
codeberg.org/gruf/go-store/v2/util codeberg.org/gruf/go-store/v2/util
# github.com/DmitriyVTitov/size v1.5.0
## explicit; go 1.14
github.com/DmitriyVTitov/size
# github.com/KimMachineGun/automemlimit v0.2.6 # github.com/KimMachineGun/automemlimit v0.2.6
## explicit; go 1.19 ## explicit; go 1.19
github.com/KimMachineGun/automemlimit github.com/KimMachineGun/automemlimit