simplify timeline cache loading, fix lo/hi returns, fix timeline invalidation side-effects missing for some federated actions

This commit is contained in:
kim 2025-04-02 17:25:33 +01:00
parent 40b95f7a5a
commit d3a1abeefb
12 changed files with 311 additions and 279 deletions

View file

@ -338,15 +338,10 @@ func (t *StatusTimeline) Load(
// to load status models of already cached entries in the timeline.
loadIDs func(ids []string) (statuses []*gtsmodel.Status, err error),
// preFilter can be used to perform filtering of returned
// filter can be used to perform filtering of returned
// statuses BEFORE insert into cache. i.e. this will affect
// what actually gets stored in the timeline cache.
preFilter func(each *gtsmodel.Status) (delete bool, err error),
// postFilter can be used to perform filtering of returned
// statuses AFTER insert into cache. i.e. this will not effect
// what actually gets stored in the timeline cache.
postFilter func(each *gtsmodel.Status) (delete bool, err error),
filter func(each *gtsmodel.Status) (delete bool, err error),
// prepareAPI should prepare internal status model to frontend API model.
prepareAPI func(status *gtsmodel.Status) (apiStatus *apimodel.Status, err error),
@ -392,15 +387,20 @@ func (t *StatusTimeline) Load(
dir,
)
if len(metas) > 0 {
// We ALWAYS return and work on
// statuses in DESC order, but the
// timeline cache returns statuses
// in the *requested* order.
if dir == structr.Asc {
slices.Reverse(metas)
}
// We now reset the lo,hi values to
// represent the lowest and highest
// index values of loaded statuses.
//
// We continually update these while
// building up statuses to return, for
// caller to build next / prev page
// response values.
lo, hi = "", ""
// Preallocate a slice of up-to-limit API models.
apiStatuses := make([]*apimodel.Status, 0, limit)
if len(metas) > 0 {
// Before we can do any filtering, we need
// to load status models for cached entries.
err := loadStatuses(metas, loadIDs)
@ -408,42 +408,38 @@ func (t *StatusTimeline) Load(
return nil, "", "", gtserror.Newf("error loading statuses: %w", err)
}
// Update paging values
// based on returned data.
nextPageParams(nextPg,
metas[len(metas)-1].ID,
metas[0].ID,
order,
// Set initial lo, hi values.
hi = metas[len(metas)-1].ID
lo = metas[0].ID
// Update paging parameters used for next database query.
nextPageParams(nextPg, metas[len(metas)-1].ID, order)
// Prepare frontend API models for
// the cached statuses. For now this
// also does its own extra filtering.
apiStatuses = prepareStatuses(ctx,
metas,
prepareAPI,
apiStatuses,
limit,
)
// Before any further loading,
// store current lo, hi values
// as possible lo, hi returns.
lo = metas[len(metas)-1].ID
hi = metas[0].ID
// Drop all entries we failed to load statuses for.
metas = slices.DeleteFunc(metas, (*StatusMeta).isNotLoaded)
// Perform post-filtering on cached status entries.
metas, err = doStatusPostFilter(metas, postFilter)
if err != nil {
return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err)
}
}
// Track all newly loaded status entries
// AFTER 'preFilter', but before 'postFilter',
// to later insert into timeline cache.
// after filtering for insert into cache.
var justLoaded []*StatusMeta
// Check whether loaded enough from cache.
if need := limit - len(metas); need > 0 {
if need := limit - len(apiStatuses); need > 0 {
// Perform a maximum of 5
// load attempts fetching
// statuses to reach limit.
for i := 0; i < 5; i++ {
// Load a little more than
// limit to reduce db calls.
nextPg.Limit += 10
// Perform maximum of 10 load
// attempts fetching statuses.
for i := 0; i < 10; i++ {
// Load next timeline statuses.
statuses, err := loadPage(nextPg)
@ -457,18 +453,19 @@ func (t *StatusTimeline) Load(
break
}
// Update paging values
// based on returned data.
nextPageParams(nextPg,
statuses[len(statuses)-1].ID,
statuses[0].ID,
order,
)
if lo == "" {
// Set min returned paging
// value if not already set.
lo = statuses[0].ID
}
// Perform any pre-filtering on newly loaded statuses.
statuses, err = doStatusPreFilter(statuses, preFilter)
// Update nextPg cursor parameter for next database query.
nextPageParams(nextPg, statuses[len(statuses)-1].ID, order)
// Perform any filtering on newly loaded statuses.
statuses, err = doStatusFilter(statuses, filter)
if err != nil {
return nil, "", "", gtserror.Newf("error pre-filtering statuses: %w", err)
return nil, "", "", gtserror.Newf("error filtering statuses: %w", err)
}
// After filtering no more
@ -477,71 +474,46 @@ func (t *StatusTimeline) Load(
continue
}
// On each iteration, since statuses
// returned will always be in DESC order,
// iteratively update the lo paging value
// that we return for next / prev pages.
lo = statuses[len(statuses)-1].ID
// Convert to our cache type,
// these will get inserted into
// the cache in prepare() below.
uncached := toStatusMeta(statuses)
metas := toStatusMeta(statuses)
// Before any filtering append to newly loaded.
justLoaded = append(justLoaded, uncached...)
// Append to newly loaded for later insert.
justLoaded = append(justLoaded, metas...)
// Perform any post-filtering on loaded timeline entries.
filtered, err := doStatusPostFilter(uncached, postFilter)
if err != nil {
return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err)
}
// Prepare frontend API models for
// the loaded statuses. For now this
// also does its own extra filtering.
apiStatuses = prepareStatuses(ctx,
metas,
prepareAPI,
apiStatuses,
limit,
)
// Append newly filtered meta entries.
metas = append(metas, filtered...)
// If we have anything, return
// here. Even if below limit.
if len(apiStatuses) > 0 {
// Check if we reached
// requested page limit.
if len(metas) >= limit {
// Set returned hi status paging value.
hi = apiStatuses[len(apiStatuses)-1].ID
break
}
}
}
// Prepare frontend API models.
var apiStatuses []*apimodel.Status
if len(metas) > 0 {
switch {
case len(metas) <= limit:
// We have under
// expected limit.
case order.Ascending():
// Ascending order was requested
// and we have more than limit, so
// trim extra metadata from end.
metas = metas[:limit]
default: /* i.e. descending */
// Descending order was requested
// and we have more than limit, so
// trim extra metadata from start.
metas = metas[len(metas)-limit:]
}
// Using meta and funcs, prepare frontend API models.
apiStatuses = prepareStatuses(ctx, metas, prepareAPI)
if order.Ascending() {
// The caller always expects the statuses
// to be returned in DESC order, but we
// build the status slice in paging order.
// If paging ASC, we need to reverse the
// returned statuses and paging values.
slices.Reverse(apiStatuses)
lo, hi = hi, lo
}
if len(justLoaded) > 0 {
if hi == "" {
// Check whether a hi value was set
// from an initial load of cached entries,
// if not we set the returned hi paging
// value from first in loaded statuses.
hi = justLoaded[0].ID
}
// Even if we don't return them, insert
// the excess (post-filtered) into cache.
t.cache.Insert(justLoaded...)
@ -729,6 +701,8 @@ func prepareStatuses(
ctx context.Context,
meta []*StatusMeta,
prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error),
apiStatuses []*apimodel.Status,
limit int,
) []*apimodel.Status {
switch { //nolint:gocritic
case prepareAPI == nil:
@ -737,9 +711,14 @@ func prepareStatuses(
// Iterate the given StatusMeta objects for pre-prepared
// frontend models, otherwise attempting to prepare them.
apiStatuses := make([]*apimodel.Status, 0, len(meta))
for _, meta := range meta {
// Check if we have prepared enough
// API statuses for caller to return.
if len(apiStatuses) >= limit {
break
}
if meta.loaded == nil {
// We failed loading this
// status, skip preparing.
@ -758,10 +737,6 @@ func prepareStatuses(
}
if meta.prepared != nil {
// TODO: we won't need nil check when mutes
// / filters are moved to appropriate funcs.
//
// Add the prepared API model to return slice.
apiStatuses = append(apiStatuses, meta.prepared)
}
}
@ -823,9 +798,9 @@ func toStatusMeta(statuses []*gtsmodel.Status) []*StatusMeta {
})
}
// doStatusPreFilter performs given filter function on provided statuses,
// doStatusFilter performs given filter function on provided statuses,
// returning early if an error is returned. returns filtered statuses.
func doStatusPreFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status) (bool, error)) ([]*gtsmodel.Status, error) {
func doStatusFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status) (bool, error)) ([]*gtsmodel.Status, error) {
// Check for provided
// filter function.
@ -855,37 +830,3 @@ func doStatusPreFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status
return statuses, nil
}
// doStatusPostFilter performs given filter function on provided status meta,
// expecting that embedded status is already loaded, returning filtered status
// meta, as well as those *filtered out*. returns early if error is returned.
func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool, error)) ([]*StatusMeta, error) {
// Check for provided
// filter function.
if filter == nil {
return metas, nil
}
// Iterate through input metas.
for i := 0; i < len(metas); {
meta := metas[i]
// Pass through filter func.
ok, err := filter(meta.loaded)
if err != nil {
return nil, err
}
if ok {
// Delete meta entry from input slice.
metas = slices.Delete(metas, i, i+1)
continue
}
// Iter.
i++
}
return metas, nil
}

View file

@ -30,13 +30,13 @@ import (
// updated while maintaining the boundary value.
func nextPageParams(
page *paging.Page,
nextLo, nextHi string,
lastIdx string,
order paging.Order,
) {
if order.Ascending() {
page.Min.Value = nextLo
page.Min.Value = lastIdx
} else /* i.e. descending */ { //nolint:revive
page.Max.Value = nextHi
page.Max.Value = lastIdx
}
}

View file

@ -365,13 +365,6 @@ func loadStatusTimelinePage(
return nil, err
}
// The order we return from the database and
// timeline caches differs depending on ordering,
// but the caller always expects DESCENDING.
if order.Ascending() {
slices.Reverse(statusIDs)
}
// Fetch statuses from DB / cache with given IDs.
return state.DB.GetStatusesByIDs(ctx, statusIDs)
}

View file

@ -136,10 +136,7 @@ func (f *federatingDB) undoFollow(
// Convert AS Follow to barebones *gtsmodel.Follow,
// retrieving origin + target accts from the db.
follow, err := f.converter.ASFollowToFollow(
gtscontext.SetBarebones(ctx),
asFollow,
)
follow, err := f.converter.ASFollowToFollow(ctx, asFollow)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
err := gtserror.Newf("error converting AS Follow to follow: %w", err)
return err
@ -152,6 +149,11 @@ func (f *federatingDB) undoFollow(
return nil
}
// Lock on the Follow URI
// as we may be updating it.
unlock := f.state.FedLocks.Lock(follow.URI)
defer unlock()
// Ensure addressee is follow target.
if follow.TargetAccountID != receivingAcct.ID {
const text = "receivingAcct was not Follow target"
@ -178,7 +180,16 @@ func (f *federatingDB) undoFollow(
return err
}
log.Debug(ctx, "Follow undone")
// Send the deleted follow through to
// the fedi worker to process side effects.
f.state.Workers.Federator.Queue.Push(&messages.FromFediAPI{
APObjectType: ap.ActivityFollow,
APActivityType: ap.ActivityUndo,
GTSModel: follow,
Receiving: receivingAcct,
Requesting: requestingAcct,
})
return nil
}
@ -269,7 +280,16 @@ func (f *federatingDB) undoLike(
return err
}
log.Debug(ctx, "Like undone")
// Send the deleted like through to
// the fedi worker to process side effects.
f.state.Workers.Federator.Queue.Push(&messages.FromFediAPI{
APObjectType: ap.ActivityLike,
APActivityType: ap.ActivityUndo,
GTSModel: fave,
Receiving: receivingAcct,
Requesting: requestingAcct,
})
return nil
}
@ -298,10 +318,7 @@ func (f *federatingDB) undoBlock(
// Convert AS Block to barebones *gtsmodel.Block,
// retrieving origin + target accts from the DB.
block, err := f.converter.ASBlockToBlock(
gtscontext.SetBarebones(ctx),
asBlock,
)
block, err := f.converter.ASBlockToBlock(ctx, asBlock)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
err := gtserror.Newf("error converting AS Block to block: %w", err)
return err
@ -333,7 +350,16 @@ func (f *federatingDB) undoBlock(
return err
}
log.Debug(ctx, "Block undone")
// Send the deleted block through to
// the fedi worker to process side effects.
f.state.Workers.Federator.Queue.Push(&messages.FromFediAPI{
APObjectType: ap.ActivityBlock,
APActivityType: ap.ActivityUndo,
GTSModel: block,
Receiving: receivingAcct,
Requesting: requestingAcct,
})
return nil
}

View file

@ -38,22 +38,6 @@ func (p *Processor) HomeTimelineGet(
*apimodel.PageableResponse,
gtserror.WithCode,
) {
var pageQuery url.Values
var postFilter func(*gtsmodel.Status) (bool, error)
if local {
// Set local = true query.
pageQuery = localOnlyTrue
// Remove any non-local statuses if local-only requested.
postFilter = func(s *gtsmodel.Status) (bool, error) {
return !*s.Local, nil
}
} else {
// Set local = false query.
pageQuery = localOnlyFalse
}
return p.getStatusTimeline(ctx,
// Auth'd
@ -74,7 +58,19 @@ func (p *Processor) HomeTimelineGet(
// page query flag, (this map
// later gets copied before
// any further usage).
pageQuery,
func() url.Values {
var pageQuery url.Values
if local {
// Set local = true query.
pageQuery = localOnlyTrue
} else {
// Set local = false query.
pageQuery = localOnlyFalse
}
return pageQuery
}(),
// Status filter context.
statusfilter.FilterContextHome,
@ -92,9 +88,5 @@ func (p *Processor) HomeTimelineGet(
ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s)
return !ok, err
},
// Post-filtering function,
// i.e. filter after caching.
postFilter,
)
}

View file

@ -93,7 +93,7 @@ func (p *Processor) ListTimelineGet(
return p.state.DB.GetListTimeline(ctx, listID, pg)
},
// Pre-filtering function,
// Filtering function,
// i.e. filter before caching.
func(s *gtsmodel.Status) (bool, error) {
@ -101,9 +101,5 @@ func (p *Processor) ListTimelineGet(
ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s)
return !ok, err
},
// Post-filtering function,
// i.e. filter after caching.
nil,
)
}

View file

@ -89,10 +89,6 @@ func (p *Processor) publicTimelineGet(
ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s)
return !ok, err
},
// Post-filtering function,
// i.e. filter after caching.
nil,
)
}
@ -134,7 +130,7 @@ func (p *Processor) localTimelineGet(
return p.state.DB.GetLocalTimeline(ctx, pg)
},
// Pre-filtering function,
// Filtering function,
// i.e. filter before caching.
func(s *gtsmodel.Status) (bool, error) {
@ -142,9 +138,5 @@ func (p *Processor) localTimelineGet(
ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s)
return !ok, err
},
// Post-filtering function,
// i.e. filter after caching.
nil,
)
}

View file

@ -70,8 +70,7 @@ func (p *Processor) getStatusTimeline(
pageQuery url.Values,
filterCtx statusfilter.FilterContext,
loadPage func(*paging.Page) (statuses []*gtsmodel.Status, err error),
preFilter func(*gtsmodel.Status) (bool, error),
postFilter func(*gtsmodel.Status) (bool, error),
filter func(*gtsmodel.Status) (bool, error),
) (
*apimodel.PageableResponse,
gtserror.WithCode,
@ -128,13 +127,9 @@ func (p *Processor) getStatusTimeline(
return p.state.DB.GetStatusesByIDs(ctx, ids)
},
// Pre-filtering function,
// Filtering function,
// i.e. filter before caching.
preFilter,
// Post-filtering function,
// i.e. filter after caching.
postFilter,
filter,
// Frontend API model preparation function.
func(status *gtsmodel.Status) (*apimodel.Status, error) {

View file

@ -682,8 +682,15 @@ func (p *clientAPI) CreateBlock(ctx context.Context, cMsg *messages.FromClientAP
return gtserror.Newf("%T not parseable as *gtsmodel.Block", cMsg.GTSModel)
}
// Perform any necessary timeline invalidation.
p.surface.invalidateTimelinesForBlock(ctx, block)
if block.Account.IsLocal() {
// Perform timeline invalidation for block origin account.
p.surface.invalidateTimelinesForAccount(ctx, block.AccountID)
}
if block.TargetAccount.IsLocal() {
// Perform timeline invalidation for block target account.
p.surface.invalidateTimelinesForAccount(ctx, block.TargetAccountID)
}
// TODO: same with notifications?
// TODO: same with bookmarks?
@ -843,6 +850,16 @@ func (p *clientAPI) UndoFollow(ctx context.Context, cMsg *messages.FromClientAPI
log.Errorf(ctx, "error updating account stats: %v", err)
}
if follow.Account.IsLocal() {
// Perform timeline invalidation for follow origin account.
p.surface.invalidateTimelinesForAccount(ctx, follow.AccountID)
}
if follow.TargetAccount.IsLocal() {
// Perform timeline invalidation for follow target account.
p.surface.invalidateTimelinesForAccount(ctx, follow.TargetAccountID)
}
if err := p.federate.UndoFollow(ctx, follow); err != nil {
log.Errorf(ctx, "error federating follow undo: %v", err)
}
@ -856,6 +873,16 @@ func (p *clientAPI) UndoBlock(ctx context.Context, cMsg *messages.FromClientAPI)
return gtserror.Newf("%T not parseable as *gtsmodel.Block", cMsg.GTSModel)
}
if block.Account.IsLocal() {
// Perform timeline invalidation for block origin account.
p.surface.invalidateTimelinesForAccount(ctx, block.AccountID)
}
if block.TargetAccount.IsLocal() {
// Perform timeline invalidation for block target account.
p.surface.invalidateTimelinesForAccount(ctx, block.TargetAccountID)
}
if err := p.federate.UndoBlock(ctx, block); err != nil {
log.Errorf(ctx, "error federating block undo: %v", err)
}
@ -1010,6 +1037,25 @@ func (p *clientAPI) DeleteAccountOrUser(ctx context.Context, cMsg *messages.From
p.state.Workers.Federator.Queue.Delete("Receiving.ID", account.ID)
p.state.Workers.Federator.Queue.Delete("TargetURI", account.URI)
// Remove any entries authored by account from timelines.
p.surface.removeTimelineEntriesByAccount(account.ID)
// Remove any of their cached timelines.
p.state.Caches.Timelines.Public.Delete(account.ID)
p.state.Caches.Timelines.Home.Delete(account.ID)
p.state.Caches.Timelines.Local.Delete(account.ID)
// Get the IDs of all the lists owned by the given account ID.
listIDs, err := p.state.DB.GetListIDsByAccountID(ctx, account.ID)
if err != nil {
log.Errorf(ctx, "error getting lists for account %s: %v", account.ID, err)
}
// Remove list timelines of account.
for _, listID := range listIDs {
p.state.Caches.Timelines.List.Delete(listID)
}
if err := p.federate.DeleteAccount(ctx, cMsg.Target); err != nil {
log.Errorf(ctx, "error federating account delete: %v", err)
}

View file

@ -197,9 +197,22 @@ func (p *Processor) ProcessFromFediAPI(ctx context.Context, fMsg *messages.FromF
// UNDO SOMETHING
case ap.ActivityUndo:
switch fMsg.APObjectType {
// UNDO FOLLOW
case ap.ActivityFollow:
return p.fediAPI.UndoFollow(ctx, fMsg)
// UNDO BLOCK
case ap.ActivityBlock:
return p.fediAPI.UndoBlock(ctx, fMsg)
// UNDO ANNOUNCE
if fMsg.APObjectType == ap.ActivityAnnounce {
case ap.ActivityAnnounce:
return p.fediAPI.UndoAnnounce(ctx, fMsg)
// UNDO LIKE
case ap.ActivityLike:
return p.fediAPI.UndoFave(ctx, fMsg)
}
}
@ -701,8 +714,15 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e
return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel)
}
// Perform any necessary timeline invalidation.
p.surface.invalidateTimelinesForBlock(ctx, block)
if block.Account.IsLocal() {
// Perform timeline invalidation for block origin account.
p.surface.invalidateTimelinesForAccount(ctx, block.AccountID)
}
if block.TargetAccount.IsLocal() {
// Perform timeline invalidation for block target account.
p.surface.invalidateTimelinesForAccount(ctx, block.TargetAccountID)
}
// Remove any follows that existed between blocker + blockee.
// (note this handles removing any necessary list entries).
@ -1054,6 +1074,9 @@ func (p *fediAPI) DeleteAccount(ctx context.Context, fMsg *messages.FromFediAPI)
p.state.Workers.Federator.Queue.Delete("Requesting.ID", account.ID)
p.state.Workers.Federator.Queue.Delete("TargetURI", account.URI)
// Remove any entries authored by account from timelines.
p.surface.removeTimelineEntriesByAccount(account.ID)
// First perform the actual account deletion.
if err := p.account.Delete(ctx, account, account.ID); err != nil {
log.Errorf(ctx, "error deleting account: %v", err)
@ -1172,6 +1195,44 @@ func (p *fediAPI) RejectAnnounce(ctx context.Context, fMsg *messages.FromFediAPI
return nil
}
func (p *fediAPI) UndoFollow(ctx context.Context, fMsg *messages.FromFediAPI) error {
follow, ok := fMsg.GTSModel.(*gtsmodel.Follow)
if !ok {
return gtserror.Newf("%T not parseable as *gtsmodel.Follow", fMsg.GTSModel)
}
if follow.Account.IsLocal() {
// Perform timeline invalidation for follow origin account.
p.surface.invalidateTimelinesForAccount(ctx, follow.AccountID)
}
if follow.TargetAccount.IsLocal() {
// Perform timeline invalidation for follow target account.
p.surface.invalidateTimelinesForAccount(ctx, follow.TargetAccountID)
}
return nil
}
func (p *fediAPI) UndoBlock(ctx context.Context, fMsg *messages.FromFediAPI) error {
block, ok := fMsg.GTSModel.(*gtsmodel.Block)
if !ok {
return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel)
}
if block.Account.IsLocal() {
// Perform timeline invalidation for block origin account.
p.surface.invalidateTimelinesForAccount(ctx, block.AccountID)
}
if block.TargetAccount.IsLocal() {
// Perform timeline invalidation for block target account.
p.surface.invalidateTimelinesForAccount(ctx, block.TargetAccountID)
}
return nil
}
func (p *fediAPI) UndoAnnounce(
ctx context.Context,
fMsg *messages.FromFediAPI,
@ -1200,3 +1261,16 @@ func (p *fediAPI) UndoAnnounce(
return nil
}
func (p *fediAPI) UndoFave(ctx context.Context, fMsg *messages.FromFediAPI) error {
statusFave, ok := fMsg.GTSModel.(*gtsmodel.StatusFave)
if !ok {
return gtserror.Newf("%T not parseable as *gtsmodel.StatusFave", fMsg.GTSModel)
}
// Interaction counts changed on the faved status;
// uncache the prepared version from all timelines.
p.surface.invalidateStatusFromTimelines(statusFave.StatusID)
return nil
}

View file

@ -524,27 +524,6 @@ func (s *Surface) tagFollowersForStatus(
return visibleTagFollowerAccounts, errs.Combine()
}
// deleteStatusFromTimelines completely removes the given status from all timelines.
// It will also stream deletion of the status to all open streams.
func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) {
s.State.Caches.Timelines.Public.RemoveByStatusIDs(statusID)
s.State.Caches.Timelines.Local.RemoveByStatusIDs(statusID)
s.State.Caches.Timelines.Home.RemoveByStatusIDs(statusID)
s.State.Caches.Timelines.List.RemoveByStatusIDs(statusID)
s.Stream.Delete(ctx, statusID)
}
// invalidateStatusFromTimelines does cache invalidation on the given status by
// unpreparing it from all timelines, forcing it to be prepared again (with updated
// stats, boost counts, etc) next time it's fetched by the timeline owner. This goes
// both for the status itself, and for any boosts of the status.
func (s *Surface) invalidateStatusFromTimelines(statusID string) {
s.State.Caches.Timelines.Public.UnprepareByStatusIDs(statusID)
s.State.Caches.Timelines.Local.UnprepareByStatusIDs(statusID)
s.State.Caches.Timelines.Home.UnprepareByStatusIDs(statusID)
s.State.Caches.Timelines.List.UnprepareByStatusIDs(statusID)
}
// timelineStatusUpdate looks up HOME and LIST timelines of accounts
// that follow the status author or tags and pushes edit messages into any
// active streams.
@ -822,56 +801,52 @@ func (s *Surface) timelineStatusUpdateForTagFollowers(
return errs.Combine()
}
// invalidateTimelinesForBlock ...
func (s *Surface) invalidateTimelinesForBlock(ctx context.Context, block *gtsmodel.Block) {
// deleteStatusFromTimelines completely removes the given status from all timelines.
// It will also stream deletion of the status to all open streams.
func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) {
s.State.Caches.Timelines.Public.RemoveByStatusIDs(statusID)
s.State.Caches.Timelines.Local.RemoveByStatusIDs(statusID)
s.State.Caches.Timelines.Home.RemoveByStatusIDs(statusID)
s.State.Caches.Timelines.List.RemoveByStatusIDs(statusID)
s.Stream.Delete(ctx, statusID)
}
// Check if origin is local account,
// i.e. has status timeline caches.
if block.Account.IsLocal() {
// invalidateStatusFromTimelines does cache invalidation on the given status by
// unpreparing it from all timelines, forcing it to be prepared again (with updated
// stats, boost counts, etc) next time it's fetched by the timeline owner. This goes
// both for the status itself, and for any boosts of the status.
func (s *Surface) invalidateStatusFromTimelines(statusID string) {
s.State.Caches.Timelines.Public.UnprepareByStatusIDs(statusID)
s.State.Caches.Timelines.Local.UnprepareByStatusIDs(statusID)
s.State.Caches.Timelines.Home.UnprepareByStatusIDs(statusID)
s.State.Caches.Timelines.List.UnprepareByStatusIDs(statusID)
}
// Remove target's statuses
// from origin's home timeline.
s.State.Caches.Timelines.Home.
MustGet(block.AccountID).
RemoveByAccountIDs(block.TargetAccountID)
// removeTimelineEntriesByAccount removes all cached timeline entries authored by account ID.
func (s *Surface) removeTimelineEntriesByAccount(accountID string) {
s.State.Caches.Timelines.Public.RemoveByAccountIDs(accountID)
s.State.Caches.Timelines.Home.RemoveByAccountIDs(accountID)
s.State.Caches.Timelines.Local.RemoveByAccountIDs(accountID)
s.State.Caches.Timelines.List.RemoveByAccountIDs(accountID)
}
// Get the IDs of any lists created by origin account.
listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, block.AccountID)
if err != nil {
log.Errorf(ctx, "error getting account's list IDs for %s: %v", block.URI, err)
}
// invalidateTimelinesForAccount invalidates all timeline caches stored for given account ID.
func (s *Surface) invalidateTimelinesForAccount(ctx context.Context, accountID string) {
// Remove target's statuses from
// any of origin's list timelines.
for _, listID := range listIDs {
s.State.Caches.Timelines.List.
MustGet(listID).
RemoveByAccountIDs(block.TargetAccountID)
}
// There are a lot of visibility changes to calculate for any
// relationship change, so just clear all account's timelines.
s.State.Caches.Timelines.Public.Clear(accountID)
s.State.Caches.Timelines.Home.Clear(accountID)
s.State.Caches.Timelines.Local.Clear(accountID)
// Get the IDs of all the lists owned by the given account ID.
listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, accountID)
if err != nil {
log.Errorf(ctx, "error getting lists for account %s: %v", accountID, err)
}
// Check if target is local account,
// i.e. has status timeline caches.
if block.TargetAccount.IsLocal() {
// Remove origin's statuses
// from target's home timeline.
s.State.Caches.Timelines.Home.
MustGet(block.TargetAccountID).
RemoveByAccountIDs(block.AccountID)
// Get the IDs of any lists created by target account.
listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, block.TargetAccountID)
if err != nil {
log.Errorf(ctx, "error getting target account's list IDs for %s: %v", block.URI, err)
}
// Remove origin's statuses from
// any of target's list timelines.
for _, listID := range listIDs {
s.State.Caches.Timelines.List.
MustGet(listID).
RemoveByAccountIDs(block.AccountID)
}
// Clear list timelines of account.
for _, listID := range listIDs {
s.State.Caches.Timelines.List.Clear(listID)
}
}

View file

@ -512,7 +512,9 @@ func (c *Converter) ASFollowToFollow(ctx context.Context, followable ap.Followab
follow := &gtsmodel.Follow{
URI: uri,
Account: origin,
AccountID: origin.ID,
TargetAccount: target,
TargetAccountID: target.ID,
}