[bugfix] Set cache-control max-age dynamically for s3 (#1510)

* [bugfix] set cache-control max-age dynamically for s3

* woops

* double whoops

* time until, thank you linter, bless you, you're the best, no matter what kim says

* aa
This commit is contained in:
tobi 2023-02-16 14:18:53 +01:00 committed by GitHub
parent 40b584c219
commit 6ee0dc8c7d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 42 additions and 29 deletions

View file

@ -31,31 +31,25 @@ type Fileserver struct {
fileserver *fileserver.Module
}
// maxAge returns an appropriate Cache-Control max-age
// directive value for the storage method in use.
//
// The default max-age is deliberately long: a given URL
// always serves the same bytes (media paths embed a
// per-file ULID), so clients may cache aggressively
// without ever seeing stale content.
//
// With non-proxying s3, however, responses are redirects
// to pre-signed URLs that expire, so a much shorter
// max-age is used to stop clients caching redirects to
// links that have already lapsed.
func maxAge() string {
	const (
		shortAge = "max-age=86400"  // 24h
		longAge  = "max-age=604800" // 7d
	)
	if config.GetStorageBackend() != "s3" || config.GetStorageS3Proxy() {
		return longAge
	}
	return shortAge
}
// Route attaches the fileserver handlers to the given router,
// grouped under "fileserver", after applying the supplied
// middlewares and cache-control handling.
func (f *Fileserver) Route(r router.Router, m ...gin.HandlerFunc) {
fileserverGroup := r.AttachGroup("fileserver")
// attach middlewares appropriate for this group
// Attach middlewares appropriate for this group.
fileserverGroup.Use(m...)
// NOTE(review): the unconditional CacheControl below (via maxAge())
// together with the conditional CacheControl further down would set
// the header twice — this looks like leftover pre-change context
// from the diff; confirm only one of the two is meant to remain.
fileserverGroup.Use(middleware.CacheControl("private", maxAge()))
// If we're using local storage or proxying s3, we can set a
// long max-age on all file requests to reflect that we
// never host different files at the same URL (since
// ULIDs are generated per piece of media), so we can
// easily prevent clients having to fetch files repeatedly.
//
// If we *are* using non-proxying s3, however, the max age
// must be set dynamically within the request handler,
// based on how long the signed URL has left to live before
// it expires. This ensures that clients won't cache expired
// links. This is done within fileserver/servefile.go.
if config.GetStorageBackend() == "local" || config.GetStorageS3Proxy() {
fileserverGroup.Use(middleware.CacheControl("private", "max-age=604800")) // 7d
}
// Hand the group's Handle func to the fileserver module so it
// can register its own routes within this group.
f.fileserver.Route(fileserverGroup.Handle)
}

View file

@ -24,6 +24,7 @@ import (
"net/http"
"strconv"
"strings"
"time"
"codeberg.org/gruf/go-fastcopy"
"github.com/gin-gonic/gin"
@ -89,6 +90,10 @@ func (m *Module) ServeFile(c *gin.Context) {
if content.URL != nil {
// This is a non-local, non-proxied S3 file we're redirecting to.
// Derive the max-age value from how long the link has left until
// it expires.
maxAge := int(time.Until(content.URL.Expiry).Seconds())
c.Header("Cache-Control", "private,max-age="+strconv.Itoa(maxAge))
c.Redirect(http.StatusFound, content.URL.String())
return
}

View file

@ -20,8 +20,9 @@ package model
import (
"io"
"net/url"
"time"
"github.com/superseriousbusiness/gotosocial/internal/storage"
)
// Content wraps everything needed to serve a blob of content (some kind of media) through the API.
@ -35,7 +36,7 @@ type Content struct {
// Actual content
Content io.ReadCloser
// Resource URL to forward to if the file can be fetched from the storage directly (e.g. a signed S3 URL)
URL *url.URL
URL *storage.PresignedURL
}
// GetContentRequestForm describes a piece of content desired by the caller of the fileserver API.

View file

@ -41,6 +41,13 @@ const (
urlCacheExpiryFrequency = time.Minute * 5
)
// PresignedURL represents a pre-signed S3 URL together
// with the point in time at which the link expires and
// must no longer be handed out to clients.
type PresignedURL struct {
*url.URL
Expiry time.Time // link expires at this time
}
// ErrAlreadyExists is a ptr to underlying storage.ErrAlreadyExists,
// to put the related errors in the same package as our storage wrapper.
var ErrAlreadyExists = storage.ErrAlreadyExists
@ -54,11 +61,11 @@ type Driver struct {
// S3-only parameters
Proxy bool
Bucket string
PresignedCache *ttl.Cache[string, *url.URL]
PresignedCache *ttl.Cache[string, PresignedURL]
}
// URL will return a presigned GET object URL, but only if running on S3 storage with proxying disabled.
func (d *Driver) URL(ctx context.Context, key string) *url.URL {
func (d *Driver) URL(ctx context.Context, key string) *PresignedURL {
// Check whether S3 *without* proxying is enabled
s3, ok := d.Storage.(*storage.S3Storage)
if !ok || d.Proxy {
@ -72,7 +79,7 @@ func (d *Driver) URL(ctx context.Context, key string) *url.URL {
d.PresignedCache.Unlock()
if ok {
return e.Value
return &e.Value
}
u, err := s3.Client().PresignedGetObject(ctx, d.Bucket, key, urlCacheTTL, url.Values{
@ -82,8 +89,14 @@ func (d *Driver) URL(ctx context.Context, key string) *url.URL {
// If URL request fails, fallback is to fetch the file. So ignore the error here
return nil
}
d.PresignedCache.Set(key, u)
return u
psu := PresignedURL{
URL: u,
Expiry: time.Now().Add(urlCacheTTL), // link expires in 24h time
}
d.PresignedCache.Set(key, psu)
return &psu
}
func AutoConfig() (*Driver, error) {
@ -151,7 +164,7 @@ func NewS3Storage() (*Driver, error) {
}
// ttl should be lower than the expiry used by S3 to avoid serving invalid URLs
presignedCache := ttl.New[string, *url.URL](0, 1000, urlCacheTTL-urlCacheExpiryFrequency)
presignedCache := ttl.New[string, PresignedURL](0, 1000, urlCacheTTL-urlCacheExpiryFrequency)
presignedCache.Start(urlCacheExpiryFrequency)
return &Driver{