[bugfix] Set appropriate cache-control when using presigned s3 links (#1480)

Authored by tobi on 2023-02-12 14:42:28 +01:00, committed by GitHub
parent 40bc03e717
commit c223c7598d
2 changed files with 9 additions and 1 deletion


@@ -88,7 +88,12 @@ func (m *Module) ServeFile(c *gin.Context) {
	}

	if content.URL != nil {
		// This is a non-local S3 file we're proxying to.
		// This is a non-local S3 file we're redirecting to.
		// Rewrite the cache control header to reflect the
		// TTL of the generated signed link, instead of the
		// default very long cache.
		const cacheControl = "private,max-age=86400" // 24h
		c.Header("Cache-Control", cacheControl)
		c.Redirect(http.StatusFound, content.URL.String())
		return
	}
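The point of the hunk above is that the cache lifetime is capped at the same 24h TTL used when signing the S3 link, so no client or proxy keeps the redirect around after the signature has expired. Below is a minimal standalone sketch of that pattern in a Gin handler; the presignURL helper, the fixed 24h TTL, and the route are illustrative assumptions, not GoToSocial's actual storage API.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
)

// presignTTL is assumed to match the expiry used when generating
// the signed S3 link (24h, as in the hunk above).
const presignTTL = 24 * time.Hour

// presignURL is a hypothetical helper standing in for whatever
// storage backend produces the signed link.
func presignURL(path string) string {
	return "https://example-bucket.s3.example.org/" + path + "?X-Amz-Expires=86400"
}

func serveFile(c *gin.Context) {
	signed := presignURL(c.Param("path"))

	// Cap the cache lifetime at the signature's TTL so clients and
	// proxies never cache a redirect that points at an expired link.
	c.Header("Cache-Control", fmt.Sprintf("private,max-age=%d", int(presignTTL.Seconds())))
	c.Redirect(http.StatusFound, signed)
}

func main() {
	r := gin.Default()
	r.GET("/fileserver/*path", serveFile)
	_ = r.Run(":8080") // hypothetical listen address
}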


@@ -232,10 +232,13 @@ func (p *processor) getEmojiContent(ctx context.Context, fileName string, owning
}

func (p *processor) retrieveFromStorage(ctx context.Context, storagePath string, content *apimodel.Content) (*apimodel.Content, gtserror.WithCode) {
	// If running on S3 storage with proxying disabled then
	// just fetch a pre-signed URL instead of serving the content.
	if url := p.storage.URL(ctx, storagePath); url != nil {
		content.URL = url
		return content, nil
	}

	reader, err := p.storage.GetStream(ctx, storagePath)
	if err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error retrieving from storage: %s", err))
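In the branch above, when S3 proxying is disabled the storage layer hands back a time-limited signed link instead of streaming the bytes, and the fileserver hunk earlier then caps Cache-Control to that link's lifetime. Purely as an illustration of where such a 24h signed URL might come from, here is a sketch using the minio-go client; the endpoint, credentials, bucket, and object key are placeholders, and the choice of minio-go is an assumption for the example rather than a description of GoToSocial's actual wiring.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Endpoint and credentials are placeholders for the sketch.
	client, err := minio.New("s3.example.org", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Sign a GET URL that expires after 24h; this is the TTL the
	// Cache-Control header in the first hunk is meant to stay within.
	signed, err := client.PresignedGetObject(
		context.Background(),
		"media-bucket",                    // placeholder bucket
		"attachments/original/example.jpg", // placeholder object key
		24*time.Hour,
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(signed.String())
}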