Mirror of https://git.asonix.dog/asonix/pict-rs.git (synced 2024-05-28 16:38:08 +00:00)
Compare commits: v0.5.6-thr...main (119 commits)
Commits (SHA1):

1c4e343d9d
d03cc63d2b
260f9a158a
a7c78cd54e
d7dc2e506d
e48f60a6c6
9d01aeb82c
bddfb3c9d0
7ae3c0c776
983e9ce151
9302062b26
33e72266f5
39da69b1aa
64b8635059
d45e3fa386
bfd4fd4689
89f3c447a8
46cfbf99a5
58529a2eb2
700aeb90e0
ff39c30cc8
9561c578dc
dc7bdf7eeb
33ba045ee1
f082e48ed8
97159e0030
6d40fbee47
c4e99ef539
3428c31f16
4bb3bad703
4021458be8
eca3697410
d41fca5b6c
e3183c923f
d97cfe2a64
cef9a68307
5f9efb2e1a
dfb38c7144
a3bce4c2d3
c013f697fd
960f6487b7
cd6fb84cc4
056b96d0ad
74885f2932
d9d5ac5388
612e4017d5
b43a435e64
6e9239fa36
525deffd8d
fe5a5723be
3211ce459e
4b46f1ae2a
55bc4b64c1
84a882392a
5f850f8c86
bcc7773433
793d3c0c70
34b9919428
dacfc43c44
2ead3e00e2
df04ca9b12
9178e3ef9f
b7f508207f
d8d1ce1634
7021c50156
6f95c72070
286279cdf5
996fe0686b
5b1f4219fa
dff588aafd
4976fcb2eb
aa4582a3f8
e302df7e39
9fe586b9dd
40bb58d603
4897c90ed7
719626de07
feb8840761
8301d321de
3ecefcb64e
348f4ce0a3
25ef3861f1
6ee7c5c4a0
04dcc9a0c8
d13f7fe969
a6134999aa
16890eaa45
c17a8722c6
7c6112e631
277b47af46
029beef61a
b139190663
03bd3cbe2f
13fc0df31a
2074334131
eabd7ea228
ad51e6cd9f
0fd19a5682
ca13b7b30b
6a6c61058a
d73e683d48
3470a6caf0
16bf18bda4
227e9cc3a7
59b03d548d
f3e455a1c3
00a08a8bc9
de356c1f12
3a7d5b7bfb
0ebee2a07c
c1e651c01a
c722cdd905
9db5cc82f8
4d2c642c79
5c551221b4
2517e3660c
fd5ce91c32
e814cd6c20
e7e13a41f0
.drone.yml (419 lines deleted)

@@ -1,419 +0,0 @@
kind: pipeline
type: docker
name: clippy

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: clippy
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - rustup component add clippy
  - cargo clippy --no-default-features -- -D warnings
  - cargo clippy --no-default-features --features io-uring -- -D warnings

trigger:
  event:
  - push
  - pull_request
  - tag

---

kind: pipeline
type: docker
name: tests

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: tests
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo test

trigger:
  event:
  - push
  - pull_request
  - tag

---

kind: pipeline
type: docker
name: check-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---

kind: pipeline
type: docker
name: build-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - cp target/$TARGET/release/pict-rs .
  - cp pict-rs pict-rs-linux-amd64

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/pictrs
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-amd64
    build_args:
    - REPO_ARCH=amd64

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - pict-rs-linux-amd64

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: check-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-arm64v8
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---

kind: pipeline
type: docker
name: build-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-arm64v8
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - cp target/$TARGET/release/pict-rs .
  - cp pict-rs pict-rs-linux-arm64v8

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/pictrs
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-arm64v8
    build_args:
    - REPO_ARCH=arm64v8

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - pict-rs-linux-arm64v8

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: check-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-arm32v7
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---

kind: pipeline
type: docker
name: build-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-arm32v7
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - cp target/$TARGET/release/pict-rs .
  - cp pict-rs pict-rs-linux-arm32v7

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/pictrs
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-arm32v7
    build_args:
    - REPO_ARCH=arm32v7

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - pict-rs-linux-arm32v7

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: manifest

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: manifest
  image: plugins/manifest:1
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    dump: true
    auto_tag: true
    ignore_missing: true
    spec: docker/drone/manifest.tmpl

depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: publish-crate

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: publish
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  environment:
    CRATES_IO_TOKEN:
      from_secret: crates_io_token
  commands:
  - cargo publish --token $CRATES_IO_TOKEN

depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7

trigger:
  event:
  - tag
Prepare Rust action (68 lines deleted)

@@ -1,68 +0,0 @@
name: Prepare Rust
description: Prepare an environment to build or test rust packages

inputs:
  targets:
    description: "JSON Array of targets to add to rustup"
    required: false
    default: '[]'
  rust-version:
    description: "Version of rust to install"
    required: true
    default: '1.75.0'

runs:
  using: "composite"
  steps:
    -
      name: Install zig
      uses: https://github.com/goto-bus-stop/setup-zig@v2
      with:
        version: 0.11.0
    -
      name: Fetch rust cache
      id: cache
      uses: https://github.com/actions/cache/restore@v4
      with:
        path: |
          ~/.cargo/bin
          ~/.cargo/registry/index
          ~/.cargo/registry/cache
          ~/.cargo/git/db
          ~/.rustup
          target/
        key: rust-${{ inputs.rust-version }}-${{ join(fromJSON(inputs.targets), '-') }}-${{ hashFiles('Cargo.toml') }}-${{ hashFiles('Cargo.lock') }}
    -
      name: Install rustup and rust
      if: steps.cache.outputs.cache-hit != 'true'
      uses: https://github.com/dtolnay/rust-toolchain@${{ inputs.rust-version }}
      with:
        components: clippy
        targets: ${{ join(fromJSON(inputs.targets), ',') }}
    -
      name: Install cargo-zigbuild
      if: steps.cache.outputs.cache-hit != 'true'
      run: cargo install cargo-zigbuild
    -
      name: Fetch dependencies
      if: steps.cache.outputs.cache-hit != 'true'
      run: cargo fetch
    -
      name: Export path
      if: steps.cache.outputs.cache-hit == 'true'
      run: |
        echo "${HOME}/.cargo/bin" >> "${GITHUB_PATH}"
        echo "${HOME}/.rustup/bin" >> "${GITHUB_PATH}"
    -
      name: Populate rust cache
      if: steps.cache.outputs.cache-hit != 'true'
      uses: https://github.com/actions/cache/save@v4
      with:
        path: |
          ~/.cargo/bin
          ~/.cargo/registry/index
          ~/.cargo/registry/cache
          ~/.cargo/git/db
          ~/.rustup
          target/
        key: ${{ steps.cache.outputs.cache-primary-key }}
@@ -1,5 +1,7 @@
 on:
   push:
     branches:
       - '*'
+  pull_request:
+    branches:
+      - main

@@ -8,14 +10,14 @@ jobs:
   clippy:
     runs-on: docker
     container:
-      image: docker.io/node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
     steps:
       -
         name: Checkout pict-rs
         uses: https://github.com/actions/checkout@v4
       -
-        name: Setup Rust
-        uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
       -
         name: Clippy
         run: |

@@ -25,20 +27,21 @@ jobs:
   tests:
     runs-on: docker
     container:
-      image: docker.io/node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
     steps:
       -
         name: Checkout pict-rs
         uses: https://github.com/actions/checkout@v4
       -
-        name: Setup Rust
-        uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
       -
         name: Test
         run: cargo test

   check:
     strategy:
       fail-fast: false
       matrix:
         target:
         - x86_64-unknown-linux-musl

@@ -46,19 +49,14 @@
         - aarch64-unknown-linux-musl
     runs-on: docker
     container:
-      image: docker.io/node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
     steps:
       -
         name: Checkout pict-rs
         uses: https://github.com/actions/checkout@v4
       -
-        name: Setup Rust
-        uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
-        with:
-          targets: |
-            ["x86_64-unknown-linux-musl",
-             "armv7-unknown-linux-musleabihf",
-             "aarch64-unknown-linux-musl"]
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
       -
         name: Debug builds
         run: cargo zigbuild --target ${{ matrix.target }}
@@ -5,49 +5,49 @@ on:
 env:
   REGISTRY_IMAGE: asonix/pictrs
   DOCKER_HOST: tcp://docker-in-docker:2375

 jobs:
-  # clippy:
-  #   runs-on: docker
-  #   container:
-  #     image: docker.io/node:20-bookworm
-  #   steps:
-  #     -
-  #       name: Checkout pict-rs
-  #       uses: https://github.com/actions/checkout@v4
-  #     -
-  #       name: Setup Rust
-  #       uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
-  #     -
-  #       name: Clippy
-  #       run: |
-  #         cargo clippy --no-default-features -- -D warnings
-  #         cargo clippy --no-default-features --features io-uring -- -D warnings
+  clippy:
+    runs-on: base-image
+    container:
+      image: docker.io/asonix/actions-base-image:0.1
+    steps:
+      -
+        name: Checkout pict-rs
+        uses: https://github.com/actions/checkout@v4
+      -
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
+      -
+        name: Clippy
+        run: |
+          cargo clippy --no-default-features -- -D warnings
+          cargo clippy --no-default-features --features io-uring -- -D warnings

-  # tests:
-  #   runs-on: docker
-  #   container:
-  #     image: docker.io/node:20-bookworm
-  #   steps:
-  #     -
-  #       name: Checkout pict-rs
-  #       uses: https://github.com/actions/checkout@v4
-  #     -
-  #       name: Setup Rust
-  #       uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
-  #     -
-  #       name: Test
-  #       run: cargo test

-  build:
-    # needs:
-    #   - clippy
-    #   - tests
+  tests:
     runs-on: docker
     container:
-      image: docker.io/node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
+    steps:
+      -
+        name: Checkout pict-rs
+        uses: https://github.com/actions/checkout@v4
+      -
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
+      -
+        name: Test
+        run: cargo test

+  build:
+    needs:
+      - clippy
+      - tests
+    runs-on: docker
+    container:
+      image: docker.io/asonix/actions-base-image:0.1
     strategy:
       fail-fast: false
       matrix:
         info:
         - target: x86_64-unknown-linux-musl

@@ -64,19 +64,8 @@
         name: Checkout pict-rs
         uses: https://github.com/actions/checkout@v4
       -
-        name: Install Docker
-        run: |
-          apt-get update
-          apt-get install -y ca-certificates curl jq
-          install -m 0755 -d /etc/apt/keyrings
-          curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
-          chmod a+r /etc/apt/keyrings/docker.asc
-          echo \
-            "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
-            $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
-            tee /etc/apt/sources.list.d/docker.list > /dev/null
-          apt-get update
-          apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
       -
         name: Prepare Platform
         run: |

@@ -89,6 +78,14 @@
         uses: https://github.com/docker/metadata-action@v5
         with:
           images: ${{ env.REGISTRY_IMAGE }}
+          flavor: |
+            latest=auto
+            suffix=-${{ matrix.info.artifact }}
+          tags: |
+            type=raw,value=latest,enable={{ is_default_branch }}
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
       -
         name: Set up QEMU
         uses: https://github.com/docker/setup-qemu-action@v3

@@ -101,14 +98,6 @@
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       -
-        name: Setup Rust
-        uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
-        with:
-          targets: |
-            ["x86_64-unknown-linux-musl",
-             "armv7-unknown-linux-musleabihf",
-             "aarch64-unknown-linux-musl"]
      -
         name: Compile pict-rs
         run: cargo zigbuild --target ${{ matrix.info.target }} --release

@@ -132,10 +121,10 @@
         uses: docker/build-push-action@v5
         with:
           context: ./docker/forgejo
           # file: ./docker/forgejo/Dockerfile
           platforms: ${{ matrix.info.platform }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
+          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
       -
         name: Export digest
         run: |

@@ -156,23 +145,9 @@
   publish-docker:
     runs-on: docker
     container:
-      image: docker.io/node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
     needs: [build]
     steps:
-      -
-        name: Install Docker
-        run: |
-          apt-get update
-          apt-get install -y ca-certificates curl jq
-          install -m 0755 -d /etc/apt/keyrings
-          curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
-          chmod a+r /etc/apt/keyrings/docker.asc
-          echo \
-            "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
-            $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
-            tee /etc/apt/sources.list.d/docker.list > /dev/null
-          apt-get update
-          apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
       -
         name: Download digests
         uses: https://github.com/actions/download-artifact@v3

@@ -196,7 +171,10 @@
         uses: https://github.com/docker/metadata-action@v5
         with:
           images: ${{ env.REGISTRY_IMAGE }}
+          flavor: |
+            latest=auto
           tags: |
             type=raw,value=latest,enable={{ is_default_branch }}
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
             type=semver,pattern={{major}}

@@ -206,8 +184,8 @@
         run: |
           tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
           images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
-          echo "Running 'docker buildx imagetools create \"${tags}\" \"${images}\"'"
-          docker buildx imagetools create ${tags} ${images}
+          echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
+          docker buildx imagetools create ${tags[@]} ${images[@]}
         shell: bash
       -
         name: Inspect Image

@@ -218,7 +196,7 @@
     needs: [build]
     runs-on: docker
     container:
-      image: node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
     steps:
       - uses: https://github.com/actions/download-artifact@v3
         with:

@@ -230,20 +208,19 @@
           direction: upload
           token: ${{ secrets.GITHUB_TOKEN }}
           release-dir: artifacts/
           prerelease: true

   publish-crate:
     needs: [build]
     runs-on: docker
     container:
-      image: node:20-bookworm
+      image: docker.io/asonix/actions-base-image:0.1
     steps:
       -
         name: Checkout pict-rs
         uses: https://github.com/actions/checkout@v4
       -
-        name: Setup Rust
-        uses: https://git.asonix.dog/asonix/pict-rs/.forgejo/actions/prepare-rust@main
+        name: Cargo Cache
+        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
       -
         name: Publish Crate
         run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}
Cargo.lock (generated, 1658 lines changed)
File diff suppressed because it is too large
Cargo.toml (52 lines changed)

@@ -1,7 +1,7 @@
 [package]
 name = "pict-rs"
 description = "A simple image hosting service"
-version = "0.5.6"
+version = "0.5.14"
 authors = ["asonix <asonix@asonix.dog>"]
 license = "AGPL-3.0"
 readme = "README.md"

@@ -15,58 +15,61 @@ strip = true
 [features]
 default = []
 io-uring = ["dep:tokio-uring", "sled/io_uring", "actix-web/experimental-io-uring"]
+poll-timer-warnings = []
+random-errors = ["dep:nanorand"]

 [dependencies]
-actix-form-data = "0.7.0-beta.6"
-actix-web = { version = "4.0.0", default-features = false, features = ["rustls-0_22"] }
+actix-form-data = "0.7.0-beta.7"
+actix-web = { version = "4.6.0", default-features = false, features = ["rustls-0_23"] }
 async-trait = "0.1.51"
 barrel = { version = "0.7.0", features = ["pg"] }
-base64 = "0.21.0"
+base64 = "0.22.0"
+bb8 = "0.8.3"
+blurhash-update = "0.1.0"
 clap = { version = "4.0.2", features = ["derive"] }
 color-eyre = "0.6"
 config = { version = "0.14.0", default-features = false, features = ["json", "ron", "toml", "yaml"] }
 console-subscriber = "0.2"
 dashmap = "5.1.0"
-deadpool = { version = "0.9.5", features = ["rt_tokio_1"] }
 diesel = { version = "2.1.1", features = ["postgres_backend", "serde_json", "time", "uuid"] }
-diesel-async = { version = "0.4.1", features = ["postgres", "deadpool"] }
+diesel-async = { version = "0.4.1", features = ["bb8", "postgres"] }
 diesel-derive-enum = { version = "2.1.0", features = ["postgres"] }
-flume = "0.11.0"
 futures-core = "0.3"
 hex = "0.4.3"
 md-5 = "0.10.5"
 metrics = "0.22.0"
-metrics-exporter-prometheus = { version = "0.13.0", default-features = false, features = ["http-listener"] }
+metrics-exporter-prometheus = { version = "0.14.0", default-features = false, features = ["http-listener"] }
 mime = "0.3.1"
-opentelemetry_sdk = { version = "0.21", features = ["rt-tokio"] }
-opentelemetry = { version = "0.21" }
-opentelemetry-otlp = "0.14"
+nanorand = { version = "0.7", optional = true }
+opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
+opentelemetry = "0.22"
+opentelemetry-otlp = "0.15"
 pin-project-lite = "0.2.7"
 refinery = { version = "0.8.10", features = ["tokio-postgres", "postgres"] }
-reqwest = { version = "0.11.18", default-features = false, features = ["json", "rustls-tls", "stream"] }
-reqwest-middleware = "0.2.2"
-reqwest-tracing = { version = "0.4.5" }
-# pinned to tokio-postgres-rustls
-rustls = "0.22.0"
+reqwest = { version = "0.12.0", default-features = false, features = ["json", "rustls-tls", "stream"] }
+reqwest-middleware = "0.3.0"
+reqwest-tracing = "0.5.0"
+# pinned to tokio-postgres-generic-rustls
+# pinned to actix-web
+rustls = "0.23"
 # pinned to rustls
-rustls-channel-resolver = "0.2.0"
+rustls-channel-resolver = "0.3.0"
 # pinned to rustls
 rustls-pemfile = "2.0.0"
 rusty-s3 = "0.5.0"
 serde = { version = "1.0", features = ["derive"] }
-serde-tuple-vec-map = "1.0.1"
 serde_json = "1.0"
+serde-tuple-vec-map = "1.0.1"
 serde_urlencoded = "0.7.1"
 sha2 = "0.10.0"
 sled = { version = "0.34.7" }
 storage-path-generator = "0.1.0"
 streem = "0.2.0"
 subtle = { version = "2.5.0", default-features = false }
 thiserror = "1.0"
 time = { version = "0.3.0", features = ["serde", "serde-well-known"] }
 tokio = { version = "1", features = ["full", "tracing"] }
 tokio-postgres = { version = "0.7.10", features = ["with-uuid-1", "with-time-0_3", "with-serde_json-1"] }
-tokio-postgres-rustls = "0.11.0"
+tokio-postgres-generic-rustls = { version = "0.1.0", default-features = false, features = ["aws-lc-rs"] }
 tokio-uring = { version = "0.4", optional = true, features = ["bytes"] }
 tokio-util = { version = "0.7", default-features = false, features = [
     "codec",

@@ -76,7 +79,7 @@ toml = "0.8.0"
 tracing = "0.1.15"
 tracing-error = "0.2.0"
 tracing-log = "0.2.0"
-tracing-opentelemetry = "0.22"
+tracing-opentelemetry = "0.23"
 tracing-subscriber = { version = "0.3.0", features = [
     "ansi",
     "env-filter",

@@ -91,9 +94,6 @@ uuid = { version = "1", features = ["serde", "std", "v4", "v7"] }
 webpki-roots = "0.26.0"

 [dependencies.tracing-actix-web]
-version = "0.7.8"
+version = "0.7.10"
 default-features = false
-features = ["emit_event_on_error", "opentelemetry_0_21"]
-
-[dev-dependencies]
-tokio-uring = { version = "0.4", features = ["bytes"] }
+features = ["emit_event_on_error", "opentelemetry_0_22"]
README.md (43 lines changed)

@@ -220,8 +220,7 @@ More information is available in the [Ubuntu and Debian docs](./docs/ubuntu-and-
 ##### Compile from Source

 pict-rs can be compiled from source using a recent version of the rust compiler. I do development
-and produce releases on 1.72. pict-rs also requires the `protoc` protobuf compiler to be present at
-build-time in order to enable use of [`tokio-console`](https://github.com/tokio-rs/console).
+and produce releases on 1.75

 Like the Binary Download option, `imagemagick`, `ffmpeg`, and `exiftool` must be installed for
 pict-rs to run properly.

@@ -254,9 +253,27 @@ Example:
 ### API
 pict-rs offers the following endpoints:
-- `POST /image` for uploading an image. Uploaded content must be valid multipart/form-data with an
+- `POST /image?{args}` for uploading an image. Uploaded content must be valid multipart/form-data with an
   image array located within the `images[]` key
+
+  The {args} query serves multiple purposes for image uploads. The first is to provide
+  request-level validations for the uploaded media. Available keys are as follows:
+  - max_width: maximum width, in pixels, allowed for the uploaded media
+  - max_height: maximum height, in pixels, allowed for the uploaded media
+  - max_area: maximum area, in pixels, allowed for the uploaded media
+  - max_frame_count: maximum number of frames permitted for animations and videos
+  - max_file_size: maximum size, in megabytes, allowed
+  - allow_image: whether to permit still images in the upload
+  - allow_animation: whether to permit animations in the upload
+  - allow_video: whether to permit video in the upload
+
+  These validations apply in addition to the validations specified in the pict-rs configuration,
+  so uploaded media will be rejected if any of the validations fail.
+
+  The second purpose for the {args} query is to provide preprocess steps for the uploaded image.
+  The format is the same as in the process.{ext} endpoint. The images uploaded with these steps
+  provided will be processed before saving.
+
   This endpoint returns the following JSON structure on success with a 201 Created status
   ```json
   {

@@ -295,7 +312,9 @@ pict-rs offers the following endpoints:
     "msg": "ok"
   }
   ```
-- `POST /image/backgrounded` Upload an image, like the `/image` endpoint, but don't wait to validate and process it.
+- `POST /image/backgrounded?{args}` Upload an image, like the `/image` endpoint, but don't wait to validate and process it.
+  The {args} query is the same format as the inline image upload endpoint.
+
   This endpoint returns the following JSON structure on success with a 202 Accepted status
   ```json
   {

@@ -380,6 +399,22 @@ pict-rs offers the following endpoints:
   These proxied images are removed from pict-rs some time after their last access. This time
   is configurable with `PICTRS__MEDIA__RETENTION__PROXY`. See [./pict-rs.toml](./pict-rs.toml)
   for more information.
+- `GET /image/blurhash?alias={alias}` Create and store a blurhash for the provided alias
+
+  Available source arguments are
+  - `?alias={alias}` Serve a blurhash for an image identified by the provided alias
+  - `?proxy={url}` Serve a blurhash for the media hosted at `url`
+    This will download and store the original version of the specified media, as well as its
+    blurhash. Retention for proxied media is configurable with `PICTRS__MEDIA__RETENTION__PROXY`.
+    See [./pict-rs.toml](./pict-rs.toml) for more information.
+
+  The returned JSON is structured like so:
+  ```json
+  {
+    "msg": "ok",
+    "blurhash": "LGF5]+Yk^6#M@-5c,1J5@[or[Q6."
+  }
+  ```
 - `GET /image/process.{ext}?src={alias}&...` Get a file with transformations applied.
   Available source arguments are
   - `?src={alias}` This behavior is the same as in previous releases
@@ -4,6 +4,7 @@ read_only = false
 danger_dummy_mode = false
 max_file_count = 1
 temporary_directory = "/tmp"
+cleanup_temporary_directory = true

 [client]
 timeout = 30

@@ -15,6 +16,7 @@ concurrency = 32
 format = "normal"
 targets = "info"
 log_spans = false
+no_ansi = false

 [tracing.console]
 buffer_capacity = 102400

@@ -46,7 +48,7 @@ proxy = "7d"
 [media.magick]
 max_width = 10000
 max_height = 10000
-max_area = 40000000
+max_area = 20000
 memory = 256
 map = 512
 disk = 1024
docker/drone/Dockerfile (17 lines deleted)

@@ -1,17 +0,0 @@
ARG REPO_ARCH

FROM asonix/rust-runner:latest-linux-$REPO_ARCH

USER root
RUN \
  apk add exiftool imagemagick imagemagick-heic ffmpeg && \
  chown -R app:app /mnt

COPY pict-rs /usr/local/bin/pict-rs

USER app
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/pict-rs", "run"]
docker/drone/manifest.tmpl (25 lines deleted)

@@ -1,25 +0,0 @@
image: asonix/pictrs:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
  - {{this}}
{{/each}}
{{/if}}
manifests:
  -
    image: asonix/pictrs:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
    platform:
      architecture: amd64
      os: linux
  -
    image: asonix/pictrs:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
    platform:
      architecture: arm64
      os: linux
      variant: v8
  -
    image: asonix/pictrs:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
    platform:
      architecture: arm
      os: linux
      variant: v7
@@ -29,14 +29,14 @@ services:
 #      - PICTRS_PROXY_UPSTREAM=http://pictrs:8080
 #      - PICTRS_PROXY_OPENTELEMETRY_URL=http://jaeger:4317

-  minio:
-    image: quay.io/minio/minio
-    command: server /mnt --console-address ":9001"
-    ports:
-      - "9000:9000"
-      - "9001:9001"
-    volumes:
-      - ./storage/minio:/mnt
+#  minio:
+#    image: quay.io/minio/minio
+#    command: server /mnt --console-address ":9001"
+#    ports:
+#      - "9000:9000"
+#      - "9001:9001"
+#    volumes:
+#      - ./storage/minio:/mnt

   garage:
     image: dxflrs/garage:v0.9.0
flake.lock (12 lines changed)

@@ -5,11 +5,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1705309234,
-      "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
+      "lastModified": 1710146030,
+      "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
+      "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
       "type": "github"
     },
     "original": {

@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1705133751,
-        "narHash": "sha256-rCIsyE80jgiOU78gCWN3A0wE0tR2GI5nH6MlS+HaaSQ=",
+        "lastModified": 1713537308,
+        "narHash": "sha256-XtTSSIB2DA6tOv+l0FhvfDMiyCmhoRbNB+0SeInZkbk=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "9b19f5e77dd906cb52dade0b7bd280339d2a1f3d",
+        "rev": "5c24cf2f0a12ad855f444c30b2421d044120c66f",
         "type": "github"
       },
       "original": {
flake.nix (22 lines changed)

@@ -15,13 +15,29 @@
     in
     {
       packages = rec {
+        imagemagick7_pict-rs = pkgs.callPackage ./nix/pkgs/imagemagick_pict-rs {};
+        ffmpeg6_pict-rs = pkgs.callPackage ./nix/pkgs/ffmpeg_pict-rs {};
+
         pict-rs = pkgs.callPackage ./pict-rs.nix {
           inherit (pkgs.darwin.apple_sdk.frameworks) Security;
+          inherit imagemagick7_pict-rs ffmpeg6_pict-rs;
         };

         default = pict-rs;
       };

+      docker = pkgs.dockerTools.buildLayeredImage {
+        name = "pict-rs";
+        tag = "latest";
+
+        contents = [ pkgs.tini self.packages.${system}.pict-rs pkgs.bash ];
+
+        config = {
+          Entrypoint = [ "/bin/tini" "--" "/bin/pict-rs" ];
+          Cmd = [ "run" ];
+        };
+      };
+
       apps = rec {
         dev = flake-utils.lib.mkApp { drv = self.packages.${system}.pict-rs; };
         default = dev;

@@ -33,11 +49,13 @@
           cargo-outdated
           certstrap
           clippy
+          curl
           diesel-cli
           exiftool
-          ffmpeg_6-full
           garage
-          imagemagick
+          self.packages.${system}.imagemagick7_pict-rs
+          self.packages.${system}.ffmpeg6_pict-rs
           jq
           minio-client
           rust-analyzer
           rustc
nix/pkgs/ffmpeg_pict-rs/default.nix (new file, 5 lines)

@@ -0,0 +1,5 @@
{ ffmpeg_6-headless }:

ffmpeg_6-headless.override {
  withWebp = true;
}
nix/pkgs/imagemagick_pict-rs/default.nix (new file, 23 lines)

@@ -0,0 +1,23 @@
{ imagemagick7 }:

imagemagick7.override {
  bzip2Support = true;
  zlibSupport = true;
  libX11Support = false;
  libXtSupport = false;
  fontconfigSupport = false;
  freetypeSupport = false;
  libjpegSupport = true;
  djvulibreSupport = false;
  lcms2Support = false;
  openexrSupport = false;
  libjxlSupport = true;
  libpngSupport = true;
  liblqr1Support = false;
  librsvgSupport = false;
  libtiffSupport = false;
  libxml2Support = false;
  openjpegSupport = true;
  libwebpSupport = true;
  libheifSupport = true;
}
@@ -1,6 +1,6 @@
 { exiftool
-, ffmpeg_6-full
-, imagemagick
+, ffmpeg6_pict-rs
+, imagemagick7_pict-rs
 , lib
 , makeWrapper
 , nixosTests

@@ -11,7 +11,7 @@
 rustPlatform.buildRustPackage {
   pname = "pict-rs";
-  version = "0.5.6";
+  version = "0.5.14";
   src = ./.;

   cargoLock = {

@@ -27,7 +27,7 @@ rustPlatform.buildRustPackage {
   postInstall = ''
     wrapProgram $out/bin/pict-rs \
-        --prefix PATH : "${lib.makeBinPath [ imagemagick ffmpeg_6-full exiftool ]}"
+        --prefix PATH : "${lib.makeBinPath [ imagemagick7_pict-rs ffmpeg6_pict-rs exiftool ]}"
   '';

   passthru.tests = { inherit (nixosTests) pict-rs; };
pict-rs.toml (10 lines changed)

@@ -37,6 +37,11 @@ max_file_count = 1
 # default: The system's advertised temporary directory ("/tmp" on most linuxes)
 temporary_directory = "/tmp"

+## Optional: whether to delete the contents of $temporary_directory/pict-rs on launch
+# environment variable: PICTRS__SERVER__CLEANUP_TEMPORARY_DIRECTORY
+# default: true
+cleanup_temporary_directory = true
+
 ## Optional: path to server certificate to enable TLS
 # environment variable: PICTRS__SERVER__CERTIFICATE
 # default: empty

@@ -93,6 +98,11 @@ targets = 'info'
 # default: false
 log_spans = false

+## Optional: whether to disable colorized log output
+# environment variable: PICTRS__TRACING__LOGGING__NO_ANSI
+# default: false
+no_ansi = false
+
 ## Console configuration
 [tracing.console]
releases/0.4.8.md (new file, 11 lines)

@@ -0,0 +1,11 @@
# pict-rs 0.4.8

## Overview

This is a maintenance release. There are no meaningful changes outside of dependency upgrades and
use of the new CI system (migrated from Drone to Forgejo Actions).

## Upgrade Notes

There are no significant changes from 0.4.7, so upgrading should be as simple as pulling a new
version of pict-rs.
releases/0.5.10.md (new file, 31 lines)

@@ -0,0 +1,31 @@
# pict-rs 0.5.10

## Overview

pict-rs 0.5.10 is a small release with changes to how pict-rs handles temporary files.

### Changes

- [Temporary File Cleanup](#temporary-file-cleanup)


## Upgrade Notes

There are no significant changes from 0.5.9. Upgrading should be as simple as pulling the new
version.


## Descriptions

### Temporary File Cleanup

pict-rs now nests its temporary files inside a `pict-rs` toplevel temporary folder. This is useful
because pict-rs 0.5.10 introduces a new behavior: it will completely delete that folder and its
contents on launch. If you are running multiple copies of pict-rs on the same host and they share
your temporary folder, this might cause problems. In that scenario, this behavior can be disabled by
setting `PICTRS__SERVER__CLEANUP_TEMPORARY_DIRECTORY=false` or passing
`--no-cleanup-temporary-directory` on the commandline.

This new behavior has been introduced in order to better clean up after crashes. If pict-rs is
killed while processing media, maybe due to an OOM, it will leave files behind in the temporary
directory. This can cause the temporary directory to grow, leading to memory or disk problems.
releases/0.5.11.md (new file, 82 lines)

@@ -0,0 +1,82 @@
# pict-rs 0.5.11

pict-rs is a simple image hosting microservice, designed to handle storing and retrieving images,
animations, and videos, as well as providing basic image processing functionality.

## Overview

pict-rs 0.5.11 introduces new per-upload media validations, and new per-upload media processing.
These features will enable applications to be more precise about their media requirements, such as
allowing different media types and sizes for different endpoints, or pre-processing certain media to
optimize for size.

### Features

- [Upload Validations](#upload-validations)
- [Upload Processing](#upload-processing)


### Changes

- [Backgrounded Variants](#backgrounded-variants)


## Upgrade Notes

For postgres-based installations, a small migration will be run when pict-rs 0.5.11 first launches
to create a new notifications table. No manual intervention is required. Upgrading should be as
simple as pulling a new version of pict-rs.


## Descriptions

### Upload Validations

When ingesting media using `POST /image`, `POST /image/backgrounded`, `POST /internal/import`, or
`GET /image/download`, validations can now be applied per-upload. These can be provided in the
request query. The following query parameters are supported:

- max_width: maximum width, in pixels, allowed for the uploaded media
- max_height: maximum height, in pixels, allowed for the uploaded media
- max_area: maximum area, in pixels, allowed for the uploaded media
- max_frame_count: maximum number of frames permitted for animations and videos
- max_file_size: maximum size, in megabytes, allowed
- allow_image: whether to permit still images in the upload
- allow_animation: whether to permit animations in the upload
- allow_video: whether to permit video in the upload

An example request could look like this: `POST /image/backgrounded?max_area=3200&allow_video=false`

Validations are performed in addition to the validations specified in the pict-rs configuration, so
if uploaded media violates any of the validations, it will fail to ingest.


### Upload Processing

In a similar vein to the upload validations, preprocessing steps can now be applied on a per-upload
basis. These are also provided as query parameters, and will be applied _instead of_ the configured
preprocess steps. The preprocess query parameters are provided and processed the same way as in the
`GET /image/process.{ext}` endpoint.

An example request could be `POST /image/backgrounded?blur=2.5&resize=300`, which would blur the
uploaded image and fit it inside a 300x300 box before saving it.
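
For illustration, here is what such an upload could look like from a Rust client using reqwest. This is a sketch built on the endpoint shapes documented above: the host, file name, and the combination of validation and preprocess arguments in one query are assumptions, and it requires reqwest's `multipart` feature.

```rust
use reqwest::multipart::{Form, Part};

// Upload one image in the multipart `images[]` key, with per-upload
// validations (max_area, allow_video) and preprocess steps (blur, resize)
// supplied in the query string.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let bytes = std::fs::read("cat.png")?; // placeholder file
    let form = Form::new().part("images[]", Part::bytes(bytes).file_name("cat.png"));

    let resp = reqwest::Client::new()
        .post("http://localhost:8080/image/backgrounded?max_area=3200&allow_video=false&blur=2.5&resize=300")
        .multipart(form)
        .send()
        .await?;

    // Expect 202 Accepted plus a JSON body, per the endpoint's documented behavior.
    println!("{} {}", resp.status(), resp.text().await?);
    Ok(())
}
```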

### Backgrounded Variants

When serving images from the /process.{ext} endpoint, pict-rs will now queue the processing to
happen via the job queue, rather than processing media inline. It will still wait up to 30 seconds
for the processing to be complete, and return the processed image the same way it always has.

If processing exceeds 30 seconds, pict-rs will return a timeout error, but the processing will
continue in the background. The same variant can be requested again, and it will wait for the same
background process to complete, rather than trying to process the variant a second time.

pict-rs has historically had a method of reducing variant processing to prevent two requests for the
same variant from doing the same work, but this was only effective in environments that only ran 1
copy of pict-rs. In environments that had multiple replicas, each one could end up processing the
same variant if it was requested more than once at a time. This has been solved by using postgres as
a notification system to enable globally unique processing for a given variant.

In sled-based configurations there shouldn't be a noticeable difference, aside from the 30 second
timeout on variant endpoints.
releases/0.5.12.md (new file, 46 lines)

@@ -0,0 +1,46 @@
# pict-rs 0.5.12

pict-rs is a simple image hosting microservice, designed to handle storing and retrieving images,
animations, and videos, as well as providing basic image processing functionality.

## Overview

pict-rs 0.5.12 is a bugfix release to remove two issues that, when compounded, would cause pict-rs
to fail to process media.

### Fixes

- [Panic Handling in Background Jobs](#panic-handling-in-background-jobs)
- [BytesStream Divide-by-Zero](#bytes-stream-divide-by-zero)


## Upgrade Notes

There are no significant differences from 0.5.11. Upgrading should be as simple as pulling a new
version of pict-rs.


## Descriptions

### Panic Handling in Background Jobs

pict-rs makes an effort to never use explicitly panicking code, but since there's no static way to
guarantee that a given function won't panic, pict-rs needs to be able to deal with that. pict-rs
0.5.12 now wraps invocations of jobs in spawned tasks, which can catch and report panics that happen
in background jobs.

Previously, a panic in a background job would bring down that thread's job processor, which resulted
in future jobs never being processed. Now job processing should properly continue after panics
occur.
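
The mechanism leans on a standard tokio property: a panic inside a spawned task surfaces to the caller as a `JoinError` instead of unwinding through the processing loop. A self-contained sketch; the job function here is a stand-in, not pict-rs's real job runner:

```rust
use tokio::task;

// A stand-in job; pict-rs's real jobs come from its repo-backed queue.
async fn run_job(id: u64) -> Result<(), String> {
    if id % 2 == 0 {
        panic!("simulated panic in job {id}");
    }
    Ok(())
}

#[tokio::main]
async fn main() {
    for id in 0..4u64 {
        // Spawning isolates the panic: it is reported here as a JoinError
        // instead of taking down the processing loop.
        match task::spawn(run_job(id)).await {
            Ok(Ok(())) => println!("job {id} finished"),
            Ok(Err(e)) => println!("job {id} failed: {e}"),
            Err(join_err) if join_err.is_panic() => {
                println!("job {id} panicked, continuing: {join_err}");
            }
            Err(join_err) => println!("job {id} cancelled: {join_err}"),
        }
    }
}
```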

### BytesStream Divide-by-Zero

Part of my rework of BytesStream recently included adding debug logs around how many bytes chunks
were in a given stream, and their average length. Unfortunately, if there were no bytes in the
stream, this would cause the "average chunk length" calculation to divide by 0. In previous versions
of pict-rs, this would generally result in a failed request for processed media, but in pict-rs
0.5.11 this would end up killing the background jobs processor.

This specific panic has been fixed by ensuring we divide by the number of chunks or 1, whichever is
greater.
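
In code terms the guard is a one-liner; a sketch with a hypothetical function name:

```rust
// Divide by the number of chunks or 1, whichever is greater, so an empty
// stream cannot trigger a divide-by-zero panic.
fn average_chunk_length(total_bytes: usize, chunk_count: usize) -> usize {
    total_bytes / chunk_count.max(1)
}

fn main() {
    assert_eq!(average_chunk_length(0, 0), 0); // empty stream: no panic
    assert_eq!(average_chunk_length(1024, 4), 256);
}
```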
releases/0.5.13.md (new file, 62 lines)

@@ -0,0 +1,62 @@
# pict-rs 0.5.13

pict-rs is a simple image hosting microservice, designed to handle storing and retrieving images,
animations, and videos, as well as providing basic image processing functionality.

## Overview

pict-rs 0.5.13 is a maintenance release aiming to enable better logging in some scenarios.

### Features

- [Colorless Logging](#colorless-logging)


### Changes

- [Remove Flume](#remove-flume)


## Upgrade Notes

There are no significant changes from 0.5.12. Upgrading should be as simple as pulling a new version
of pict-rs.


## Descriptions

### Colorless Logging

When opting to use the `json` logger, the tracing subscriber automatically disables colored output.
This didn't remove colors from errors, though, and pict-rs hasn't had a way to disable colors while
using other log formats. pict-rs 0.5.13 introduces a new configuration value to remove colored
output from all logs regardless of logging format.

With pict-rs.toml
```toml
[tracing.logging]
no_ansi = true
```

With environment variables
```bash
PICTRS__TRACING__LOGGING__NO_ANSI=true
```

With commandline flags
```bash
pict-rs --no-log-ansi run
```

Colors in logs can be useful, so I imagine this option won't be used much. There has been a request
for this functionality though, and it's little cost to maintain.


### Remove Flume

Recently I've been debugging a memory usage issue in another project of mine. I wasn't able to fully
track down the cause, but I did notice that removing the
[flume channel library](https://github.com/zesterer/flume) seemed to make the leak go away. Since I
also use flume in pict-rs, I'm opting to replace it with tokio's native channel implementation. This
may or may not improve memory usage, but it does reduce the dependency count and therefore build
time for pict-rs.
releases/0.5.14.md (new file, 28 lines)

@@ -0,0 +1,28 @@
# pict-rs 0.5.14

pict-rs is a simple image hosting microservice, designed to handle storing and retrieving images,
animations, and videos, as well as providing basic image processing functionality.

## Overview

pict-rs 0.5.14 includes a bugfix for identifying certain MOV videos, as well as updated dependencies.

### Fixes

- [Empty Stream Parsing](#empty-stream-parsing)


## Upgrade Notes

There are no significant changes from 0.5.13. Upgrading should be as simple as pulling a new version
of pict-rs.


## Descriptions

### Empty Stream Parsing

Certain videos, when identified with ffprobe, contain stream json objects with no fields. This would
cause pict-rs to fail to parse the information for these videos, as it expects streams to at least
contain a codec field. In pict-rs 0.5.14, empty streams are now considered valid and are simply
ignored.
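
A sketch of the tolerant shape in serde terms; the field name mimics ffprobe's JSON output, and the exact structs in pict-rs may differ:

```rust
use serde::Deserialize;

// Every field is optional, so `{}` deserializes cleanly; streams without
// a codec are filtered out instead of failing the whole parse.
#[derive(Debug, Deserialize)]
struct Stream {
    codec_type: Option<String>,
}

#[derive(Debug, Deserialize)]
struct FfProbeOutput {
    streams: Vec<Stream>,
}

fn main() -> Result<(), serde_json::Error> {
    // One normal stream, one empty object as seen in certain MOV files.
    let json = r#"{ "streams": [ { "codec_type": "video" }, {} ] }"#;
    let parsed: FfProbeOutput = serde_json::from_str(json)?;

    let usable = parsed
        .streams
        .iter()
        .filter(|s| s.codec_type.is_some())
        .count();
    println!("{usable} of {} streams usable", parsed.streams.len());
    Ok(())
}
```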
releases/0.5.7.md (new file, 93 lines)

@@ -0,0 +1,93 @@
# pict-rs 0.5.7

## Overview

pict-rs 0.5.7 adds support for generating blurhashes from images and includes a couple unrelated
fixes and tweaks.

### Features

- [Blurhash Endpoint](#blurhash-endpoint)


### Changes

- [File Path Changes](#file-path-changes)
- [Performance Improvements](#performance-improvements)


### Fixes

- [More Consistent Errors](#more-consistent-errors)
- [APNG Detection](#apng-detection)


## Upgrade Notes

There is a small repo format migration between 0.5.6 and 0.5.7. For sled it's simply opening a new
tree, for postgres it involves adding a new column to the hashes table. These changes will
automatically apply when launching pict-rs 0.5.7. Upgrading should be as simple as pulling a new
version of pict-rs.


## Descriptions

### Blurhash Endpoint

A new endpoint at `/image/blurhash` has been added for generating blurhashes from uploaded media. A
blurhash is a short string that encodes a few notable color values from an image that can be
reconstructed into a blurred approximation of the original image. Notably, blurhashes are used by
Mastodon to act as placeholders for sensitive media. For more information about blurhashes, see
[blurha.sh](https://blurha.sh).

This endpoint is powered by my new blurhash encoding library,
[blurhash-update](https://crates.io/crates/blurhash-update).

On success, the blurhash endpoint returns the following JSON.

```json
{
  "msg": "ok",
  "blurhash": "LGF5]+Yk^6#M@-5c,1J5@[or[Q6."
}
```

pict-rs does not provide a blurhash decoding mechanism (it would defeat the purpose of blurhashes to
do so).


### File Path Changes

pict-rs has dropped its dependency on my `storage-path-generator` library in favor of using UUIDs to
create unique file paths for uploaded media. This means that newly uploaded media will be stored in
a different directory structure, and with different filenames. The purpose of this is to reduce
database use by removing the need to synchronize the current path state.

pict-rs 0.5.7 also adds file extensions back to file paths, since they are now somewhat-publicly
visible (when using the public_endpoint configuration with object storage).

Neither of these changes affect previously uploaded media.
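
The gist of a UUID-derived path, as a sketch: the fan-out layout below is illustrative and pict-rs's actual layout may differ, but note that the project's Cargo.toml enables uuid's `v7` feature:

```rust
use uuid::Uuid;

// Derive a unique, extension-bearing file path from a UUID, with no shared
// counter state to synchronize through the database.
fn generate_file_path(ext: &str) -> String {
    let id = Uuid::now_v7().simple().to_string();
    // Fan out into subdirectories using leading characters of the id.
    format!("{}/{}/{}.{}", &id[0..2], &id[2..4], id, ext)
}

fn main() {
    println!("{}", generate_file_path("png"));
}
```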

### Performance Improvements

pict-rs 0.5.7 now buffers media in memory less frequently, opting to stream bytes directly from
sources to sinks. This should improve general memory use, as well as decrease time pict-rs would
spend waiting to aggregate bytes.

pict-rs also no longer requires bytes be present in contiguous buffers, avoiding large allocations
and reducing memcpys.


### More Consistent Errors

pict-rs 0.5 introduced machine-readable error codes that returned alongside error messages, but
there were a couple locations in pict-rs that failed to include them. pict-rs 0.5.7 resolves this,
ensuring all error paths properly return codes.


### APNG Detection

pict-rs 0.5.7 fixes the imagemagick delegate policy for ffmpeg, which allows for properly detecting
certain media, notably APNG files. pict-rs should once again be able to properly handle uploaded
APNGs.
84
releases/0.5.8.md
Normal file
84
releases/0.5.8.md
Normal file
|
@ -0,0 +1,84 @@
|
|||
# pict-rs 0.5.8
|
||||
|
||||
## Overview
|
||||
|
||||
pict-rs 0.5.8 improves reliability of deletions by allowing background tasks to be retried.
|
||||
Otherwise changes are fairly minor.
|
||||
|
||||
### Changes
|
||||
|
||||
- [Improved Task Reliability](#improved-task-reliability)
|
||||
- [Improved Latency](#improved-latency)
|
||||
|
||||
|
||||
## Upgrade Notes
|
||||
|
||||
There is a small repo format migration between 0.5.7 and 0.5.8. For sled it's simply opening a new
|
||||
tree, for postgre it involves adding a new column to the job_queue table. These changes will
|
||||
automatically apply when launching pict-rs 0.5.8. Upgrading should be as simple as pulling a new
|
||||
version of pict-rs.
|
||||
|
||||
|
||||
## Configuration Notes

Check your configurations to make sure you haven't enabled the tokio-console integration unless
you're actually using it. In my local testing, I've found the console subscriber to use a
significant amount of CPU. While it is very useful for debugging, it generally shouldn't be enabled
in production.

The relevant configuration values are `PICTRS__TRACING__CONSOLE__ADDRESS` when using environment
variables, or `[tracing.console] address = ""` in the toml.


## Packaging Notes

While I have never recommended packaging pict-rs with non-default crate features enabled, and the
binaries and containers I provide enable only the default features, there are two new crate features
in this release that I would advise against enabling in downstream packaging environments.

The new features are `poll-timer-warnings` and `random-errors`. Each is described below if you want
to learn about them, but as a general recommendation, do not enable non-default features when
packaging pict-rs (yes, I'm talking to you, `grawlinson` from the AUR).

The other optional feature, `io-uring`, is considered less stable. It's possible that folks will
find it works alright, and maybe Arch can enable it since they can assume recent kernels, but I
don't personally test much with `io-uring`. It exists mostly as a historical curiosity. Please
consider carefully before enabling io-uring for pict-rs.


## Descriptions

### Improved Task Reliability

pict-rs 0.5.8 adds the ability for tasks to be retried. pict-rs generally spawns background tasks to
handle things like image deletion and other cleanup operations. Until now, if a background task
failed, the only indication would be a warning in the logs. These warnings are generally descriptive
and help track down the error source, but end users aren't notified, and the repo or store state can
become inconsistent.

With the newly added ability to retry tasks, operations should complete more reliably. By default, a
failed task will be retried after a two-minute wait, and if it continues to fail, it will be retried
up to five times. If a task still fails after five retries, an additional warning is written to the
log.
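
A sketch of that policy in isolation; the `Job` type and `execute` function are hypothetical
stand-ins, and pict-rs' real queue plumbing is not reproduced here:

```rust
use std::{thread::sleep, time::Duration};

struct Job {
    name: &'static str,
}

// Hypothetical stand-in for running one queued task.
fn execute(job: &Job) -> Result<(), String> {
    Err(format!("{} always fails in this sketch", job.name))
}

fn run_with_retries(job: Job) {
    const MAX_RETRIES: u32 = 5;
    const RETRY_DELAY: Duration = Duration::from_secs(2 * 60);

    for attempt in 0..=MAX_RETRIES {
        match execute(&job) {
            Ok(()) => return,
            Err(err) if attempt < MAX_RETRIES => {
                // Failed task: wait two minutes, then try again.
                eprintln!("task failed ({err}), retrying");
                sleep(RETRY_DELAY);
            }
            Err(err) => {
                // All five retries exhausted: emit the additional warning.
                eprintln!("task failed after {MAX_RETRIES} retries: {err}");
            }
        }
    }
}
```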

In order to test this, I've added a new optional crate feature called `random-errors`, which injects
errors into various pict-rs operations at random. This feature should never be enabled in production
scenarios, and two warnings will be printed when launching pict-rs if it was compiled with this
feature enabled.


### Improved Latency

pict-rs 0.5.8 implements a couple of new techniques to improve system latency.

1. The postgres connection pooling library has been swapped from deadpool to bb8. Not only does this
   (slightly) improve connection pool access times, but it also means pict-rs is no longer pinned
   to an outdated version of deadpool.
2. Processes like ffmpeg, imagemagick, and exiftool are now spawned from background threads, rather
   than from within the webserver threads. This is notable, since the act of spawning a process
   takes a fair amount of time and prevents other requests from being handled until the spawn has
   completed.
3. pict-rs now has the ability to monitor polling times for futures. By default, any task pict-rs
   spawns itself will be monitored to report polling times, and a trait has been added to make it
   easy to track more polling times in the future. These polling times appear in the prometheus
   metrics, as well as in logs at DEBUG or TRACE visibility. There's an optional crate feature
   called `poll-timer-warnings` that upgrades some of these logs to WARN visibility. A condensed
   sketch of the poll-timing wrapper follows this list.
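
Here is that condensed sketch. The full version in the `src/future.rs` diff below uses pin-project
and reports to prometheus with tiered log levels; this one boxes the inner future for brevity:

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
    time::Instant,
};

// Times every call to `poll` on the wrapped future and reports slow polls.
struct PollTimer<F> {
    name: &'static str,
    inner: Pin<Box<F>>,
}

impl<F: Future> Future for PollTimer<F> {
    type Output = F::Output;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let start = Instant::now();
        let out = self.inner.as_mut().poll(cx);

        let elapsed = start.elapsed();
        if elapsed.as_millis() >= 1 {
            eprintln!("Future {} polled for {} ms", self.name, elapsed.as_millis());
        }

        out
    }
}
```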
32 releases/0.5.9.md (new file)
@@ -0,0 +1,32 @@
# pict-rs 0.5.9

## Overview

pict-rs 0.5.9 is a bugfix release for 0.5.8. All deployments on 0.5.8 should upgrade to 0.5.9.

### Fixes

- [Fix Postgres Pooling](#fix-postgres-pooling)
- [Fix io-uring feature](#fix-io-uring-feature)


## Upgrade Notes

There are no significant changes from 0.5.8. Upgrading should be as simple as pulling the new
version.


## Descriptions

### Fix Postgres Pooling

When pict-rs 0.5.8 was built without the `tokio_unstable` flag, it would use tokio's `spawn_local`
utility from outside a LocalSet, leading to panics and timeouts. This release replaces the use of
`spawn_local` with `spawn` in that scenario.
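
A sketch of the shape of the fix; the `tokio_unstable` cfg gate here is illustrative, and the
helper name is hypothetical (pict-rs' actual dispatch lives in its repo):

```rust
use std::future::Future;

// Only use `spawn_local` when a LocalSet is guaranteed to be running;
// otherwise fall back to the multi-threaded `spawn`.
fn spawn_task<F>(fut: F)
where
    F: Future<Output = ()> + Send + 'static,
{
    // `spawn_local` panics if called outside a LocalSet, which was the bug.
    #[cfg(tokio_unstable)]
    tokio::task::spawn_local(fut);

    #[cfg(not(tokio_unstable))]
    tokio::task::spawn(fut);
}
```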


### Fix io-uring Feature

As mentioned in the 0.5.8 release notes, io-uring is not considered to be a stable feature. However,
0.5.9 should make it usable again. Instead of manually launching a tokio_uring runtime, pict-rs once
again relies on actix-rt to configure a System for use with io-uring.
@@ -34,7 +34,7 @@ impl Backgrounded {
    pub(crate) async fn proxy<S, P>(state: &State<S>, stream: P) -> Result<Self, Error>
    where
        S: Store,
        P: Stream<Item = Result<Bytes, Error>> + 'static,
        P: Stream<Item = Result<Bytes, Error>>,
    {
        let mut this = Self {
            repo: state.repo.clone(),

@@ -50,16 +50,18 @@ impl Backgrounded {
    async fn do_proxy<S, P>(&mut self, store: &S, stream: P) -> Result<(), Error>
    where
        S: Store,
        P: Stream<Item = Result<Bytes, Error>> + 'static,
        P: Stream<Item = Result<Bytes, Error>>,
    {
        self.upload_id = Some(self.repo.create_upload().await?);

        let stream = Box::pin(crate::stream::map_err(stream, |e| {
        let stream = crate::stream::map_err(stream, |e| {
            std::io::Error::new(std::io::ErrorKind::Other, e)
        }));
        });

        // use octet-stream, we don't know the upload's real type yet
        let identifier = store.save_stream(stream, APPLICATION_OCTET_STREAM).await?;
        let identifier = store
            .save_stream(stream, APPLICATION_OCTET_STREAM, None)
            .await?;

        self.identifier = Some(identifier);
106 src/blurhash.rs (new file)
@@ -0,0 +1,106 @@
use std::ffi::{OsStr, OsString};

use tokio::io::AsyncReadExt;

use crate::{
    details::Details,
    error::{Error, UploadError},
    formats::ProcessableFormat,
    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
    process::Process,
    repo::Alias,
    state::State,
    store::Store,
};

pub(crate) async fn generate<S>(
    state: &State<S>,
    alias: &Alias,
    original_details: &Details,
) -> Result<String, Error>
where
    S: Store + 'static,
{
    let hash = state
        .repo
        .hash(alias)
        .await?
        .ok_or(UploadError::MissingIdentifier)?;

    let identifier = if original_details.is_video() {
        crate::generate::ensure_motion_identifier(state, hash, original_details).await?
    } else {
        state
            .repo
            .identifier(hash)
            .await?
            .ok_or(UploadError::MissingIdentifier)?
    };

    let input_details = crate::ensure_details_identifier(state, &identifier).await?;

    let stream = state.store.to_stream(&identifier, None, None).await?;

    let blurhash = read_rgba_command(
        state,
        input_details
            .internal_format()
            .processable_format()
            .expect("not a video"),
    )
    .await?
    .drive_with_stream(stream)
    .with_stdout(|mut stdout| async move {
        let mut encoder = blurhash_update::Encoder::auto(blurhash_update::ImageBounds {
            width: input_details.width() as _,
            height: input_details.height() as _,
        });

        let mut buf = [0u8; 1024 * 8];

        loop {
            let n = stdout.read(&mut buf).await?;

            if n == 0 {
                break;
            }

            encoder.update(&buf[..n]);
        }

        Ok(encoder.finalize()) as std::io::Result<String>
    })
    .await??;

    Ok(blurhash)
}

async fn read_rgba_command<S>(
    state: &State<S>,
    input_format: ProcessableFormat,
) -> Result<Process, MagickError> {
    let temporary_path = state
        .tmp_dir
        .tmp_folder()
        .await
        .map_err(MagickError::CreateTemporaryDirectory)?;

    let mut input_arg = OsString::from(input_format.magick_format());
    input_arg.push(":-");
    if input_format.coalesce() {
        input_arg.push("[0]");
    }

    let args: [&OsStr; 3] = ["convert".as_ref(), &input_arg, "RGBA:-".as_ref()];

    let envs = [
        (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
    ];

    let process = Process::run("magick", &args, &envs, state.config.media.process_timeout)
        .await?
        .add_extras(temporary_path);

    Ok(process)
}
@@ -1,14 +1,7 @@
use actix_web::{
    body::MessageBody,
    web::{Bytes, BytesMut},
};
use actix_web::web::Bytes;
use futures_core::Stream;
use std::{
    collections::{vec_deque::IntoIter, VecDeque},
    convert::Infallible,
    pin::Pin,
    task::{Context, Poll},
};
use std::collections::{vec_deque::IntoIter, VecDeque};
use streem::IntoStreamer;

#[derive(Clone, Debug)]
pub(crate) struct BytesStream {
@@ -24,6 +17,34 @@ impl BytesStream {
        }
    }

    #[tracing::instrument(skip(stream))]
    pub(crate) async fn try_from_stream<S, E>(stream: S) -> Result<Self, E>
    where
        S: Stream<Item = Result<Bytes, E>>,
    {
        let stream = std::pin::pin!(stream);
        let mut stream = stream.into_streamer();
        let mut bs = Self::new();

        while let Some(bytes) = stream.try_next().await? {
            tracing::trace!("try_from_stream: looping");
            bs.add_bytes(bytes);
            crate::sync::cooperate().await;
        }

        tracing::debug!(
            "BytesStream with {} chunks, avg length {}",
            bs.chunks_len(),
            bs.len() / bs.chunks_len().max(1)
        );

        Ok(bs)
    }

    pub(crate) fn chunks_len(&self) -> usize {
        self.inner.len()
    }

    pub(crate) fn add_bytes(&mut self, bytes: Bytes) {
        self.total_len += bytes.len();
        self.inner.push_back(bytes);
@@ -33,14 +54,17 @@ impl BytesStream {
        self.total_len
    }

    pub(crate) fn into_bytes(self) -> Bytes {
        let mut buf = BytesMut::with_capacity(self.total_len);
    pub(crate) fn is_empty(&self) -> bool {
        self.total_len == 0
    }

        for bytes in self.inner {
            buf.extend_from_slice(&bytes);
        }

        buf.freeze()
    pub(crate) fn into_io_stream(self) -> impl Stream<Item = std::io::Result<Bytes>> {
        crate::stream::error_injector(streem::from_fn(move |yielder| async move {
            for bytes in self {
                crate::sync::cooperate().await;
                yielder.yield_ok(bytes).await;
            }
        }))
    }
}

@@ -52,37 +76,3 @@ impl IntoIterator for BytesStream {
        self.inner.into_iter()
    }
}

impl MessageBody for BytesStream {
    type Error = std::io::Error;

    fn size(&self) -> actix_web::body::BodySize {
        if let Ok(len) = self.len().try_into() {
            actix_web::body::BodySize::Sized(len)
        } else {
            actix_web::body::BodySize::None
        }
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        Poll::Ready(self.get_mut().inner.pop_front().map(Ok))
    }

    fn try_into_bytes(self) -> Result<Bytes, Self>
    where
        Self: Sized,
    {
        Ok(self.into_bytes())
    }
}

impl Stream for BytesStream {
    type Item = Result<Bytes, Infallible>;

    fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        Poll::Ready(self.get_mut().inner.pop_front().map(Ok))
    }
}
@@ -1,172 +0,0 @@
use crate::{
    details::Details,
    error::{Error, UploadError},
    repo::Hash,
};
use actix_web::web;
use dashmap::{mapref::entry::Entry, DashMap};
use flume::{r#async::RecvFut, Receiver, Sender};
use std::{
    future::Future,
    path::PathBuf,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use tracing::Span;

type OutcomeReceiver = Receiver<(Details, web::Bytes)>;

type ProcessMapKey = (Hash, PathBuf);

type ProcessMapInner = DashMap<ProcessMapKey, OutcomeReceiver>;

#[derive(Debug, Default, Clone)]
pub(crate) struct ProcessMap {
    process_map: Arc<ProcessMapInner>,
}

impl ProcessMap {
    pub(super) fn new() -> Self {
        Self::default()
    }

    pub(super) async fn process<Fut>(
        &self,
        hash: Hash,
        path: PathBuf,
        fut: Fut,
    ) -> Result<(Details, web::Bytes), Error>
    where
        Fut: Future<Output = Result<(Details, web::Bytes), Error>>,
    {
        let key = (hash.clone(), path.clone());

        let (sender, receiver) = flume::bounded(1);

        let entry = self.process_map.entry(key.clone());

        let (state, span) = match entry {
            Entry::Vacant(vacant) => {
                vacant.insert(receiver);

                let span = tracing::info_span!(
                    "Processing image",
                    hash = ?hash,
                    path = ?path,
                    completed = &tracing::field::Empty,
                );

                metrics::counter!(crate::init_metrics::PROCESS_MAP_INSERTED).increment(1);

                (CancelState::Sender { sender }, span)
            }
            Entry::Occupied(receiver) => {
                let span = tracing::info_span!(
                    "Waiting for processed image",
                    hash = ?hash,
                    path = ?path,
                );

                let receiver = receiver.get().clone().into_recv_async();

                (CancelState::Receiver { receiver }, span)
            }
        };

        CancelSafeProcessor {
            cancel_token: CancelToken {
                span,
                key,
                state,
                process_map: self.clone(),
            },
            fut,
        }
        .await
    }

    fn remove(&self, key: &ProcessMapKey) -> Option<OutcomeReceiver> {
        self.process_map.remove(key).map(|(_, v)| v)
    }
}

struct CancelToken {
    span: Span,
    key: ProcessMapKey,
    state: CancelState,
    process_map: ProcessMap,
}

enum CancelState {
    Sender {
        sender: Sender<(Details, web::Bytes)>,
    },
    Receiver {
        receiver: RecvFut<'static, (Details, web::Bytes)>,
    },
}

impl CancelState {
    const fn is_sender(&self) -> bool {
        matches!(self, Self::Sender { .. })
    }
}

pin_project_lite::pin_project! {
    struct CancelSafeProcessor<F> {
        cancel_token: CancelToken,

        #[pin]
        fut: F,
    }
}

impl<F> Future for CancelSafeProcessor<F>
where
    F: Future<Output = Result<(Details, web::Bytes), Error>>,
{
    type Output = Result<(Details, web::Bytes), Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.as_mut().project();

        let span = &this.cancel_token.span;
        let process_map = &this.cancel_token.process_map;
        let state = &mut this.cancel_token.state;
        let key = &this.cancel_token.key;
        let fut = this.fut;

        span.in_scope(|| match state {
            CancelState::Sender { sender } => {
                let res = std::task::ready!(fut.poll(cx));

                if process_map.remove(key).is_some() {
                    metrics::counter!(crate::init_metrics::PROCESS_MAP_REMOVED).increment(1);
                }

                if let Ok(tup) = &res {
                    let _ = sender.try_send(tup.clone());
                }

                Poll::Ready(res)
            }
            CancelState::Receiver { ref mut receiver } => Pin::new(receiver)
                .poll(cx)
                .map(|res| res.map_err(|_| UploadError::Canceled.into())),
        })
    }
}

impl Drop for CancelToken {
    fn drop(&mut self) {
        if self.state.is_sender() {
            let completed = self.process_map.remove(&self.key).is_none();
            self.span.record("completed", completed);

            if !completed {
                metrics::counter!(crate::init_metrics::PROCESS_MAP_REMOVED).increment(1);
            }
        }
    }
}
@@ -18,6 +18,7 @@ impl Args {
                log_format,
                log_targets,
                log_spans,
                no_log_ansi,
                console_address,
                console_buffer_capacity,
                opentelemetry_url,

@@ -38,6 +39,7 @@ impl Args {
                    format: log_format,
                    targets: log_targets.map(Serde::new),
                    log_spans,
                    no_ansi: no_log_ansi,
                },
                console: Console {
                    address: console_address,

@@ -55,6 +57,7 @@ impl Args {
                address,
                api_key,
                temporary_directory,
                no_cleanup_temporary_directory,
                certificate,
                private_key,
                client_timeout,

@@ -122,6 +125,7 @@ impl Args {
                    danger_dummy_mode,
                    max_file_count,
                    temporary_directory,
                    cleanup_temporary_directory: !no_cleanup_temporary_directory,
                    certificate,
                    private_key,
                };

@@ -541,6 +545,7 @@ struct Server {
    max_file_count: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    temporary_directory: Option<PathBuf>,
    cleanup_temporary_directory: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    certificate: Option<PathBuf>,
    #[serde(skip_serializing_if = "Option::is_none")]

@@ -578,6 +583,8 @@ struct Logging {
    targets: Option<Serde<Targets>>,
    #[serde(skip_serializing_if = "std::ops::Not::not")]
    log_spans: bool,
    #[serde(skip_serializing_if = "std::ops::Not::not")]
    no_ansi: bool,
}

#[derive(Debug, Default, serde::Serialize)]

@@ -922,6 +929,10 @@ pub(super) struct Args {
    #[arg(long)]
    log_spans: bool,

    #[arg(long)]
    /// Whether to disable color-codes in log output
    no_log_ansi: bool,

    /// Address and port to expose tokio-console metrics
    #[arg(long)]
    console_address: Option<SocketAddr>,

@@ -973,6 +984,10 @@ struct Run {
    #[arg(long)]
    temporary_directory: Option<PathBuf>,

    /// Whether to attempt to clean files left behind from a previous run of pict-rs
    #[arg(long)]
    no_cleanup_temporary_directory: bool,

    /// The path to the TLS certificate. Both the certificate and the private_key must be specified
    /// to enable TLS
    #[arg(long)]
@@ -24,6 +24,7 @@ struct ServerDefaults {
    danger_dummy_mode: bool,
    max_file_count: u32,
    temporary_directory: PathBuf,
    cleanup_temporary_directory: bool,
}

#[derive(Clone, Debug, serde::Serialize)]

@@ -54,6 +55,7 @@ struct LoggingDefaults {
    format: LogFormat,
    targets: Serde<Targets>,
    log_spans: bool,
    no_ansi: bool,
}

#[derive(Clone, Debug, serde::Serialize)]

@@ -211,6 +213,7 @@ impl Default for ServerDefaults {
            danger_dummy_mode: false,
            max_file_count: 1,
            temporary_directory: std::env::temp_dir(),
            cleanup_temporary_directory: true,
        }
    }
}

@@ -233,6 +236,7 @@ impl Default for LoggingDefaults {
            format: LogFormat::Normal,
            targets: "info".parse().expect("Valid targets string"),
            log_spans: false,
            no_ansi: false,
        }
    }
}
@@ -119,6 +119,8 @@ pub(crate) struct Server {

    pub(crate) temporary_directory: PathBuf,

    pub(crate) cleanup_temporary_directory: bool,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) certificate: Option<PathBuf>,

@@ -161,6 +163,8 @@ pub(crate) struct Logging {
    pub(crate) targets: Serde<Targets>,

    pub(crate) log_spans: bool,

    pub(crate) no_ansi: bool,
}

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
@@ -1,11 +1,12 @@
use crate::{
    bytes_stream::BytesStream,
    discover::Discovery,
    error::Error,
    formats::{InternalFormat, InternalVideoFormat},
    serde_str::Serde,
    state::State,
};
use actix_web::web;

use time::{format_description::well_known::Rfc3339, OffsetDateTime};

#[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]

@@ -80,13 +81,16 @@ impl Details {
    }

    #[tracing::instrument(level = "debug", skip_all)]
    pub(crate) async fn from_bytes<S>(state: &State<S>, input: web::Bytes) -> Result<Self, Error> {
    pub(crate) async fn from_bytes_stream<S>(
        state: &State<S>,
        input: BytesStream,
    ) -> Result<Self, Error> {
        let Discovery {
            input,
            width,
            height,
            frames,
        } = crate::discover::discover_bytes(state, input).await?;
        } = crate::discover::discover_bytes_stream(state, input).await?;

        Ok(Details::from_parts(
            input.internal_format(),

@@ -96,6 +100,14 @@ impl Details {
        ))
    }

    pub(crate) fn width(&self) -> u16 {
        self.inner.width
    }

    pub(crate) fn height(&self) -> u16 {
        self.inner.height
    }

    pub(crate) fn internal_format(&self) -> InternalFormat {
        self.inner.format
    }

@@ -104,10 +116,18 @@ impl Details {
        (*self.inner.content_type).clone()
    }

    pub(crate) fn file_extension(&self) -> &'static str {
        self.inner.format.file_extension()
    }

    pub(crate) fn system_time(&self) -> std::time::SystemTime {
        self.inner.created_at.into()
    }

    pub(crate) fn is_video(&self) -> bool {
        matches!(self.inner.format, InternalFormat::Video(_))
    }

    pub(crate) fn video_format(&self) -> Option<InternalVideoFormat> {
        match self.inner.format {
            InternalFormat::Video(format) => Some(format),
@@ -2,9 +2,7 @@ mod exiftool;
mod ffmpeg;
mod magick;

use actix_web::web::Bytes;

use crate::{formats::InputFile, state::State};
use crate::{bytes_stream::BytesStream, formats::InputFile, future::WithPollTimer, state::State};

#[derive(Debug, PartialEq, Eq)]
pub(crate) struct Discovery {

@@ -27,16 +25,21 @@ pub(crate) enum DiscoverError {
}

#[tracing::instrument(level = "trace", skip_all)]
pub(crate) async fn discover_bytes<S>(
pub(crate) async fn discover_bytes_stream<S>(
    state: &State<S>,
    bytes: Bytes,
    bytes: BytesStream,
) -> Result<Discovery, crate::error::Error> {
    let discovery = ffmpeg::discover_bytes(state, bytes.clone()).await?;
    let discovery = ffmpeg::discover_bytes_stream(state, bytes.clone())
        .with_poll_timer("discover-ffmpeg")
        .await?;

    let discovery = magick::confirm_bytes(state, discovery, bytes.clone()).await?;
    let discovery = magick::confirm_bytes_stream(state, discovery, bytes.clone())
        .with_poll_timer("confirm-imagemagick")
        .await?;

    let discovery =
        exiftool::check_reorient(discovery, bytes, state.config.media.process_timeout).await?;
    let discovery = exiftool::check_reorient(discovery, bytes, state.config.media.process_timeout)
        .with_poll_timer("reorient-exiftool")
        .await?;

    Ok(discovery)
}
@@ -1,6 +1,5 @@
use actix_web::web::Bytes;

use crate::{
    bytes_stream::BytesStream,
    exiftool::ExifError,
    formats::{ImageInput, InputFile},
    process::Process,

@@ -16,7 +15,7 @@ pub(super) async fn check_reorient(
        height,
        frames,
    }: Discovery,
    bytes: Bytes,
    bytes: BytesStream,
    timeout: u64,
) -> Result<Discovery, ExifError> {
    let input = match input {

@@ -40,9 +39,10 @@ pub(super) async fn check_reorient(
}

#[tracing::instrument(level = "trace", skip_all)]
async fn needs_reorienting(input: Bytes, timeout: u64) -> Result<bool, ExifError> {
    let buf = Process::run("exiftool", &["-n", "-Orientation", "-"], &[], timeout)?
        .bytes_read(input)
async fn needs_reorienting(input: BytesStream, timeout: u64) -> Result<bool, ExifError> {
    let buf = Process::run("exiftool", &["-n", "-Orientation", "-"], &[], timeout)
        .await?
        .drive_with_stream(input.into_io_stream())
        .into_string()
        .await?;

@@ -4,15 +4,16 @@ mod tests;
use std::{collections::HashSet, sync::OnceLock};

use crate::{
    bytes_stream::BytesStream,
    ffmpeg::FfMpegError,
    formats::{
        AlphaCodec, AnimationFormat, ImageFormat, ImageInput, InputFile, InputVideoFormat,
        Mp4AudioCodec, Mp4Codec, WebmAlphaCodec, WebmAudioCodec, WebmCodec,
    },
    future::WithPollTimer,
    process::Process,
    state::State,
};
use actix_web::web::Bytes;

use super::Discovery;

@@ -52,6 +53,7 @@ impl FfMpegStreams {
                FfMpegStream::Unknown { codec_name } => {
                    tracing::info!("Encountered unknown stream {codec_name}");
                }
                FfMpegStream::Empty {} => {}
            }
        }

@@ -134,6 +136,7 @@ enum FfMpegStream {
    Audio(FfMpegAudioStream),
    Video(FfMpegVideoStream),
    Unknown { codec_name: String },
    Empty {},
}

#[derive(Debug, serde::Deserialize)]

@@ -157,24 +160,6 @@ struct Flags {
    alpha: usize,
}

#[tracing::instrument(skip_all)]
pub(super) async fn discover_bytes<S>(
    state: &State<S>,
    bytes: Bytes,
) -> Result<Option<Discovery>, FfMpegError> {
    discover_file(state, move |mut file| {
        let bytes = bytes.clone();

        async move {
            file.write_from_bytes(bytes)
                .await
                .map_err(FfMpegError::Write)?;
            Ok(file)
        }
    })
    .await
}

async fn allows_alpha(pixel_format: &str, timeout: u64) -> Result<bool, FfMpegError> {
    static ALPHA_PIXEL_FORMATS: OnceLock<HashSet<String>> = OnceLock::new();

@@ -190,46 +175,41 @@ async fn allows_alpha(pixel_format: &str, timeout: u64) -> Result<bool, FfMpegError> {
}

#[tracing::instrument(level = "debug", skip_all)]
async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Option<Discovery>, FfMpegError>
where
    F: FnOnce(crate::file::File) -> Fut,
    Fut: std::future::Future<Output = Result<crate::file::File, FfMpegError>>,
{
    let input_file = state.tmp_dir.tmp_file(None);
    crate::store::file_store::safe_create_parent(&input_file)
pub(super) async fn discover_bytes_stream<S>(
    state: &State<S>,
    bytes: BytesStream,
) -> Result<Option<Discovery>, FfMpegError> {
    let output = crate::ffmpeg::with_file(&state.tmp_dir, None, |path| async move {
        crate::file::write_from_stream(&path, bytes.into_io_stream())
            .with_poll_timer("discover-ffmpeg-write-file")
            .await
            .map_err(FfMpegError::Write)?;

        Process::run(
            "ffprobe",
            &[
                "-v".as_ref(),
                "quiet".as_ref(),
                "-count_frames".as_ref(),
                "-show_entries".as_ref(),
                "stream=width,height,nb_read_frames,codec_name,pix_fmt:format=format_name".as_ref(),
                "-of".as_ref(),
                "default=noprint_wrappers=1:nokey=1".as_ref(),
                "-print_format".as_ref(),
                "json".as_ref(),
                path.as_os_str(),
            ],
            &[],
            state.config.media.process_timeout,
        )
        .await?
        .read()
        .into_vec()
        .with_poll_timer("discover-ffmpeg-into-vec")
        .await
        .map_err(FfMpegError::CreateDir)?;

    let tmp_one = crate::file::File::create(&input_file)
        .await
        .map_err(FfMpegError::CreateFile)?;
    let tmp_one = (f)(tmp_one).await?;
    tmp_one.close().await.map_err(FfMpegError::CloseFile)?;

    let res = Process::run(
        "ffprobe",
        &[
            "-v".as_ref(),
            "quiet".as_ref(),
            "-count_frames".as_ref(),
            "-show_entries".as_ref(),
            "stream=width,height,nb_read_frames,codec_name,pix_fmt:format=format_name".as_ref(),
            "-of".as_ref(),
            "default=noprint_wrappers=1:nokey=1".as_ref(),
            "-print_format".as_ref(),
            "json".as_ref(),
            input_file.as_os_str(),
        ],
        &[],
        state.config.media.process_timeout,
    )?
    .read()
    .into_vec()
    .await;

    input_file.cleanup().await.map_err(FfMpegError::Cleanup)?;

    let output = res?;
        .map_err(FfMpegError::Process)
    })
    .await??;

    let output: FfMpegDiscovery = serde_json::from_slice(&output).map_err(FfMpegError::Json)?;

@@ -268,7 +248,8 @@ async fn alpha_pixel_formats(timeout: u64) -> Result<HashSet<String>, FfMpegError> {
            ],
            &[],
            timeout,
        )?
        )
        .await?
        .read()
        .into_vec()
        .await?;
35 src/discover/ffmpeg/ffprobe_6_0_mov_details.json (new file)
@@ -0,0 +1,35 @@
{
    "programs": [

    ],
    "streams": [
        {
            "codec_name": "hevc",
            "width": 1920,
            "height": 1080,
            "pix_fmt": "yuv420p10le",
            "nb_read_frames": "187",
            "side_data_list": [
                {

                }
            ]
        },
        {
            "codec_name": "aac",
            "nb_read_frames": "135"
        },
        {

        },
        {

        },
        {

        }
    ],
    "format": {
        "format_name": "mov,mp4,m4a,3gp,3g2,mj2"
    }
}
@@ -1,11 +1,11 @@
use crate::formats::{
    AlphaCodec, AnimationFormat, ImageFormat, ImageInput, InputFile, InputVideoFormat, Mp4Codec,
    WebmAlphaCodec, WebmCodec,
    AlphaCodec, AnimationFormat, ImageFormat, ImageInput, InputFile, InputVideoFormat,
    Mp4AudioCodec, Mp4Codec, WebmAlphaCodec, WebmCodec,
};

use super::{Discovery, FfMpegDiscovery, PixelFormatOutput};

fn details_tests() -> [(&'static str, Option<Discovery>); 13] {
fn details_tests() -> [(&'static str, Option<Discovery>); 14] {
    [
        (
            "animated_webp",

@@ -151,6 +151,18 @@ fn details_tests() -> [(&'static str, Option<Discovery>); 13] {
                frames: None,
            }),
        ),
        (
            "mov",
            Some(Discovery {
                input: InputFile::Video(InputVideoFormat::Mp4 {
                    video_codec: Mp4Codec::H265,
                    audio_codec: Some(Mp4AudioCodec::Aac),
                }),
                width: 1920,
                height: 1080,
                frames: Some(187),
            }),
        ),
    ]
}

@@ -1,9 +1,8 @@
#[cfg(test)]
mod tests;

use actix_web::web::Bytes;

use crate::{
    bytes_stream::BytesStream,
    discover::DiscoverError,
    formats::{AnimationFormat, ImageFormat, ImageInput, InputFile},
    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},

@@ -31,10 +30,10 @@ struct Geometry {
}

#[tracing::instrument(skip_all)]
pub(super) async fn confirm_bytes<S>(
pub(super) async fn confirm_bytes_stream<S>(
    state: &State<S>,
    discovery: Option<Discovery>,
    bytes: Bytes,
    bytes: BytesStream,
) -> Result<Discovery, MagickError> {
    match discovery {
        Some(Discovery {

@@ -49,39 +48,17 @@ pub(super) async fn confirm_bytes<S>(
        }
    }

    discover_file(state, move |mut file| async move {
        file.write_from_bytes(bytes)
            .await
            .map_err(MagickError::Write)?;

        Ok(file)
    })
    .await
    discover(state, bytes).await
}

#[tracing::instrument(level = "debug", skip_all)]
async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Discovery, MagickError>
where
    F: FnOnce(crate::file::File) -> Fut,
    Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
{
async fn discover<S>(state: &State<S>, stream: BytesStream) -> Result<Discovery, MagickError> {
    let temporary_path = state
        .tmp_dir
        .tmp_folder()
        .await
        .map_err(MagickError::CreateTemporaryDirectory)?;

    let input_file = state.tmp_dir.tmp_file(None);
    crate::store::file_store::safe_create_parent(&input_file)
        .await
        .map_err(MagickError::CreateDir)?;

    let tmp_one = crate::file::File::create(&input_file)
        .await
        .map_err(MagickError::CreateFile)?;
    let tmp_one = (f)(tmp_one).await?;
    tmp_one.close().await.map_err(MagickError::CloseFile)?;

    let envs = [
        (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),

@@ -90,19 +67,17 @@ where
    let res = Process::run(
        "magick",
        &[
            "convert".as_ref(),
            // "-ping".as_ref(), // re-enable -ping after imagemagick fix
            input_file.as_os_str(),
            "JSON:".as_ref(),
            "convert", // "-ping".as_ref(), // re-enable -ping after imagemagick fix
            "-", "JSON:",
        ],
        &envs,
        state.config.media.process_timeout,
    )?
    .read()
    )
    .await?
    .drive_with_stream(stream.into_io_stream())
    .into_string()
    .await;

    input_file.cleanup().await.map_err(MagickError::Cleanup)?;
    temporary_path
        .cleanup()
        .await
33 src/error.rs
@@ -82,7 +82,7 @@ pub(crate) enum UploadError {
    Io(#[from] std::io::Error),

    #[error("Error validating upload")]
    Validation(#[from] crate::validate::ValidationError),
    Validation(#[from] crate::ingest::ValidationError),

    #[error("Error in store")]
    Store(#[source] crate::store::StoreError),

@@ -108,6 +108,15 @@ pub(crate) enum UploadError {
    #[error("Error in request response")]
    Request(#[from] reqwest::Error),

    #[error("Invalid job popped from job queue: {1}")]
    InvalidJob(#[source] serde_json::Error, String),

    #[error("Invalid query supplied")]
    InvalidQuery(#[source] actix_web::error::QueryPayloadError),

    #[error("Invalid json supplied")]
    InvalidJson(#[source] actix_web::error::JsonPayloadError),

    #[error("pict-rs is in read-only mode")]
    ReadOnly,

@@ -123,6 +132,9 @@ pub(crate) enum UploadError {
    #[error("No files present in upload")]
    NoFiles,

    #[error("Media has not been proxied")]
    MissingProxy,

    #[error("Requested a file that doesn't exist")]
    MissingAlias,

@@ -161,11 +173,16 @@ pub(crate) enum UploadError {

    #[error("Failed external validation")]
    FailedExternalValidation,

    #[cfg(feature = "random-errors")]
    #[error("Randomly generated error for testing purposes")]
    RandomError,
}

impl UploadError {
    const fn error_code(&self) -> ErrorCode {
        match self {
            Self::Upload(actix_form_data::Error::FileSize) => ErrorCode::VALIDATE_FILE_SIZE,
            Self::Upload(_) => ErrorCode::FILE_UPLOAD_ERROR,
            Self::Repo(e) => e.error_code(),
            Self::OldRepo(_) => ErrorCode::OLD_REPO_ERROR,

@@ -185,6 +202,7 @@ impl UploadError {
            Self::Semaphore => ErrorCode::PROCESS_SEMAPHORE_CLOSED,
            Self::Canceled => ErrorCode::PANIC,
            Self::NoFiles => ErrorCode::VALIDATE_NO_FILES,
            Self::MissingProxy => ErrorCode::PROXY_NOT_FOUND,
            Self::MissingAlias => ErrorCode::ALIAS_NOT_FOUND,
            Self::MissingIdentifier => ErrorCode::LOST_FILE,
            Self::InvalidToken => ErrorCode::INVALID_DELETE_TOKEN,

@@ -196,6 +214,11 @@ impl UploadError {
            Self::Timeout(_) | Self::AggregateTimeout => ErrorCode::STREAM_TOO_SLOW,
            Self::ProcessTimeout => ErrorCode::COMMAND_TIMEOUT,
            Self::FailedExternalValidation => ErrorCode::FAILED_EXTERNAL_VALIDATION,
            Self::InvalidJob(_, _) => ErrorCode::INVALID_JOB,
            Self::InvalidQuery(_) => ErrorCode::INVALID_QUERY,
            Self::InvalidJson(_) => ErrorCode::INVALID_JSON,
            #[cfg(feature = "random-errors")]
            Self::RandomError => ErrorCode::RANDOM_ERROR,
        }
    }

@@ -232,6 +255,10 @@ impl From<crate::store::StoreError> for UploadError {
impl ResponseError for Error {
    fn status_code(&self) -> StatusCode {
        match self.kind() {
            Some(UploadError::Upload(actix_form_data::Error::FileSize))
            | Some(UploadError::Validation(crate::ingest::ValidationError::Filesize)) => {
                StatusCode::PAYLOAD_TOO_LARGE
            }
            Some(
                UploadError::DuplicateAlias
                | UploadError::Limit(_)

@@ -242,6 +269,8 @@ impl ResponseError for Error {
            ))
            | UploadError::Repo(crate::repo::RepoError::AlreadyClaimed)
            | UploadError::Validation(_)
            | UploadError::InvalidQuery(_)
            | UploadError::InvalidJson(_)
            | UploadError::UnsupportedProcessExtension
            | UploadError::ReadOnly
            | UploadError::FailedExternalValidation

@@ -251,7 +280,7 @@ impl ResponseError for Error {
            Some(UploadError::Ffmpeg(e)) if e.is_client_error() => StatusCode::BAD_REQUEST,
            Some(UploadError::Exiftool(e)) if e.is_client_error() => StatusCode::BAD_REQUEST,
            Some(UploadError::Process(e)) if e.is_client_error() => StatusCode::BAD_REQUEST,
            Some(UploadError::MissingAlias) => StatusCode::NOT_FOUND,
            Some(UploadError::MissingProxy | UploadError::MissingAlias) => StatusCode::NOT_FOUND,
            Some(UploadError::Ffmpeg(e)) if e.is_not_found() => StatusCode::NOT_FOUND,
            Some(UploadError::InvalidToken) => StatusCode::FORBIDDEN,
            Some(UploadError::Range) => StatusCode::RANGE_NOT_SATISFIABLE,
@@ -33,9 +33,6 @@ impl ErrorCode {
    pub(crate) const FILE_IO_ERROR: ErrorCode = ErrorCode {
        code: "file-io-error",
    };
    pub(crate) const PARSE_PATH_ERROR: ErrorCode = ErrorCode {
        code: "parse-path-error",
    };
    pub(crate) const FILE_EXISTS: ErrorCode = ErrorCode {
        code: "file-exists",
    };

@@ -103,6 +100,9 @@ impl ErrorCode {
    pub(crate) const VIDEO_DISABLED: ErrorCode = ErrorCode {
        code: "video-disabled",
    };
    pub(crate) const MEDIA_DISALLOWED: ErrorCode = ErrorCode {
        code: "media-disallowed",
    };
    pub(crate) const HTTP_CLIENT_ERROR: ErrorCode = ErrorCode {
        code: "http-client-error",
    };

@@ -122,6 +122,9 @@ impl ErrorCode {
    pub(crate) const VALIDATE_NO_FILES: ErrorCode = ErrorCode {
        code: "validate-no-files",
    };
    pub(crate) const PROXY_NOT_FOUND: ErrorCode = ErrorCode {
        code: "proxy-not-found",
    };
    pub(crate) const ALIAS_NOT_FOUND: ErrorCode = ErrorCode {
        code: "alias-not-found",
    };

@@ -144,4 +147,17 @@ impl ErrorCode {
    pub(crate) const FAILED_EXTERNAL_VALIDATION: ErrorCode = ErrorCode {
        code: "failed-external-validation",
    };
    pub(crate) const INVALID_JOB: ErrorCode = ErrorCode {
        code: "invalid-job",
    };
    pub(crate) const INVALID_QUERY: ErrorCode = ErrorCode {
        code: "invalid-query",
    };
    pub(crate) const INVALID_JSON: ErrorCode = ErrorCode {
        code: "invalid-json",
    };
    #[cfg(feature = "random-errors")]
    pub(crate) const RANDOM_ERROR: ErrorCode = ErrorCode {
        code: "random-error",
    };
}
@@ -1,8 +1,4 @@
use crate::{
    error_code::ErrorCode,
    process::{Process, ProcessError, ProcessRead},
};
use actix_web::web::Bytes;
use crate::{error_code::ErrorCode, process::ProcessError};

#[derive(Debug, thiserror::Error)]
pub(crate) enum ExifError {

@@ -37,23 +33,3 @@ impl ExifError {
        }
    }
}

#[tracing::instrument(level = "trace", skip(input))]
pub(crate) async fn needs_reorienting(timeout: u64, input: Bytes) -> Result<bool, ExifError> {
    let buf = Process::run("exiftool", &["-n", "-Orientation", "-"], &[], timeout)?
        .bytes_read(input)
        .into_string()
        .await?;

    Ok(!buf.is_empty())
}

#[tracing::instrument(level = "trace", skip(input))]
pub(crate) fn clear_metadata_bytes_read(
    timeout: u64,
    input: Bytes,
) -> Result<ProcessRead, ExifError> {
    let process = Process::run("exiftool", &["-all=", "-", "-out", "-"], &[], timeout)?;

    Ok(process.bytes_read(input))
}
@@ -1,4 +1,8 @@
use crate::{error_code::ErrorCode, process::ProcessError, store::StoreError};
use std::ffi::OsString;

use futures_core::Future;

use crate::{error_code::ErrorCode, process::ProcessError, store::StoreError, tmp_file::TmpDir};

#[derive(Debug, thiserror::Error)]
pub(crate) enum FfMpegError {

@@ -20,12 +24,6 @@ pub(crate) enum FfMpegError {
    #[error("Error opening file")]
    OpenFile(#[source] std::io::Error),

    #[error("Error creating file")]
    CreateFile(#[source] std::io::Error),

    #[error("Error closing file")]
    CloseFile(#[source] std::io::Error),

    #[error("Error cleaning up after command")]
    Cleanup(#[source] std::io::Error),

@@ -56,9 +54,7 @@ impl FfMpegError {
            | Self::CreateDir(_)
            | Self::ReadFile(_)
            | Self::OpenFile(_)
            | Self::Cleanup(_)
            | Self::CreateFile(_)
            | Self::CloseFile(_) => ErrorCode::COMMAND_ERROR,
            | Self::Cleanup(_) => ErrorCode::COMMAND_ERROR,
        }
    }

@@ -78,3 +74,25 @@ impl FfMpegError {
        false
    }
}

pub(crate) async fn with_file<F, Fut>(
    tmp: &TmpDir,
    ext: Option<&str>,
    f: F,
) -> Result<Fut::Output, FfMpegError>
where
    F: FnOnce(OsString) -> Fut,
    Fut: Future,
{
    let file = tmp.tmp_file(ext);

    crate::store::file_store::safe_create_parent(&file)
        .await
        .map_err(FfMpegError::CreateDir)?;

    let res = (f)(file.as_os_str().to_os_string()).await;

    file.cleanup().await.map_err(FfMpegError::Cleanup)?;

    Ok(res)
}
204 src/file.rs
@@ -1,18 +1,40 @@
use std::path::Path;

use futures_core::Stream;
use tokio_util::bytes::Bytes;

#[cfg(feature = "io-uring")]
pub(crate) use io_uring::File;

#[cfg(not(feature = "io-uring"))]
pub(crate) use tokio_file::File;

use crate::future::WithPollTimer;

pub(crate) async fn write_from_stream(
    path: impl AsRef<Path>,
    stream: impl Stream<Item = std::io::Result<Bytes>>,
) -> std::io::Result<()> {
    let mut file = File::create(path).with_poll_timer("create-file").await?;
    file.write_from_stream(stream)
        .with_poll_timer("write-from-stream")
        .await?;
    file.close().await?;
    Ok(())
}

#[cfg(not(feature = "io-uring"))]
mod tokio_file {
    use crate::{store::file_store::FileError, Either};
    use crate::{future::WithPollTimer, store::file_store::FileError, Either};
    use actix_web::web::{Bytes, BytesMut};
    use futures_core::Stream;
    use std::{io::SeekFrom, path::Path};
    use streem::IntoStreamer;
    use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt};
    use tokio_util::codec::{BytesCodec, FramedRead};
    use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
    use tokio_util::{
        bytes::Buf,
        codec::{BytesCodec, FramedRead},
    };

    pub(crate) struct File {
        inner: tokio::fs::File,

@@ -35,11 +57,6 @@ mod tokio_file {
        })
    }

    pub(crate) async fn write_from_bytes(&mut self, mut bytes: Bytes) -> std::io::Result<()> {
        self.inner.write_all_buf(&mut bytes).await?;
        Ok(())
    }

    pub(crate) async fn write_from_stream<S>(&mut self, stream: S) -> std::io::Result<()>
    where
        S: Stream<Item = std::io::Result<Bytes>>,

@@ -47,40 +64,26 @@ mod tokio_file {
        let stream = std::pin::pin!(stream);
        let mut stream = stream.into_streamer();

        while let Some(res) = stream.next().await {
        while let Some(mut bytes) = stream.try_next().with_poll_timer("try-next").await? {
            tracing::trace!("write_from_stream: looping");

            let mut bytes = res?;
            while bytes.has_remaining() {
                self.inner
                    .write_buf(&mut bytes)
                    .with_poll_timer("write-buf")
                    .await?;

            self.inner.write_all_buf(&mut bytes).await?;
                crate::sync::cooperate().await;
            }
        }

        Ok(())
    }

    pub(crate) async fn write_from_async_read<R>(
        &mut self,
        mut reader: R,
    ) -> std::io::Result<()>
    where
        R: AsyncRead + Unpin,
    {
        tokio::io::copy(&mut reader, &mut self.inner).await?;
        Ok(())
    }

    pub(crate) async fn close(self) -> std::io::Result<()> {
        Ok(())
    }

    pub(crate) async fn read_to_async_write<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: AsyncWrite + Unpin + ?Sized,
    {
        tokio::io::copy(&mut self.inner, writer).await?;
        Ok(())
    }

    pub(crate) async fn read_to_stream(
        mut self,
        from_start: Option<u64>,

@@ -118,7 +121,6 @@ mod io_uring {
        path::{Path, PathBuf},
    };
    use streem::IntoStreamer;
    use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
    use tokio_uring::{
        buf::{IoBuf, IoBufMut},
        BufResult,

@@ -154,36 +156,6 @@ mod io_uring {
        tokio::fs::metadata(&self.path).await
    }

    pub(crate) async fn write_from_bytes(&mut self, mut buf: Bytes) -> std::io::Result<()> {
        let len: u64 = buf.len().try_into().unwrap();

        let mut cursor: u64 = 0;

        loop {
            tracing::trace!("write_from_bytes: looping");

            if cursor == len {
                break;
            }

            let cursor_usize: usize = cursor.try_into().unwrap();
            let (res, slice) = self.inner.write_at(buf.slice(cursor_usize..), cursor).await;
            let n: usize = res?;

            if n == 0 {
                return Err(std::io::ErrorKind::UnexpectedEof.into());
            }

            buf = slice.into_inner();
            let n: u64 = n.try_into().unwrap();
            cursor += n;
        }

        self.inner.sync_all().await?;

        Ok(())
    }

    pub(crate) async fn write_from_stream<S>(&mut self, stream: S) -> std::io::Result<()>
    where
        S: Stream<Item = std::io::Result<Bytes>>,

@@ -231,100 +203,10 @@ mod io_uring {
        Ok(())
    }

    #[tracing::instrument(level = "debug", skip_all)]
    pub(crate) async fn write_from_async_read<R>(
        &mut self,
        mut reader: R,
    ) -> std::io::Result<()>
    where
        R: AsyncRead + Unpin,
    {
        let mut cursor: u64 = 0;

        loop {
            tracing::trace!("write_from_async_read: looping");

            let max_size = 65_536;
            let mut buf = Vec::with_capacity(max_size.try_into().unwrap());

            let n = (&mut reader).take(max_size).read_buf(&mut buf).await?;

            if n == 0 {
                break;
            }

            let mut position = 0;

            loop {
                tracing::trace!("write_from_async_read: looping inner");

                if position == n {
                    break;
                }

                let position_u64: u64 = position.try_into().unwrap();
                let (res, slice) = self
                    .write_at(buf.slice(position..n), cursor + position_u64)
                    .await;

                let n = res?;
                if n == 0 {
                    return Err(std::io::ErrorKind::UnexpectedEof.into());
                }

                position += n;

                buf = slice.into_inner();
            }

            let position: u64 = position.try_into().unwrap();
            cursor += position;
        }

        self.inner.sync_all().await?;

        Ok(())
    }

    pub(crate) async fn close(self) -> std::io::Result<()> {
        self.inner.close().await
    }

    pub(crate) async fn read_to_async_write<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: AsyncWrite + Unpin + ?Sized,
    {
        let metadata = self.metadata().await?;
        let size = metadata.len();

        let mut cursor: u64 = 0;

        loop {
            tracing::trace!("read_to_async_write: looping");

            if cursor == size {
                break;
            }

            let max_size = (size - cursor).min(65_536);
            let buf = BytesMut::with_capacity(max_size.try_into().unwrap());

            let (res, buf): (_, BytesMut) = self.read_at(buf, cursor).await;
            let n: usize = res?;

            if n == 0 {
                return Err(std::io::ErrorKind::UnexpectedEof.into());
            }

            writer.write_all(&buf[0..n]).await?;

            let n: u64 = n.try_into().unwrap();
            cursor += n;
        }

        Ok(())
    }

    pub(crate) async fn read_to_stream(
        self,
        from_start: Option<u64>,

@@ -393,6 +275,8 @@ mod io_uring {
    #[cfg(test)]
    mod tests {
        use std::io::Read;
        use streem::IntoStreamer;
        use tokio::io::AsyncWriteExt;

        macro_rules! test_async {
            ($fut:expr) => {

@@ -408,9 +292,16 @@ mod io_uring {
            let tmp = "/tmp/read-test";

            test_async!(async move {
                let mut file = super::File::open(EARTH_GIF).await.unwrap();
                let file = super::File::open(EARTH_GIF).await.unwrap();
                let mut tmp_file = tokio::fs::File::create(tmp).await.unwrap();
                file.read_to_async_write(&mut tmp_file).await.unwrap();

                let stream = file.read_to_stream(None, None).await.unwrap();
                let stream = std::pin::pin!(stream);
                let mut stream = stream.into_streamer();

                while let Some(mut bytes) = stream.try_next().await.unwrap() {
                    tmp_file.write_all_buf(&mut bytes).await.unwrap();
                }
            });

            let mut source = std::fs::File::open(EARTH_GIF).unwrap();

@@ -434,9 +325,12 @@ mod io_uring {
            let tmp = "/tmp/write-test";

            test_async!(async move {
                let mut file = tokio::fs::File::open(EARTH_GIF).await.unwrap();
                let file = tokio::fs::File::open(EARTH_GIF).await.unwrap();
                let mut tmp_file = super::File::create(tmp).await.unwrap();
                tmp_file.write_from_async_read(&mut file).await.unwrap();
                tmp_file
                    .write_from_stream(tokio_util::io::ReaderStream::new(file))
                    .await
                    .unwrap();
            });

            let mut source = std::fs::File::open(EARTH_GIF).unwrap();
37 src/file_path.rs (new file)
@@ -0,0 +1,37 @@
use std::path::PathBuf;

use uuid::Uuid;

fn add_extension(filename: String, extension: Option<&str>) -> String {
    if let Some(extension) = extension {
        filename + extension
    } else {
        filename
    }
}

pub(crate) fn generate_disk(mut path: PathBuf, extension: Option<&str>) -> PathBuf {
    let (directories, filename) = generate();
    path.extend(directories);
    path.push(add_extension(filename, extension));
    path
}

pub(crate) fn generate_object(extension: Option<&str>) -> String {
    let (directories, filename) = generate();

    format!(
        "{}/{}",
        directories.join("/"),
        add_extension(filename, extension)
    )
}

fn generate() -> (Vec<String>, String) {
    let s = Uuid::now_v7().simple().to_string();

    let directories = (0..10).map(|i| s[i * 2..i * 2 + 2].to_string()).collect();
    let filename = s[20..].to_string();

    (directories, filename)
}
@@ -70,7 +70,7 @@ impl ImageFormat {
        }
    }

    pub(super) const fn file_extension(self) -> &'static str {
    pub(crate) const fn file_extension(self) -> &'static str {
        match self {
            Self::Avif => ".avif",
            Self::Jpeg => ".jpeg",
@@ -59,9 +59,19 @@ pub(crate) trait WithMetrics: Future {
    }
}

pub(crate) trait WithPollTimer: Future {
    fn with_poll_timer(self, name: &'static str) -> PollTimer<Self>
    where
        Self: Sized,
    {
        PollTimer { name, inner: self }
    }
}

impl<F> NowOrNever for F where F: Future {}
impl<F> WithMetrics for F where F: Future {}
impl<F> WithTimeout for F where F: Future {}
impl<F> WithPollTimer for F where F: Future {}

pin_project_lite::pin_project! {
    pub(crate) struct MetricsFuture<F> {

@@ -104,3 +114,79 @@ impl Drop for Metrics {
            .record(self.start.elapsed().as_secs_f64());
    }
}

pin_project_lite::pin_project! {
    pub(crate) struct PollTimer<F> {
        name: &'static str,

        #[pin]
        inner: F,
    }
}

impl<F> Future for PollTimer<F>
where
    F: Future,
{
    type Output = F::Output;

    fn poll(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        let start = Instant::now();

        let this = self.project();

        let out = this.inner.poll(cx);

        let elapsed = start.elapsed();
        if elapsed > Duration::from_micros(10) {
            metrics::counter!(crate::init_metrics::FUTURE_POLL_TIMER_EXCEEDED, "timer" => this.name.to_string()).increment(1);
            metrics::histogram!(crate::init_metrics::FUTURE_POLL_TIMER_EXCEEDED_SECONDS, "timer" => this.name.to_string()).record(elapsed.as_secs_f64());
        }

        if elapsed > Duration::from_secs(1) {
            #[cfg(feature = "poll-timer-warnings")]
            tracing::warn!(
                "Future {} polled for {} seconds",
                this.name,
                elapsed.as_secs()
            );

            #[cfg(not(feature = "poll-timer-warnings"))]
            tracing::debug!(
                "Future {} polled for {} seconds",
                this.name,
                elapsed.as_secs()
            );
        } else if elapsed > Duration::from_millis(1) {
            #[cfg(feature = "poll-timer-warnings")]
            tracing::warn!("Future {} polled for {} ms", this.name, elapsed.as_millis());

            #[cfg(not(feature = "poll-timer-warnings"))]
            tracing::debug!("Future {} polled for {} ms", this.name, elapsed.as_millis());
        } else if elapsed > Duration::from_micros(200) {
            #[cfg(feature = "poll-timer-warnings")]
            tracing::debug!(
                "Future {} polled for {} microseconds",
                this.name,
                elapsed.as_micros(),
            );
            #[cfg(not(feature = "poll-timer-warnings"))]
            tracing::trace!(
                "Future {} polled for {} microseconds",
                this.name,
                elapsed.as_micros(),
            );
        } else if elapsed > Duration::from_micros(1) {
            tracing::trace!(
                "Future {} polled for {} microseconds",
                this.name,
                elapsed.as_micros()
            );
        }

        out
    }
}
347 src/generate.rs
@@ -2,18 +2,17 @@ mod ffmpeg;
 mod magick;
 
 use crate::{
-    concurrent_processor::ProcessMap,
     details::Details,
     error::{Error, UploadError},
     formats::{ImageFormat, InputProcessableFormat, InternalVideoFormat, ProcessableFormat},
-    future::{WithMetrics, WithTimeout},
-    repo::{Hash, VariantAlreadyExists},
+    future::{WithMetrics, WithPollTimer, WithTimeout},
+    repo::{Hash, NotificationEntry, VariantAlreadyExists},
     state::State,
     store::Store,
 };
 use actix_web::web::Bytes;
 
 use std::{
-    path::PathBuf,
+    future::Future,
     sync::Arc,
     time::{Duration, Instant},
 };
@@ -48,16 +47,15 @@ impl Drop for MetricsGuard {
     }
 }
 
-#[tracing::instrument(skip(state, process_map, hash))]
+#[tracing::instrument(skip(state, original_details, hash))]
 pub(crate) async fn generate<S: Store + 'static>(
     state: &State<S>,
-    process_map: &ProcessMap,
     format: InputProcessableFormat,
-    thumbnail_path: PathBuf,
-    thumbnail_args: Vec<String>,
+    variant: String,
+    variant_args: Vec<String>,
     original_details: &Details,
     hash: Hash,
-) -> Result<(Details, Bytes), Error> {
+) -> Result<(Details, Arc<str>), Error> {
     if state.config.server.danger_dummy_mode {
         let identifier = state
             .repo
@@ -65,31 +63,124 @@ pub(crate) async fn generate<S: Store + 'static>(
             .await?
             .ok_or(UploadError::MissingIdentifier)?;
 
-        let bytes = state
-            .store
-            .to_bytes(&identifier, None, None)
-            .await?
-            .into_bytes();
-
-        Ok((original_details.clone(), bytes))
+        Ok((original_details.clone(), identifier))
     } else {
-        let process_fut = process(
-            state,
-            format,
-            thumbnail_path.clone(),
-            thumbnail_args,
-            original_details,
-            hash.clone(),
-        );
-
-        let (details, bytes) = process_map
-            .process(hash, thumbnail_path, process_fut)
-            .with_timeout(Duration::from_secs(state.config.media.process_timeout * 4))
-            .with_metrics(crate::init_metrics::GENERATE_PROCESS)
-            .await
-            .map_err(|_| UploadError::ProcessTimeout)??;
-
-        Ok((details, bytes))
+        let mut attempts = 0;
+        let tup = loop {
+            if attempts > 2 {
+                return Err(UploadError::ProcessTimeout.into());
+            }
+
+            match state
+                .repo
+                .claim_variant_processing_rights(hash.clone(), variant.clone())
+                .await?
+            {
+                Ok(()) => {
+                    // process
+                    let process_future = process(
+                        state,
+                        format,
+                        variant.clone(),
+                        variant_args,
+                        original_details,
+                        hash.clone(),
+                    )
+                    .with_poll_timer("process-future");
+
+                    let res = heartbeat(state, hash.clone(), variant.clone(), process_future)
+                        .with_poll_timer("heartbeat-future")
+                        .with_timeout(Duration::from_secs(state.config.media.process_timeout * 4))
+                        .with_metrics(crate::init_metrics::GENERATE_PROCESS)
+                        .await
+                        .map_err(|_| Error::from(UploadError::ProcessTimeout));
+
+                    state
+                        .repo
+                        .notify_variant(hash.clone(), variant.clone())
+                        .await?;
+
+                    break res???;
+                }
+                Err(entry) => {
+                    if let Some(tuple) = wait_timeout(
+                        hash.clone(),
+                        variant.clone(),
+                        entry,
+                        state,
+                        Duration::from_secs(20),
+                    )
+                    .await?
+                    {
+                        break tuple;
+                    }
+
+                    attempts += 1;
+                }
+            }
+        };
+
+        Ok(tup)
     }
 }
+
+pub(crate) async fn wait_timeout<S: Store + 'static>(
+    hash: Hash,
+    variant: String,
+    mut entry: NotificationEntry,
+    state: &State<S>,
+    timeout: Duration,
+) -> Result<Option<(Details, Arc<str>)>, Error> {
+    let notified = entry.notified_timeout(timeout);
+
+    if let Some(identifier) = state
+        .repo
+        .variant_identifier(hash.clone(), variant.clone())
+        .await?
+    {
+        let details = crate::ensure_details_identifier(state, &identifier).await?;
+
+        return Ok(Some((details, identifier)));
+    }
+
+    match notified.await {
+        Ok(()) => tracing::debug!("notified"),
+        Err(_) => tracing::debug!("timeout"),
+    }
+
+    Ok(None)
+}
+
+async fn heartbeat<S, O>(
+    state: &State<S>,
+    hash: Hash,
+    variant: String,
+    future: impl Future<Output = O>,
+) -> Result<O, Error> {
+    let repo = state.repo.clone();
+
+    let handle = crate::sync::abort_on_drop(crate::sync::spawn("heartbeat-task", async move {
+        let mut interval = tokio::time::interval(Duration::from_secs(5));
+
+        loop {
+            interval.tick().await;
+
+            if let Err(e) = repo.variant_heartbeat(hash.clone(), variant.clone()).await {
+                break Error::from(e);
+            }
+        }
+    }));
+
+    let future = std::pin::pin!(future);
+
+    tokio::select! {
+        biased;
+        output = future => {
+            Ok(output)
+        }
+        res = handle => {
+            Err(res.map_err(|_| UploadError::Canceled)?)
+        }
+    }
+}
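The claim/heartbeat/notify flow serializes variant generation across concurrent requests: one task claims processing rights and keeps a heartbeat alive while it works; everyone else waits for a notification or re-tries. A runnable sketch of just the heartbeat-vs-work race (names like fake_work are illustrative, and the repo call is reduced to a comment):

```rust
use std::time::Duration;

// Stand-in for the real processing future.
async fn fake_work() -> u32 {
    tokio::time::sleep(Duration::from_millis(50)).await;
    42
}

#[tokio::main]
async fn main() {
    // Keepalive task: in pict-rs this refreshes the variant claim in the
    // repo every five seconds so other workers know processing is alive.
    let mut heartbeat: tokio::task::JoinHandle<()> = tokio::spawn(async {
        let mut interval = tokio::time::interval(Duration::from_secs(5));
        loop {
            interval.tick().await;
            // repo.variant_heartbeat(...) would go here
        }
    });

    let work = std::pin::pin!(fake_work());

    // `biased` makes select! check the work future first, so a finished
    // result always wins over a heartbeat failure.
    tokio::select! {
        biased;
        out = work => {
            heartbeat.abort();
            println!("work finished: {out}");
        }
        _ = &mut heartbeat => {
            eprintln!("heartbeat task ended first (unexpected)");
        }
    }
}
```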
@@ -97,11 +188,11 @@ pub(crate) async fn generate<S: Store + 'static>(
 async fn process<S: Store + 'static>(
     state: &State<S>,
     output_format: InputProcessableFormat,
-    thumbnail_path: PathBuf,
-    thumbnail_args: Vec<String>,
+    variant: String,
+    variant_args: Vec<String>,
     original_details: &Details,
     hash: Hash,
-) -> Result<(Details, Bytes), Error> {
+) -> Result<(Details, Arc<str>), Error> {
     let guard = MetricsGuard::guard();
     let permit = crate::process_semaphore().acquire().await?;
 
@@ -123,47 +214,127 @@ async fn process<S: Store + 'static>(
 
     let stream = state.store.to_stream(&identifier, None, None).await?;
 
-    let vec = crate::magick::process_image_stream_read(
-        state,
-        stream,
-        thumbnail_args,
-        input_format,
-        format,
-        quality,
-    )
-    .await?
-    .into_vec()
-    .instrument(tracing::info_span!("Reading processed image to vec"))
-    .await?;
-
-    let bytes = Bytes::from(vec);
+    let bytes =
+        crate::magick::process_image_command(state, variant_args, input_format, format, quality)
+            .await?
+            .drive_with_stream(stream)
+            .into_bytes_stream()
+            .instrument(tracing::info_span!(
+                "Reading processed image to BytesStream"
+            ))
+            .await?;
 
     drop(permit);
 
-    let details = Details::from_bytes(state, bytes.clone()).await?;
+    let details = Details::from_bytes_stream(state, bytes.clone()).await?;
 
     let identifier = state
         .store
-        .save_bytes(bytes.clone(), details.media_type())
+        .save_stream(
+            bytes.into_io_stream(),
+            details.media_type(),
+            Some(details.file_extension()),
+        )
         .await?;
 
-    if let Err(VariantAlreadyExists) = state
+    let identifier = if let Err(VariantAlreadyExists) = state
         .repo
-        .relate_variant_identifier(
-            hash,
-            thumbnail_path.to_string_lossy().to_string(),
-            &identifier,
-        )
+        .relate_variant_identifier(hash.clone(), variant.clone(), &identifier)
         .await?
     {
         state.store.remove(&identifier).await?;
-    }
-
-    state.repo.relate_details(&identifier, &details).await?;
+
+        state
+            .repo
+            .variant_identifier(hash, variant)
+            .await?
+            .ok_or(UploadError::MissingIdentifier)?
+    } else {
+        state.repo.relate_details(&identifier, &details).await?;
+        identifier
+    };
 
     guard.disarm();
 
-    Ok((details, bytes)) as Result<(Details, Bytes), Error>
+    Ok((details, identifier)) as Result<(Details, Arc<str>), Error>
 }
+
+pub(crate) async fn ensure_motion_identifier<S>(
+    state: &State<S>,
+    hash: Hash,
+    original_details: &Details,
+) -> Result<Arc<str>, Error>
+where
+    S: Store + 'static,
+{
+    if let Some(identifier) = state.repo.motion_identifier(hash.clone()).await? {
+        return Ok(identifier);
+    };
+
+    let identifier = state
+        .repo
+        .identifier(hash.clone())
+        .await?
+        .ok_or(UploadError::MissingIdentifier)?;
+
+    let (reader, media_type, file_extension) =
+        if let Some(processable_format) = original_details.internal_format().processable_format() {
+            let thumbnail_format = state.config.media.image.format.unwrap_or(ImageFormat::Webp);
+
+            let stream = state.store.to_stream(&identifier, None, None).await?;
+
+            let process =
+                magick::thumbnail_command(state, processable_format, thumbnail_format).await?;
+
+            (
+                process.drive_with_stream(stream),
+                thumbnail_format.media_type(),
+                thumbnail_format.file_extension(),
+            )
+        } else {
+            let thumbnail_format = match state.config.media.image.format {
+                Some(ImageFormat::Webp | ImageFormat::Avif | ImageFormat::Jxl) => {
+                    ffmpeg::ThumbnailFormat::Webp
+                }
+                Some(ImageFormat::Png) => ffmpeg::ThumbnailFormat::Png,
+                Some(ImageFormat::Jpeg) | None => ffmpeg::ThumbnailFormat::Jpeg,
+            };
+
+            let reader = ffmpeg::thumbnail(
+                state,
+                identifier,
+                original_details
+                    .video_format()
+                    .unwrap_or(InternalVideoFormat::Mp4),
+                thumbnail_format,
+            )
+            .await?;
+
+            (
+                reader,
+                thumbnail_format.media_type(),
+                thumbnail_format.file_extension(),
+            )
+        };
+
+    let motion_identifier = reader
+        .with_stdout(|stdout| async {
+            state
+                .store
+                .save_stream(
+                    tokio_util::io::ReaderStream::with_capacity(stdout, 1024 * 64),
+                    media_type,
+                    Some(file_extension),
+                )
+                .await
+        })
+        .await??;
+
+    state
+        .repo
+        .relate_motion_identifier(hash, &motion_identifier)
+        .await?;
+
+    Ok(motion_identifier)
+}
 
 #[tracing::instrument(skip_all)]
@@ -187,59 +358,7 @@ where
     };
 
     if should_thumbnail {
-        if let Some(identifier) = state.repo.motion_identifier(hash.clone()).await? {
-            return Ok(identifier);
-        };
-
-        let identifier = state
-            .repo
-            .identifier(hash.clone())
-            .await?
-            .ok_or(UploadError::MissingIdentifier)?;
-
-        let (reader, media_type) = if let Some(processable_format) =
-            original_details.internal_format().processable_format()
-        {
-            let thumbnail_format = state.config.media.image.format.unwrap_or(ImageFormat::Webp);
-
-            let stream = state.store.to_stream(&identifier, None, None).await?;
-
-            let reader =
-                magick::thumbnail(state, stream, processable_format, thumbnail_format).await?;
-
-            (reader, thumbnail_format.media_type())
-        } else {
-            let thumbnail_format = match state.config.media.image.format {
-                Some(ImageFormat::Webp | ImageFormat::Avif | ImageFormat::Jxl) => {
-                    ffmpeg::ThumbnailFormat::Webp
-                }
-                Some(ImageFormat::Png) => ffmpeg::ThumbnailFormat::Png,
-                Some(ImageFormat::Jpeg) | None => ffmpeg::ThumbnailFormat::Jpeg,
-            };
-
-            let reader = ffmpeg::thumbnail(
-                state,
-                identifier,
-                original_details
-                    .video_format()
-                    .unwrap_or(InternalVideoFormat::Mp4),
-                thumbnail_format,
-            )
-            .await?;
-
-            (reader, thumbnail_format.media_type())
-        };
-
-        let motion_identifier = reader
-            .with_stdout(|stdout| async { state.store.save_async_read(stdout, media_type).await })
-            .await??;
-
-        state
-            .repo
-            .relate_motion_identifier(hash, &motion_identifier)
-            .await?;
-
-        return Ok(motion_identifier);
+        return ensure_motion_identifier(state, hash.clone(), original_details).await;
     }
 
     state
@@ -18,7 +18,7 @@ pub(super) enum ThumbnailFormat {
 }
 
 impl ThumbnailFormat {
-    const fn as_ffmpeg_codec(self) -> &'static str {
+    const fn ffmpeg_codec(self) -> &'static str {
         match self {
             Self::Jpeg => "mjpeg",
             Self::Png => "png",
@@ -26,7 +26,7 @@ impl ThumbnailFormat {
         }
     }
 
-    const fn to_file_extension(self) -> &'static str {
+    pub(super) const fn file_extension(self) -> &'static str {
         match self {
             Self::Jpeg => ".jpeg",
             Self::Png => ".png",
@@ -34,7 +34,7 @@ impl ThumbnailFormat {
         }
     }
 
-    const fn as_ffmpeg_format(self) -> &'static str {
+    const fn ffmpeg_format(self) -> &'static str {
         match self {
             Self::Jpeg | Self::Png => "image2",
             Self::Webp => "webp",
@@ -57,69 +57,73 @@ pub(super) async fn thumbnail<S: Store>(
     input_format: InternalVideoFormat,
     format: ThumbnailFormat,
 ) -> Result<ProcessRead, FfMpegError> {
-    let input_file = state.tmp_dir.tmp_file(Some(input_format.file_extension()));
-    crate::store::file_store::safe_create_parent(&input_file)
-        .await
-        .map_err(FfMpegError::CreateDir)?;
-
-    let output_file = state.tmp_dir.tmp_file(Some(format.to_file_extension()));
+    let output_file = state.tmp_dir.tmp_file(Some(format.file_extension()));
     crate::store::file_store::safe_create_parent(&output_file)
         .await
         .map_err(FfMpegError::CreateDir)?;
 
-    let mut tmp_one = crate::file::File::create(&input_file)
-        .await
-        .map_err(FfMpegError::CreateFile)?;
-    let stream = state
-        .store
-        .to_stream(&from, None, None)
-        .await
-        .map_err(FfMpegError::Store)?;
-    tmp_one
-        .write_from_stream(stream)
-        .await
-        .map_err(FfMpegError::Write)?;
-    tmp_one.close().await.map_err(FfMpegError::CloseFile)?;
+    let output_path = output_file.as_os_str();
 
-    let process = Process::run(
-        "ffmpeg",
-        &[
-            "-hide_banner".as_ref(),
-            "-v".as_ref(),
-            "warning".as_ref(),
-            "-i".as_ref(),
-            input_file.as_os_str(),
-            "-frames:v".as_ref(),
-            "1".as_ref(),
-            "-codec".as_ref(),
-            format.as_ffmpeg_codec().as_ref(),
-            "-f".as_ref(),
-            format.as_ffmpeg_format().as_ref(),
-            output_file.as_os_str(),
-        ],
-        &[],
-        state.config.media.process_timeout,
-    )?;
-
-    let res = process.wait().await;
-    input_file.cleanup().await.map_err(FfMpegError::Cleanup)?;
-    res?;
-
-    let tmp_two = crate::file::File::open(&output_file)
-        .await
-        .map_err(FfMpegError::OpenFile)?;
-    let stream = tmp_two
-        .read_to_stream(None, None)
-        .await
-        .map_err(FfMpegError::ReadFile)?;
-    let reader = tokio_util::io::StreamReader::new(stream);
-
-    let reader = ProcessRead::new(
-        Box::pin(reader),
-        Arc::from(String::from("ffmpeg")),
-        Uuid::now_v7(),
-    )
-    .add_extras(output_file);
-
-    Ok(reader)
+    let res = crate::ffmpeg::with_file(
+        &state.tmp_dir,
+        Some(input_format.file_extension()),
+        |input_file| async move {
+            let stream = state
+                .store
+                .to_stream(&from, None, None)
+                .await
+                .map_err(FfMpegError::Store)?;
+
+            crate::file::write_from_stream(&input_file, stream)
+                .await
+                .map_err(FfMpegError::Write)?;
+
+            Process::run(
+                "ffmpeg",
+                &[
+                    "-hide_banner".as_ref(),
+                    "-v".as_ref(),
+                    "warning".as_ref(),
+                    "-i".as_ref(),
+                    input_file.as_os_str(),
+                    "-frames:v".as_ref(),
+                    "1".as_ref(),
+                    "-codec".as_ref(),
+                    format.ffmpeg_codec().as_ref(),
+                    "-f".as_ref(),
+                    format.ffmpeg_format().as_ref(),
+                    output_path,
+                ],
+                &[],
+                state.config.media.process_timeout,
+            )
+            .await?
+            .wait()
+            .await
+            .map_err(FfMpegError::Process)?;
+
+            let out_file = crate::file::File::open(output_path)
+                .await
+                .map_err(FfMpegError::OpenFile)?;
+            out_file
+                .read_to_stream(None, None)
+                .await
+                .map_err(FfMpegError::ReadFile)
+        },
+    )
+    .await;
+
+    match res {
+        Ok(Ok(stream)) => Ok(ProcessRead::new(
+            Box::pin(tokio_util::io::StreamReader::new(stream)),
+            Arc::from(String::from("ffmpeg")),
+            Uuid::now_v7(),
+        )
+        .add_extras(output_file)),
+        Ok(Err(e)) | Err(e) => {
+            output_file.cleanup().await.map_err(FfMpegError::Cleanup)?;
+            Err(e)
+        }
+    }
 }
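with_file is a pict-rs helper; the shape it gives the caller is "run a closure with a fresh temp path, then clean up regardless of outcome". A standalone sketch under that assumption (simplified to a single io::Result rather than the nested Result the diff matches on):

```rust
use std::path::PathBuf;

// Hypothetical stand-in for crate::ffmpeg::with_file.
async fn with_tmp_file<F, Fut, T>(f: F) -> std::io::Result<T>
where
    F: FnOnce(PathBuf) -> Fut,
    Fut: std::future::Future<Output = std::io::Result<T>>,
{
    let path = std::env::temp_dir().join(format!("scratch-{}", std::process::id()));
    let res = f(path.clone()).await;
    // Cleanup happens whether or not the closure succeeded.
    let _ = tokio::fs::remove_file(&path).await;
    res
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let len = with_tmp_file(|path| async move {
        tokio::fs::write(&path, b"frame data").await?;
        Ok(tokio::fs::read(&path).await?.len())
    })
    .await?;

    println!("round-tripped {len} bytes");
    Ok(())
}
```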
@@ -1,25 +1,17 @@
 use std::ffi::OsStr;
 
-use actix_web::web::Bytes;
-
 use crate::{
     formats::{ImageFormat, ProcessableFormat},
     magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
-    process::{Process, ProcessRead},
+    process::Process,
     state::State,
-    stream::LocalBoxStream,
 };
 
-async fn thumbnail_animation<S, F, Fut>(
+pub(super) async fn thumbnail_command<S>(
     state: &State<S>,
     input_format: ProcessableFormat,
     thumbnail_format: ImageFormat,
-    write_file: F,
-) -> Result<ProcessRead, MagickError>
-where
-    F: FnOnce(crate::file::File) -> Fut,
-    Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
-{
+) -> Result<Process, MagickError> {
     let format = ProcessableFormat::Image(thumbnail_format);
     let quality = state.config.media.image.quality_for(thumbnail_format);
 
@@ -29,22 +21,7 @@ where
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;
 
-    let input_file = state.tmp_dir.tmp_file(None);
-    crate::store::file_store::safe_create_parent(&input_file)
-        .await
-        .map_err(MagickError::CreateDir)?;
-
-    let tmp_one = crate::file::File::create(&input_file)
-        .await
-        .map_err(MagickError::CreateFile)?;
-    let tmp_one = (write_file)(tmp_one).await?;
-    tmp_one.close().await.map_err(MagickError::CloseFile)?;
-
-    let input_arg = [
-        input_format.magick_format().as_ref(),
-        input_file.as_os_str(),
-    ]
-    .join(":".as_ref());
+    let input_arg = format!("{}:-", input_format.magick_format());
     let output_arg = format!("{}:-", format.magick_format());
     let quality = quality.map(|q| q.to_string());
 
@@ -52,7 +29,7 @@ where
 
     let mut args: Vec<&OsStr> = Vec::with_capacity(len);
     args.push("convert".as_ref());
-    args.push(&input_arg);
+    args.push(input_arg.as_ref());
     if format.coalesce() {
         args.push("-coalesce".as_ref());
     }
@@ -66,31 +43,9 @@ where
         (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];
 
-    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?
-        .read()
-        .add_extras(input_file)
+    let process = Process::run("magick", &args, &envs, state.config.media.process_timeout)
+        .await?
         .add_extras(temporary_path);
 
-    Ok(reader)
-}
-
-pub(super) async fn thumbnail<S>(
-    state: &State<S>,
-    stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
-    input_format: ProcessableFormat,
-    thumbnail_format: ImageFormat,
-) -> Result<ProcessRead, MagickError> {
-    thumbnail_animation(
-        state,
-        input_format,
-        thumbnail_format,
-        |mut tmp_file| async move {
-            tmp_file
-                .write_from_stream(stream)
-                .await
-                .map_err(MagickError::Write)?;
-            Ok(tmp_file)
-        },
-    )
-    .await
+    Ok(process)
 }
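The recurring refactor in this comparison replaces temp-file staging with stdin/stdout pipes: magick now reads `<format>:-` from stdin and writes `<format>:-` to stdout. A runnable sketch of that piping pattern, using `cat` as a stand-in child process so it works without ImageMagick (drive_with_stream is pict-rs's helper; the writer task here plays its role):

```rust
use std::process::Stdio;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut child = tokio::process::Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;

    let mut stdin = child.stdin.take().expect("stdin piped");
    let mut stdout = child.stdout.take().expect("stdout piped");

    // Drive the input concurrently with reading the output, as a
    // drive_with_stream-style helper would.
    let writer = tokio::spawn(async move {
        stdin.write_all(b"image bytes would go here").await?;
        stdin.shutdown().await // close stdin so the child sees EOF
    });

    let mut out = Vec::new();
    stdout.read_to_end(&mut out).await?;
    writer.await.expect("writer task panicked")?;

    println!("child produced {} bytes", out.len());
    Ok(())
}
```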
3 src/http1.rs (new file)
@@ -0,0 +1,3 @@
+pub(crate) fn to_actix_status(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
+    actix_web::http::StatusCode::from_u16(status.as_u16()).expect("status codes are always valid")
+}
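A plausible reason for this helper (an inference, not stated in the diff): actix-web and newer reqwest releases can pull in different versions of the `http` crate, so their `StatusCode` types don't unify, and the `u16` value is the portable bridge. Round-trip sketch:

```rust
fn main() {
    // Any type's status can be lowered to u16 and rebuilt on the other side.
    let status = actix_web::http::StatusCode::from_u16(404).expect("valid status code");
    assert_eq!(status.as_u16(), 404);
    println!("{status}");
}
```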
@@ -1,3 +1,6 @@
+mod hasher;
+mod validate;
+
 use std::{cell::RefCell, rc::Rc, sync::Arc, time::Duration};
 
 use crate::{
@@ -5,21 +8,21 @@ use crate::{
     details::Details,
     error::{Error, UploadError},
     formats::InternalFormat,
-    future::WithMetrics,
+    future::{WithMetrics, WithPollTimer},
     repo::{Alias, ArcRepo, DeleteToken, Hash},
     state::State,
     store::Store,
+    UploadQuery,
 };
 use actix_web::web::Bytes;
 use futures_core::Stream;
 use reqwest::Body;
 use streem::IntoStreamer;
 use tracing::{Instrument, Span};
 
-mod hasher;
 use hasher::Hasher;
 
+pub(crate) use validate::ValidationError;
+
 #[derive(Debug)]
 pub(crate) struct Session {
     repo: ArcRepo,
@@ -29,28 +32,10 @@ pub(crate) struct Session {
     identifier: Option<Arc<str>>,
 }
 
-#[tracing::instrument(skip(stream))]
-async fn aggregate<S>(stream: S) -> Result<Bytes, Error>
-where
-    S: Stream<Item = Result<Bytes, Error>>,
-{
-    let mut buf = BytesStream::new();
-
-    let stream = std::pin::pin!(stream);
-    let mut stream = stream.into_streamer();
-
-    while let Some(res) = stream.next().await {
-        tracing::trace!("aggregate: looping");
-
-        buf.add_bytes(res?);
-    }
-
-    Ok(buf.into_bytes())
-}
-
 async fn process_ingest<S>(
     state: &State<S>,
-    stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
+    stream: impl Stream<Item = Result<Bytes, Error>>,
+    upload_query: &UploadQuery,
 ) -> Result<
     (
         InternalFormat,
@@ -63,16 +48,29 @@ async fn process_ingest<S>(
 where
     S: Store,
 {
-    let bytes = tokio::time::timeout(Duration::from_secs(60), aggregate(stream))
-        .await
-        .map_err(|_| UploadError::AggregateTimeout)??;
+    let bytes = tokio::time::timeout(
+        Duration::from_secs(60),
+        BytesStream::try_from_stream(stream),
+    )
+    .with_poll_timer("try-from-stream")
+    .await
+    .map_err(|_| UploadError::AggregateTimeout)??;
 
     let permit = crate::process_semaphore().acquire().await?;
 
     tracing::trace!("Validating bytes");
-    let (input_type, process_read) = crate::validate::validate_bytes(state, bytes).await?;
+    let (input_type, process_read) =
+        validate::validate_bytes_stream(state, bytes, &upload_query.limits)
+            .with_poll_timer("validate-bytes-stream")
+            .await?;
 
-    let process_read = if let Some(operations) = state.config.media.preprocess_steps() {
+    let operations = if upload_query.operations.is_empty() {
+        state.config.media.preprocess_steps()
+    } else {
+        Some(upload_query.operations.as_ref())
+    };
+
+    let process_read = if let Some(operations) = operations {
         if let Some(format) = input_type.processable_format() {
             let (_, magick_args) =
                 crate::processor::build_chain(operations, format.file_extension())?;
@@ -86,15 +84,11 @@ where
                 }
             };
 
-            crate::magick::process_image_process_read(
-                state,
-                process_read,
-                magick_args,
-                format,
-                format,
-                quality,
-            )
-            .await?
+            let process =
+                crate::magick::process_image_command(state, magick_args, format, format, quality)
+                    .await?;
+
+            process_read.pipe(process)
         } else {
             process_read
         }
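Replacing the hand-rolled aggregate loop with BytesStream::try_from_stream keeps the same deadline semantics: the whole collection races a 60-second timeout. A sketch of that pattern with futures-util standing in for the pict-rs types (assumes the `futures-util` crate as a dependency):

```rust
use futures_util::StreamExt;
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Stand-in for the upload body: a stream of byte chunks.
    let stream = futures_util::stream::iter([vec![1u8, 2], vec![3]]);

    // The entire aggregation must finish before the deadline.
    let chunks: Vec<Vec<u8>> =
        tokio::time::timeout(Duration::from_secs(60), stream.collect())
            .await
            .expect("aggregation timed out");

    println!("collected {} chunks", chunks.len());
}
```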
@@ -109,14 +103,22 @@ where
 
             state
                 .store
-                .save_async_read(hasher_reader, input_type.media_type())
+                .save_stream(
+                    tokio_util::io::ReaderStream::with_capacity(hasher_reader, 1024 * 64),
+                    input_type.media_type(),
+                    Some(input_type.file_extension()),
+                )
+                .with_poll_timer("save-hasher-reader")
                 .await
                 .map(move |identifier| (hash_state, identifier))
         })
+        .with_poll_timer("save-process-stdout")
         .await??;
 
     let bytes_stream = state.store.to_bytes(&identifier, None, None).await?;
-    let details = Details::from_bytes(state, bytes_stream.into_bytes()).await?;
+    let details = Details::from_bytes_stream(state, bytes_stream)
+        .with_poll_timer("details-from-bytes-stream")
+        .await?;
 
     drop(permit);
 
@@ -125,7 +127,7 @@ where
 
 async fn dummy_ingest<S>(
     state: &State<S>,
-    stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
+    stream: impl Stream<Item = Result<Bytes, Error>>,
 ) -> Result<
     (
         InternalFormat,
@@ -143,7 +145,7 @@ where
         Err(e) => Err(std::io::Error::new(std::io::ErrorKind::Other, e)),
     });
 
-    let reader = Box::pin(tokio_util::io::StreamReader::new(stream));
+    let reader = tokio_util::io::StreamReader::new(stream);
 
     let hasher_reader = Hasher::new(reader);
     let hash_state = hasher_reader.state();
@@ -152,7 +154,11 @@ where
 
     let identifier = state
         .store
-        .save_async_read(hasher_reader, input_type.media_type())
+        .save_stream(
+            tokio_util::io::ReaderStream::with_capacity(hasher_reader, 1024 * 64),
+            input_type.media_type(),
+            Some(input_type.file_extension()),
+        )
         .await?;
 
     let details = Details::danger_dummy(input_type);
@@ -163,8 +169,9 @@ where
 #[tracing::instrument(skip(state, stream))]
 pub(crate) async fn ingest<S>(
     state: &State<S>,
-    stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
+    stream: impl Stream<Item = Result<Bytes, Error>>,
     declared_alias: Option<Alias>,
+    upload_query: &UploadQuery,
 ) -> Result<Session, Error>
 where
     S: Store,
@@ -172,7 +179,9 @@ where
     let (input_type, identifier, details, hash_state) = if state.config.server.danger_dummy_mode {
         dummy_ingest(state, stream).await?
     } else {
-        process_ingest(state, stream).await?
+        process_ingest(state, stream, upload_query)
+            .with_poll_timer("ingest-future")
+            .await?
     };
 
     let mut session = Session {
@@ -3,6 +3,7 @@ mod ffmpeg;
 mod magick;
 
 use crate::{
+    bytes_stream::BytesStream,
     discover::Discovery,
     error::Error,
     error_code::ErrorCode,
@@ -10,10 +11,11 @@ use crate::{
         AnimationFormat, AnimationOutput, ImageInput, ImageOutput, InputFile, InputVideoFormat,
         InternalFormat,
     },
-    process::ProcessRead,
+    future::WithPollTimer,
+    process::{Process, ProcessRead},
     state::State,
+    UploadLimits,
 };
 use actix_web::web::Bytes;
 
 #[derive(Debug, thiserror::Error)]
 pub(crate) enum ValidationError {
@@ -37,6 +39,9 @@ pub(crate) enum ValidationError {
 
     #[error("Video is disabled")]
     VideoDisabled,
+
+    #[error("Media type wasn't allowed for this upload")]
+    MediaDisallowed,
 }
 
 impl ValidationError {
@@ -49,6 +54,7 @@ impl ValidationError {
             Self::Empty => ErrorCode::VALIDATE_FILE_EMPTY,
             Self::Filesize => ErrorCode::VALIDATE_FILE_SIZE,
             Self::VideoDisabled => ErrorCode::VIDEO_DISABLED,
+            Self::MediaDisallowed => ErrorCode::MEDIA_DISALLOWED,
         }
     }
 }
@@ -56,9 +62,10 @@ impl ValidationError {
 const MEGABYTES: usize = 1024 * 1024;
 
 #[tracing::instrument(skip_all)]
-pub(crate) async fn validate_bytes<S>(
+pub(crate) async fn validate_bytes_stream<S>(
     state: &State<S>,
-    bytes: Bytes,
+    bytes: BytesStream,
+    upload_limits: &UploadLimits,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
     if bytes.is_empty() {
         return Err(ValidationError::Empty.into());
@@ -69,37 +76,96 @@ pub(crate) async fn validate_bytes<S>(
         width,
         height,
         frames,
-    } = crate::discover::discover_bytes(state, bytes.clone()).await?;
+    } = crate::discover::discover_bytes_stream(state, bytes.clone())
+        .with_poll_timer("discover-bytes-stream")
+        .await?;
+
+    validate_upload(bytes.len(), width, height, frames, upload_limits)?;
 
     match &input {
-        InputFile::Image(input) => {
-            let (format, process_read) = process_image(state, bytes, *input, width, height).await?;
+        InputFile::Image(input) if *upload_limits.allow_image => {
+            let (format, process) =
+                process_image_command(state, *input, bytes.len(), width, height).await?;
 
-            Ok((format, process_read))
+            Ok((format, process.drive_with_stream(bytes.into_io_stream())))
         }
-        InputFile::Animation(input) => {
-            let (format, process_read) =
-                process_animation(state, bytes, *input, width, height, frames.unwrap_or(1)).await?;
+        InputFile::Animation(input) if *upload_limits.allow_animation => {
+            let (format, process) = process_animation_command(
+                state,
+                *input,
+                bytes.len(),
+                width,
+                height,
+                frames.unwrap_or(1),
+            )
+            .await?;
 
-            Ok((format, process_read))
+            Ok((format, process.drive_with_stream(bytes.into_io_stream())))
        }
-        InputFile::Video(input) => {
+        InputFile::Video(input) if *upload_limits.allow_video => {
             let (format, process_read) =
                 process_video(state, bytes, *input, width, height, frames.unwrap_or(1)).await?;
 
             Ok((format, process_read))
         }
+        _ => Err(ValidationError::MediaDisallowed.into()),
     }
 }
 
-#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
-async fn process_image<S>(
-    state: &State<S>,
-    bytes: Bytes,
-    input: ImageInput,
+fn validate_upload(
+    size: usize,
     width: u16,
     height: u16,
-) -> Result<(InternalFormat, ProcessRead), Error> {
+    frames: Option<u32>,
+    upload_limits: &UploadLimits,
+) -> Result<(), ValidationError> {
+    if upload_limits
+        .max_width
+        .is_some_and(|max_width| width > *max_width)
+    {
+        return Err(ValidationError::Width);
+    }
+
+    if upload_limits
+        .max_height
+        .is_some_and(|max_height| height > *max_height)
+    {
+        return Err(ValidationError::Height);
+    }
+
+    if upload_limits
+        .max_frame_count
+        .zip(frames)
+        .is_some_and(|(max_frame_count, frames)| frames > *max_frame_count)
+    {
+        return Err(ValidationError::Frames);
+    }
+
+    if upload_limits
+        .max_area
+        .is_some_and(|max_area| u32::from(width) * u32::from(height) > *max_area)
+    {
+        return Err(ValidationError::Area);
+    }
+
+    if upload_limits
+        .max_file_size
+        .is_some_and(|max_file_size| size > *max_file_size * MEGABYTES)
+    {
+        return Err(ValidationError::Filesize);
+    }
+
+    Ok(())
+}
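validate_upload applies each per-upload limit only when it is set, via Option::is_some_and. A runnable sketch with a stand-in Limits struct (pict-rs's real UploadLimits wraps its fields in serde types, hence the `*` dereferences in the diff):

```rust
struct Limits {
    max_width: Option<u16>,
    max_area: Option<u32>,
}

fn validate(width: u16, height: u16, limits: &Limits) -> Result<(), &'static str> {
    // Each check fires only if the corresponding limit is configured.
    if limits.max_width.is_some_and(|max| width > max) {
        return Err("width");
    }
    if limits
        .max_area
        .is_some_and(|max| u32::from(width) * u32::from(height) > max)
    {
        return Err("area");
    }
    Ok(())
}

fn main() {
    let limits = Limits {
        max_width: Some(1024),
        max_area: Some(1_000_000),
    };
    assert!(validate(800, 600, &limits).is_ok());
    assert_eq!(validate(2048, 10, &limits), Err("width"));
}
```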
+#[tracing::instrument(skip(state))]
+async fn process_image_command<S>(
     state: &State<S>,
     input: ImageInput,
+    size: usize,
     width: u16,
     height: u16,
+) -> Result<(InternalFormat, Process), Error> {
     let validations = &state.config.media.image;
 
     if width > validations.max_width {
@@ -111,7 +177,7 @@ async fn process_image<S>(
     if u32::from(width) * u32::from(height) > validations.max_area {
         return Err(ValidationError::Area.into());
     }
-    if bytes.len() > validations.max_file_size * MEGABYTES {
+    if size > validations.max_file_size * MEGABYTES {
         return Err(ValidationError::Filesize.into());
     }
 
@@ -120,15 +186,15 @@ async fn process_image<S>(
         needs_transcode,
     } = input.build_output(validations.format);
 
-    let process_read = if needs_transcode {
+    let process = if needs_transcode {
         let quality = validations.quality_for(format);
 
-        magick::convert_image(state, input.format, format, quality, bytes).await?
+        magick::convert_image_command(state, input.format, format, quality).await?
     } else {
-        exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
+        exiftool::clear_metadata_command(state.config.media.process_timeout).await?
     };
 
-    Ok((InternalFormat::Image(format), process_read))
+    Ok((InternalFormat::Image(format), process))
 }
 
 fn validate_animation(
@@ -157,33 +223,33 @@ fn validate_animation(
     Ok(())
 }
 
-#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
-async fn process_animation<S>(
+#[tracing::instrument(skip(state))]
+async fn process_animation_command<S>(
     state: &State<S>,
-    bytes: Bytes,
     input: AnimationFormat,
+    size: usize,
     width: u16,
     height: u16,
     frames: u32,
-) -> Result<(InternalFormat, ProcessRead), Error> {
+) -> Result<(InternalFormat, Process), Error> {
     let validations = &state.config.media.animation;
 
-    validate_animation(bytes.len(), width, height, frames, validations)?;
+    validate_animation(size, width, height, frames, validations)?;
 
     let AnimationOutput {
         format,
         needs_transcode,
     } = input.build_output(validations.format);
 
-    let process_read = if needs_transcode {
+    let process = if needs_transcode {
         let quality = validations.quality_for(format);
 
-        magick::convert_animation(state, input, format, quality, bytes).await?
+        magick::convert_animation_command(state, input, format, quality).await?
     } else {
-        exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
+        exiftool::clear_metadata_command(state.config.media.process_timeout).await?
     };
 
-    Ok((InternalFormat::Animation(format), process_read))
+    Ok((InternalFormat::Animation(format), process))
 }
 
 fn validate_video(
@@ -218,7 +284,7 @@ fn validate_video(
 #[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
 async fn process_video<S>(
     state: &State<S>,
-    bytes: Bytes,
+    bytes: BytesStream,
     input: InputVideoFormat,
     width: u16,
     height: u16,
@@ -244,6 +310,7 @@ async fn process_video<S>(
         state.config.media.process_timeout,
         bytes,
     )
+    .with_poll_timer("transcode-bytes")
     .await?;
 
     Ok((

6 src/ingest/validate/exiftool.rs (new file)
@@ -0,0 +1,6 @@
+use crate::{exiftool::ExifError, process::Process};
+
+#[tracing::instrument(level = "trace", skip_all)]
+pub(super) async fn clear_metadata_command(timeout: u64) -> Result<Process, ExifError> {
+    Ok(Process::run("exiftool", &["-all=", "-", "-out", "-"], &[], timeout).await?)
+}
@@ -1,11 +1,12 @@
 use std::{ffi::OsStr, sync::Arc};
 
-use actix_web::web::Bytes;
 use uuid::Uuid;
 
 use crate::{
+    bytes_stream::BytesStream,
     ffmpeg::FfMpegError,
     formats::{InputVideoFormat, OutputVideo},
+    future::WithPollTimer,
     process::{Process, ProcessRead},
     tmp_file::TmpDir,
 };
@@ -16,54 +17,51 @@ pub(super) async fn transcode_bytes(
     output_format: OutputVideo,
     crf: u8,
     timeout: u64,
-    bytes: Bytes,
+    bytes: BytesStream,
 ) -> Result<ProcessRead, FfMpegError> {
-    let input_file = tmp_dir.tmp_file(None);
-    crate::store::file_store::safe_create_parent(&input_file)
-        .await
-        .map_err(FfMpegError::CreateDir)?;
-
-    let mut tmp_one = crate::file::File::create(&input_file)
-        .await
-        .map_err(FfMpegError::CreateFile)?;
-    tmp_one
-        .write_from_bytes(bytes)
-        .await
-        .map_err(FfMpegError::Write)?;
-    tmp_one.close().await.map_err(FfMpegError::CloseFile)?;
-
     let output_file = tmp_dir.tmp_file(None);
     let output_path = output_file.as_os_str();
 
-    let res = transcode_files(
-        input_file.as_os_str(),
-        input_format,
-        output_file.as_os_str(),
-        output_format,
-        crf,
-        timeout,
-    )
-    .await;
-
-    input_file.cleanup().await.map_err(FfMpegError::Cleanup)?;
-    res?;
-
-    let tmp_two = crate::file::File::open(&output_file)
-        .await
-        .map_err(FfMpegError::OpenFile)?;
-    let stream = tmp_two
-        .read_to_stream(None, None)
-        .await
-        .map_err(FfMpegError::ReadFile)?;
-    let reader = tokio_util::io::StreamReader::new(stream);
-
-    let process_read = ProcessRead::new(
-        Box::pin(reader),
-        Arc::from(String::from("ffmpeg")),
-        Uuid::now_v7(),
-    )
-    .add_extras(output_file);
-
-    Ok(process_read)
+    let res = crate::ffmpeg::with_file(tmp_dir, None, |input_file| async move {
+        crate::file::write_from_stream(&input_file, bytes.into_io_stream())
+            .with_poll_timer("write-from-stream")
+            .await
+            .map_err(FfMpegError::Write)?;
+
+        transcode_files(
+            input_file.as_os_str(),
+            input_format,
+            output_path,
+            output_format,
+            crf,
+            timeout,
+        )
+        .with_poll_timer("transcode-files")
+        .await?;
+
+        let tmp_file = crate::file::File::open(output_path)
+            .await
+            .map_err(FfMpegError::OpenFile)?;
+
+        tmp_file
+            .read_to_stream(None, None)
+            .await
+            .map_err(FfMpegError::ReadFile)
+    })
+    .await;
+
+    match res {
+        Ok(Ok(stream)) => Ok(ProcessRead::new(
+            Box::pin(tokio_util::io::StreamReader::new(stream)),
+            Arc::from(String::from("ffmpeg")),
+            Uuid::now_v7(),
+        )
+        .add_extras(output_file)),
+        Ok(Err(e)) | Err(e) => {
+            output_file.cleanup().await.map_err(FfMpegError::Cleanup)?;
+            Err(e)
+        }
+    }
 }
 
 async fn transcode_files(
@@ -122,12 +120,25 @@ async fn transcode_files(
     }
 
     args.extend([
+        "-map_metadata".as_ref(),
+        "-1".as_ref(),
+        "-map_metadata:g".as_ref(),
+        "-1".as_ref(),
+        "-map_metadata:s".as_ref(),
+        "-1".as_ref(),
+        "-map_metadata:c".as_ref(),
+        "-1".as_ref(),
+        "-map_metadata:p".as_ref(),
+        "-1".as_ref(),
         "-f".as_ref(),
         output_format.ffmpeg_format().as_ref(),
         output_path,
     ]);
 
-    Process::run("ffmpeg", &args, &[], timeout)?.wait().await?;
+    Process::run("ffmpeg", &args, &[], timeout)
+        .await?
+        .wait()
+        .await?;
 
     Ok(())
 }
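The added `-map_metadata … -1` arguments tell ffmpeg to copy metadata from nowhere, i.e. strip it; the `:g`, `:s`, `:c`, and `:p` suffixes cover global, per-stream, chapter, and program metadata respectively. A tiny sketch of the flag list on its own:

```rust
fn main() {
    // "-1" as a mapping source means "no input", so every metadata
    // scope on the output ends up empty.
    let args: Vec<&str> = vec![
        "-map_metadata", "-1",
        "-map_metadata:g", "-1",
        "-map_metadata:s", "-1",
        "-map_metadata:c", "-1",
        "-map_metadata:p", "-1",
    ];
    println!("ffmpeg {}", args.join(" "));
}
```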
@@ -1,81 +1,60 @@
 use std::ffi::OsStr;
 
-use actix_web::web::Bytes;
-
 use crate::{
     formats::{AnimationFormat, ImageFormat},
     magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
-    process::{Process, ProcessRead},
+    process::Process,
     state::State,
 };
 
-pub(super) async fn convert_image<S>(
+pub(super) async fn convert_image_command<S>(
     state: &State<S>,
     input: ImageFormat,
     output: ImageFormat,
     quality: Option<u8>,
-    bytes: Bytes,
-) -> Result<ProcessRead, MagickError> {
+) -> Result<Process, MagickError> {
     convert(
         state,
         input.magick_format(),
         output.magick_format(),
         false,
         quality,
-        bytes,
     )
     .await
 }
 
-pub(super) async fn convert_animation<S>(
+pub(super) async fn convert_animation_command<S>(
     state: &State<S>,
     input: AnimationFormat,
     output: AnimationFormat,
     quality: Option<u8>,
-    bytes: Bytes,
-) -> Result<ProcessRead, MagickError> {
+) -> Result<Process, MagickError> {
     convert(
         state,
         input.magick_format(),
         output.magick_format(),
         true,
         quality,
-        bytes,
     )
     .await
 }
 
 async fn convert<S>(
     state: &State<S>,
-    input: &'static str,
-    output: &'static str,
+    input_format: &'static str,
+    output_format: &'static str,
     coalesce: bool,
     quality: Option<u8>,
-    bytes: Bytes,
-) -> Result<ProcessRead, MagickError> {
+) -> Result<Process, MagickError> {
     let temporary_path = state
         .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;
 
-    let input_file = state.tmp_dir.tmp_file(None);
-
-    crate::store::file_store::safe_create_parent(&input_file)
-        .await
-        .map_err(MagickError::CreateDir)?;
-
-    let mut tmp_one = crate::file::File::create(&input_file)
-        .await
-        .map_err(MagickError::CreateFile)?;
-    tmp_one
-        .write_from_bytes(bytes)
-        .await
-        .map_err(MagickError::Write)?;
-    tmp_one.close().await.map_err(MagickError::CloseFile)?;
-
-    let input_arg = [input.as_ref(), input_file.as_os_str()].join(":".as_ref());
-    let output_arg = format!("{output}:-");
+    let input_arg = format!("{input_format}:-");
+    let output_arg = format!("{output_format}:-");
     let quality = quality.map(|q| q.to_string());
 
     let mut args: Vec<&OsStr> = vec!["convert".as_ref()];
@@ -84,7 +63,11 @@ async fn convert<S>(
         args.push("-coalesce".as_ref());
     }
 
-    args.extend(["-strip".as_ref(), "-auto-orient".as_ref(), &input_arg] as [&OsStr; 3]);
+    args.extend([
+        "-strip".as_ref(),
+        "-auto-orient".as_ref(),
+        input_arg.as_ref(),
+    ] as [&OsStr; 3]);
 
     if let Some(quality) = &quality {
         args.extend(["-quality".as_ref(), quality.as_ref()] as [&OsStr; 2]);
@@ -97,9 +80,9 @@ async fn convert<S>(
         (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];
 
-    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?.read();
-
-    let clean_reader = reader.add_extras(input_file).add_extras(temporary_path);
-
-    Ok(clean_reader)
+    let process = Process::run("magick", &args, &envs, state.config.media.process_timeout)
+        .await?
+        .add_extras(temporary_path);
+
+    Ok(process)
 }
@@ -1,5 +1,6 @@
 pub(super) fn init_metrics() {
     describe_toplevel();
+    describe_future();
     describe_queue_cleanup();
     describe_payload();
     describe_job();
@@ -26,6 +27,21 @@ fn describe_toplevel() {
 pub(crate) const FILES: &str = "pict-rs.files";
 pub(crate) const BACKGROUND_UPLOAD_CLAIM: &str = "pict-rs.background.upload.claim";
 
+fn describe_future() {
+    metrics::describe_counter!(
+        FUTURE_POLL_TIMER_EXCEEDED,
+        "How many times a given poll operation has lasted longer than 10 microseconds"
+    );
+    metrics::describe_histogram!(
+        FUTURE_POLL_TIMER_EXCEEDED_SECONDS,
+        "Durations for polls lasting longer than 10 microseconds"
+    );
+}
+
+pub(crate) const FUTURE_POLL_TIMER_EXCEEDED: &str = "pict-rs.future.poll-timer.exceeded";
+pub(crate) const FUTURE_POLL_TIMER_EXCEEDED_SECONDS: &str =
+    "pict-rs.future.poll-timer.exceeded.seconds";
+
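With the `metrics` facade, describe_* only attaches documentation to a key; recording happens at the call sites shown earlier in the poll timer. A self-contained sketch assuming a 0.22+-style `metrics` crate (with no recorder installed these calls are no-ops, so it runs as-is):

```rust
fn main() {
    // Attach human-readable descriptions to the metric keys.
    metrics::describe_counter!(
        "pict-rs.future.poll-timer.exceeded",
        "How many times a given poll operation has lasted longer than 10 microseconds"
    );

    // Record against the same keys, with a label identifying the timer.
    metrics::counter!("pict-rs.future.poll-timer.exceeded", "timer" => "example").increment(1);
    metrics::histogram!("pict-rs.future.poll-timer.exceeded.seconds", "timer" => "example")
        .record(0.000_042);
}
```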
 fn describe_queue_cleanup() {
     metrics::describe_counter!(
         CLEANUP_OUTDATED_PROXY,
@@ -255,6 +271,14 @@ fn describe_postgres() {
         POSTGRES_VARIANTS_REMOVE,
         "Timings for removing a variant for a provided hash"
     );
+    metrics::describe_histogram!(
+        POSTGRES_HASHES_RELATE_BLURHASH,
+        "Timings for relating a blurhash with a provided hash"
+    );
+    metrics::describe_histogram!(
+        POSTGRES_HASHES_BLURHASH,
+        "Timings for fetching a blurhash for a provided hash"
+    );
     metrics::describe_histogram!(
         POSTGRES_HASHES_RELATE_MOTION_IDENTIFIER,
         "Timings for relating a still image identifier for a provided hash representing a video"
@@ -336,6 +360,14 @@ fn describe_postgres() {
         POSTGRES_QUEUE_HEARTBEAT,
         "Timings for updating the provided job's keepalive heartbeat"
     );
+    metrics::describe_histogram!(
+        POSTGRES_QUEUE_RETRY,
+        "Timings for updating retry count for a job"
+    );
+    metrics::describe_histogram!(
+        POSTGRES_QUEUE_CLEANUP,
+        "Timings for removing jobs with no more retries"
+    );
     metrics::describe_histogram!(
         POSTGRES_QUEUE_COMPLETE,
         "Timings for removing a completed job from the queue"
@@ -438,6 +470,8 @@ pub(crate) const POSTGRES_VARIANTS_RELATE_VARIANT_IDENTIFIER: &str =
 pub(crate) const POSTGRES_VARIANTS_IDENTIFIER: &str = "pict-rs.postgres.variants.identifier";
 pub(crate) const POSTGRES_VARIANTS_FOR_HASH: &str = "pict-rs.postgres.variants.for-hash";
 pub(crate) const POSTGRES_VARIANTS_REMOVE: &str = "pict-rs.postgres.variants.remove";
+pub(crate) const POSTGRES_HASHES_RELATE_BLURHASH: &str = "pict-rs.postgres.hashes.relate-blurhash";
+pub(crate) const POSTGRES_HASHES_BLURHASH: &str = "pict-rs.postgres.hashes.blurhash";
 pub(crate) const POSTGRES_HASHES_RELATE_MOTION_IDENTIFIER: &str =
     "pict-rs.postgres.hashes.relate-motion-identifier";
 pub(crate) const POSTGRES_HASHES_MOTION_IDENTIFIER: &str =
@@ -461,6 +495,8 @@ pub(crate) const POSTGRES_QUEUE_LISTEN: &str = "pict-rs.postgres.queue.listen";
 pub(crate) const POSTGRES_QUEUE_REQUEUE: &str = "pict-rs.postgres.queue.requeue";
 pub(crate) const POSTGRES_QUEUE_CLAIM: &str = "pict-rs.postgres.queue.claim";
 pub(crate) const POSTGRES_QUEUE_HEARTBEAT: &str = "pict-rs.postgres.queue.heartbeat";
+pub(crate) const POSTGRES_QUEUE_RETRY: &str = "pict-rs.postgres.queue.retry";
+pub(crate) const POSTGRES_QUEUE_CLEANUP: &str = "pict-rs.postgres.queue.cleanup";
 pub(crate) const POSTGRES_QUEUE_COMPLETE: &str = "pict-rs.postgres.queue.complete";
 pub(crate) const POSTGRES_STORE_MIGRATION_COUNT: &str = "pict-rs.postgres.store-migration.count";
 pub(crate) const POSTGRES_STORE_MIGRATION_MARK_MIGRATED: &str =
@@ -1,4 +1,5 @@
 use crate::config::{LogFormat, OpenTelemetry, Tracing};
+use color_eyre::config::Theme;
 use console_subscriber::ConsoleLayer;
 use opentelemetry::KeyValue;
 use opentelemetry_otlp::WithExportConfig;
@@ -11,7 +12,15 @@ use tracing_subscriber::{
 };
 
 pub(super) fn init_tracing(tracing: &Tracing) -> color_eyre::Result<()> {
-    color_eyre::install()?;
+    let eyre_theme = if tracing.logging.no_ansi {
+        Theme::new()
+    } else {
+        Theme::dark()
+    };
+
+    color_eyre::config::HookBuilder::new()
+        .theme(eyre_theme)
+        .install()?;
 
     LogTracer::init()?;
 
@@ -23,7 +32,9 @@ pub(super) fn init_tracing(tracing: &Tracing) -> color_eyre::Result<()> {
         FmtSpan::NONE
     };
 
-    let format_layer = tracing_subscriber::fmt::layer().with_span_events(fmt_span);
+    let format_layer = tracing_subscriber::fmt::layer()
+        .with_span_events(fmt_span)
+        .with_ansi(!tracing.logging.no_ansi);
 
     match tracing.logging.format {
         LogFormat::Compact => with_format(format_layer.compact(), tracing),
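The same no_ansi flag now drives both color-eyre's theme and the fmt layer. A minimal sketch of an ANSI-aware fmt layer (assumes the `tracing-subscriber` crate with default features):

```rust
use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt};

fn main() {
    let no_ansi = true; // would come from configuration

    // Disable color codes when the operator asks for plain output,
    // e.g. when logs go to a file or a non-terminal collector.
    tracing_subscriber::registry()
        .with(fmt::layer().with_ansi(!no_ansi))
        .init();

    tracing::info!("logging initialized");
}
```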
671 src/lib.rs (file diff suppressed because it is too large)

117 src/magick.rs
@@ -1,14 +1,11 @@
 use std::{ffi::OsStr, ops::Deref, path::Path, sync::Arc};
 
-use actix_web::web::Bytes;
-
 use crate::{
     config::Media,
     error_code::ErrorCode,
     formats::ProcessableFormat,
-    process::{Process, ProcessError, ProcessRead},
+    process::{Process, ProcessError},
     state::State,
-    stream::LocalBoxStream,
     tmp_file::{TmpDir, TmpFolder},
 };
 
@@ -23,21 +20,9 @@ pub(crate) enum MagickError {
     #[error("Invalid output format: {0}")]
     Json(String, #[source] serde_json::Error),
 
-    #[error("Error writing bytes")]
-    Write(#[source] std::io::Error),
-
-    #[error("Error creating file")]
-    CreateFile(#[source] std::io::Error),
-
-    #[error("Error creating directory")]
-    CreateDir(#[source] crate::store::file_store::FileError),
-
     #[error("Error creating temporary directory")]
     CreateTemporaryDirectory(#[source] std::io::Error),
 
-    #[error("Error closing file")]
-    CloseFile(#[source] std::io::Error),
-
     #[error("Error in metadata discovery")]
     Discover(#[source] crate::discover::DiscoverError),
 
@@ -66,11 +51,7 @@ impl MagickError {
             Self::CommandFailed(_) => ErrorCode::COMMAND_FAILURE,
             Self::Process(e) => e.error_code(),
             Self::Json(_, _)
-            | Self::Write(_)
-            | Self::CreateFile(_)
-            | Self::CreateDir(_)
             | Self::CreateTemporaryDirectory(_)
-            | Self::CloseFile(_)
             | Self::Discover(_)
             | Self::Cleanup(_)
             | Self::Empty => ErrorCode::COMMAND_ERROR,
@@ -86,40 +67,20 @@ impl MagickError {
     }
 }
 
-async fn process_image<S, F, Fut>(
+pub(crate) async fn process_image_command<S>(
     state: &State<S>,
     process_args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    write_file: F,
-) -> Result<ProcessRead, MagickError>
-where
-    F: FnOnce(crate::file::File) -> Fut,
-    Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
-{
+) -> Result<Process, MagickError> {
     let temporary_path = state
         .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;
 
-    let input_file = state.tmp_dir.tmp_file(None);
-    crate::store::file_store::safe_create_parent(&input_file)
-        .await
-        .map_err(MagickError::CreateDir)?;
-
-    let tmp_one = crate::file::File::create(&input_file)
-        .await
-        .map_err(MagickError::CreateFile)?;
-    let tmp_one = (write_file)(tmp_one).await?;
-    tmp_one.close().await.map_err(MagickError::CloseFile)?;
-
-    let input_arg = [
-        input_format.magick_format().as_ref(),
-        input_file.as_os_str(),
-    ]
-    .join(":".as_ref());
+    let input_arg = format!("{}:-", input_format.magick_format());
     let output_arg = format!("{}:-", format.magick_format());
     let quality = quality.map(|q| q.to_string());
 
@@ -130,7 +91,7 @@ where
 
     let mut args: Vec<&OsStr> = Vec::with_capacity(len);
     args.push("convert".as_ref());
-    args.push(&input_arg);
+    args.push(input_arg.as_ref());
     if input_format.coalesce() {
         args.push("-coalesce".as_ref());
     }
@@ -145,67 +106,11 @@ where
         (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];
 
-    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?
-        .read()
-        .add_extras(input_file)
+    let process = Process::run("magick", &args, &envs, state.config.media.process_timeout)
+        .await?
         .add_extras(temporary_path);
 
-    Ok(reader)
-}
-
-pub(crate) async fn process_image_stream_read<S>(
-    state: &State<S>,
-    stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
-    args: Vec<String>,
-    input_format: ProcessableFormat,
-    format: ProcessableFormat,
-    quality: Option<u8>,
-) -> Result<ProcessRead, MagickError> {
-    process_image(
-        state,
-        args,
-        input_format,
-        format,
-        quality,
-        |mut tmp_file| async move {
-            tmp_file
-                .write_from_stream(stream)
-                .await
-                .map_err(MagickError::Write)?;
-            Ok(tmp_file)
-        },
-    )
-    .await
-}
-
-pub(crate) async fn process_image_process_read<S>(
-    state: &State<S>,
-    process_read: ProcessRead,
-    args: Vec<String>,
-    input_format: ProcessableFormat,
-    format: ProcessableFormat,
-    quality: Option<u8>,
-) -> Result<ProcessRead, MagickError> {
-    process_image(
-        state,
-        args,
-        input_format,
-        format,
-        quality,
-        |mut tmp_file| async move {
-            process_read
-                .with_stdout(|stdout| async {
-                    tmp_file
-                        .write_from_async_read(stdout)
-                        .await
-                        .map_err(MagickError::Write)
-                })
-                .await??;
-
-            Ok(tmp_file)
-        },
-    )
-    .await
+    Ok(process)
 }
 
 pub(crate) type ArcPolicyDir = Arc<PolicyDir>;
@@ -280,12 +185,12 @@ fn generate_policy(media: &Media) -> String {
     <policy domain="cache" name="synchronize" value="true"/>
     <policy domain="path" rights="none" pattern="@*" />
     <policy domain="coder" rights="none" pattern="*" />
-    <policy domain="coder" rights="read | write" pattern="{{APNG,AVIF,GIF,HEIC,JPEG,JSON,JXL,PNG,WEBP,MP4,WEBM,TMP,PAM}}" />
+    <policy domain="coder" rights="read | write" pattern="{{APNG,AVIF,GIF,HEIC,JPEG,JSON,JXL,PNG,RGB,RGBA,WEBP,MP4,WEBM,TMP,PAM}}" />
     <policy domain="delegate" rights="none" pattern="*" />
-    <policy domain="delegate" rights="execute" pattern="FFMPEG" />
+    <policy domain="delegate" rights="execute" pattern="ffmpeg" />
     <policy domain="filter" rights="none" pattern="*" />
     <policy domain="module" rights="none" pattern="*" />
-    <policy domain="module" rights="read | write" pattern="{{APNG,AVIF,GIF,HEIC,JPEG,JSON,JXL,PNG,WEBP,TMP,PAM,PNM,VIDEO}}" />
+    <policy domain="module" rights="read | write" pattern="{{APNG,AVIF,GIF,HEIC,JPEG,JSON,JXL,PNG,RGB,RGBA,WEBP,TMP,PAM,PNM,VIDEO}}" />
     <!-- indirect reads not permitted -->
     <policy domain="system" name="max-memory-request" value="256MiB"/>
     <policy domain="system" name="memory-map" value="anonymous"/>
@@ -1,9 +1,10 @@
 #[cfg(feature = "io-uring")]
 fn main() -> color_eyre::Result<()> {
-    tokio_uring::start(async move {
+    actix_web::rt::System::new().block_on(async move {
         pict_rs::PictRsConfiguration::build_default()?
             .install_tracing()?
             .install_metrics()?
+            .install_crypto_provider()
             .run()
             .await
     })
@@ -18,6 +19,7 @@ fn main() -> color_eyre::Result<()> {
     pict_rs::PictRsConfiguration::build_default()?
         .install_tracing()?
         .install_metrics()?
+        .install_crypto_provider()
         .run_on_localset()
         .await
     })
@@ -45,10 +45,10 @@ impl Drop for MetricsGuard {
     }
 }
 
-async fn drain(rx: flume::Receiver<actix_web::dev::Payload>) {
+async fn drain(mut rx: tokio::sync::mpsc::Receiver<actix_web::dev::Payload>) {
     let mut set = JoinSet::new();
 
-    while let Ok(payload) = rx.recv_async().await {
+    while let Some(payload) = rx.recv().await {
         tracing::trace!("drain: looping");
 
         // draining a payload is a best-effort task - if we can't collect in 2 minutes we bail
@@ -94,18 +94,18 @@ async fn drain(rx: flume::Receiver<actix_web::dev::Payload>) {
 struct DrainHandle(Option<Rc<tokio::task::JoinHandle<()>>>);
 
 pub(crate) struct Payload {
-    sender: flume::Sender<actix_web::dev::Payload>,
+    sender: tokio::sync::mpsc::Sender<actix_web::dev::Payload>,
     handle: DrainHandle,
 }
 pub(crate) struct PayloadMiddleware<S> {
     inner: S,
-    sender: flume::Sender<actix_web::dev::Payload>,
+    sender: tokio::sync::mpsc::Sender<actix_web::dev::Payload>,
     _handle: DrainHandle,
 }
 
 pub(crate) struct PayloadStream {
     inner: Option<actix_web::dev::Payload>,
-    sender: flume::Sender<actix_web::dev::Payload>,
+    sender: tokio::sync::mpsc::Sender<actix_web::dev::Payload>,
 }
 
 impl DrainHandle {
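Swapping flume for tokio's bounded mpsc changes the loop condition: recv() yields Some(item) until every sender is dropped. Runnable sketch of the drain pattern:

```rust
#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u32>(8);

    tokio::spawn(async move {
        for i in 0..3 {
            tx.send(i).await.expect("receiver alive");
        }
        // tx is dropped here, which ends the drain loop below
    });

    // Mirrors `while let Some(payload) = rx.recv().await` in the diff.
    while let Some(item) = rx.recv().await {
        println!("drained {item}");
    }
}
```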
@@ -396,7 +396,7 @@ where
         .await
         .map_err(From::from)
         .map_err(MigrateError::Details)?;
-    let new_details = Details::from_bytes(to, bytes_stream.into_bytes())
+    let new_details = Details::from_bytes_stream(to, bytes_stream)
         .await
         .map_err(MigrateError::Details)?;
     to.repo
@@ -409,7 +409,7 @@ where
 
     let new_identifier = to
         .store
-        .save_stream(stream, details.media_type())
+        .save_stream(stream, details.media_type(), Some(details.file_extension()))
         .await
         .map_err(MigrateError::To)?;
 
171  src/process.rs
@@ -1,4 +1,3 @@
-use actix_web::web::Bytes;
 use std::{
     ffi::OsStr,
     future::Future,
@@ -6,14 +5,19 @@ use std::{
     sync::Arc,
     time::{Duration, Instant},
 };

+use futures_core::Stream;
+use streem::IntoStreamer;
 use tokio::{
     io::{AsyncReadExt, AsyncWriteExt},
     process::{Child, ChildStdin, Command},
 };
+use tokio_util::{bytes::Bytes, io::ReaderStream};
 use tracing::Instrument;
+use uuid::Uuid;

 use crate::{
+    bytes_stream::BytesStream,
     error_code::ErrorCode,
     future::{LocalBoxFuture, WithTimeout},
+    read::BoxRead,
@@ -55,14 +59,12 @@ impl Drop for MetricsGuard {
     }
 }

-#[derive(Debug)]
-struct StatusError(ExitStatus);
-
 pub(crate) struct Process {
     command: Arc<str>,
     child: Child,
     guard: MetricsGuard,
     timeout: Duration,
+    extras: Box<dyn Extras>,
+    id: Uuid,
 }

@@ -150,7 +152,7 @@ impl ProcessError {
 }

 impl Process {
-    pub(crate) fn run<T>(
+    pub(crate) async fn run<T>(
         command: &str,
         args: &[T],
         envs: &[(&str, &OsStr)],
@@ -163,15 +165,10 @@ impl Process {

         tracing::debug!("{envs:?} {command} {args:?}");

-        let res = tracing::trace_span!(parent: None, "Create command", %command).in_scope(|| {
-            Self::spawn(
-                command.clone(),
-                Command::new(command.as_ref())
-                    .args(args)
-                    .envs(envs.iter().copied()),
-                timeout,
-            )
-        });
+        let mut cmd = Command::new(command.as_ref());
+        cmd.args(args).envs(envs.iter().copied());
+
+        let res = Self::spawn(command.clone(), cmd, timeout).await;

         match res {
             Ok(this) => Ok(this),
@@ -186,23 +183,31 @@ impl Process {
         }
     }

-    fn spawn(command: Arc<str>, cmd: &mut Command, timeout: u64) -> std::io::Result<Self> {
-        tracing::trace_span!(parent: None, "Spawn command", %command).in_scope(|| {
-            let guard = MetricsGuard::guard(command.clone());
+    async fn spawn(command: Arc<str>, mut cmd: Command, timeout: u64) -> std::io::Result<Self> {
+        let guard = MetricsGuard::guard(command.clone());

-            let cmd = cmd
-                .stdin(Stdio::piped())
-                .stdout(Stdio::piped())
-                .kill_on_drop(true);
+        cmd.stdin(Stdio::piped())
+            .stdout(Stdio::piped())
+            .kill_on_drop(true);

-            cmd.spawn().map(|child| Process {
+        crate::sync::spawn_blocking("spawn-command", move || cmd.spawn())
+            .await
+            .expect("spawn panicked")
+            .map(|child| Process {
                child,
                command,
                guard,
                timeout: Duration::from_secs(timeout),
+               extras: Box::new(()),
+               id: Uuid::now_v7(),
            })
-        })
    }

+    pub(crate) fn add_extras(self, extra: impl Extras + 'static) -> Self {
+        Self {
+            extras: Box::new((self.extras, extra)),
+            ..self
+        }
+    }
+
    #[tracing::instrument(skip(self), fields(command = %self.command, id = %self.id))]
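The move to `crate::sync::spawn_blocking` here reflects that spawning a child process is itself a blocking syscall (fork/exec), so it should not run on an async worker thread. Outside pict-rs the same idea looks like this, using plain `tokio::task::spawn_blocking` (the name-tagging in pict-rs's wrapper is project-specific):

use tokio::process::{Child, Command};

// tokio::process::Command::spawn is synchronous even though the resulting
// Child is async, so hop to a blocking thread before calling it.
async fn spawn_off_runtime(mut cmd: Command) -> std::io::Result<Child> {
    tokio::task::spawn_blocking(move || cmd.spawn())
        .await
        .expect("spawn_blocking panicked")
}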
@@ -212,11 +217,17 @@ impl Process {
             mut child,
             guard,
             timeout,
+            mut extras,
+            id: _,
         } = self;

         let res = child.wait().with_timeout(timeout).await;

+        extras
+            .consume()
+            .await
+            .map_err(|e| ProcessError::Cleanup(command.clone(), e))?;

         match res {
             Ok(Ok(status)) if status.success() => {
                 guard.disarm();
@@ -232,30 +243,35 @@ impl Process {
         }
     }

-    pub(crate) fn bytes_read(self, input: Bytes) -> ProcessRead {
-        self.spawn_fn(move |mut stdin| {
-            let mut input = input;
-            async move {
-                match stdin.write_all_buf(&mut input).await {
-                    Ok(()) => Ok(()),
-                    // BrokenPipe means we finished reading from Stdout, so we don't need to write
-                    // to stdin. We'll still error out if the command failed so treat this as a
-                    // success
-                    Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => Ok(()),
-                    Err(e) => Err(e),
-                }
-            }
-        })
-    }
+    pub(crate) fn drive_with_stream<S>(self, input: S) -> ProcessRead
+    where
+        S: Stream<Item = std::io::Result<Bytes>> + 'static,
+    {
+        self.drive(move |mut stdin| async move {
+            let stream = std::pin::pin!(input);
+            let mut stream = stream.into_streamer();
+
+            while let Some(mut bytes) = stream.try_next().await? {
+                match stdin.write_all_buf(&mut bytes).await {
+                    Ok(()) => {}
+                    Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => break,
+                    Err(e) => return Err(e),
+                }
+                crate::sync::cooperate().await;
+            }
+
+            Ok(())
+        })
+    }

     pub(crate) fn read(self) -> ProcessRead {
-        self.spawn_fn(|_| async { Ok(()) })
+        self.drive(|_| async { Ok(()) })
     }

     #[allow(unknown_lints)]
     #[allow(clippy::let_with_type_underscore)]
     #[tracing::instrument(level = "trace", skip_all)]
-    fn spawn_fn<F, Fut>(self, f: F) -> ProcessRead
+    fn drive<F, Fut>(self, f: F) -> ProcessRead
     where
         F: FnOnce(ChildStdin) -> Fut + 'static,
         Fut: Future<Output = std::io::Result<()>>,
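`crate::sync::cooperate()` in the write loop above is not defined anywhere in this diff; assuming it is a simple cooperative yield point, the equivalent is just:

// Yield back to the scheduler between stdin writes so a long-running pipe
// cannot starve other tasks on the same worker thread.
async fn cooperate() {
    tokio::task::yield_now().await;
}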
@@ -265,6 +281,7 @@ impl Process {
             mut child,
             guard,
             timeout,
+            extras,
             id,
         } = self;

@@ -301,7 +318,7 @@ impl Process {
             handle,
             command,
             id,
-            extras: Box::new(()),
+            extras,
         }
     }
 }
@@ -317,6 +334,16 @@ impl ProcessRead {
         }
     }

+    pub(crate) async fn into_bytes_stream(self) -> Result<BytesStream, ProcessError> {
+        let cmd = self.command.clone();
+
+        self.with_stdout(move |stdout| {
+            BytesStream::try_from_stream(ReaderStream::with_capacity(stdout, 1024 * 64))
+        })
+        .await?
+        .map_err(move |e| ProcessError::Read(cmd, e))
+    }
+
     pub(crate) async fn into_vec(self) -> Result<Vec<u8>, ProcessError> {
         let cmd = self.command.clone();

@@ -347,10 +374,72 @@ impl ProcessRead {
         .await?
     }

+    pub(crate) fn pipe(self, process: Process) -> ProcessRead {
+        let Process {
+            command,
+            mut child,
+            guard,
+            timeout,
+            extras,
+            id,
+        } = process;
+
+        let mut stdin = child.stdin.take().expect("stdin exists");
+        let stdout = child.stdout.take().expect("stdout exists");
+
+        let command2 = command.clone();
+        let handle = Box::pin(async move {
+            self.with_stdout(move |mut stdout| async move {
+                let child_fut = async {
+                    tokio::io::copy(&mut stdout, &mut stdin).await?;
+                    drop(stdout);
+                    drop(stdin);
+
+                    child.wait().await
+                };
+
+                match child_fut.with_timeout(timeout).await {
+                    Ok(Ok(status)) if status.success() => {
+                        guard.disarm();
+                        Ok(())
+                    }
+                    Ok(Ok(status)) => Err(ProcessError::Status(command2, status)),
+                    Ok(Err(e)) => Err(ProcessError::Other(command2, e)),
+                    Err(_) => {
+                        child
+                            .kill()
+                            .await
+                            .map_err(|e| ProcessError::Other(command2.clone(), e))?;
+                        Err(ProcessError::Timeout(command2))
+                    }
+                }
+            })
+            .await?
+        });
+
+        ProcessRead {
+            reader: Box::pin(stdout),
+            handle,
+            command,
+            id,
+            extras,
+        }
+    }
+
+    pub(crate) async fn with_stdout<Fut>(
+        self,
+        f: impl FnOnce(BoxRead<'static>) -> Fut,
+    ) -> Result<Fut::Output, ProcessError>
+    where
+        Fut: Future,
+    {
+        self.with_stdout_inner(f).await
+    }
+
-    pub(crate) async fn with_stdout<Fut>(
+    async fn with_stdout_inner<Fut>(
         self,
         f: impl FnOnce(BoxRead<'static>) -> Fut,
     ) -> Result<Fut::Output, ProcessError>
     where
         Fut: Future,
     {
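Stripped of pict-rs's MetricsGuard and error plumbing, the new pipe() above is the classic stdout-to-stdin relay between two children. A self-contained sketch with plain tokio (it assumes the Unix tools echo and tr are available, and simplifies timeout and kill handling):

use tokio::{
    io::AsyncReadExt,
    process::Command,
    time::{timeout, Duration},
};

async fn pipe_demo() -> std::io::Result<Vec<u8>> {
    let mut first = Command::new("echo")
        .arg("hello")
        .stdout(std::process::Stdio::piped())
        .spawn()?;
    let mut second = Command::new("tr")
        .args(["a-z", "A-Z"])
        .stdin(std::process::Stdio::piped())
        .stdout(std::process::Stdio::piped())
        .spawn()?;

    let mut out = first.stdout.take().expect("stdout piped");
    let mut sink = second.stdin.take().expect("stdin piped");

    // Relay the first child's stdout into the second child's stdin; the
    // handles must be dropped so the second child sees end-of-input.
    tokio::io::copy(&mut out, &mut sink).await?;
    drop(sink);

    let mut collected = Vec::new();
    second
        .stdout
        .take()
        .expect("stdout piped")
        .read_to_end(&mut collected)
        .await?;

    let _ = timeout(Duration::from_secs(5), first.wait()).await;
    let _ = timeout(Duration::from_secs(5), second.wait()).await;

    Ok(collected)
}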
@@ -395,11 +484,3 @@ impl ProcessRead {
         }
     }
 }
-
-impl std::fmt::Display for StatusError {
-    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-        write!(f, "Command failed with bad status: {}", self.0)
-    }
-}
-
-impl std::error::Error for StatusError {}
@@ -91,7 +91,7 @@ impl ResizeKind {
 pub(crate) fn build_chain(
     args: &[(String, String)],
     ext: &str,
-) -> Result<(PathBuf, Vec<String>), Error> {
+) -> Result<(String, Vec<String>), Error> {
     fn parse<P: Processor>(key: &str, value: &str) -> Result<Option<P>, Error> {
         if key == P::NAME {
             return Ok(Some(P::parse(key, value).ok_or(UploadError::ParsePath)?));
@@ -122,7 +122,7 @@ pub(crate) fn build_chain(

     path.push(ext);

-    Ok((path, args))
+    Ok((path.to_string_lossy().to_string(), args))
 }

 impl Processor for Identity {
261  src/queue.rs
@@ -1,19 +1,21 @@
 use crate::{
-    concurrent_processor::ProcessMap,
     error::{Error, UploadError},
     formats::InputProcessableFormat,
-    future::LocalBoxFuture,
+    future::{LocalBoxFuture, WithPollTimer},
     repo::{Alias, ArcRepo, DeleteToken, Hash, JobId, UploadId},
     serde_str::Serde,
     state::State,
     store::Store,
+    UploadQuery,
 };

 use std::{
-    path::PathBuf,
+    ops::Deref,
+    rc::Rc,
     sync::Arc,
     time::{Duration, Instant},
 };
+use tokio::task::JoinError;
 use tracing::Instrument;

 pub(crate) mod cleanup;
@@ -55,11 +57,13 @@ enum Process {
         identifier: String,
         upload_id: Serde<UploadId>,
         declared_alias: Option<Serde<Alias>>,
+        #[serde(default)]
+        upload_query: UploadQuery,
     },
     Generate {
         target_format: InputProcessableFormat,
         source: Serde<Alias>,
-        process_path: PathBuf,
+        process_path: String,
         process_args: Vec<String>,
     },
 }
@@ -157,11 +161,13 @@ pub(crate) async fn queue_ingest(
     identifier: &Arc<str>,
     upload_id: UploadId,
     declared_alias: Option<Alias>,
+    upload_query: UploadQuery,
 ) -> Result<(), Error> {
     let job = serde_json::to_value(Process::Ingest {
         identifier: identifier.to_string(),
         declared_alias: declared_alias.map(Serde::new),
         upload_id: Serde::new(upload_id),
+        upload_query,
     })
     .map_err(UploadError::PushJob)?;
     repo.push(PROCESS_QUEUE, job, None).await?;
@@ -172,13 +178,13 @@ pub(crate) async fn queue_generate(
     repo: &ArcRepo,
     target_format: InputProcessableFormat,
     source: Alias,
-    process_path: PathBuf,
+    variant: String,
     process_args: Vec<String>,
 ) -> Result<(), Error> {
     let job = serde_json::to_value(Process::Generate {
         target_format,
         source: Serde::new(source),
-        process_path,
+        process_path: variant,
         process_args,
     })
     .map_err(UploadError::PushJob)?;
@@ -190,8 +196,8 @@ pub(crate) async fn process_cleanup<S: Store + 'static>(state: State<S>) {
     process_jobs(state, CLEANUP_QUEUE, cleanup::perform).await
 }

-pub(crate) async fn process_images<S: Store + 'static>(state: State<S>, process_map: ProcessMap) {
-    process_image_jobs(state, process_map, PROCESS_QUEUE, process::perform).await
+pub(crate) async fn process_images<S: Store + 'static>(state: State<S>) {
+    process_jobs(state, PROCESS_QUEUE, process::perform).await
 }

 struct MetricsGuard {
@@ -225,145 +231,170 @@ impl Drop for MetricsGuard {
     }
 }

+pub(super) enum JobError {
+    Abort(Error),
+    Retry(Error),
+}
+
+impl AsRef<Error> for JobError {
+    fn as_ref(&self) -> &Error {
+        match self {
+            Self::Abort(e) | Self::Retry(e) => e,
+        }
+    }
+}
+
+impl Deref for JobError {
+    type Target = Error;
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            Self::Abort(e) | Self::Retry(e) => e,
+        }
+    }
+}
+
+impl From<JobError> for Error {
+    fn from(value: JobError) -> Self {
+        match value {
+            JobError::Abort(e) | JobError::Retry(e) => e,
+        }
+    }
+}
+
+type JobResult<T = ()> = Result<T, JobError>;
+
+type JobFuture<'a> = LocalBoxFuture<'a, JobResult>;
+
+trait JobContext {
+    type Item;
+
+    fn abort(self) -> JobResult<Self::Item>
+    where
+        Self: Sized;
+
+    fn retry(self) -> JobResult<Self::Item>
+    where
+        Self: Sized;
+}
+
+impl<T, E> JobContext for Result<T, E>
+where
+    E: Into<Error>,
+{
+    type Item = T;
+
+    fn abort(self) -> JobResult<Self::Item>
+    where
+        Self: Sized,
+    {
+        self.map_err(Into::into).map_err(JobError::Abort)
+    }
+
+    fn retry(self) -> JobResult<Self::Item>
+    where
+        Self: Sized,
+    {
+        self.map_err(Into::into).map_err(JobError::Retry)
+    }
+}
+
+fn job_result(result: &Result<JobResult, JoinError>) -> crate::repo::JobResult {
+    match result {
+        Ok(Ok(())) => crate::repo::JobResult::Success,
+        Ok(Err(JobError::Retry(_))) => crate::repo::JobResult::Failure,
+        Ok(Err(JobError::Abort(_))) => crate::repo::JobResult::Aborted,
+        Err(_) => crate::repo::JobResult::Aborted,
+    }
+}
+
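At a call site, the JobContext extension trait reads like the following sketch; MyJob and run_side_effect are hypothetical stand-ins, and the error conversion mirrors the InvalidJob wrapping used later in this diff:

// Illustration only: with JobContext in scope, each `?` carries a policy.
async fn example_job(raw: serde_json::Value) -> JobResult {
    let job_text = format!("{raw}");

    // A job that cannot even be deserialized will never succeed: abort it
    // so the queue does not retry it forever.
    let job: MyJob = serde_json::from_value(raw)
        .map_err(|e| UploadError::InvalidJob(e, job_text))
        .abort()?;

    // A transient failure (network, database) may succeed on a later run.
    run_side_effect(&job).await.retry()?;

    Ok(())
}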
 async fn process_jobs<S, F>(state: State<S>, queue: &'static str, callback: F)
 where
-    S: Store,
-    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>> + Copy,
+    S: Store + 'static,
+    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> JobFuture<'a> + Copy + 'static,
 {
     let worker_id = uuid::Uuid::new_v4();
+    let state = Rc::new(state);

     loop {
         tracing::trace!("process_jobs: looping");

-        tokio::task::yield_now().await;
+        crate::sync::cooperate().await;

-        let res = job_loop(&state, worker_id, queue, callback).await;
+        // add a panic boundary by spawning a task
+        let res = crate::sync::spawn(
+            "job-loop",
+            job_loop(state.clone(), worker_id, queue, callback),
+        )
+        .await;

-        if let Err(e) = res {
-            tracing::warn!("Error processing jobs: {}", format!("{e}"));
-            tracing::warn!("{}", format!("{e:?}"));
+        match res {
+            // clean exit
+            Ok(Ok(())) => break,

-            if e.is_disconnected() {
-                tokio::time::sleep(Duration::from_secs(10)).await;
-            }
+            // job error
+            Ok(Err(e)) => {
+                tracing::warn!("Error processing jobs: {}", format!("{e}"));
+                tracing::warn!("{}", format!("{e:?}"));

-            continue;
-        }
+                if e.is_disconnected() {
+                    tokio::time::sleep(Duration::from_secs(10)).await;
+                }
+            }

-        break;
+            // job panic
+            Err(_) => {
+                tracing::warn!("Panic while processing jobs");
+            }
+        }
     }
 }

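The "panic boundary" comment works because tokio catches a panic inside a spawned task and hands it back as a JoinError instead of unwinding into the supervising loop. A minimal demonstration with plain tokio::spawn (pict-rs's crate::sync::spawn is assumed to be a named wrapper around the same machinery):

use tokio::task::JoinError;

// The spawned future panics, but the caller survives and can inspect the
// failure through the JoinError instead of crashing.
async fn panic_boundary_demo() -> Result<(), JoinError> {
    let handle = tokio::spawn(async {
        panic!("job blew up");
    });

    match handle.await {
        Err(e) if e.is_panic() => {
            eprintln!("caught a job panic, supervisor keeps running");
            Ok(())
        }
        other => other,
    }
}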
 async fn job_loop<S, F>(
-    state: &State<S>,
+    state: Rc<State<S>>,
     worker_id: uuid::Uuid,
     queue: &'static str,
     callback: F,
 ) -> Result<(), Error>
 where
-    S: Store,
-    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>> + Copy,
+    S: Store + 'static,
+    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> JobFuture<'a> + Copy + 'static,
 {
     loop {
         tracing::trace!("job_loop: looping");

-        tokio::task::yield_now().await;
+        crate::sync::cooperate().with_poll_timer("cooperate").await;

         async {
-            let (job_id, job) = state.repo.pop(queue, worker_id).await?;
+            let (job_id, job) = state
+                .repo
+                .pop(queue, worker_id)
+                .with_poll_timer("pop-job")
+                .await?;

             let guard = MetricsGuard::guard(worker_id, queue);

-            let res = heartbeat(
-                &state.repo,
-                queue,
-                worker_id,
-                job_id,
-                (callback)(state, job),
-            )
+            let state2 = state.clone();
+            let res = crate::sync::spawn("job-and-heartbeat", async move {
+                let state = state2;
+                heartbeat(
+                    &state.repo,
+                    queue,
+                    worker_id,
+                    job_id,
+                    (callback)(&state, job),
+                )
+                .await
+            })
             .await;

-            state.repo.complete_job(queue, worker_id, job_id).await?;
+            state
+                .repo
+                .complete_job(queue, worker_id, job_id, job_result(&res))
+                .with_poll_timer("job-complete")
+                .await?;

-            res?;
+            res.map_err(|_| UploadError::Canceled)??;

             guard.disarm();

             Ok(()) as Result<(), Error>
         }
         .instrument(tracing::info_span!("tick", %queue, %worker_id))
         .await?;
     }
 }

-async fn process_image_jobs<S, F>(
-    state: State<S>,
-    process_map: ProcessMap,
-    queue: &'static str,
-    callback: F,
-) where
-    S: Store,
-    for<'a> F: Fn(&'a State<S>, &'a ProcessMap, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>>
-        + Copy,
-{
-    let worker_id = uuid::Uuid::new_v4();
-
-    loop {
-        tracing::trace!("process_image_jobs: looping");
-
-        tokio::task::yield_now().await;
-
-        let res = image_job_loop(&state, &process_map, worker_id, queue, callback).await;
-
-        if let Err(e) = res {
-            tracing::warn!("Error processing jobs: {}", format!("{e}"));
-            tracing::warn!("{}", format!("{e:?}"));
-
-            if e.is_disconnected() {
-                tokio::time::sleep(Duration::from_secs(10)).await;
-            }
-
-            continue;
-        }
-
-        break;
-    }
-}
-
-async fn image_job_loop<S, F>(
-    state: &State<S>,
-    process_map: &ProcessMap,
-    worker_id: uuid::Uuid,
-    queue: &'static str,
-    callback: F,
-) -> Result<(), Error>
-where
-    S: Store,
-    for<'a> F: Fn(&'a State<S>, &'a ProcessMap, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>>
-        + Copy,
-{
-    loop {
-        tracing::trace!("image_job_loop: looping");
-
-        tokio::task::yield_now().await;
-
-        async {
-            let (job_id, job) = state.repo.pop(queue, worker_id).await?;
-
-            let guard = MetricsGuard::guard(worker_id, queue);
-
-            let res = heartbeat(
-                &state.repo,
-                queue,
-                worker_id,
-                job_id,
-                (callback)(state, process_map, job),
-            )
-            .await;
-
-            state.repo.complete_job(queue, worker_id, job_id).await?;
-
-            res?;
-
-            guard.disarm();
-
@@ -385,7 +416,9 @@ async fn heartbeat<Fut>(
 where
     Fut: std::future::Future,
 {
-    let mut fut = std::pin::pin!(fut.instrument(tracing::info_span!("job-future")));
+    let mut fut = std::pin::pin!(fut
+        .with_poll_timer("job-future")
+        .instrument(tracing::info_span!("job-future")));

     let mut interval = tokio::time::interval(Duration::from_secs(5));

@@ -394,7 +427,7 @@ where
     loop {
         tracing::trace!("heartbeat: looping");

-        tokio::task::yield_now().await;
+        crate::sync::cooperate().await;

         tokio::select! {
             biased;
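For reference, the overall shape of the heartbeat function these hunks are editing: pin the job future once, then race it against a ticker with a biased select, refreshing the claim on every tick so other workers can tell the job is still alive. refresh_claim is a hypothetical stand-in for the repo heartbeat call:

use std::time::Duration;

async fn heartbeat<F: std::future::Future>(fut: F) -> F::Output {
    let mut fut = std::pin::pin!(fut);
    let mut interval = tokio::time::interval(Duration::from_secs(5));

    loop {
        tokio::select! {
            biased;
            // The job finished: return its output immediately.
            output = &mut fut => return output,
            // Otherwise, on each tick, refresh our claim on the job.
            _ = interval.tick() => refresh_claim().await,
        }
    }
}

// Stand-in for writing a fresh heartbeat timestamp to the repo.
async fn refresh_claim() {}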
@@ -6,7 +6,7 @@ use tracing::{Instrument, Span};
 use crate::{
     config::Configuration,
     error::{Error, UploadError},
-    future::LocalBoxFuture,
+    future::WithPollTimer,
     queue::Cleanup,
     repo::{Alias, ArcRepo, DeleteToken, Hash},
     serde_str::Serde,
@@ -14,41 +14,76 @@ use crate::{
     store::Store,
 };

-pub(super) fn perform<S>(
-    state: &State<S>,
-    job: serde_json::Value,
-) -> LocalBoxFuture<'_, Result<(), Error>>
+use super::{JobContext, JobFuture, JobResult};
+
+pub(super) fn perform<S>(state: &State<S>, job: serde_json::Value) -> JobFuture<'_>
 where
     S: Store + 'static,
 {
     Box::pin(async move {
-        match serde_json::from_value(job) {
-            Ok(job) => match job {
-                Cleanup::Hash { hash: in_hash } => hash(&state.repo, in_hash).await?,
-                Cleanup::Identifier {
-                    identifier: in_identifier,
-                } => identifier(&state.repo, &state.store, Arc::from(in_identifier)).await?,
-                Cleanup::Alias {
-                    alias: stored_alias,
-                    token,
-                } => {
-                    alias(
-                        &state.repo,
-                        Serde::into_inner(stored_alias),
-                        Serde::into_inner(token),
-                    )
-                    .await?
-                }
-                Cleanup::Variant { hash, variant } => {
-                    hash_variant(&state.repo, hash, variant).await?
-                }
-                Cleanup::AllVariants => all_variants(&state.repo).await?,
-                Cleanup::OutdatedVariants => outdated_variants(&state.repo, &state.config).await?,
-                Cleanup::OutdatedProxies => outdated_proxies(&state.repo, &state.config).await?,
-                Cleanup::Prune => prune(&state.repo, &state.store).await?,
-            },
-            Err(e) => {
-                tracing::warn!("Invalid job: {}", format!("{e}"));
-            }
-        }
+        let job_text = format!("{job}");
+
+        #[cfg(feature = "random-errors")]
+        {
+            use nanorand::Rng;
+
+            if nanorand::tls_rng().generate_range(0..25) < 1 {
+                return Err(crate::error::UploadError::RandomError).retry();
+            }
+        }
+
+        let job = serde_json::from_value(job)
+            .map_err(|e| UploadError::InvalidJob(e, job_text))
+            .abort()?;
+
+        match job {
+            Cleanup::Hash { hash: in_hash } => {
+                hash(&state.repo, in_hash)
+                    .with_poll_timer("cleanup-hash")
+                    .await?
+            }
+            Cleanup::Identifier {
+                identifier: in_identifier,
+            } => {
+                identifier(&state.repo, &state.store, Arc::from(in_identifier))
+                    .with_poll_timer("cleanup-identifier")
+                    .await?
+            }
+            Cleanup::Alias {
+                alias: stored_alias,
+                token,
+            } => {
+                alias(
+                    &state.repo,
+                    Serde::into_inner(stored_alias),
+                    Serde::into_inner(token),
+                )
+                .await?
+            }
+            Cleanup::Variant { hash, variant } => {
+                hash_variant(&state.repo, hash, variant)
+                    .with_poll_timer("cleanup-hash-variant")
+                    .await?
+            }
+            Cleanup::AllVariants => {
+                all_variants(&state.repo)
+                    .with_poll_timer("cleanup-all-variants")
+                    .await?
+            }
+            Cleanup::OutdatedVariants => {
+                outdated_variants(&state.repo, &state.config)
+                    .with_poll_timer("cleanup-outdated-variants")
+                    .await?
+            }
+            Cleanup::OutdatedProxies => {
+                outdated_proxies(&state.repo, &state.config)
+                    .with_poll_timer("cleanup-outdated-proxies")
+                    .await?
+            }
+            Cleanup::Prune => {
+                prune(&state.repo, &state.store)
+                    .with_poll_timer("cleanup-prune")
+                    .await?
+            }
+        }

@@ -57,36 +92,30 @@ where
 }

 #[tracing::instrument(skip_all)]
-async fn identifier<S>(repo: &ArcRepo, store: &S, identifier: Arc<str>) -> Result<(), Error>
+async fn identifier<S>(repo: &ArcRepo, store: &S, identifier: Arc<str>) -> JobResult
 where
     S: Store,
 {
-    let mut errors = Vec::new();
-
-    if let Err(e) = store.remove(&identifier).await {
-        errors.push(UploadError::from(e));
+    match store.remove(&identifier).await {
+        Ok(_) => {}
+        Err(e) if e.is_not_found() => {}
+        Err(e) => return Err(e).retry(),
     }

-    if let Err(e) = repo.cleanup_details(&identifier).await {
-        errors.push(UploadError::from(e));
-    }
-
-    for error in errors {
-        tracing::error!("{}", format!("{error:?}"));
-    }
+    repo.cleanup_details(&identifier).await.retry()?;

     Ok(())
 }

 #[tracing::instrument(skip_all)]
-async fn hash(repo: &ArcRepo, hash: Hash) -> Result<(), Error> {
-    let aliases = repo.aliases_for_hash(hash.clone()).await?;
+async fn hash(repo: &ArcRepo, hash: Hash) -> JobResult {
+    let aliases = repo.aliases_for_hash(hash.clone()).await.retry()?;

     if !aliases.is_empty() {
         for alias in aliases {
             // TODO: decide if it is okay to skip aliases without tokens
-            if let Some(token) = repo.delete_token(&alias).await? {
-                super::cleanup_alias(repo, alias, token).await?;
+            if let Some(token) = repo.delete_token(&alias).await.retry()? {
+                super::cleanup_alias(repo, alias, token).await.retry()?;
             } else {
                 tracing::warn!("Not cleaning alias!");
             }
@@ -97,145 +126,152 @@ async fn hash(repo: &ArcRepo, hash: Hash) -> Result<(), Error> {

     let mut idents = repo
         .variants(hash.clone())
-        .await?
+        .await
+        .retry()?
         .into_iter()
         .map(|(_, v)| v)
         .collect::<Vec<_>>();
-    idents.extend(repo.identifier(hash.clone()).await?);
-    idents.extend(repo.motion_identifier(hash.clone()).await?);
+    idents.extend(repo.identifier(hash.clone()).await.retry()?);
+    idents.extend(repo.motion_identifier(hash.clone()).await.retry()?);

     for identifier in idents {
-        let _ = super::cleanup_identifier(repo, &identifier).await;
+        super::cleanup_identifier(repo, &identifier).await.retry()?;
     }

-    repo.cleanup_hash(hash).await?;
+    repo.cleanup_hash(hash).await.retry()?;

     Ok(())
 }

 #[tracing::instrument(skip_all)]
-pub(crate) async fn alias(repo: &ArcRepo, alias: Alias, token: DeleteToken) -> Result<(), Error> {
-    let saved_delete_token = repo.delete_token(&alias).await?;
+pub(crate) async fn alias(repo: &ArcRepo, alias: Alias, token: DeleteToken) -> JobResult {
+    let saved_delete_token = repo.delete_token(&alias).await.retry()?;

     if !saved_delete_token.is_some_and(|t| t.ct_eq(&token)) {
-        return Err(UploadError::InvalidToken.into());
+        return Err(UploadError::InvalidToken).abort();
     }

-    let hash = repo.hash(&alias).await?;
+    let hash = repo.hash(&alias).await.retry()?;

-    repo.cleanup_alias(&alias).await?;
-    repo.remove_relation(alias.clone()).await?;
-    repo.remove_alias_access(alias.clone()).await?;
+    repo.cleanup_alias(&alias).await.retry()?;
+    repo.remove_relation(alias.clone()).await.retry()?;
+    repo.remove_alias_access(alias.clone()).await.retry()?;

-    let Some(hash) = hash else {
-        // hash doesn't exist, nothing to do
-        return Ok(());
-    };
+    let hash = hash.ok_or(UploadError::MissingAlias).abort()?;

-    if repo.aliases_for_hash(hash.clone()).await?.is_empty() {
-        super::cleanup_hash(repo, hash).await?;
+    if repo
+        .aliases_for_hash(hash.clone())
+        .await
+        .retry()?
+        .is_empty()
+    {
+        super::cleanup_hash(repo, hash).await.retry()?;
     }

     Ok(())
 }

 #[tracing::instrument(skip_all)]
-async fn all_variants(repo: &ArcRepo) -> Result<(), Error> {
+async fn all_variants(repo: &ArcRepo) -> JobResult {
     let hash_stream = std::pin::pin!(repo.hashes());
     let mut hash_stream = hash_stream.into_streamer();

     while let Some(res) = hash_stream.next().await {
         tracing::trace!("all_variants: looping");

-        let hash = res?;
-        super::cleanup_variants(repo, hash, None).await?;
+        let hash = res.retry()?;
+        super::cleanup_variants(repo, hash, None).await.retry()?;
     }

     Ok(())
 }

 #[tracing::instrument(skip_all)]
-async fn outdated_variants(repo: &ArcRepo, config: &Configuration) -> Result<(), Error> {
+async fn outdated_variants(repo: &ArcRepo, config: &Configuration) -> JobResult {
     let now = time::OffsetDateTime::now_utc();
     let since = now.saturating_sub(config.media.retention.variants.to_duration());

-    let variant_stream = repo.older_variants(since).await?;
+    let variant_stream = repo.older_variants(since).await.retry()?;
     let variant_stream = std::pin::pin!(crate::stream::take(variant_stream, 2048));
     let mut variant_stream = variant_stream.into_streamer();

     let mut count = 0;

-    while let Some(res) = variant_stream.next().await {
+    while let Some((hash, variant)) = variant_stream.try_next().await.retry()? {
         metrics::counter!(crate::init_metrics::CLEANUP_OUTDATED_VARIANT).increment(1);
         tracing::trace!("outdated_variants: looping");

-        let (hash, variant) = res?;
-        super::cleanup_variants(repo, hash, Some(variant)).await?;
+        super::cleanup_variants(repo, hash, Some(variant))
+            .await
+            .retry()?;
         count += 1;
     }

     tracing::debug!("Queued {count} variant cleanup jobs");
-    let queue_length = repo.queue_length().await?;
+    let queue_length = repo.queue_length().await.abort()?;
     tracing::debug!("Total queue length: {queue_length}");

     Ok(())
 }

 #[tracing::instrument(skip_all)]
-async fn outdated_proxies(repo: &ArcRepo, config: &Configuration) -> Result<(), Error> {
+async fn outdated_proxies(repo: &ArcRepo, config: &Configuration) -> JobResult {
     let now = time::OffsetDateTime::now_utc();
     let since = now.saturating_sub(config.media.retention.proxy.to_duration());

-    let alias_stream = repo.older_aliases(since).await?;
+    let alias_stream = repo.older_aliases(since).await.retry()?;
     let alias_stream = std::pin::pin!(crate::stream::take(alias_stream, 2048));
     let mut alias_stream = alias_stream.into_streamer();

     let mut count = 0;

-    while let Some(res) = alias_stream.next().await {
+    while let Some(alias) = alias_stream.try_next().await.retry()? {
         metrics::counter!(crate::init_metrics::CLEANUP_OUTDATED_PROXY).increment(1);
         tracing::trace!("outdated_proxies: looping");

-        let alias = res?;
-        if let Some(token) = repo.delete_token(&alias).await? {
-            super::cleanup_alias(repo, alias, token).await?;
+        if let Some(token) = repo.delete_token(&alias).await.retry()? {
+            super::cleanup_alias(repo, alias, token).await.retry()?;
             count += 1;
         } else {
             tracing::warn!("Skipping alias cleanup - no delete token");
-            repo.remove_relation(alias.clone()).await?;
-            repo.remove_alias_access(alias).await?;
+            repo.remove_relation(alias.clone()).await.retry()?;
+            repo.remove_alias_access(alias).await.retry()?;
         }
     }

     tracing::debug!("Queued {count} alias cleanup jobs");
-    let queue_length = repo.queue_length().await?;
+    let queue_length = repo.queue_length().await.abort()?;
     tracing::debug!("Total queue length: {queue_length}");

     Ok(())
 }

 #[tracing::instrument(skip_all)]
-async fn hash_variant(
-    repo: &ArcRepo,
-    hash: Hash,
-    target_variant: Option<String>,
-) -> Result<(), Error> {
+async fn hash_variant(repo: &ArcRepo, hash: Hash, target_variant: Option<String>) -> JobResult {
     if let Some(target_variant) = target_variant {
         if let Some(identifier) = repo
             .variant_identifier(hash.clone(), target_variant.clone())
-            .await?
+            .await
+            .retry()?
         {
-            super::cleanup_identifier(repo, &identifier).await?;
+            super::cleanup_identifier(repo, &identifier).await.retry()?;
         }

         repo.remove_variant(hash.clone(), target_variant.clone())
-            .await?;
-        repo.remove_variant_access(hash, target_variant).await?;
+            .await
+            .retry()?;
+        repo.remove_variant_access(hash, target_variant)
+            .await
+            .retry()?;
     } else {
-        for (variant, identifier) in repo.variants(hash.clone()).await? {
-            repo.remove_variant(hash.clone(), variant.clone()).await?;
-            repo.remove_variant_access(hash.clone(), variant).await?;
-            super::cleanup_identifier(repo, &identifier).await?;
+        for (variant, identifier) in repo.variants(hash.clone()).await.retry()? {
+            repo.remove_variant(hash.clone(), variant.clone())
+                .await
+                .retry()?;
+            repo.remove_variant_access(hash.clone(), variant)
+                .await
+                .retry()?;
+            super::cleanup_identifier(repo, &identifier).await.retry()?;
         }
     }

@@ -243,19 +279,20 @@ async fn hash_variant(
 }

 #[tracing::instrument(skip_all)]
-async fn prune<S>(repo: &ArcRepo, store: &S) -> Result<(), Error>
+async fn prune<S>(repo: &ArcRepo, store: &S) -> JobResult
 where
     S: Store + 'static,
 {
     repo.set("prune-missing-started", b"1".to_vec().into())
-        .await?;
+        .await
+        .retry()?;

     let hash_stream = std::pin::pin!(repo.hashes());
     let mut hash_stream = hash_stream.into_streamer();

     let mut count: u64 = 0;

-    while let Some(hash) = hash_stream.try_next().await? {
+    while let Some(hash) = hash_stream.try_next().await.retry()? {
         tracing::trace!("prune: looping");

         let repo = repo.clone();
@@ -307,7 +344,8 @@ where
     }

     repo.set("prune-missing-complete", b"1".to_vec().into())
-        .await?;
+        .await
+        .retry()?;

     Ok(())
 }
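Every cleanup loop in this file follows the same streem idiom: pin the stream on the stack, wrap it in a streamer, and pull items with try_next until exhaustion. Factored out as a generic helper, under the assumption that streem's API behaves as used above:

use streem::IntoStreamer;

async fn for_each_item<S, T, E>(stream: S, mut f: impl FnMut(T)) -> Result<(), E>
where
    S: futures_core::Stream<Item = Result<T, E>>,
{
    // Stack-pin the stream, then adapt it into a streamer for try_next.
    let stream = std::pin::pin!(stream);
    let mut stream = stream.into_streamer();

    // try_next surfaces the first stream error and ends cleanly on None.
    while let Some(item) = stream.try_next().await? {
        f(item);
    }

    Ok(())
}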
@@ -1,63 +1,64 @@
-use time::Instant;
 use tracing::{Instrument, Span};

 use crate::{
-    concurrent_processor::ProcessMap,
     error::{Error, UploadError},
     formats::InputProcessableFormat,
-    future::LocalBoxFuture,
+    future::WithPollTimer,
     ingest::Session,
     queue::Process,
     repo::{Alias, UploadId, UploadResult},
     serde_str::Serde,
     state::State,
     store::Store,
+    UploadQuery,
 };
-use std::{path::PathBuf, sync::Arc};
+use std::{sync::Arc, time::Instant};

-pub(super) fn perform<'a, S>(
-    state: &'a State<S>,
-    process_map: &'a ProcessMap,
-    job: serde_json::Value,
-) -> LocalBoxFuture<'a, Result<(), Error>>
+use super::{JobContext, JobFuture, JobResult};
+
+pub(super) fn perform<S>(state: &State<S>, job: serde_json::Value) -> JobFuture<'_>
 where
     S: Store + 'static,
 {
     Box::pin(async move {
-        match serde_json::from_value(job) {
-            Ok(job) => match job {
-                Process::Ingest {
-                    identifier,
-                    upload_id,
-                    declared_alias,
-                } => {
-                    process_ingest(
-                        state,
-                        Arc::from(identifier),
-                        Serde::into_inner(upload_id),
-                        declared_alias.map(Serde::into_inner),
-                    )
-                    .await?
-                }
-                Process::Generate {
-                    target_format,
-                    source,
-                    process_path,
-                    process_args,
-                } => {
-                    generate(
-                        state,
-                        process_map,
-                        target_format,
-                        Serde::into_inner(source),
-                        process_path,
-                        process_args,
-                    )
-                    .await?
-                }
-            },
-            Err(e) => {
-                tracing::warn!("Invalid job: {}", format!("{e}"));
-            }
-        }
+        let job_text = format!("{job}");
+
+        let job = serde_json::from_value(job)
+            .map_err(|e| UploadError::InvalidJob(e, job_text))
+            .abort()?;
+
+        match job {
+            Process::Ingest {
+                identifier,
+                upload_id,
+                declared_alias,
+                upload_query,
+            } => {
+                process_ingest(
+                    state,
+                    Arc::from(identifier),
+                    Serde::into_inner(upload_id),
+                    declared_alias.map(Serde::into_inner),
+                    upload_query,
+                )
+                .with_poll_timer("process-ingest")
+                .await?
+            }
+            Process::Generate {
+                target_format,
+                source,
+                process_path,
+                process_args,
+            } => {
+                generate(
+                    state,
+                    target_format,
+                    Serde::into_inner(source),
+                    process_path,
+                    process_args,
+                )
+                .with_poll_timer("process-generate")
+                .await?
+            }
+        }

@@ -88,7 +89,7 @@ impl UploadGuard {
 impl Drop for UploadGuard {
     fn drop(&mut self) {
         metrics::counter!(crate::init_metrics::BACKGROUND_UPLOAD_INGEST, "completed" => (!self.armed).to_string()).increment(1);
-        metrics::histogram!(crate::init_metrics::BACKGROUND_UPLOAD_INGEST_DURATION, "completed" => (!self.armed).to_string()).record(self.start.elapsed().as_seconds_f64());
+        metrics::histogram!(crate::init_metrics::BACKGROUND_UPLOAD_INGEST_DURATION, "completed" => (!self.armed).to_string()).record(self.start.elapsed().as_secs_f64());

         if self.armed {
             tracing::warn!(
@@ -105,13 +106,14 @@ async fn process_ingest<S>(
     unprocessed_identifier: Arc<str>,
     upload_id: UploadId,
     declared_alias: Option<Alias>,
-) -> Result<(), Error>
+    upload_query: UploadQuery,
+) -> JobResult
 where
     S: Store + 'static,
 {
     let guard = UploadGuard::guard(upload_id);

-    let fut = async {
+    let res = async {
         let ident = unprocessed_identifier.clone();
         let state2 = state.clone();

@@ -124,7 +126,8 @@ where
         let stream =
             crate::stream::from_err(state2.store.to_stream(&ident, None, None).await?);

-        let session = crate::ingest::ingest(&state2, stream, declared_alias).await?;
+        let session =
+            crate::ingest::ingest(&state2, stream, declared_alias, &upload_query).await?;

         Ok(session) as Result<Session, Error>
     }
@@ -135,67 +138,78 @@ where
         state.store.remove(&unprocessed_identifier).await?;

         error_boundary.map_err(|_| UploadError::Canceled)?
-    };
+    }
+    .await;

-    let result = match fut.await {
+    let (result, err) = match res {
         Ok(session) => {
             let alias = session.alias().take().expect("Alias should exist").clone();
             let token = session.disarm();
-            UploadResult::Success { alias, token }
+            (UploadResult::Success { alias, token }, None)
         }
-        Err(e) => {
-            tracing::warn!("Failed to ingest\n{}\n{}", format!("{e}"), format!("{e:?}"));
-
+        Err(e) => (
             UploadResult::Failure {
                 message: e.root_cause().to_string(),
                 code: e.error_code().into_owned(),
-            }
-        }
+            },
+            Some(e),
+        ),
     };

-    state.repo.complete_upload(upload_id, result).await?;
+    state
+        .repo
+        .complete_upload(upload_id, result)
+        .await
+        .retry()?;
+
+    if let Some(e) = err {
+        return Err(e).abort();
+    }

     guard.disarm();

     Ok(())
 }

-#[tracing::instrument(skip(state, process_map, process_path, process_args))]
+#[tracing::instrument(skip(state, variant, process_args))]
 async fn generate<S: Store + 'static>(
     state: &State<S>,
-    process_map: &ProcessMap,
     target_format: InputProcessableFormat,
     source: Alias,
-    process_path: PathBuf,
+    variant: String,
     process_args: Vec<String>,
-) -> Result<(), Error> {
-    let Some(hash) = state.repo.hash(&source).await? else {
-        // Nothing to do
-        return Ok(());
-    };
+) -> JobResult {
+    let hash = state
+        .repo
+        .hash(&source)
+        .await
+        .retry()?
+        .ok_or(UploadError::MissingAlias)
+        .abort()?;

-    let path_string = process_path.to_string_lossy().to_string();
     let identifier_opt = state
         .repo
-        .variant_identifier(hash.clone(), path_string)
-        .await?;
+        .variant_identifier(hash.clone(), variant.clone())
+        .await
+        .retry()?;

     if identifier_opt.is_some() {
         // don't generate already-generated variant
         return Ok(());
     }

-    let original_details = crate::ensure_details(state, &source).await?;
+    let original_details = crate::ensure_details(state, &source).await.retry()?;

     crate::generate::generate(
         state,
-        process_map,
         target_format,
-        process_path,
+        variant,
         process_args,
         &original_details,
         hash,
     )
-    .await?;
+    .await
+    .abort()?;

     Ok(())
 }
15  src/range.rs
@@ -3,7 +3,6 @@ use std::sync::Arc;
 use crate::{
     error::{Error, UploadError},
     store::Store,
-    stream::once,
 };
 use actix_web::{
     http::header::{ByteRangeSpec, ContentRange, ContentRangeSpec, Range},
@@ -11,20 +10,6 @@ use actix_web::{
 };
 use futures_core::Stream;

-pub(crate) fn chop_bytes(
-    byte_range: &ByteRangeSpec,
-    bytes: Bytes,
-    length: u64,
-) -> Result<impl Stream<Item = Result<Bytes, Error>>, Error> {
-    if let Some((start, end)) = byte_range.to_satisfiable_range(length) {
-        // END IS INCLUSIVE
-        let end = end as usize + 1;
-        return Ok(once(Ok(bytes.slice(start as usize..end))));
-    }
-
-    Err(UploadError::Range.into())
-}
-
 pub(crate) async fn chop_store<S: Store>(
     byte_range: &ByteRangeSpec,
     store: &S,
140  src/repo.rs
@@ -3,6 +3,7 @@ mod delete_token;
 mod hash;
 mod metrics;
 mod migrate;
+mod notification_map;

 use crate::{
     config,
@@ -23,6 +24,7 @@ pub(crate) use alias::Alias;
 pub(crate) use delete_token::DeleteToken;
 pub(crate) use hash::Hash;
 pub(crate) use migrate::{migrate_04, migrate_repo};
+pub(crate) use notification_map::NotificationEntry;

 pub(crate) type ArcRepo = Arc<dyn FullRepo>;

@@ -103,6 +105,7 @@ pub(crate) trait FullRepo:
     + AliasRepo
     + QueueRepo
     + HashRepo
+    + VariantRepo
     + StoreMigrationRepo
     + AliasAccessRepo
     + VariantAccessRepo
@@ -337,6 +340,13 @@ where
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub(crate) struct JobId(Uuid);

+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub(crate) enum JobResult {
+    Success,
+    Failure,
+    Aborted,
+}
+
 impl JobId {
     pub(crate) fn gen() -> Self {
         Self(Uuid::now_v7())
@@ -380,6 +390,7 @@ pub(crate) trait QueueRepo: BaseRepo {
         queue: &'static str,
         worker_id: Uuid,
         job_id: JobId,
+        job_status: JobResult,
     ) -> Result<(), RepoError>;
 }

@@ -423,8 +434,9 @@ where
         queue: &'static str,
         worker_id: Uuid,
         job_id: JobId,
+        job_status: JobResult,
     ) -> Result<(), RepoError> {
-        T::complete_job(self, queue, worker_id, job_id).await
+        T::complete_job(self, queue, worker_id, job_id, job_status).await
     }
 }

@@ -432,7 +444,6 @@ where
 pub(crate) trait SettingsRepo: BaseRepo {
     async fn set(&self, key: &'static str, value: Arc<[u8]>) -> Result<(), RepoError>;
     async fn get(&self, key: &'static str) -> Result<Option<Arc<[u8]>>, RepoError>;
-    async fn remove(&self, key: &'static str) -> Result<(), RepoError>;
 }

 #[async_trait::async_trait(?Send)]
@@ -447,10 +458,6 @@ where
     async fn get(&self, key: &'static str) -> Result<Option<Arc<[u8]>>, RepoError> {
         T::get(self, key).await
     }
-
-    async fn remove(&self, key: &'static str) -> Result<(), RepoError> {
-        T::remove(self, key).await
-    }
 }

 #[async_trait::async_trait(?Send)]
@@ -644,19 +651,8 @@ pub(crate) trait HashRepo: BaseRepo {

     async fn identifier(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError>;

-    async fn relate_variant_identifier(
-        &self,
-        hash: Hash,
-        variant: String,
-        identifier: &Arc<str>,
-    ) -> Result<Result<(), VariantAlreadyExists>, RepoError>;
-    async fn variant_identifier(
-        &self,
-        hash: Hash,
-        variant: String,
-    ) -> Result<Option<Arc<str>>, RepoError>;
-    async fn variants(&self, hash: Hash) -> Result<Vec<(String, Arc<str>)>, RepoError>;
-    async fn remove_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError>;
+    async fn relate_blurhash(&self, hash: Hash, blurhash: Arc<str>) -> Result<(), RepoError>;
+    async fn blurhash(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError>;

     async fn relate_motion_identifier(
         &self,
@@ -714,6 +710,96 @@ where
         T::identifier(self, hash).await
     }

+    async fn relate_blurhash(&self, hash: Hash, blurhash: Arc<str>) -> Result<(), RepoError> {
+        T::relate_blurhash(self, hash, blurhash).await
+    }
+
+    async fn blurhash(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
+        T::blurhash(self, hash).await
+    }
+
+    async fn relate_motion_identifier(
+        &self,
+        hash: Hash,
+        identifier: &Arc<str>,
+    ) -> Result<(), RepoError> {
+        T::relate_motion_identifier(self, hash, identifier).await
+    }
+
+    async fn motion_identifier(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
+        T::motion_identifier(self, hash).await
+    }
+
+    async fn cleanup_hash(&self, hash: Hash) -> Result<(), RepoError> {
+        T::cleanup_hash(self, hash).await
+    }
+}
+
+#[async_trait::async_trait(?Send)]
+pub(crate) trait VariantRepo: BaseRepo {
+    async fn claim_variant_processing_rights(
+        &self,
+        hash: Hash,
+        variant: String,
+    ) -> Result<Result<(), NotificationEntry>, RepoError>;
+
+    async fn variant_waiter(
+        &self,
+        hash: Hash,
+        variant: String,
+    ) -> Result<NotificationEntry, RepoError>;
+
+    async fn variant_heartbeat(&self, hash: Hash, variant: String) -> Result<(), RepoError>;
+
+    async fn notify_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError>;
+
+    async fn relate_variant_identifier(
+        &self,
+        hash: Hash,
+        variant: String,
+        identifier: &Arc<str>,
+    ) -> Result<Result<(), VariantAlreadyExists>, RepoError>;
+
+    async fn variant_identifier(
+        &self,
+        hash: Hash,
+        variant: String,
+    ) -> Result<Option<Arc<str>>, RepoError>;
+
+    async fn variants(&self, hash: Hash) -> Result<Vec<(String, Arc<str>)>, RepoError>;
+
+    async fn remove_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError>;
+}
+
+#[async_trait::async_trait(?Send)]
+impl<T> VariantRepo for Arc<T>
+where
+    T: VariantRepo,
+{
+    async fn claim_variant_processing_rights(
+        &self,
+        hash: Hash,
+        variant: String,
+    ) -> Result<Result<(), NotificationEntry>, RepoError> {
+        T::claim_variant_processing_rights(self, hash, variant).await
+    }
+
+    async fn variant_waiter(
+        &self,
+        hash: Hash,
+        variant: String,
+    ) -> Result<NotificationEntry, RepoError> {
+        T::variant_waiter(self, hash, variant).await
+    }
+
+    async fn variant_heartbeat(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
+        T::variant_heartbeat(self, hash, variant).await
+    }
+
+    async fn notify_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
+        T::notify_variant(self, hash, variant).await
+    }
+
     async fn relate_variant_identifier(
         &self,
         hash: Hash,
@@ -738,22 +824,6 @@ where
     async fn remove_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
         T::remove_variant(self, hash, variant).await
     }
-
-    async fn relate_motion_identifier(
-        &self,
-        hash: Hash,
-        identifier: &Arc<str>,
-    ) -> Result<(), RepoError> {
-        T::relate_motion_identifier(self, hash, identifier).await
-    }
-
-    async fn motion_identifier(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
-        T::motion_identifier(self, hash).await
-    }
-
-    async fn cleanup_hash(&self, hash: Hash) -> Result<(), RepoError> {
-        T::cleanup_hash(self, hash).await
-    }
 }

 #[async_trait::async_trait(?Send)]
|
@ -387,10 +387,9 @@ async fn fetch_or_generate_details<S: Store>(
|
|||
Ok(details)
|
||||
} else {
|
||||
let bytes_stream = state.store.to_bytes(identifier, None, None).await?;
|
||||
let bytes = bytes_stream.into_bytes();
|
||||
|
||||
let guard = details_semaphore().acquire().await?;
|
||||
let details = Details::from_bytes(state, bytes).await?;
|
||||
let details = Details::from_bytes_stream(state, bytes_stream).await?;
|
||||
drop(guard);
|
||||
|
||||
Ok(details)
|
||||
|
|
92  src/repo/notification_map.rs (new file)
@@ -0,0 +1,92 @@
+use dashmap::{mapref::entry::Entry, DashMap};
+use std::{
+    future::Future,
+    sync::{Arc, Weak},
+    time::Duration,
+};
+use tokio::sync::Notify;
+
+use crate::future::WithTimeout;
+
+type Map = Arc<DashMap<Arc<str>, Weak<NotificationEntryInner>>>;
+
+#[derive(Clone)]
+pub(super) struct NotificationMap {
+    map: Map,
+}
+
+pub(crate) struct NotificationEntry {
+    inner: Arc<NotificationEntryInner>,
+}
+
+struct NotificationEntryInner {
+    key: Arc<str>,
+    map: Map,
+    notify: Notify,
+}
+
+impl NotificationMap {
+    pub(super) fn new() -> Self {
+        Self {
+            map: Arc::new(DashMap::new()),
+        }
+    }
+
+    pub(super) fn register_interest(&self, key: Arc<str>) -> NotificationEntry {
+        match self.map.entry(key.clone()) {
+            Entry::Occupied(mut occupied) => {
+                if let Some(inner) = occupied.get().upgrade() {
+                    NotificationEntry { inner }
+                } else {
+                    let inner = Arc::new(NotificationEntryInner {
+                        key,
+                        map: self.map.clone(),
+                        notify: crate::sync::bare_notify(),
+                    });
+
+                    occupied.insert(Arc::downgrade(&inner));
+
+                    NotificationEntry { inner }
+                }
+            }
+            Entry::Vacant(vacant) => {
+                let inner = Arc::new(NotificationEntryInner {
+                    key,
+                    map: self.map.clone(),
+                    notify: crate::sync::bare_notify(),
+                });
+
+                vacant.insert(Arc::downgrade(&inner));
+
+                NotificationEntry { inner }
+            }
+        }
+    }
+
+    pub(super) fn notify(&self, key: &str) {
+        if let Some(notifier) = self.map.get(key).and_then(|v| v.upgrade()) {
+            notifier.notify.notify_waiters();
+        }
+    }
+}
+
+impl NotificationEntry {
+    pub(crate) fn notified_timeout(
+        &mut self,
+        duration: Duration,
+    ) -> impl Future<Output = Result<(), tokio::time::error::Elapsed>> + '_ {
+        self.inner.notify.notified().with_timeout(duration)
+    }
+}
+
+impl Default for NotificationMap {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl Drop for NotificationEntryInner {
+    fn drop(&mut self) {
+        self.map.remove(&self.key);
+    }
+}
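A usage sketch for the new map (illustrative; the wait duration is arbitrary): the first worker to claim a key does the work and calls notify, while later arrivals register interest and wait. Because the map only holds Weak references and NotificationEntryInner removes its key on drop, abandoned entries clean themselves up.

use std::{sync::Arc, time::Duration};

async fn wait_for_variant(map: &NotificationMap, key: Arc<str>) {
    // notified_timeout takes &mut self, so the entry must be mutable.
    let mut entry = map.register_interest(key);

    match entry.notified_timeout(Duration::from_secs(20)).await {
        Ok(()) => { /* another worker finished the variant */ }
        Err(_elapsed) => { /* timed out; fall back to doing the work here */ }
    }
}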
@@ -4,6 +4,7 @@ mod schema;

 use std::{
     collections::{BTreeSet, VecDeque},
+    future::Future,
     path::PathBuf,
     sync::{
         atomic::{AtomicU64, Ordering},
@@ -12,19 +13,20 @@ use std::{
     time::{Duration, Instant},
 };

+use bb8::CustomizeConnection;
 use dashmap::DashMap;
 use diesel::prelude::*;
 use diesel_async::{
     pooled_connection::{
-        deadpool::{BuildError, Hook, Object, Pool, PoolError},
-        AsyncDieselConnectionManager, ManagerConfig,
+        bb8::{Pool, PooledConnection, RunError},
+        AsyncDieselConnectionManager, ManagerConfig, PoolError,
     },
     AsyncConnection, AsyncPgConnection, RunQueryDsl,
 };
 use futures_core::Stream;
 use tokio::sync::Notify;
 use tokio_postgres::{AsyncMessage, Connection, NoTls, Notification, Socket};
-use tokio_postgres_rustls::MakeRustlsConnect;
+use tokio_postgres_generic_rustls::{AwsLcRsDigest, MakeRustlsConnect};
 use tracing::Instrument;
 use url::Url;
 use uuid::Uuid;
@@ -32,7 +34,7 @@ use uuid::Uuid;
 use crate::{
     details::Details,
     error_code::{ErrorCode, OwnedErrorCode},
-    future::{WithMetrics, WithTimeout},
+    future::{WithMetrics, WithPollTimer, WithTimeout},
     serde_str::Serde,
     stream::LocalBoxStream,
     sync::DropHandle,
@@ -42,10 +44,11 @@ use self::job_status::JobStatus;

 use super::{
     metrics::{PopMetricsGuard, PushMetricsGuard, WaitMetricsGuard},
+    notification_map::{NotificationEntry, NotificationMap},
     Alias, AliasAccessRepo, AliasAlreadyExists, AliasRepo, BaseRepo, DeleteToken, DetailsRepo,
-    FullRepo, Hash, HashAlreadyExists, HashPage, HashRepo, JobId, OrderedHash, ProxyRepo,
-    QueueRepo, RepoError, SettingsRepo, StoreMigrationRepo, UploadId, UploadRepo, UploadResult,
-    VariantAccessRepo, VariantAlreadyExists,
+    FullRepo, Hash, HashAlreadyExists, HashPage, HashRepo, JobId, JobResult, OrderedHash,
+    ProxyRepo, QueueRepo, RepoError, SettingsRepo, StoreMigrationRepo, UploadId, UploadRepo,
+    UploadResult, VariantAccessRepo, VariantAlreadyExists, VariantRepo,
 };

@@ -61,6 +64,7 @@ struct Inner {
     notifier_pool: Pool<AsyncPgConnection>,
     queue_notifications: DashMap<String, Arc<Notify>>,
     upload_notifications: DashMap<UploadId, Weak<Notify>>,
+    keyed_notifications: NotificationMap,
 }

 struct UploadInterest {
@@ -80,6 +84,10 @@ struct UploadNotifierState<'a> {
     inner: &'a Inner,
 }

+struct KeyedNotifierState<'a> {
+    inner: &'a Inner,
+}
+
 #[derive(Debug, thiserror::Error)]
 pub(crate) enum ConnectPostgresError {
     #[error("Failed to connect to postgres for migrations")]
@@ -92,16 +100,16 @@ pub(crate) enum ConnectPostgresError {
     Migration(#[source] Box<refinery::Error>),

     #[error("Failed to build postgres connection pool")]
-    BuildPool(#[source] BuildError),
+    BuildPool(#[source] PoolError),
 }

 #[derive(Debug, thiserror::Error)]
 pub(crate) enum PostgresError {
     #[error("Error in db pool")]
-    Pool(#[source] PoolError),
+    Pool(#[source] RunError),

     #[error("Error in database")]
-    Diesel(#[source] diesel::result::Error),
+    Diesel(#[from] diesel::result::Error),

     #[error("Error deserializing hex value")]
     Hex(#[source] hex::FromHexError),
@@ -154,22 +162,18 @@ impl PostgresError {
     pub(super) const fn is_disconnected(&self) -> bool {
         matches!(
             self,
-            Self::Pool(
-                PoolError::Closed
-                    | PoolError::Backend(
-                        diesel_async::pooled_connection::PoolError::ConnectionError(_)
-                    ),
-            ) | Self::Diesel(diesel::result::Error::DatabaseError(
-                diesel::result::DatabaseErrorKind::ClosedConnection,
-                _,
-            ))
+            Self::Pool(RunError::User(PoolError::ConnectionError(_)))
+                | Self::Diesel(diesel::result::Error::DatabaseError(
+                    diesel::result::DatabaseErrorKind::ClosedConnection,
+                    _,
+                ))
         )
     }
 }

 async fn build_tls_connector(
     certificate_file: Option<PathBuf>,
-) -> Result<MakeRustlsConnect, TlsError> {
+) -> Result<MakeRustlsConnect<AwsLcRsDigest>, TlsError> {
     let mut cert_store = rustls::RootCertStore {
         roots: Vec::from(webpki_roots::TLS_SERVER_ROOTS),
     };
@@ -195,14 +199,14 @@ async fn build_tls_connector(
         .with_root_certificates(cert_store)
         .with_no_client_auth();

-    let tls = MakeRustlsConnect::new(config);
+    let tls = MakeRustlsConnect::new(config, AwsLcRsDigest);

     Ok(tls)
 }

 async fn connect_for_migrations(
     postgres_url: &Url,
-    tls_connector: Option<MakeRustlsConnect>,
+    tls_connector: Option<MakeRustlsConnect<AwsLcRsDigest>>,
 ) -> Result<
     (
         tokio_postgres::Client,
@ -233,11 +237,37 @@ async fn connect_for_migrations(
|
|||
Ok(tup)
|
||||
}
|
||||
|
||||
fn build_pool(
|
||||
#[derive(Debug)]
|
||||
struct OnConnect;
|
||||
|
||||
impl<C, E> CustomizeConnection<C, E> for OnConnect
|
||||
where
|
||||
C: Send + 'static,
|
||||
E: 'static,
|
||||
{
|
||||
fn on_acquire<'life0, 'life1, 'async_trait>(
|
||||
&'life0 self,
|
||||
_connection: &'life1 mut C,
|
||||
) -> core::pin::Pin<
|
||||
Box<dyn core::future::Future<Output = Result<(), E>> + core::marker::Send + 'async_trait>,
|
||||
>
|
||||
where
|
||||
'life0: 'async_trait,
|
||||
'life1: 'async_trait,
|
||||
Self: 'async_trait,
|
||||
{
|
||||
Box::pin(async {
|
||||
metrics::counter!(crate::init_metrics::POSTGRES_POOL_CONNECTION_CREATE).increment(1);
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn build_pool(
|
||||
postgres_url: &Url,
|
||||
tx: flume::Sender<Notification>,
|
||||
connector: Option<MakeRustlsConnect>,
|
||||
max_size: usize,
|
||||
tx: tokio::sync::mpsc::Sender<Notification>,
|
||||
connector: Option<MakeRustlsConnect<AwsLcRsDigest>>,
|
||||
max_size: u32,
|
||||
) -> Result<Pool<AsyncPgConnection>, ConnectPostgresError> {
|
||||
let mut config = ManagerConfig::default();
|
||||
config.custom_setup = build_handler(tx, connector);
|
||||
|
@ -247,21 +277,12 @@ fn build_pool(
|
|||
config,
|
||||
);
|
||||
|
||||
let pool = Pool::builder(mgr)
|
||||
.runtime(deadpool::Runtime::Tokio1)
|
||||
.wait_timeout(Some(Duration::from_secs(10)))
|
||||
.create_timeout(Some(Duration::from_secs(2)))
|
||||
.recycle_timeout(Some(Duration::from_secs(2)))
|
||||
.post_create(Hook::sync_fn(|_, _| {
|
||||
metrics::counter!(crate::init_metrics::POSTGRES_POOL_CONNECTION_CREATE).increment(1);
|
||||
Ok(())
|
||||
}))
|
||||
.post_recycle(Hook::sync_fn(|_, _| {
|
||||
metrics::counter!(crate::init_metrics::POSTGRES_POOL_CONNECTION_RECYCLE).increment(1);
|
||||
Ok(())
|
||||
}))
|
||||
let pool = Pool::builder()
|
||||
.max_size(max_size)
|
||||
.build()
|
||||
.connection_timeout(Duration::from_secs(10))
|
||||
.connection_customizer(Box::new(OnConnect))
|
||||
.build(mgr)
|
||||
.await
|
||||
.map_err(ConnectPostgresError::BuildPool)?;
|
||||
|
||||
Ok(pool)
|
||||
|
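Note (editor sketch): the constructor below derives both pool sizes from the configured parallelism — eight connections per unit of parallelism for the general pool, and at most four for the LISTEN/NOTIFY pool. A std-only illustration of that sizing, assuming parallelism comes from the thread count (not pict-rs API):

// Editor sketch (std-only), mirroring the sizing used by the constructor below.
fn pool_sizes() -> (u32, u32) {
    let parallelism = std::thread::available_parallelism()
        .map(|p| p.get() as u32)
        .unwrap_or(1);

    let primary = parallelism * 8; // general query traffic
    let notifier = parallelism.min(4); // LISTEN connections need only a few
    (primary, notifier)
}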
@@ -298,22 +319,29 @@ impl PostgresRepo {
            .map(|u| u.into())
            .unwrap_or(1_usize);

        let (tx, rx) = flume::bounded(10);
        let (tx, rx) = crate::sync::channel(10);

        let pool = build_pool(
            &postgres_url,
            tx.clone(),
            connector.clone(),
            parallelism as u32 * 8,
        )
        .await?;

        let notifier_pool =
            build_pool(&postgres_url, tx, connector, parallelism.min(4) as u32).await?;

        let inner = Arc::new(Inner {
            health_count: AtomicU64::new(0),
            pool: build_pool(
                &postgres_url,
                tx.clone(),
                connector.clone(),
                parallelism * 8,
            )?,
            notifier_pool: build_pool(&postgres_url, tx, connector, parallelism.min(4))?,
            pool,
            notifier_pool,
            queue_notifications: DashMap::new(),
            upload_notifications: DashMap::new(),
            keyed_notifications: NotificationMap::new(),
        });

        let handle = crate::sync::abort_on_drop(crate::sync::spawn(
        let handle = crate::sync::abort_on_drop(crate::sync::spawn_sendable(
            "postgres-delegate-notifications",
            delegate_notifications(rx, inner.clone(), parallelism * 8),
        ));

@@ -326,15 +354,114 @@ impl PostgresRepo {
        })
    }

    async fn get_connection(&self) -> Result<Object<AsyncPgConnection>, PostgresError> {
        self.inner.get_connection().await
    async fn get_connection(
        &self,
    ) -> Result<PooledConnection<'_, AsyncPgConnection>, PostgresError> {
        self.inner
            .get_connection()
            .with_poll_timer("postgres-get-connection")
            .await
    }

    async fn get_notifier_connection(&self) -> Result<Object<AsyncPgConnection>, PostgresError> {
        self.inner.get_notifier_connection().await
    async fn get_notifier_connection(
        &self,
    ) -> Result<PooledConnection<'_, AsyncPgConnection>, PostgresError> {
        self.inner
            .get_notifier_connection()
            .with_poll_timer("postgres-get-notifier-connection")
            .await
    }

    async fn insert_keyed_notifier(
        &self,
        input_key: &str,
    ) -> Result<Result<(), AlreadyInserted>, PostgresError> {
        use schema::keyed_notifications::dsl::*;

        let mut conn = self.get_connection().await?;

        let timestamp = to_primitive(time::OffsetDateTime::now_utc());

        diesel::delete(keyed_notifications)
            .filter(heartbeat.le(timestamp.saturating_sub(time::Duration::minutes(2))))
            .execute(&mut conn)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        let res = diesel::insert_into(keyed_notifications)
            .values(key.eq(input_key))
            .execute(&mut conn)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?;

        match res {
            Ok(_) => Ok(Ok(())),
            Err(diesel::result::Error::DatabaseError(
                diesel::result::DatabaseErrorKind::UniqueViolation,
                _,
            )) => Ok(Err(AlreadyInserted)),
            Err(e) => Err(PostgresError::Diesel(e)),
        }
    }

    async fn keyed_notifier_heartbeat(&self, input_key: &str) -> Result<(), PostgresError> {
        use schema::keyed_notifications::dsl::*;

        let mut conn = self.get_connection().await?;

        let timestamp = to_primitive(time::OffsetDateTime::now_utc());

        diesel::update(keyed_notifications)
            .filter(key.eq(input_key))
            .set(heartbeat.eq(timestamp))
            .execute(&mut conn)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }

    fn listen_on_key(&self, key: Arc<str>) -> NotificationEntry {
        self.inner.keyed_notifications.register_interest(key)
    }

    async fn register_interest(&self) -> Result<(), PostgresError> {
        let mut notifier_conn = self.get_notifier_connection().await?;

        diesel::sql_query("LISTEN keyed_notification_channel;")
            .execute(&mut notifier_conn)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }

    async fn clear_keyed_notifier(&self, input_key: String) -> Result<(), PostgresError> {
        use schema::keyed_notifications::dsl::*;

        let mut conn = self.get_connection().await?;

        diesel::delete(keyed_notifications)
            .filter(key.eq(input_key))
            .execute(&mut conn)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }
}

struct AlreadyInserted;

struct GetConnectionMetricsGuard {
    start: Instant,
    armed: bool,
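Note (editor sketch): the helpers above form a cross-process claim: inserting a row into keyed_notifications takes the claim, the heartbeat column keeps it alive past the two-minute stale-claim sweep, and deleting the row releases it (and, via the trigger in the migration further down, notifies every waiter). A rough worker-side flow, assuming access to these private helpers — not real pict-rs code:

// Sketch only: claim -> heartbeat while working -> clear on completion.
async fn process_with_claim(repo: &PostgresRepo, key: &str) -> Result<(), PostgresError> {
    match repo.insert_keyed_notifier(key).await? {
        Ok(()) => {
            // we hold the claim; refresh the heartbeat periodically so the
            // two-minute cleanup in insert_keyed_notifier doesn't evict us
            repo.keyed_notifier_heartbeat(key).await?;
            // ... do the work ...
            // deleting the row notifies all waiters via the DB trigger
            repo.clear_keyed_notifier(key.to_string()).await?;
        }
        Err(AlreadyInserted) => {
            // someone else holds the claim; wait on the NotificationEntry instead
        }
    }
    Ok(())
}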
@@ -363,7 +490,9 @@ impl Drop for GetConnectionMetricsGuard {

impl Inner {
    #[tracing::instrument(level = "trace", skip(self))]
    async fn get_connection(&self) -> Result<Object<AsyncPgConnection>, PostgresError> {
    async fn get_connection(
        &self,
    ) -> Result<PooledConnection<'_, AsyncPgConnection>, PostgresError> {
        let guard = GetConnectionMetricsGuard::guard();

        let obj = self.pool.get().await.map_err(PostgresError::Pool)?;

@@ -374,7 +503,9 @@ impl Inner {
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn get_notifier_connection(&self) -> Result<Object<AsyncPgConnection>, PostgresError> {
    async fn get_notifier_connection(
        &self,
    ) -> Result<PooledConnection<'_, AsyncPgConnection>, PostgresError> {
        let guard = GetConnectionMetricsGuard::guard();

        let obj = self

@@ -403,13 +534,15 @@ impl Inner {
}

impl UploadInterest {
    async fn notified_timeout(&self, timeout: Duration) -> Result<(), tokio::time::error::Elapsed> {
    fn notified_timeout(
        &self,
        timeout: Duration,
    ) -> impl Future<Output = Result<(), tokio::time::error::Elapsed>> + '_ {
        self.interest
            .as_ref()
            .expect("interest exists")
            .notified()
            .with_timeout(timeout)
            .await
    }
}

@@ -477,12 +610,18 @@ impl<'a> UploadNotifierState<'a> {
    }
}

impl<'a> KeyedNotifierState<'a> {
    fn handle(&self, key: &str) {
        self.inner.keyed_notifications.notify(key);
    }
}

type BoxFuture<'a, T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send + 'a>>;
type ConfigFn =
    Box<dyn Fn(&str) -> BoxFuture<'_, ConnectionResult<AsyncPgConnection>> + Send + Sync + 'static>;

async fn delegate_notifications(
    receiver: flume::Receiver<Notification>,
    mut receiver: tokio::sync::mpsc::Receiver<Notification>,
    inner: Arc<Inner>,
    capacity: usize,
) {

@@ -495,7 +634,9 @@ async fn delegate_notifications(
    let upload_notifier_state = UploadNotifierState { inner: &inner };

    while let Ok(notification) = receiver.recv_async().await {
    let keyed_notifier_state = KeyedNotifierState { inner: &inner };

    while let Some(notification) = receiver.recv().await {
        tracing::trace!("delegate_notifications: looping");
        metrics::counter!(crate::init_metrics::POSTGRES_NOTIFICATION).increment(1);

@@ -508,6 +649,10 @@ async fn delegate_notifications(
                // new upload finished
                upload_notifier_state.handle(notification.payload());
            }
            "keyed_notification_channel" => {
                // new keyed notification
                keyed_notifier_state.handle(notification.payload());
            }
            channel => {
                tracing::info!(
                    "Unhandled postgres notification: {channel}: {}",

@@ -521,8 +666,8 @@ async fn delegate_notifications(
}

fn build_handler(
    sender: flume::Sender<Notification>,
    connector: Option<MakeRustlsConnect>,
    sender: tokio::sync::mpsc::Sender<Notification>,
    connector: Option<MakeRustlsConnect<AwsLcRsDigest>>,
) -> ConfigFn {
    Box::new(
        move |config: &str| -> BoxFuture<'_, ConnectionResult<AsyncPgConnection>> {

@@ -563,13 +708,16 @@ fn build_handler(
}

fn spawn_db_notification_task<S>(
    sender: flume::Sender<Notification>,
    sender: tokio::sync::mpsc::Sender<Notification>,
    mut conn: Connection<Socket, S>,
) where
    S: tokio_postgres::tls::TlsStream + Unpin + 'static,
    S: tokio_postgres::tls::TlsStream + Send + Unpin + 'static,
{
    crate::sync::spawn("postgres-notifications", async move {
        while let Some(res) = std::future::poll_fn(|cx| conn.poll_message(cx)).await {
    crate::sync::spawn_sendable("postgres-notifications", async move {
        while let Some(res) = std::future::poll_fn(|cx| conn.poll_message(cx))
            .with_poll_timer("poll-message")
            .await
        {
            tracing::trace!("db_notification_task: looping");

            match res {

@@ -581,7 +729,7 @@ fn spawn_db_notification_task<S>(
                    tracing::warn!("Database Notice {e:?}");
                }
                Ok(AsyncMessage::Notification(notification)) => {
                    if sender.send_async(notification).await.is_err() {
                    if sender.send(notification).await.is_err() {
                        tracing::warn!("Missed notification. Are we shutting down?");
                    }
                }
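Note (editor sketch): the hunks above swap flume for tokio's bounded mpsc channel. The key properties relied on here are backpressure when the buffer is full and a failing `send` once the receiver is dropped, which is the shutdown signal the warning above checks for. A minimal standalone illustration (tokio with rt and macros features assumed):

// Minimal sketch of the bounded-channel pattern used above.
#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<String>(10);

    tokio::spawn(async move {
        // send().await blocks when the buffer is full, and returns Err
        // only once the receiving side has been dropped
        if tx.send("payload".into()).await.is_err() {
            eprintln!("receiver gone - shutting down");
        }
    });

    while let Some(msg) = rx.recv().await {
        println!("got {msg}");
    }
}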
@@ -826,6 +974,178 @@ impl HashRepo for PostgresRepo {
        Ok(opt.map(Arc::from))
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn relate_blurhash(
        &self,
        input_hash: Hash,
        input_blurhash: Arc<str>,
    ) -> Result<(), RepoError> {
        use schema::hashes::dsl::*;

        let mut conn = self.get_connection().await?;

        diesel::update(hashes)
            .filter(hash.eq(&input_hash))
            .set(blurhash.eq(input_blurhash.as_ref()))
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_HASHES_RELATE_BLURHASH)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn blurhash(&self, input_hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
        use schema::hashes::dsl::*;

        let mut conn = self.get_connection().await?;

        let opt = hashes
            .select(blurhash)
            .filter(hash.eq(&input_hash))
            .get_result::<Option<String>>(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_HASHES_BLURHASH)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .optional()
            .map_err(PostgresError::Diesel)?
            .flatten()
            .map(Arc::from);

        Ok(opt)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn relate_motion_identifier(
        &self,
        input_hash: Hash,
        input_identifier: &Arc<str>,
    ) -> Result<(), RepoError> {
        use schema::hashes::dsl::*;

        let mut conn = self.get_connection().await?;

        diesel::update(hashes)
            .filter(hash.eq(&input_hash))
            .set(motion_identifier.eq(input_identifier.as_ref()))
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_HASHES_RELATE_MOTION_IDENTIFIER)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn motion_identifier(&self, input_hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
        use schema::hashes::dsl::*;

        let mut conn = self.get_connection().await?;

        let opt = hashes
            .select(motion_identifier)
            .filter(hash.eq(&input_hash))
            .get_result::<Option<String>>(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_HASHES_MOTION_IDENTIFIER)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .optional()
            .map_err(PostgresError::Diesel)?
            .flatten()
            .map(Arc::from);

        Ok(opt)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn cleanup_hash(&self, input_hash: Hash) -> Result<(), RepoError> {
        let mut conn = self.get_connection().await?;

        conn.transaction(|conn| {
            Box::pin(async move {
                diesel::delete(schema::variants::dsl::variants)
                    .filter(schema::variants::dsl::hash.eq(&input_hash))
                    .execute(conn)
                    .with_metrics(crate::init_metrics::POSTGRES_VARIANTS_CLEANUP)
                    .await?;

                diesel::delete(schema::hashes::dsl::hashes)
                    .filter(schema::hashes::dsl::hash.eq(&input_hash))
                    .execute(conn)
                    .with_metrics(crate::init_metrics::POSTGRES_HASHES_CLEANUP)
                    .await
            })
        })
        .await
        .map_err(PostgresError::Diesel)?;

        Ok(())
    }
}

#[async_trait::async_trait(?Send)]
impl VariantRepo for PostgresRepo {
    #[tracing::instrument(level = "debug", skip(self))]
    async fn claim_variant_processing_rights(
        &self,
        hash: Hash,
        variant: String,
    ) -> Result<Result<(), NotificationEntry>, RepoError> {
        let key = Arc::from(format!("{}{variant}", hash.to_base64()));
        let entry = self.listen_on_key(Arc::clone(&key));

        self.register_interest().await?;

        if self
            .variant_identifier(hash.clone(), variant.clone())
            .await?
            .is_some()
        {
            return Ok(Err(entry));
        }

        match self.insert_keyed_notifier(&key).await? {
            Ok(()) => Ok(Ok(())),
            Err(AlreadyInserted) => Ok(Err(entry)),
        }
    }

    async fn variant_waiter(
        &self,
        hash: Hash,
        variant: String,
    ) -> Result<NotificationEntry, RepoError> {
        let key = Arc::from(format!("{}{variant}", hash.to_base64()));
        let entry = self.listen_on_key(key);

        self.register_interest().await?;

        Ok(entry)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn variant_heartbeat(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
        let key = format!("{}{variant}", hash.to_base64());

        self.keyed_notifier_heartbeat(&key)
            .await
            .map_err(Into::into)
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn notify_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
        let key = format!("{}{variant}", hash.to_base64());

        self.clear_keyed_notifier(key).await.map_err(Into::into)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn relate_variant_identifier(
        &self,

@@ -841,7 +1161,7 @@ impl HashRepo for PostgresRepo {
            .values((
                hash.eq(&input_hash),
                variant.eq(&input_variant),
                identifier.eq(input_identifier.as_ref()),
                identifier.eq(input_identifier.to_string()),
            ))
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_VARIANTS_RELATE_VARIANT_IDENTIFIER)

@@ -929,76 +1249,6 @@ impl HashRepo for PostgresRepo {

        Ok(())
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn relate_motion_identifier(
        &self,
        input_hash: Hash,
        input_identifier: &Arc<str>,
    ) -> Result<(), RepoError> {
        use schema::hashes::dsl::*;

        let mut conn = self.get_connection().await?;

        diesel::update(hashes)
            .filter(hash.eq(&input_hash))
            .set(motion_identifier.eq(input_identifier.as_ref()))
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_HASHES_RELATE_MOTION_IDENTIFIER)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn motion_identifier(&self, input_hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
        use schema::hashes::dsl::*;

        let mut conn = self.get_connection().await?;

        let opt = hashes
            .select(motion_identifier)
            .filter(hash.eq(&input_hash))
            .get_result::<Option<String>>(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_HASHES_MOTION_IDENTIFIER)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .optional()
            .map_err(PostgresError::Diesel)?
            .flatten()
            .map(Arc::from);

        Ok(opt)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn cleanup_hash(&self, input_hash: Hash) -> Result<(), RepoError> {
        let mut conn = self.get_connection().await?;

        conn.transaction(|conn| {
            Box::pin(async move {
                diesel::delete(schema::variants::dsl::variants)
                    .filter(schema::variants::dsl::hash.eq(&input_hash))
                    .execute(conn)
                    .with_metrics(crate::init_metrics::POSTGRES_VARIANTS_CLEANUP)
                    .await?;

                diesel::delete(schema::hashes::dsl::hashes)
                    .filter(schema::hashes::dsl::hash.eq(&input_hash))
                    .execute(conn)
                    .with_metrics(crate::init_metrics::POSTGRES_HASHES_CLEANUP)
                    .await
            })
        })
        .await
        .map_err(PostgresError::Diesel)?;

        Ok(())
    }
}

#[async_trait::async_trait(?Send)]

@@ -1162,24 +1412,6 @@ impl SettingsRepo for PostgresRepo {

        Ok(opt)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    async fn remove(&self, input_key: &'static str) -> Result<(), RepoError> {
        use schema::settings::dsl::*;

        let mut conn = self.get_connection().await?;

        diesel::delete(settings)
            .filter(key.eq(input_key))
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_SETTINGS_REMOVE)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

        Ok(())
    }
}

#[async_trait::async_trait(?Send)]

@@ -1197,16 +1429,22 @@ impl DetailsRepo for PostgresRepo {
        let value =
            serde_json::to_value(&input_details.inner).map_err(PostgresError::SerializeDetails)?;

        diesel::insert_into(details)
        let res = diesel::insert_into(details)
            .values((identifier.eq(input_identifier.as_ref()), json.eq(&value)))
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_DETAILS_RELATE)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;
            .map_err(|_| PostgresError::DbTimeout)?;

        Ok(())
        match res {
            Ok(_)
            | Err(diesel::result::Error::DatabaseError(
                diesel::result::DatabaseErrorKind::UniqueViolation,
                _,
            )) => Ok(()),
            Err(e) => Err(PostgresError::Diesel(e).into()),
        }
    }

    #[tracing::instrument(level = "debug", skip(self))]

@@ -1313,7 +1551,7 @@ impl QueueRepo for PostgresRepo {
        }
    }

    #[tracing::instrument(level = "debug", skip(self))]
    #[tracing::instrument(level = "debug", skip_all, fields(job_id))]
    async fn pop(
        &self,
        queue_name: &'static str,

@@ -1339,6 +1577,7 @@ impl QueueRepo for PostgresRepo {
            .execute(&mut notifier_conn)
            .with_metrics(crate::init_metrics::POSTGRES_QUEUE_LISTEN)
            .with_timeout(Duration::from_secs(5))
            .with_poll_timer("pop-listen")
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

@@ -1359,6 +1598,7 @@ impl QueueRepo for PostgresRepo {
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_QUEUE_REQUEUE)
            .with_timeout(Duration::from_secs(5))
            .with_poll_timer("pop-reset-jobs")
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;

@@ -1375,7 +1615,8 @@ impl QueueRepo for PostgresRepo {
                    queue_alias
                        .field(status)
                        .eq(JobStatus::New)
                        .and(queue_alias.field(queue).eq(queue_name)),
                        .and(queue_alias.field(queue).eq(queue_name))
                        .and(queue_alias.field(retry).ge(1)),
                )
                .order(queue_alias.field(queue_time))
                .for_update()

@@ -1394,26 +1635,29 @@ impl QueueRepo for PostgresRepo {
                .get_result(&mut conn)
                .with_metrics(crate::init_metrics::POSTGRES_QUEUE_CLAIM)
                .with_timeout(Duration::from_secs(5))
                .with_poll_timer("pop-claim-job")
                .await
                .map_err(|_| PostgresError::DbTimeout)?
                .optional()
                .map_err(PostgresError::Diesel)?;

            if let Some((job_id, job_json)) = opt {
                tracing::Span::current().record("job_id", &format!("{job_id}"));

                guard.disarm();
                tracing::debug!("{job_json}");
                return Ok((JobId(job_id), job_json));
            }

            drop(conn);
            if notifier
            match notifier
                .notified()
                .with_timeout(Duration::from_secs(5))
                .with_poll_timer("pop-wait-notify")
                .await
                .is_ok()
            {
                tracing::debug!("Notified");
            } else {
                tracing::debug!("Timed out");
                Ok(()) => tracing::debug!("Notified"),
                Err(_) => tracing::trace!("Timed out"),
            }
        }
    }
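Note (editor sketch): pop() now caps the wait on the Notify at five seconds, so a NOTIFY that slips through is at worst a short delay before the next poll of the queue, not a hang. The generic pattern, standalone (tokio and tracing assumed):

// Wake on notification or timeout, then loop back and re-check the queue either way.
use std::time::Duration;
use tokio::sync::Notify;

async fn wait_for_work(notifier: &Notify) {
    match tokio::time::timeout(Duration::from_secs(5), notifier.notified()).await {
        Ok(()) => tracing::debug!("Notified"),
        Err(_) => tracing::trace!("Timed out"), // fall through and poll again
    }
}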
@@ -1454,23 +1698,62 @@ impl QueueRepo for PostgresRepo {
        queue_name: &'static str,
        worker_id: Uuid,
        job_id: JobId,
        job_status: JobResult,
    ) -> Result<(), RepoError> {
        use schema::job_queue::dsl::*;

        let mut conn = self.get_connection().await?;

        diesel::delete(job_queue)
            .filter(
                id.eq(job_id.0)
                    .and(queue.eq(queue_name))
                    .and(worker.eq(worker_id)),
            )
            .execute(&mut conn)
            .with_metrics(crate::init_metrics::POSTGRES_QUEUE_COMPLETE)
            .with_timeout(Duration::from_secs(5))
            .await
            .map_err(|_| PostgresError::DbTimeout)?
            .map_err(PostgresError::Diesel)?;
        let count = if matches!(job_status, JobResult::Failure) {
            diesel::update(job_queue)
                .filter(
                    id.eq(job_id.0)
                        .and(queue.eq(queue_name))
                        .and(worker.eq(worker_id)),
                )
                .set((retry.eq(retry - 1), worker.eq(Option::<Uuid>::None)))
                .execute(&mut conn)
                .with_metrics(crate::init_metrics::POSTGRES_QUEUE_RETRY)
                .with_timeout(Duration::from_secs(5))
                .await
                .map_err(|_| PostgresError::DbTimeout)?
                .map_err(PostgresError::Diesel)?;

            diesel::delete(job_queue)
                .filter(id.eq(job_id.0).and(retry.le(0)))
                .execute(&mut conn)
                .with_metrics(crate::init_metrics::POSTGRES_QUEUE_CLEANUP)
                .with_timeout(Duration::from_secs(5))
                .await
                .map_err(|_| PostgresError::DbTimeout)?
                .map_err(PostgresError::Diesel)?
        } else {
            diesel::delete(job_queue)
                .filter(
                    id.eq(job_id.0)
                        .and(queue.eq(queue_name))
                        .and(worker.eq(worker_id)),
                )
                .execute(&mut conn)
                .with_metrics(crate::init_metrics::POSTGRES_QUEUE_COMPLETE)
                .with_timeout(Duration::from_secs(5))
                .await
                .map_err(|_| PostgresError::DbTimeout)?
                .map_err(PostgresError::Diesel)?
        };

        match job_status {
            JobResult::Success => tracing::debug!("completed {job_id:?}"),
            JobResult::Failure if count == 0 => {
                tracing::info!("{job_id:?} failed, marked for retry")
            }
            JobResult::Failure => tracing::warn!("{job_id:?} failed permantently"),
            JobResult::Aborted => tracing::warn!("{job_id:?} dead"),
        }

        if count > 0 {
            tracing::debug!("Deleted {count} jobs");
        }

        Ok(())
    }
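Note (editor sketch): a worked example of the accounting above. retry starts at 5 (see the job_queue migration further down), each failure decrements it, and the follow-up delete only matches once retry reaches 0 — so a job gets five attempts before removal, and pop() skips anything with retry below 1:

// Illustrative retry ledger for one persistently failing job.
fn main() {
    let mut retry = 5; // initial value from the job_queue migration below
    for attempt in 1..=5 {
        retry -= 1; // UPDATE job_queue SET retry = retry - 1
        let deleted = u32::from(retry <= 0); // DELETE ... WHERE retry <= 0
        assert_eq!(deleted, u32::from(attempt == 5));
    }
    // net effect: at most five attempts, then the row is gone
}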
@@ -0,0 +1,15 @@
use barrel::backend::Pg;
use barrel::{types, Migration};

pub(crate) fn migration() -> String {
    let mut m = Migration::new();

    m.change_table("hashes", |t| {
        t.add_column(
            "blurhash",
            types::text().size(60).nullable(true).unique(false),
        );
    });

    m.make::<Pg>().to_string()
}
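Note (editor sketch): this migration adds a nullable text column (up to 60 characters) for caching a blurhash placeholder per hash. A rough round-trip through the trait methods added earlier in this diff — the wrapper function and the sample blurhash value are illustrative, not pict-rs API:

// Sketch: store a placeholder, then read it back when serving listings.
async fn cache_blurhash(repo: &impl HashRepo, hash: Hash) -> Result<(), RepoError> {
    // example blurhash string; in practice this is computed from the image
    let placeholder: std::sync::Arc<str> = std::sync::Arc::from("LEHV6nWB2yk8pyo0adR*.7kCMdnj");
    repo.relate_blurhash(hash.clone(), placeholder).await?;

    if let Some(bh) = repo.blurhash(hash).await? {
        println!("placeholder: {bh}");
    }
    Ok(())
}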
@@ -0,0 +1,12 @@
use barrel::backend::Pg;
use barrel::{types, Migration};

pub(crate) fn migration() -> String {
    let mut m = Migration::new();

    m.change_table("job_queue", |t| {
        t.add_column("retry", types::integer().nullable(false).default(5));
    });

    m.make::<Pg>().to_string()
}

@@ -0,0 +1,50 @@
use barrel::backend::Pg;
use barrel::functions::AutogenFunction;
use barrel::{types, Migration};

pub(crate) fn migration() -> String {
    let mut m = Migration::new();

    m.create_table("keyed_notifications", |t| {
        t.add_column(
            "key",
            types::text().primary(true).unique(true).nullable(false),
        );
        t.add_column(
            "heartbeat",
            types::datetime()
                .nullable(false)
                .default(AutogenFunction::CurrentTimestamp),
        );

        t.add_index(
            "keyed_notifications_heartbeat_index",
            types::index(["heartbeat"]),
        );
    });

    m.inject_custom(
        r#"
CREATE OR REPLACE FUNCTION keyed_notify()
    RETURNS trigger AS
$$
BEGIN
    PERFORM pg_notify('keyed_notification_channel', OLD.key);
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
        "#
        .trim(),
    );

    m.inject_custom(
        r#"
CREATE TRIGGER keyed_notification_removed
    AFTER DELETE
    ON keyed_notifications
    FOR EACH ROW
EXECUTE PROCEDURE keyed_notify();
        "#,
    );
    m.make::<Pg>().to_string()
}
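Note (editor sketch): this migration is the database half of the keyed-notifier flow. insert_keyed_notifier creates the claim row; clear_keyed_notifier deletes it, which fires keyed_notify() and pushes the deleted key to every connection that ran LISTEN, where delegate_notifications routes it to waiters. A minimal diesel-async fragment, assuming an established connection named conn:

// Sketch only - `conn` is an assumed diesel-async connection, not pict-rs API.
diesel::sql_query("LISTEN keyed_notification_channel;")
    .execute(&mut conn)
    .await?;

// Any delete on the claims table now notifies every listener with OLD.key:
diesel::sql_query("DELETE FROM keyed_notifications WHERE key = 'examplekey';")
    .execute(&mut conn)
    .await?;
// 'examplekey' arrives as a notification payload and is dispatched by
// delegate_notifications to the matching NotificationMap entry.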
@@ -27,6 +27,7 @@ diesel::table! {
        identifier -> Text,
        motion_identifier -> Nullable<Text>,
        created_at -> Timestamp,
        blurhash -> Nullable<Text>,
    }
}

@@ -43,6 +44,14 @@ diesel::table! {
        queue_time -> Timestamp,
        heartbeat -> Nullable<Timestamp>,
        unique_key -> Nullable<Text>,
        retry -> Int4,
    }
}

diesel::table! {
    keyed_notifications (key) {
        key -> Text,
        heartbeat -> Timestamp,
    }
}

@@ -107,6 +116,7 @@ diesel::allow_tables_to_appear_in_same_query!(
    details,
    hashes,
    job_queue,
    keyed_notifications,
    proxies,
    refinery_schema_history,
    settings,

src/repo/sled.rs (443 changed lines)

@@ -1,9 +1,11 @@
use crate::{
    details::HumanDate,
    error_code::{ErrorCode, OwnedErrorCode},
    future::{WithPollTimer, WithTimeout},
    serde_str::Serde,
    stream::{from_iterator, LocalBoxStream},
};
use dashmap::DashMap;
use sled::{transaction::TransactionError, Db, IVec, Transactional, Tree};
use std::{
    collections::HashMap,

@@ -12,6 +14,7 @@ use std::{
        atomic::{AtomicU64, Ordering},
        Arc, RwLock,
    },
    time::Duration,
};
use tokio::sync::Notify;
use url::Url;

@@ -20,10 +23,11 @@ use uuid::Uuid;
use super::{
    hash::Hash,
    metrics::{PopMetricsGuard, PushMetricsGuard, WaitMetricsGuard},
    notification_map::{NotificationEntry, NotificationMap},
    Alias, AliasAccessRepo, AliasAlreadyExists, AliasRepo, BaseRepo, DeleteToken, Details,
    DetailsRepo, FullRepo, HashAlreadyExists, HashPage, HashRepo, JobId, OrderedHash, ProxyRepo,
    QueueRepo, RepoError, SettingsRepo, StoreMigrationRepo, UploadId, UploadRepo, UploadResult,
    VariantAccessRepo, VariantAlreadyExists,
    DetailsRepo, FullRepo, HashAlreadyExists, HashPage, HashRepo, JobId, JobResult, OrderedHash,
    ProxyRepo, QueueRepo, RepoError, SettingsRepo, StoreMigrationRepo, UploadId, UploadRepo,
    UploadResult, VariantAccessRepo, VariantAlreadyExists, VariantRepo,
};

macro_rules! b {

@@ -91,6 +95,7 @@ pub(crate) struct SledRepo {
    hash_identifiers: Tree,
    hash_variant_identifiers: Tree,
    hash_motion_identifiers: Tree,
    hash_blurhashes: Tree,
    aliases: Tree,
    alias_hashes: Tree,
    alias_delete_tokens: Tree,

@@ -98,6 +103,7 @@ pub(crate) struct SledRepo {
    unique_jobs: Tree,
    unique_jobs_inverse: Tree,
    job_state: Tree,
    job_retries: Tree,
    alias_access: Tree,
    inverse_alias_access: Tree,
    variant_access: Tree,

@@ -109,6 +115,8 @@ pub(crate) struct SledRepo {
    migration_identifiers: Tree,
    cache_capacity: u64,
    export_path: PathBuf,
    variant_process_map: DashMap<(Hash, String), time::OffsetDateTime>,
    notifications: NotificationMap,
    db: Db,
}

@@ -132,6 +140,7 @@ impl SledRepo {
            hash_identifiers: db.open_tree("pict-rs-hash-identifiers-tree")?,
            hash_variant_identifiers: db.open_tree("pict-rs-hash-variant-identifiers-tree")?,
            hash_motion_identifiers: db.open_tree("pict-rs-hash-motion-identifiers-tree")?,
            hash_blurhashes: db.open_tree("pict-rs-hash-blurhashes-tree")?,
            aliases: db.open_tree("pict-rs-aliases-tree")?,
            alias_hashes: db.open_tree("pict-rs-alias-hashes-tree")?,
            alias_delete_tokens: db.open_tree("pict-rs-alias-delete-tokens-tree")?,

@@ -139,6 +148,7 @@ impl SledRepo {
            unique_jobs: db.open_tree("pict-rs-unique-jobs-tree")?,
            unique_jobs_inverse: db.open_tree("pict-rs-unique-jobs-inverse-tree")?,
            job_state: db.open_tree("pict-rs-job-state-tree")?,
            job_retries: db.open_tree("pict-rs-job-retries-tree")?,
            alias_access: db.open_tree("pict-rs-alias-access-tree")?,
            inverse_alias_access: db.open_tree("pict-rs-inverse-alias-access-tree")?,
            variant_access: db.open_tree("pict-rs-variant-access-tree")?,

@@ -150,6 +160,8 @@ impl SledRepo {
            migration_identifiers: db.open_tree("pict-rs-migration-identifiers-tree")?,
            cache_capacity,
            export_path,
            variant_process_map: DashMap::new(),
            notifications: NotificationMap::new(),
            db,
        })
    }

@@ -651,28 +663,37 @@ impl QueueRepo for SledRepo {
        let unique_jobs = self.unique_jobs.clone();
        let unique_jobs_inverse = self.unique_jobs_inverse.clone();
        let job_state = self.job_state.clone();
        let job_retries = self.job_retries.clone();

        let res = crate::sync::spawn_blocking("sled-io", move || {
            (&queue, &unique_jobs, &unique_jobs_inverse, &job_state).transaction(
                |(queue, unique_jobs, unique_jobs_inverse, job_state)| {
                    let state = JobState::pending();

                    queue.insert(&key[..], &job[..])?;
                    if let Some(unique_key) = unique_key {
                        if unique_jobs
                            .insert(unique_key.as_bytes(), &key[..])?
                            .is_some()
                        {
                            return sled::transaction::abort(());
                        }

                        unique_jobs_inverse.insert(&key[..], unique_key.as_bytes())?;
                    }
                    job_state.insert(&key[..], state.as_bytes())?;

                    Ok(())
                },
            (
                &queue,
                &unique_jobs,
                &unique_jobs_inverse,
                &job_state,
                &job_retries,
            )
                .transaction(
                    |(queue, unique_jobs, unique_jobs_inverse, job_state, job_retries)| {
                        let state = JobState::pending();

                        queue.insert(&key[..], &job[..])?;
                        if let Some(unique_key) = unique_key {
                            if unique_jobs
                                .insert(unique_key.as_bytes(), &key[..])?
                                .is_some()
                            {
                                return sled::transaction::abort(());
                            }

                            unique_jobs_inverse.insert(&key[..], unique_key.as_bytes())?;
                        }
                        job_state.insert(&key[..], state.as_bytes())?;
                        job_retries.insert(&key[..], &(5_u64.to_be_bytes())[..])?;

                        Ok(())
                    },
                )
        })
        .await
        .map_err(|_| RepoError::Canceled)?;

@@ -701,7 +722,7 @@ impl QueueRepo for SledRepo {
        Ok(Some(id))
    }

    #[tracing::instrument(skip(self, worker_id), fields(job_id))]
    #[tracing::instrument(skip_all, fields(job_id))]
    async fn pop(
        &self,
        queue_name: &'static str,

@@ -717,7 +738,6 @@ impl QueueRepo for SledRepo {
        let queue = self.queue.clone();
        let job_state = self.job_state.clone();

        let span = tracing::Span::current();
        let opt = crate::sync::spawn_blocking("sled-io", move || {
            // Job IDs are generated with Uuid version 7 - defining their first bits as a
            // timestamp. Scanning a prefix should give us jobs in the order they were queued.

@@ -758,8 +778,6 @@ impl QueueRepo for SledRepo {

            let job_id = JobId::from_bytes(id_bytes);

            span.record("job_id", &format!("{job_id:?}"));

            let opt = queue
                .get(&key)?
                .map(|ivec| serde_json::from_slice(&ivec[..]))

@@ -772,12 +790,16 @@ impl QueueRepo for SledRepo {

            Ok(None)
        })
        .with_poll_timer("sled-pop-spawn-blocking")
        .await
        .map_err(|_| RepoError::Canceled)??;

        if let Some(tup) = opt {
        if let Some((job_id, job_json)) = opt {
            tracing::Span::current().record("job_id", &format!("{}", job_id.0));

            metrics_guard.disarm();
            return Ok(tup);
            tracing::debug!("{job_json}");
            return Ok((job_id, job_json));
        }

        let opt = self

@@ -785,7 +807,7 @@ impl QueueRepo for SledRepo {
            .read()
            .unwrap()
            .get(&queue_name)
            .map(Arc::clone);
            .cloned();

        let notify = if let Some(notify) = opt {
            notify

@@ -795,7 +817,15 @@ impl QueueRepo for SledRepo {
            Arc::clone(entry)
        };

        notify.notified().await
        match notify
            .notified()
            .with_timeout(Duration::from_secs(30))
            .with_poll_timer("sled-pop-notify")
            .await
        {
            Ok(()) => tracing::debug!("Notified"),
            Err(_) => tracing::trace!("Timed out"),
        }
    }
}

@@ -834,31 +864,66 @@ impl QueueRepo for SledRepo {
        queue_name: &'static str,
        _worker_id: Uuid,
        job_id: JobId,
        job_status: JobResult,
    ) -> Result<(), RepoError> {
        let retry = matches!(job_status, JobResult::Failure);

        let key = job_key(queue_name, job_id);

        let queue = self.queue.clone();
        let unique_jobs = self.unique_jobs.clone();
        let unique_jobs_inverse = self.unique_jobs_inverse.clone();
        let job_state = self.job_state.clone();
        let job_retries = self.job_retries.clone();

        let res = crate::sync::spawn_blocking("sled-io", move || {
            (&queue, &unique_jobs, &unique_jobs_inverse, &job_state).transaction(
                |(queue, unique_jobs, unique_jobs_inverse, job_state)| {
                    queue.remove(&key[..])?;
                    if let Some(unique_key) = unique_jobs_inverse.remove(&key[..])? {
                        unique_jobs.remove(unique_key)?;
                    }
                    job_state.remove(&key[..])?;
                    Ok(())
                },
            (
                &queue,
                &unique_jobs,
                &unique_jobs_inverse,
                &job_state,
                &job_retries,
            )
                .transaction(
                    |(queue, unique_jobs, unique_jobs_inverse, job_state, job_retries)| {
                        let retries = job_retries.get(&key[..])?;

                        let retry_count = retries
                            .and_then(|ivec| ivec[0..8].try_into().ok())
                            .map(u64::from_be_bytes)
                            .unwrap_or(5_u64)
                            .saturating_sub(1);

                        if retry_count > 0 && retry {
                            job_retries.insert(&key[..], &(retry_count.to_be_bytes())[..])?;
                        } else {
                            queue.remove(&key[..])?;
                            if let Some(unique_key) = unique_jobs_inverse.remove(&key[..])? {
                                unique_jobs.remove(unique_key)?;
                            }
                            job_state.remove(&key[..])?;
                            job_retries.remove(&key[..])?;
                        }

                        Ok(retry_count > 0 && retry)
                    },
                )
        })
        .await
        .map_err(|_| RepoError::Canceled)?;

        if let Err(TransactionError::Abort(e) | TransactionError::Storage(e)) = res {
            return Err(RepoError::from(SledError::from(e)));
        match res {
            Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => {
                return Err(RepoError::from(SledError::from(e)));
            }
            Ok(retried) => match job_status {
                JobResult::Success => tracing::debug!("completed {job_id:?}"),
                JobResult::Failure if retried => {
                    tracing::info!("{job_id:?} failed, marked for retry")
                }
                JobResult::Failure => tracing::warn!("{job_id:?} failed permantently"),
                JobResult::Aborted => tracing::warn!("{job_id:?} dead"),
            },
        }

        Ok(())
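Note (editor sketch): the sled backend stores the retry counter as an 8-byte big-endian u64 in the job_retries tree. Decoding mirrors the transaction above, with missing or malformed values falling back to the default of five attempts:

// Standalone sketch of the decode logic (non-panicking slice variant).
fn decode_retries(ivec: Option<&[u8]>) -> u64 {
    ivec.and_then(|bytes| bytes.get(0..8))
        .and_then(|slice| slice.try_into().ok())
        .map(u64::from_be_bytes)
        .unwrap_or(5)
}

fn main() {
    assert_eq!(decode_retries(Some(&3_u64.to_be_bytes())), 3);
    assert_eq!(decode_retries(None), 5); // no entry yet -> default
}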
@@ -880,13 +945,6 @@ impl SettingsRepo for SledRepo {

        Ok(opt.map(|ivec| Arc::from(ivec.to_vec())))
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn remove(&self, key: &'static str) -> Result<(), RepoError> {
        b!(self.settings, settings.remove(key));

        Ok(())
    }
}

fn variant_access_key(hash: &[u8], variant: &str) -> Vec<u8> {

@@ -1272,6 +1330,197 @@ impl HashRepo for SledRepo {
        Ok(opt.map(try_into_arc_str).transpose()?)
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn relate_blurhash(&self, hash: Hash, blurhash: Arc<str>) -> Result<(), RepoError> {
        b!(
            self.hash_blurhashes,
            hash_blurhashes.insert(hash.to_bytes(), blurhash.as_bytes())
        );

        Ok(())
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn blurhash(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
        let opt = b!(self.hash_blurhashes, hash_blurhashes.get(hash.to_ivec()));

        Ok(opt.map(try_into_arc_str).transpose()?)
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn relate_motion_identifier(
        &self,
        hash: Hash,
        identifier: &Arc<str>,
    ) -> Result<(), RepoError> {
        let hash = hash.to_ivec();
        let bytes = identifier.clone();

        b!(
            self.hash_motion_identifiers,
            hash_motion_identifiers.insert(hash, bytes.as_bytes())
        );

        Ok(())
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn motion_identifier(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
        let hash = hash.to_ivec();

        let opt = b!(
            self.hash_motion_identifiers,
            hash_motion_identifiers.get(hash)
        );

        Ok(opt.map(try_into_arc_str).transpose()?)
    }

    #[tracing::instrument(skip(self))]
    async fn cleanup_hash(&self, hash: Hash) -> Result<(), RepoError> {
        let hash = hash.to_ivec();

        let hashes = self.hashes.clone();
        let hashes_inverse = self.hashes_inverse.clone();
        let hash_identifiers = self.hash_identifiers.clone();
        let hash_motion_identifiers = self.hash_motion_identifiers.clone();
        let hash_variant_identifiers = self.hash_variant_identifiers.clone();
        let hash_blurhashes = self.hash_blurhashes.clone();

        let hash2 = hash.clone();
        let variant_keys = b!(self.hash_variant_identifiers, {
            let v = hash_variant_identifiers
                .scan_prefix(hash2)
                .keys()
                .filter_map(Result::ok)
                .collect::<Vec<_>>();

            Ok(v) as Result<Vec<_>, SledError>
        });

        let res = crate::sync::spawn_blocking("sled-io", move || {
            (
                &hashes,
                &hashes_inverse,
                &hash_identifiers,
                &hash_motion_identifiers,
                &hash_variant_identifiers,
                &hash_blurhashes,
            )
                .transaction(
                    |(
                        hashes,
                        hashes_inverse,
                        hash_identifiers,
                        hash_motion_identifiers,
                        hash_variant_identifiers,
                        hash_blurhashes,
                    )| {
                        if let Some(value) = hashes.remove(&hash)? {
                            hashes_inverse.remove(value)?;
                        }

                        hash_identifiers.remove(&hash)?;
                        hash_motion_identifiers.remove(&hash)?;

                        for key in &variant_keys {
                            hash_variant_identifiers.remove(key)?;
                        }

                        hash_blurhashes.remove(&hash)?;

                        Ok(())
                    },
                )
        })
        .await
        .map_err(|_| RepoError::Canceled)?;

        match res {
            Ok(()) => Ok(()),
            Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => {
                Err(SledError::from(e).into())
            }
        }
    }
}

#[async_trait::async_trait(?Send)]
impl VariantRepo for SledRepo {
    #[tracing::instrument(level = "trace", skip(self))]
    async fn claim_variant_processing_rights(
        &self,
        hash: Hash,
        variant: String,
    ) -> Result<Result<(), NotificationEntry>, RepoError> {
        let key = (hash.clone(), variant.clone());
        let now = time::OffsetDateTime::now_utc();
        let entry = self
            .notifications
            .register_interest(Arc::from(format!("{}{variant}", hash.to_base64())));

        match self.variant_process_map.entry(key.clone()) {
            dashmap::mapref::entry::Entry::Occupied(mut occupied_entry) => {
                if occupied_entry
                    .get()
                    .saturating_add(time::Duration::minutes(2))
                    > now
                {
                    return Ok(Err(entry));
                }

                occupied_entry.insert(now);
            }
            dashmap::mapref::entry::Entry::Vacant(vacant_entry) => {
                vacant_entry.insert(now);
            }
        }

        if self.variant_identifier(hash, variant).await?.is_some() {
            self.variant_process_map.remove(&key);
            return Ok(Err(entry));
        }

        Ok(Ok(()))
    }

    async fn variant_waiter(
        &self,
        hash: Hash,
        variant: String,
    ) -> Result<NotificationEntry, RepoError> {
        let entry = self
            .notifications
            .register_interest(Arc::from(format!("{}{variant}", hash.to_base64())));

        Ok(entry)
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn variant_heartbeat(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
        let key = (hash, variant);
        let now = time::OffsetDateTime::now_utc();

        if let dashmap::mapref::entry::Entry::Occupied(mut occupied_entry) =
            self.variant_process_map.entry(key)
        {
            occupied_entry.insert(now);
        }

        Ok(())
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn notify_variant(&self, hash: Hash, variant: String) -> Result<(), RepoError> {
        let key = (hash.clone(), variant.clone());
        self.variant_process_map.remove(&key);

        let key = format!("{}{variant}", hash.to_base64());
        self.notifications.notify(&key);

        Ok(())
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn relate_variant_identifier(
        &self,

@@ -1286,7 +1535,7 @@ impl HashRepo for SledRepo {

        let hash_variant_identifiers = self.hash_variant_identifiers.clone();

        crate::sync::spawn_blocking("sled-io", move || {
        let out = crate::sync::spawn_blocking("sled-io", move || {
            hash_variant_identifiers
                .compare_and_swap(key, Option::<&[u8]>::None, Some(value.as_bytes()))
                .map(|res| res.map_err(|_| VariantAlreadyExists))

@@ -1294,7 +1543,9 @@ impl HashRepo for SledRepo {
        .await
        .map_err(|_| RepoError::Canceled)?
        .map_err(SledError::from)
        .map_err(RepoError::from)
        .map_err(RepoError::from)?;

        Ok(out)
    }

    #[tracing::instrument(level = "trace", skip(self))]

@@ -1353,98 +1604,6 @@ impl HashRepo for SledRepo {

        Ok(())
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn relate_motion_identifier(
        &self,
        hash: Hash,
        identifier: &Arc<str>,
    ) -> Result<(), RepoError> {
        let hash = hash.to_ivec();
        let bytes = identifier.clone();

        b!(
            self.hash_motion_identifiers,
            hash_motion_identifiers.insert(hash, bytes.as_bytes())
        );

        Ok(())
    }

    #[tracing::instrument(level = "trace", skip(self))]
    async fn motion_identifier(&self, hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
        let hash = hash.to_ivec();

        let opt = b!(
            self.hash_motion_identifiers,
            hash_motion_identifiers.get(hash)
        );

        Ok(opt.map(try_into_arc_str).transpose()?)
    }

    #[tracing::instrument(skip(self))]
    async fn cleanup_hash(&self, hash: Hash) -> Result<(), RepoError> {
        let hash = hash.to_ivec();

        let hashes = self.hashes.clone();
        let hashes_inverse = self.hashes_inverse.clone();
        let hash_identifiers = self.hash_identifiers.clone();
        let hash_motion_identifiers = self.hash_motion_identifiers.clone();
        let hash_variant_identifiers = self.hash_variant_identifiers.clone();

        let hash2 = hash.clone();
        let variant_keys = b!(self.hash_variant_identifiers, {
            let v = hash_variant_identifiers
                .scan_prefix(hash2)
                .keys()
                .filter_map(Result::ok)
                .collect::<Vec<_>>();

            Ok(v) as Result<Vec<_>, SledError>
        });

        let res = crate::sync::spawn_blocking("sled-io", move || {
            (
                &hashes,
                &hashes_inverse,
                &hash_identifiers,
                &hash_motion_identifiers,
                &hash_variant_identifiers,
            )
                .transaction(
                    |(
                        hashes,
                        hashes_inverse,
                        hash_identifiers,
                        hash_motion_identifiers,
                        hash_variant_identifiers,
                    )| {
                        if let Some(value) = hashes.remove(&hash)? {
                            hashes_inverse.remove(value)?;
                        }

                        hash_identifiers.remove(&hash)?;
                        hash_motion_identifiers.remove(&hash)?;

                        for key in &variant_keys {
                            hash_variant_identifiers.remove(key)?;
                        }

                        Ok(())
                    },
                )
        })
        .await
        .map_err(|_| RepoError::Canceled)?;

        match res {
            Ok(()) => Ok(()),
            Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => {
                Err(SledError::from(e).into())
            }
        }
    }
}

fn hash_alias_key(hash: &IVec, alias: &IVec) -> Vec<u8> {

src/serde_str.rs

@@ -3,7 +3,7 @@ use std::{
    str::FromStr,
};

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct Serde<T> {
    inner: T,
}

@@ -44,6 +44,17 @@ impl<T> DerefMut for Serde<T> {
    }
}

impl<T> Default for Serde<T>
where
    T: Default,
{
    fn default() -> Self {
        Serde {
            inner: T::default(),
        }
    }
}

impl<T> FromStr for Serde<T>
where
    T: FromStr,

src/store.rs (151 changed lines)

@@ -1,8 +1,6 @@
use actix_web::web::Bytes;
use futures_core::Stream;
use std::{fmt::Debug, sync::Arc};
use streem::IntoStreamer;
use tokio::io::{AsyncRead, AsyncWrite};

use crate::{bytes_stream::BytesStream, error_code::ErrorCode, stream::LocalBoxStream};

@@ -74,7 +72,7 @@ impl From<crate::store::object_store::ObjectError> for StoreError {
    fn from(value: crate::store::object_store::ObjectError) -> Self {
        match value {
            e @ crate::store::object_store::ObjectError::Status(
                actix_web::http::StatusCode::NOT_FOUND,
                reqwest::StatusCode::NOT_FOUND,
                _,
                _,
            ) => Self::ObjectNotFound(e),

@@ -86,27 +84,14 @@ impl From<crate::store::object_store::ObjectError> for StoreError {
pub(crate) trait Store: Clone + Debug {
    async fn health_check(&self) -> Result<(), StoreError>;

    async fn save_async_read<Reader>(
        &self,
        reader: Reader,
        content_type: mime::Mime,
    ) -> Result<Arc<str>, StoreError>
    where
        Reader: AsyncRead + Unpin + 'static;

    async fn save_stream<S>(
        &self,
        stream: S,
        content_type: mime::Mime,
        extension: Option<&str>,
    ) -> Result<Arc<str>, StoreError>
    where
        S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static;

    async fn save_bytes(
        &self,
        bytes: Bytes,
        content_type: mime::Mime,
    ) -> Result<Arc<str>, StoreError>;
        S: Stream<Item = std::io::Result<Bytes>>;

    fn public_url(&self, _: &Arc<str>) -> Option<url::Url>;

@@ -123,30 +108,13 @@ pub(crate) trait Store: Clone + Debug {
        from_start: Option<u64>,
        len: Option<u64>,
    ) -> Result<BytesStream, StoreError> {
        let mut buf = BytesStream::new();
        let stream = self.to_stream(identifier, from_start, len).await?;

        let mut streamer = self
            .to_stream(identifier, from_start, len)
            .await?
            .into_streamer();

        while let Some(bytes) = streamer.try_next().await.map_err(StoreError::ReadStream)? {
            tracing::trace!("to_bytes: looping");

            buf.add_bytes(bytes);
        }

        Ok(buf)
        BytesStream::try_from_stream(stream)
            .await
            .map_err(StoreError::ReadStream)
    }

    async fn read_into<Writer>(
        &self,
        identifier: &Arc<str>,
        writer: &mut Writer,
    ) -> Result<(), std::io::Error>
    where
        Writer: AsyncWrite + Unpin;

    async fn len(&self, identifier: &Arc<str>) -> Result<u64, StoreError>;

    async fn remove(&self, identifier: &Arc<str>) -> Result<(), StoreError>;
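Note (editor sketch): to_bytes now delegates the drain-into-memory loop to BytesStream::try_from_stream instead of hand-rolling it. The underlying pattern, as a standalone sketch (futures-util assumed, and a plain Vec standing in for BytesStream):

// Drain a fallible byte stream into memory, chunk by chunk.
use actix_web::web::Bytes;
use futures_util::TryStreamExt;

async fn collect_bytes<S>(mut stream: S) -> std::io::Result<Vec<Bytes>>
where
    S: futures_core::Stream<Item = std::io::Result<Bytes>> + Unpin,
{
    let mut chunks = Vec::new();
    while let Some(bytes) = stream.try_next().await? {
        chunks.push(bytes); // buffer each chunk as it arrives
    }
    Ok(chunks)
}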
@ -160,34 +128,16 @@ where
|
|||
T::health_check(self).await
|
||||
}
|
||||
|
||||
async fn save_async_read<Reader>(
|
||||
&self,
|
||||
reader: Reader,
|
||||
content_type: mime::Mime,
|
||||
) -> Result<Arc<str>, StoreError>
|
||||
where
|
||||
Reader: AsyncRead + Unpin + 'static,
|
||||
{
|
||||
T::save_async_read(self, reader, content_type).await
|
||||
}
|
||||
|
||||
async fn save_stream<S>(
|
||||
&self,
|
||||
stream: S,
|
||||
content_type: mime::Mime,
|
||||
extension: Option<&str>,
|
||||
) -> Result<Arc<str>, StoreError>
|
||||
where
|
||||
S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static,
|
||||
S: Stream<Item = std::io::Result<Bytes>>,
|
||||
{
|
||||
T::save_stream(self, stream, content_type).await
|
||||
}
|
||||
|
||||
async fn save_bytes(
|
||||
&self,
|
||||
bytes: Bytes,
|
||||
content_type: mime::Mime,
|
||||
) -> Result<Arc<str>, StoreError> {
|
||||
T::save_bytes(self, bytes, content_type).await
|
||||
T::save_stream(self, stream, content_type, extension).await
|
||||
}
|
||||
|
||||
fn public_url(&self, identifier: &Arc<str>) -> Option<url::Url> {
|
||||
|
@ -203,17 +153,6 @@ where
|
|||
T::to_stream(self, identifier, from_start, len).await
|
||||
}
|
||||
|
||||
async fn read_into<Writer>(
|
||||
&self,
|
||||
identifier: &Arc<str>,
|
||||
writer: &mut Writer,
|
||||
) -> Result<(), std::io::Error>
|
||||
where
|
||||
Writer: AsyncWrite + Unpin,
|
||||
{
|
||||
T::read_into(self, identifier, writer).await
|
||||
}
|
||||
|
||||
async fn len(&self, identifier: &Arc<str>) -> Result<u64, StoreError> {
|
||||
T::len(self, identifier).await
|
||||
}
|
||||
|
@ -231,34 +170,16 @@ where
|
|||
T::health_check(self).await
|
||||
}
|
||||
|
||||
async fn save_async_read<Reader>(
|
||||
&self,
|
||||
reader: Reader,
|
||||
content_type: mime::Mime,
|
||||
) -> Result<Arc<str>, StoreError>
|
||||
where
|
||||
Reader: AsyncRead + Unpin + 'static,
|
||||
{
|
||||
T::save_async_read(self, reader, content_type).await
|
||||
}
|
||||
|
||||
async fn save_stream<S>(
|
||||
&self,
|
||||
stream: S,
|
||||
content_type: mime::Mime,
|
||||
extension: Option<&str>,
|
||||
) -> Result<Arc<str>, StoreError>
|
||||
where
|
||||
S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static,
|
||||
S: Stream<Item = std::io::Result<Bytes>>,
|
||||
{
|
||||
T::save_stream(self, stream, content_type).await
|
||||
}
|
||||
|
||||
async fn save_bytes(
|
||||
&self,
|
||||
bytes: Bytes,
|
||||
content_type: mime::Mime,
|
||||
) -> Result<Arc<str>, StoreError> {
|
||||
T::save_bytes(self, bytes, content_type).await
|
||||
T::save_stream(self, stream, content_type, extension).await
|
||||
}
|
||||
|
||||
fn public_url(&self, identifier: &Arc<str>) -> Option<url::Url> {
|
||||
|
@ -274,17 +195,6 @@ where
|
|||
T::to_stream(self, identifier, from_start, len).await
|
||||
}
|
||||
|
||||
async fn read_into<Writer>(
|
||||
&self,
|
||||
identifier: &Arc<str>,
|
||||
writer: &mut Writer,
|
||||
) -> Result<(), std::io::Error>
|
||||
where
|
||||
Writer: AsyncWrite + Unpin,
|
||||
{
|
||||
T::read_into(self, identifier, writer).await
|
||||
}
|
||||
|
||||
async fn len(&self, identifier: &Arc<str>) -> Result<u64, StoreError> {
|
||||
T::len(self, identifier).await
|
||||
}
|
||||
|
@ -302,34 +212,16 @@ where
|
|||
T::health_check(self).await
|
||||
}
|
||||
|
||||
async fn save_async_read<Reader>(
|
||||
&self,
|
||||
reader: Reader,
|
||||
content_type: mime::Mime,
|
||||
) -> Result<Arc<str>, StoreError>
|
||||
where
|
||||
Reader: AsyncRead + Unpin + 'static,
|
||||
{
|
||||
T::save_async_read(self, reader, content_type).await
|
||||
}
|
||||
|
||||
async fn save_stream<S>(
|
||||
&self,
|
||||
stream: S,
|
||||
content_type: mime::Mime,
|
||||
extension: Option<&str>,
|
||||
) -> Result<Arc<str>, StoreError>
|
||||
where
|
||||
S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static,
|
||||
S: Stream<Item = std::io::Result<Bytes>>,
|
||||
{
|
||||
T::save_stream(self, stream, content_type).await
|
||||
}
|
||||
|
||||
async fn save_bytes(
|
||||
&self,
|
||||
bytes: Bytes,
|
||||
content_type: mime::Mime,
|
||||
) -> Result<Arc<str>, StoreError> {
|
||||
T::save_bytes(self, bytes, content_type).await
|
||||
T::save_stream(self, stream, content_type, extension).await
|
||||
}
|
||||
|
||||
fn public_url(&self, identifier: &Arc<str>) -> Option<url::Url> {
|
||||
|
@ -345,17 +237,6 @@ where
|
|||
T::to_stream(self, identifier, from_start, len).await
|
||||
}
|
||||
|
||||
async fn read_into<Writer>(
|
||||
&self,
|
||||
identifier: &Arc<str>,
|
||||
writer: &mut Writer,
|
||||
) -> Result<(), std::io::Error>
|
||||
where
|
||||
Writer: AsyncWrite + Unpin,
|
||||
{
|
||||
T::read_into(self, identifier, writer).await
|
||||
}
|
||||
|
||||
async fn len(&self, identifier: &Arc<str>) -> Result<u64, StoreError> {
|
||||
T::len(self, identifier).await
|
||||
}
|
||||
|
|
|
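The change repeated across these blanket impls consolidates the Store API: save_async_read and save_bytes are removed, and save_stream gains an extension: Option<&str> parameter, leaving one streaming entry point. A minimal sketch (not pict-rs code) of how other input shapes can be adapted at a call site, assuming futures_util and tokio_util as dependencies:

// Sketch only: adapting other input shapes to a stream-only save API.
// futures_util and tokio_util are assumed dependencies here.
use actix_web::web::Bytes;
use futures_core::Stream;
use tokio::io::AsyncRead;
use tokio_util::io::ReaderStream;

fn bytes_as_stream(bytes: Bytes) -> impl Stream<Item = std::io::Result<Bytes>> {
    // A one-shot stream is enough to reuse the stream-based save path.
    futures_util::stream::once(async move { Ok(bytes) })
}

fn reader_as_stream<R>(reader: R) -> impl Stream<Item = std::io::Result<Bytes>>
where
    R: AsyncRead,
{
    // ReaderStream turns any AsyncRead into a fallible stream of Bytes.
    ReaderStream::new(reader)
}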
@@ -1,5 +1,5 @@
use crate::{
    error_code::ErrorCode, file::File, repo::ArcRepo, store::Store, stream::LocalBoxStream,
    error_code::ErrorCode, file::File, future::WithPollTimer, store::Store, stream::LocalBoxStream,
};
use actix_web::web::Bytes;
use futures_core::Stream;

@@ -7,26 +7,15 @@ use std::{
    path::{Path, PathBuf},
    sync::Arc,
};
use storage_path_generator::Generator;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::io::StreamReader;
use tracing::Instrument;

use super::StoreError;

// - Settings Tree
// - last-path -> last generated path

const GENERATOR_KEY: &str = "last-path";

#[derive(Debug, thiserror::Error)]
pub(crate) enum FileError {
    #[error("Failed to read or write file")]
    Io(#[from] std::io::Error),

    #[error("Failed to generate path")]
    PathGenerator(#[from] storage_path_generator::PathError),

    #[error("Couldn't strip root dir")]
    PrefixError,

@@ -41,7 +30,6 @@ impl FileError {
    pub(super) const fn error_code(&self) -> ErrorCode {
        match self {
            Self::Io(_) => ErrorCode::FILE_IO_ERROR,
            Self::PathGenerator(_) => ErrorCode::PARSE_PATH_ERROR,
            Self::FileExists => ErrorCode::FILE_EXISTS,
            Self::StringError | Self::PrefixError => ErrorCode::FORMAT_FILE_ID_ERROR,
        }

@@ -50,9 +38,7 @@ impl FileError {

#[derive(Clone)]
pub(crate) struct FileStore {
    path_gen: Generator,
    root_dir: PathBuf,
    repo: ArcRepo,
}

impl Store for FileStore {

@@ -65,46 +51,21 @@ impl Store for FileStore {
        Ok(())
    }

    #[tracing::instrument(skip(self, reader))]
    async fn save_async_read<Reader>(
        &self,
        mut reader: Reader,
        _content_type: mime::Mime,
    ) -> Result<Arc<str>, StoreError>
    where
        Reader: AsyncRead + Unpin + 'static,
    {
        let path = self.next_file().await?;

        if let Err(e) = self.safe_save_reader(&path, &mut reader).await {
            self.safe_remove_file(&path).await?;
            return Err(e.into());
        }

        Ok(self.file_id_from_path(path)?)
    }

    async fn save_stream<S>(
        &self,
        stream: S,
        content_type: mime::Mime,
        _content_type: mime::Mime,
        extension: Option<&str>,
    ) -> Result<Arc<str>, StoreError>
    where
        S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static,
        S: Stream<Item = std::io::Result<Bytes>>,
    {
        self.save_async_read(StreamReader::new(stream), content_type)
        let path = self.next_file(extension);

        if let Err(e) = self
            .safe_save_stream(&path, crate::stream::error_injector(stream))
            .await
    }

    #[tracing::instrument(skip(self, bytes))]
    async fn save_bytes(
        &self,
        bytes: Bytes,
        _content_type: mime::Mime,
    ) -> Result<Arc<str>, StoreError> {
        let path = self.next_file().await?;

        if let Err(e) = self.safe_save_bytes(&path, bytes).await {
        {
            self.safe_remove_file(&path).await?;
            return Err(e.into());
        }

@@ -137,23 +98,7 @@ impl Store for FileStore {
            .instrument(file_span)
            .await?;

        Ok(Box::pin(stream))
    }

    #[tracing::instrument(skip(self, writer))]
    async fn read_into<Writer>(
        &self,
        identifier: &Arc<str>,
        writer: &mut Writer,
    ) -> Result<(), std::io::Error>
    where
        Writer: AsyncWrite + Unpin,
    {
        let path = self.path_from_file_id(identifier);

        File::open(&path).await?.read_to_async_write(writer).await?;

        Ok(())
        Ok(Box::pin(crate::stream::error_injector(stream)))
    }

    #[tracing::instrument(skip(self))]

@@ -179,17 +124,10 @@ impl Store for FileStore {
}

impl FileStore {
    #[tracing::instrument(skip(repo))]
    pub(crate) async fn build(root_dir: PathBuf, repo: ArcRepo) -> color_eyre::Result<Self> {
        let path_gen = init_generator(&repo).await?;

    pub(crate) async fn build(root_dir: PathBuf) -> color_eyre::Result<Self> {
        tokio::fs::create_dir_all(&root_dir).await?;

        Ok(FileStore {
            root_dir,
            path_gen,
            repo,
        })
        Ok(FileStore { root_dir })
    }

    fn file_id_from_path(&self, path: PathBuf) -> Result<Arc<str>, FileError> {

@@ -204,26 +142,8 @@ impl FileStore {
        self.root_dir.join(file_id.as_ref())
    }

    async fn next_directory(&self) -> Result<PathBuf, StoreError> {
        let path = self.path_gen.next();

        self.repo
            .set(GENERATOR_KEY, path.to_be_bytes().into())
            .await?;

        let mut target_path = self.root_dir.clone();
        for dir in path.to_strings() {
            target_path.push(dir)
        }

        Ok(target_path)
    }

    async fn next_file(&self) -> Result<PathBuf, StoreError> {
        let target_path = self.next_directory().await?;
        let filename = uuid::Uuid::new_v4().to_string();

        Ok(target_path.join(filename))
    fn next_file(&self, extension: Option<&str>) -> PathBuf {
        crate::file_path::generate_disk(self.root_dir.clone(), extension)
    }

    #[tracing::instrument(level = "debug", skip(self, path), fields(path = ?path.as_ref()))]

@@ -249,40 +169,10 @@ impl FileStore {
        }
    }

    // Try writing to a file
    async fn safe_save_bytes<P: AsRef<Path>>(
        &self,
        path: P,
        bytes: Bytes,
    ) -> Result<(), FileError> {
        safe_create_parent(&path).await?;

        // Only write the file if it doesn't already exist
        if let Err(e) = tokio::fs::metadata(&path).await {
            if e.kind() != std::io::ErrorKind::NotFound {
                return Err(e.into());
            }
        } else {
            return Ok(());
        }

        // Open the file for writing
        let mut file = File::create(&path).await?;

        // try writing
        if let Err(e) = file.write_from_bytes(bytes).await {
            // remove file if writing failed before completion
            self.safe_remove_file(path).await?;
            return Err(e.into());
        }

        Ok(())
    }

    async fn safe_save_reader<P: AsRef<Path>>(
    async fn safe_save_stream<P: AsRef<Path>>(
        &self,
        to: P,
        input: &mut (impl AsyncRead + Unpin + ?Sized),
        input: impl Stream<Item = std::io::Result<Bytes>>,
    ) -> Result<(), FileError> {
        safe_create_parent(&to).await?;

@@ -296,7 +186,11 @@ impl FileStore {

        let mut file = File::create(to).await?;

        file.write_from_async_read(input).await?;
        file.write_from_stream(input)
            .with_poll_timer("write-from-stream")
            .await?;

        file.close().await?;

        Ok(())
    }

@@ -310,20 +204,9 @@ pub(crate) async fn safe_create_parent<P: AsRef<Path>>(path: P) -> Result<(), Fi
    Ok(())
}

async fn init_generator(repo: &ArcRepo) -> Result<Generator, StoreError> {
    if let Some(ivec) = repo.get(GENERATOR_KEY).await? {
        Ok(Generator::from_existing(
            storage_path_generator::Path::from_be_bytes(ivec.to_vec()).map_err(FileError::from)?,
        ))
    } else {
        Ok(Generator::new())
    }
}

impl std::fmt::Debug for FileStore {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("FileStore")
            .field("path_gen", &"generator")
            .field("root_dir", &self.root_dir)
            .finish()
    }
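Net effect of the FileStore diff: paths come from a pure generator instead of the repo, and every write flows through safe_save_stream. A self-contained sketch of that write path under assumed dependencies (bytes, futures-util, tokio), with plain tokio I/O standing in for the crate's File wrapper and error types:

// Sketch of the stream-to-disk write path shown above.
use std::path::Path;

use futures_core::Stream;
use futures_util::StreamExt;
use tokio::io::AsyncWriteExt;

async fn safe_save_stream_sketch<S>(to: &Path, stream: S) -> std::io::Result<()>
where
    S: Stream<Item = std::io::Result<bytes::Bytes>>,
{
    // Mirror safe_create_parent: make sure the directory tree exists.
    if let Some(parent) = to.parent() {
        tokio::fs::create_dir_all(parent).await?;
    }

    let mut file = tokio::fs::File::create(to).await?;
    let mut stream = std::pin::pin!(stream);

    while let Some(chunk) = stream.next().await {
        // Any mid-stream error aborts the write; the caller is expected
        // to remove the partial file, as FileStore::save_stream does.
        file.write_all(&chunk?).await?;
    }

    file.flush().await?;

    Ok(())
}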
@@ -1,29 +1,26 @@
use crate::{
    bytes_stream::BytesStream, error_code::ErrorCode, future::WithMetrics, repo::ArcRepo,
    store::Store, stream::LocalBoxStream,
    bytes_stream::BytesStream, error_code::ErrorCode, future::WithMetrics, store::Store,
    stream::LocalBoxStream, sync::DropHandle,
};
use actix_web::{
    error::BlockingError,
    http::{
        header::{ByteRangeSpec, Range, CONTENT_LENGTH},
        StatusCode,
    },
    http::header::{ByteRangeSpec, Range},
    rt::task::JoinError,
    web::Bytes,
};
use base64::{prelude::BASE64_STANDARD, Engine};
use futures_core::Stream;
use reqwest::{header::RANGE, Body, Response};
use reqwest::{
    header::{CONTENT_LENGTH, RANGE},
    Body, Response, StatusCode,
};
use reqwest_middleware::{ClientWithMiddleware, RequestBuilder};
use rusty_s3::{
    actions::{CreateMultipartUpload, S3Action},
    Bucket, BucketError, Credentials, UrlStyle,
};
use std::{string::FromUtf8Error, sync::Arc, time::Duration};
use storage_path_generator::{Generator, Path};
use streem::IntoStreamer;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio_util::io::ReaderStream;
use tracing::Instrument;
use url::Url;

@@ -31,16 +28,8 @@ use super::StoreError;

const CHUNK_SIZE: usize = 8_388_608; // 8 Mebibytes, min is 5 (5_242_880);

// - Settings Tree
// - last-path -> last generated path

const GENERATOR_KEY: &str = "last-path";

#[derive(Debug, thiserror::Error)]
pub(crate) enum ObjectError {
    #[error("Failed to generate path")]
    PathGenerator(#[from] storage_path_generator::PathError),

    #[error("Failed to generate request")]
    S3(#[from] BucketError),

@@ -98,7 +87,6 @@ impl std::error::Error for XmlError {
impl ObjectError {
    pub(super) const fn error_code(&self) -> ErrorCode {
        match self {
            Self::PathGenerator(_) => ErrorCode::PARSE_PATH_ERROR,
            Self::S3(_)
            | Self::RequestMiddleware(_)
            | Self::Request(_)

@@ -127,8 +115,6 @@ impl From<BlockingError> for ObjectError {

#[derive(Clone)]
pub(crate) struct ObjectStore {
    path_gen: Generator,
    repo: ArcRepo,
    bucket: Bucket,
    credentials: Credentials,
    client: ClientWithMiddleware,

@@ -139,8 +125,6 @@ pub(crate) struct ObjectStore {

#[derive(Clone)]
pub(crate) struct ObjectStoreConfig {
    path_gen: Generator,
    repo: ArcRepo,
    bucket: Bucket,
    credentials: Credentials,
    signature_expiration: u64,

@@ -151,8 +135,6 @@ pub(crate) struct ObjectStoreConfig {
impl ObjectStoreConfig {
    pub(crate) fn build(self, client: ClientWithMiddleware) -> ObjectStore {
        ObjectStore {
            path_gen: self.path_gen,
            repo: self.repo,
            bucket: self.bucket,
            credentials: self.credentials,
            client,

@@ -170,7 +152,7 @@ fn payload_to_io_error(e: reqwest::Error) -> std::io::Error {
#[tracing::instrument(level = "debug", skip(stream))]
async fn read_chunk<S>(stream: &mut S) -> Result<BytesStream, ObjectError>
where
    S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static,
    S: Stream<Item = std::io::Result<Bytes>> + Unpin,
{
    let mut buf = BytesStream::new();

@@ -186,6 +168,12 @@ where
        }
    }

    tracing::debug!(
        "BytesStream with {} chunks, avg length {}",
        buf.chunks_len(),
        buf.len() / buf.chunks_len()
    );

    Ok(buf)
}
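read_chunk above buffers the source stream into a BytesStream until at least CHUNK_SIZE bytes are gathered, which is what later decides single-PUT versus multipart. A rough self-contained equivalent, with Vec<Bytes> standing in for the crate's BytesStream type:

// Sketch of the buffering performed by read_chunk.
use bytes::Bytes;
use futures_core::Stream;
use futures_util::StreamExt;

const CHUNK_SIZE: usize = 8_388_608; // mirrors the constant above

async fn read_chunk_sketch<S>(stream: &mut S) -> std::io::Result<Vec<Bytes>>
where
    S: Stream<Item = std::io::Result<Bytes>> + Unpin,
{
    let mut buf = Vec::new();
    let mut total = 0;

    // Pull items until at least CHUNK_SIZE bytes are buffered or the
    // stream ends; the caller compares the length against CHUNK_SIZE to
    // pick single-PUT versus multipart.
    while total < CHUNK_SIZE {
        let Some(item) = stream.next().await else { break };
        let bytes = item?;
        total += bytes.len();
        buf.push(bytes);
    }

    Ok(buf)
}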
@@ -217,51 +205,222 @@ impl Store for ObjectStore {
        Ok(())
    }

    async fn save_async_read<Reader>(
        &self,
        reader: Reader,
        content_type: mime::Mime,
    ) -> Result<Arc<str>, StoreError>
    where
        Reader: AsyncRead + Unpin + 'static,
    {
        self.save_stream(ReaderStream::new(reader), content_type)
            .await
    }

    #[tracing::instrument(skip_all)]
    async fn save_stream<S>(
        &self,
        mut stream: S,
        stream: S,
        content_type: mime::Mime,
        extension: Option<&str>,
    ) -> Result<Arc<str>, StoreError>
    where
        S: Stream<Item = std::io::Result<Bytes>> + Unpin + 'static,
        S: Stream<Item = std::io::Result<Bytes>>,
    {
        match self
            .start_upload(
                crate::stream::error_injector(stream),
                content_type.clone(),
                extension,
            )
            .await?
        {
            UploadState::Single(first_chunk) => {
                let (req, object_id) = self
                    .put_object_request(first_chunk.len(), content_type, extension)
                    .await?;

                let response = req
                    .body(Body::wrap_stream(first_chunk.into_io_stream()))
                    .send()
                    .with_metrics(crate::init_metrics::OBJECT_STORAGE_PUT_OBJECT_REQUEST)
                    .await
                    .map_err(ObjectError::from)?;

                if !response.status().is_success() {
                    return Err(status_error(response, None).await);
                }

                return Ok(object_id);
            }
            UploadState::Multi(object_id, upload_id, futures) => {
                // hack-ish: use async block as Result boundary
                let res = async {
                    let mut etags = Vec::new();

                    for future in futures {
                        etags.push(future.await.map_err(ObjectError::from)??);
                    }

                    let response = self
                        .send_complete_multipart_request(
                            &object_id,
                            &upload_id,
                            etags.iter().map(|s| s.as_ref()),
                        )
                        .await
                        .map_err(ObjectError::from)?;

                    if !response.status().is_success() {
                        return Err(status_error(response, None).await);
                    }

                    Ok(()) as Result<(), StoreError>
                }
                .await;

                if let Err(e) = res {
                    self.create_abort_multipart_request(&object_id, &upload_id)
                        .send()
                        .with_metrics(crate::init_metrics::OBJECT_STORAGE_ABORT_MULTIPART_REQUEST)
                        .await
                        .map_err(ObjectError::from)?;
                    return Err(e);
                }

                Ok(object_id)
            }
        }
    }

    fn public_url(&self, identifier: &Arc<str>) -> Option<url::Url> {
        self.public_endpoint.clone().and_then(|mut endpoint| {
            endpoint
                .path_segments_mut()
                .ok()?
                .pop_if_empty()
                .extend(identifier.as_ref().split('/'));
            Some(endpoint)
        })
    }

    #[tracing::instrument(skip(self))]
    async fn to_stream(
        &self,
        identifier: &Arc<str>,
        from_start: Option<u64>,
        len: Option<u64>,
    ) -> Result<LocalBoxStream<'static, std::io::Result<Bytes>>, StoreError> {
        let response = self
            .get_object_request(identifier, from_start, len)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_GET_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, Some(identifier.clone())).await);
        }

        Ok(Box::pin(crate::stream::error_injector(
            crate::stream::metrics(
                crate::init_metrics::OBJECT_STORAGE_GET_OBJECT_REQUEST_STREAM,
                crate::stream::map_err(response.bytes_stream(), payload_to_io_error),
            ),
        )))
    }

    #[tracing::instrument(skip(self))]
    async fn len(&self, identifier: &Arc<str>) -> Result<u64, StoreError> {
        let response = self
            .head_object_request(identifier)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_HEAD_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, Some(identifier.clone())).await);
        }

        let length = response
            .headers()
            .get(CONTENT_LENGTH)
            .ok_or(ObjectError::Length)?
            .to_str()
            .map_err(|_| ObjectError::Length)?
            .parse::<u64>()
            .map_err(|_| ObjectError::Length)?;

        Ok(length)
    }

    #[tracing::instrument(skip(self))]
    async fn remove(&self, identifier: &Arc<str>) -> Result<(), StoreError> {
        let response = self
            .delete_object_request(identifier)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_DELETE_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, Some(identifier.clone())).await);
        }

        Ok(())
    }
}

enum UploadState {
    Single(BytesStream),
    Multi(
        Arc<str>,
        String,
        Vec<DropHandle<Result<String, StoreError>>>,
    ),
}

impl ObjectStore {
    #[allow(clippy::too_many_arguments)]
    #[tracing::instrument(skip(access_key, secret_key, session_token))]
    pub(crate) async fn build(
        endpoint: Url,
        bucket_name: String,
        url_style: UrlStyle,
        region: String,
        access_key: String,
        secret_key: String,
        session_token: Option<String>,
        signature_expiration: u64,
        client_timeout: u64,
        public_endpoint: Option<Url>,
    ) -> Result<ObjectStoreConfig, StoreError> {
        Ok(ObjectStoreConfig {
            bucket: Bucket::new(endpoint, url_style, bucket_name, region)
                .map_err(ObjectError::from)?,
            credentials: if let Some(token) = session_token {
                Credentials::new_with_token(access_key, secret_key, token)
            } else {
                Credentials::new(access_key, secret_key)
            },
            signature_expiration,
            client_timeout,
            public_endpoint,
        })
    }

    #[tracing::instrument(skip_all)]
    async fn start_upload<S>(
        &self,
        stream: S,
        content_type: mime::Mime,
        extension: Option<&str>,
    ) -> Result<UploadState, StoreError>
    where
        S: Stream<Item = std::io::Result<Bytes>>,
    {
        let mut stream = std::pin::pin!(stream);

        let first_chunk = read_chunk(&mut stream).await?;

        if first_chunk.len() < CHUNK_SIZE {
            drop(stream);
            let (req, object_id) = self
                .put_object_request(first_chunk.len(), content_type)
                .await?;
            let response = req
                .body(Body::wrap_stream(first_chunk))
                .send()
                .with_metrics(crate::init_metrics::OBJECT_STORAGE_PUT_OBJECT_REQUEST)
                .await
                .map_err(ObjectError::from)?;

            if !response.status().is_success() {
                return Err(status_error(response, None).await);
            }

            return Ok(object_id);
            return Ok(UploadState::Single(first_chunk));
        }

        let mut first_chunk = Some(first_chunk);

        let (req, object_id) = self.create_multipart_request(content_type).await?;
        let (req, object_id) = self
            .create_multipart_request(content_type, extension)
            .await?;
        let response = req
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_CREATE_MULTIPART_REQUEST)
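start_upload now returns an UploadState instead of finishing the upload itself, and save_stream completes or aborts it. A simplified sketch of that hand-off, using stand-in types (tokio JoinHandles for the crate's DropHandle, Strings for identifiers, elided S3 requests) and assuming a tokio runtime:

// Sketch of the single-versus-multipart state machine.
enum UploadStateSketch {
    // Everything fit in the first chunk: a single PUT suffices.
    Single(Vec<u8>),
    // Larger input: parts are already uploading; the caller awaits the
    // handles, then completes (or aborts) the multipart upload.
    Multi {
        object_id: String,
        upload_id: String,
        parts: Vec<tokio::task::JoinHandle<std::io::Result<String>>>,
    },
}

async fn finish_upload(state: UploadStateSketch) -> std::io::Result<String> {
    match state {
        UploadStateSketch::Single(bytes) => {
            // A put_object request would be sent here.
            let _ = bytes;
            Ok("object-id".into())
        }
        UploadStateSketch::Multi { object_id, upload_id, parts } => {
            let mut etags = Vec::new();

            for part in parts {
                // JoinHandle yields Result<io::Result<String>, JoinError>.
                etags.push(part.await.map_err(std::io::Error::other)??);
            }

            // A complete-multipart request (or an abort on error) would
            // be sent here, as the match arm above does.
            let _ = (upload_id, etags);
            Ok(object_id)
        }
    }
}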
@@ -312,7 +471,7 @@ impl Store for ObjectStore {
                        &upload_id2,
                    )
                    .await?
                    .body(Body::wrap_stream(buf))
                    .body(Body::wrap_stream(buf.into_io_stream()))
                    .send()
                    .with_metrics(
                        crate::init_metrics::OBJECT_STORAGE_CREATE_UPLOAD_PART_REQUEST,

@@ -343,215 +502,25 @@ impl Store for ObjectStore {
            futures.push(handle);
        }

        // early-drop stream to allow the next Part to be polled concurrently
        drop(stream);

        let mut etags = Vec::new();

        for future in futures {
            etags.push(future.await.map_err(ObjectError::from)??);
        }

        let response = self
            .send_complete_multipart_request(
                &object_id,
                upload_id,
                etags.iter().map(|s| s.as_ref()),
            )
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, None).await);
        }

        Ok(()) as Result<(), StoreError>
        Ok(futures)
        }
        .await;

        if let Err(e) = res {
            self.create_abort_multipart_request(&object_id, upload_id)
                .send()
                .with_metrics(crate::init_metrics::OBJECT_STORAGE_ABORT_MULTIPART_REQUEST)
                .await
                .map_err(ObjectError::from)?;
            return Err(e);
        match res {
            Ok(futures) => Ok(UploadState::Multi(
                object_id,
                upload_id.to_string(),
                futures,
            )),
            Err(e) => {
                self.create_abort_multipart_request(&object_id, upload_id)
                    .send()
                    .with_metrics(crate::init_metrics::OBJECT_STORAGE_ABORT_MULTIPART_REQUEST)
                    .await
                    .map_err(ObjectError::from)?;
                Err(e)
            }
        }

        Ok(object_id)
    }

    #[tracing::instrument(skip_all)]
    async fn save_bytes(
        &self,
        bytes: Bytes,
        content_type: mime::Mime,
    ) -> Result<Arc<str>, StoreError> {
        let (req, object_id) = self.put_object_request(bytes.len(), content_type).await?;

        let response = req
            .body(bytes)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_PUT_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, None).await);
        }

        Ok(object_id)
    }

    fn public_url(&self, identifier: &Arc<str>) -> Option<url::Url> {
        self.public_endpoint.clone().and_then(|mut endpoint| {
            endpoint
                .path_segments_mut()
                .ok()?
                .pop_if_empty()
                .extend(identifier.as_ref().split('/'));
            Some(endpoint)
        })
    }

    #[tracing::instrument(skip(self))]
    async fn to_stream(
        &self,
        identifier: &Arc<str>,
        from_start: Option<u64>,
        len: Option<u64>,
    ) -> Result<LocalBoxStream<'static, std::io::Result<Bytes>>, StoreError> {
        let response = self
            .get_object_request(identifier, from_start, len)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_GET_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, Some(identifier.clone())).await);
        }

        Ok(Box::pin(crate::stream::metrics(
            crate::init_metrics::OBJECT_STORAGE_GET_OBJECT_REQUEST_STREAM,
            crate::stream::map_err(response.bytes_stream(), payload_to_io_error),
        )))
    }

    #[tracing::instrument(skip(self, writer))]
    async fn read_into<Writer>(
        &self,
        identifier: &Arc<str>,
        writer: &mut Writer,
    ) -> Result<(), std::io::Error>
    where
        Writer: AsyncWrite + Unpin,
    {
        let response = self
            .get_object_request(identifier, None, None)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_GET_OBJECT_REQUEST)
            .await
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, ObjectError::from(e)))?;

        if !response.status().is_success() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                status_error(response, Some(identifier.clone())).await,
            ));
        }

        let stream = std::pin::pin!(crate::stream::metrics(
            crate::init_metrics::OBJECT_STORAGE_GET_OBJECT_REQUEST_STREAM,
            response.bytes_stream()
        ));
        let mut stream = stream.into_streamer();

        while let Some(res) = stream.next().await {
            tracing::trace!("read_into: looping");

            let mut bytes = res.map_err(payload_to_io_error)?;
            writer.write_all_buf(&mut bytes).await?;
        }
        writer.flush().await?;

        Ok(())
    }

    #[tracing::instrument(skip(self))]
    async fn len(&self, identifier: &Arc<str>) -> Result<u64, StoreError> {
        let response = self
            .head_object_request(identifier)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_HEAD_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, Some(identifier.clone())).await);
        }

        let length = response
            .headers()
            .get(CONTENT_LENGTH)
            .ok_or(ObjectError::Length)?
            .to_str()
            .map_err(|_| ObjectError::Length)?
            .parse::<u64>()
            .map_err(|_| ObjectError::Length)?;

        Ok(length)
    }

    #[tracing::instrument(skip(self))]
    async fn remove(&self, identifier: &Arc<str>) -> Result<(), StoreError> {
        let response = self
            .delete_object_request(identifier)
            .send()
            .with_metrics(crate::init_metrics::OBJECT_STORAGE_DELETE_OBJECT_REQUEST)
            .await
            .map_err(ObjectError::from)?;

        if !response.status().is_success() {
            return Err(status_error(response, Some(identifier.clone())).await);
        }

        Ok(())
    }
}

impl ObjectStore {
    #[allow(clippy::too_many_arguments)]
    #[tracing::instrument(skip(access_key, secret_key, session_token, repo))]
    pub(crate) async fn build(
        endpoint: Url,
        bucket_name: String,
        url_style: UrlStyle,
        region: String,
        access_key: String,
        secret_key: String,
        session_token: Option<String>,
        signature_expiration: u64,
        client_timeout: u64,
        public_endpoint: Option<Url>,
        repo: ArcRepo,
    ) -> Result<ObjectStoreConfig, StoreError> {
        let path_gen = init_generator(&repo).await?;

        Ok(ObjectStoreConfig {
            path_gen,
            repo,
            bucket: Bucket::new(endpoint, url_style, bucket_name, region)
                .map_err(ObjectError::from)?,
            credentials: if let Some(token) = session_token {
                Credentials::new_with_token(access_key, secret_key, token)
            } else {
                Credentials::new(access_key, secret_key)
            },
            signature_expiration,
            client_timeout,
            public_endpoint,
        })
    }

    async fn head_bucket_request(&self) -> Result<RequestBuilder, StoreError> {

@@ -564,8 +533,9 @@ impl ObjectStore {
        &self,
        length: usize,
        content_type: mime::Mime,
        extension: Option<&str>,
    ) -> Result<(RequestBuilder, Arc<str>), StoreError> {
        let path = self.next_file().await?;
        let path = self.next_file(extension);

        let mut action = self.bucket.put_object(Some(&self.credentials), &path);

@@ -582,8 +552,9 @@ impl ObjectStore {
    async fn create_multipart_request(
        &self,
        content_type: mime::Mime,
        extension: Option<&str>,
    ) -> Result<(RequestBuilder, Arc<str>), StoreError> {
        let path = self.next_file().await?;
        let path = self.next_file(extension);

        let mut action = self
            .bucket

@@ -753,39 +724,14 @@ impl ObjectStore {
        self.build_request(action)
    }

    async fn next_directory(&self) -> Result<Path, StoreError> {
        let path = self.path_gen.next();

        self.repo
            .set(GENERATOR_KEY, path.to_be_bytes().into())
            .await?;

        Ok(path)
    }

    async fn next_file(&self) -> Result<String, StoreError> {
        let path = self.next_directory().await?.to_strings().join("/");
        let filename = uuid::Uuid::new_v4().to_string();

        Ok(format!("{path}/{filename}"))
    }
}

async fn init_generator(repo: &ArcRepo) -> Result<Generator, StoreError> {
    if let Some(ivec) = repo.get(GENERATOR_KEY).await? {
        Ok(Generator::from_existing(
            storage_path_generator::Path::from_be_bytes(ivec.to_vec())
                .map_err(ObjectError::from)?,
        ))
    } else {
        Ok(Generator::new())
    fn next_file(&self, extension: Option<&str>) -> String {
        crate::file_path::generate_object(extension)
    }
}

impl std::fmt::Debug for ObjectStore {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ObjectStore")
            .field("path_gen", &"generator")
            .field("bucket", &self.bucket.name())
            .field("region", &self.bucket.region())
            .finish()
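With the Generator and repo gone, next_file reduces to a pure function over an optional extension. One plausible shape for such a generator, assuming the uuid crate's v7 feature; the real crate::file_path::generate_object may differ (for example by adding a directory prefix):

// Sketch of a repo-free, stateless object-key generator.
fn generate_object_sketch(extension: Option<&str>) -> String {
    // UUIDv7 keys are time-ordered, which keeps sibling objects loosely
    // grouped without any persisted generator state.
    let id = uuid::Uuid::now_v7();

    match extension {
        Some(ext) => format!("{id}.{ext}"),
        None => id.to_string(),
    }
}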
@@ -5,6 +5,38 @@ use streem::IntoStreamer;

use crate::future::WithMetrics;

#[cfg(not(feature = "random-errors"))]
pub(crate) fn error_injector(
    stream: impl Stream<Item = std::io::Result<Bytes>>,
) -> impl Stream<Item = std::io::Result<Bytes>> {
    stream
}

#[cfg(feature = "random-errors")]
pub(crate) fn error_injector(
    stream: impl Stream<Item = std::io::Result<Bytes>>,
) -> impl Stream<Item = std::io::Result<Bytes>> {
    streem::try_from_fn(|yielder| async move {
        let stream = std::pin::pin!(stream);
        let mut streamer = stream.into_streamer();

        while let Some(item) = streamer.try_next().await? {
            yielder.yield_ok(item).await;

            use nanorand::Rng;

            if nanorand::tls_rng().generate_range(0..1000) < 1 {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::Other,
                    crate::error::UploadError::RandomError,
                ));
            }
        }

        Ok(())
    })
}

pub(crate) fn take<S>(stream: S, amount: usize) -> impl Stream<Item = S::Item>
where
    S: Stream,
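error_injector compiles to a zero-cost pass-through unless the random-errors feature is enabled, so failure paths can be exercised without touching callers. The same pattern without streem, as a sketch: fastrand stands in for nanorand here, and the injected error replaces the item instead of following it.

// Feature-gated fault injection with a single stable signature.
use bytes::Bytes;
use futures_core::Stream;

#[cfg(not(feature = "random-errors"))]
fn error_injector_sketch<S>(stream: S) -> impl Stream<Item = std::io::Result<Bytes>>
where
    S: Stream<Item = std::io::Result<Bytes>>,
{
    // Release builds: pass-through, no wrapper at all.
    stream
}

#[cfg(feature = "random-errors")]
fn error_injector_sketch<S>(stream: S) -> impl Stream<Item = std::io::Result<Bytes>>
where
    S: Stream<Item = std::io::Result<Bytes>>,
{
    use futures_util::StreamExt;

    stream.map(|item| {
        // Fail roughly one item in a thousand to exercise cleanup paths.
        if fastrand::usize(0..1000) < 1 {
            Err(std::io::Error::other("injected error"))
        } else {
            item
        }
    })
}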
@@ -59,7 +91,7 @@ where
    S: Stream + 'static,
    S::Item: Send + Sync,
{
    let (tx, rx) = crate::sync::channel(1);
    let (tx, mut rx) = crate::sync::channel(1);

    let handle = crate::sync::abort_on_drop(crate::sync::spawn("send-stream", async move {
        let stream = std::pin::pin!(stream);

@@ -68,16 +100,14 @@ where
        while let Some(res) = streamer.next().await {
            tracing::trace!("make send tx: looping");

            if tx.send_async(res).await.is_err() {
            if tx.send(res).await.is_err() {
                break;
            }
        }
    }));

    streem::from_fn(|yiedler| async move {
        let mut stream = rx.into_stream().into_streamer();

        while let Some(res) = stream.next().await {
        while let Some(res) = rx.recv().await {
            tracing::trace!("make send rx: looping");

            yiedler.yield_(res).await;

@@ -92,35 +122,23 @@ where
    I: IntoIterator + Send + 'static,
    I::Item: Send + Sync,
{
    let (tx, rx) = crate::sync::channel(buffer);
    let (tx, mut rx) = crate::sync::channel(buffer);

    let handle = crate::sync::spawn_blocking("blocking-iterator", move || {
        for value in iterator {
            if tx.send(value).is_err() {
            if tx.blocking_send(value).is_err() {
                break;
            }
        }
    });

    streem::from_fn(|yielder| async move {
        let mut stream = rx.into_stream().into_streamer();

        let yield_count = buffer.max(8);
        let mut count = 0;

        while let Some(res) = stream.next().await {
        while let Some(res) = rx.recv().await {
            tracing::trace!("from_iterator: looping");

            count += 1;
            count %= yield_count;

            yielder.yield_(res).await;

            // every 8 (or buffer-size) items, yield to executor before looping
            // improves cooperation
            if count == 0 {
                tokio::task::yield_now().await;
            }
            crate::sync::cooperate().await;
        }

        let _ = handle.await;

@@ -183,13 +201,6 @@ where
    streem::from_fn(|_| std::future::ready(()))
}

pub(crate) fn once<T>(value: T) -> impl Stream<Item = T>
where
    T: 'static,
{
    streem::from_fn(|yielder| yielder.yield_(value))
}

pub(crate) fn timeout<S>(
    duration: Duration,
    stream: S,
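make_send and from_iterator both follow one pattern after this change: a spawned producer pumps items into a bounded tokio mpsc channel, and the receiver side is re-exposed as a stream, with cooperate() giving the consumer loop scheduling points. A self-contained sketch of that bridge, simplified to require Send (the crate's version also handles !Send streams via a local task) and assuming the tokio_stream crate for ReceiverStream:

// Channel-bridged stream: backpressure comes from the channel bound,
// and dropping the receiver stops the producer loop.
use futures_core::Stream;
use futures_util::StreamExt;

fn bridge<S>(stream: S) -> impl Stream<Item = S::Item>
where
    S: Stream + Send + 'static,
    S::Item: Send + 'static,
{
    // A bound of 1 gives tight backpressure, as in make_send above.
    let (tx, rx) = tokio::sync::mpsc::channel(1);

    tokio::spawn(async move {
        let mut stream = std::pin::pin!(stream);

        while let Some(item) = stream.next().await {
            if tx.send(item).await.is_err() {
                // Receiver dropped: stop pulling from the source.
                break;
            }
        }
    });

    tokio_stream::wrappers::ReceiverStream::new(rx)
}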
src/sync.rs

@@ -5,6 +5,8 @@ use tokio::{
    task::JoinHandle,
};

use crate::future::WithPollTimer;

pub(crate) struct DropHandle<T> {
    handle: JoinHandle<T>,
}

@@ -37,11 +39,13 @@ impl<T> std::future::Future for DropHandle<T> {
}

#[track_caller]
pub(crate) fn channel<T>(bound: usize) -> (flume::Sender<T>, flume::Receiver<T>) {
pub(crate) fn channel<T>(
    bound: usize,
) -> (tokio::sync::mpsc::Sender<T>, tokio::sync::mpsc::Receiver<T>) {
    let span = tracing::trace_span!(parent: None, "make channel");
    let guard = span.enter();

    let channel = flume::bounded(bound);
    let channel = tokio::sync::mpsc::channel(bound);

    drop(guard);
    channel

@@ -74,14 +78,22 @@ pub(crate) fn bare_semaphore(permits: usize) -> Semaphore {
    semaphore
}

// best effort cooperation mechanism
pub(crate) async fn cooperate() {
    #[cfg(tokio_unstable)]
    tokio::task::consume_budget().await;

    #[cfg(not(tokio_unstable))]
    tokio::task::yield_now().await;
}

#[track_caller]
pub(crate) fn spawn<F>(name: &str, future: F) -> tokio::task::JoinHandle<F::Output>
pub(crate) fn spawn<F>(name: &'static str, future: F) -> tokio::task::JoinHandle<F::Output>
where
    F: std::future::Future + 'static,
    F::Output: 'static,
{
    #[cfg(not(tokio_unstable))]
    let _ = name;
    let future = future.with_poll_timer(name);

    let span = tracing::trace_span!(parent: None, "spawn task");
    let guard = span.enter();

@@ -98,6 +110,29 @@ where
    handle
}

#[track_caller]
pub(crate) fn spawn_sendable<F>(name: &'static str, future: F) -> tokio::task::JoinHandle<F::Output>
where
    F: std::future::Future + Send + 'static,
    F::Output: Send + 'static,
{
    let future = future.with_poll_timer(name);

    let span = tracing::trace_span!(parent: None, "spawn task");
    let guard = span.enter();

    #[cfg(tokio_unstable)]
    let handle = tokio::task::Builder::new()
        .name(name)
        .spawn(future)
        .expect("Failed to spawn");
    #[cfg(not(tokio_unstable))]
    let handle = tokio::task::spawn(future);

    drop(guard);
    handle
}

#[track_caller]
pub(crate) fn spawn_blocking<F, Out>(name: &str, function: F) -> tokio::task::JoinHandle<Out>
where
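cooperate() gives long loops a scheduling point: consume_budget on tokio_unstable builds, yield_now otherwise. A hypothetical hot loop built on the same idea; the local cooperate() here is a stand-in for crate::sync::cooperate:

// Best-effort cooperation in a busy consumer loop.
async fn cooperate() {
    // On tokio_unstable builds the real version calls
    // tokio::task::consume_budget() instead.
    tokio::task::yield_now().await;
}

async fn drain(mut rx: tokio::sync::mpsc::Receiver<u64>) -> u64 {
    let mut total = 0;

    while let Some(n) = rx.recv().await {
        total += n;

        // Give the scheduler a chance between items so a busy channel
        // cannot starve other tasks on the same worker.
        cooperate().await;
    }

    total
}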
@@ -1,6 +1,6 @@
use std::path::PathBuf;

use rustls::{crypto::ring::sign::any_supported_type, sign::CertifiedKey, Error};
use rustls::{crypto::aws_lc_rs::sign::any_supported_type, sign::CertifiedKey, Error};

pub(super) struct Tls {
    certificate: PathBuf,
@@ -16,9 +16,17 @@ pub(crate) struct TmpDir {
}

impl TmpDir {
    pub(crate) async fn init<P: AsRef<Path>>(path: P) -> std::io::Result<Arc<Self>> {
        let path = path.as_ref().join(Uuid::now_v7().to_string());
        tokio::fs::create_dir(&path).await?;
    pub(crate) async fn init<P: AsRef<Path>>(path: P, cleanup: bool) -> std::io::Result<Arc<Self>> {
        let base_path = path.as_ref().join("pict-rs");

        if cleanup && tokio::fs::metadata(&base_path).await.is_ok() {
            tokio::fs::remove_dir_all(&base_path).await?;
        }

        let path = base_path.join(Uuid::now_v7().to_string());

        tokio::fs::create_dir_all(&path).await?;

        Ok(Arc::new(TmpDir { path: Some(path) }))
    }

@@ -47,8 +55,13 @@ impl TmpDir {
    }

    pub(crate) async fn cleanup(self: Arc<Self>) -> std::io::Result<()> {
        if let Some(path) = Arc::into_inner(self).and_then(|mut this| this.path.take()) {
            tokio::fs::remove_dir_all(path).await?;
        if let Some(mut path) = Arc::into_inner(self).and_then(|mut this| this.path.take()) {
            tokio::fs::remove_dir_all(&path).await?;

            if path.pop() {
                // attempt to remove parent directory if it is empty
                let _ = tokio::fs::remove_dir(path).await;
            }
        }

        Ok(())

@@ -57,9 +70,13 @@ impl TmpDir {

impl Drop for TmpDir {
    fn drop(&mut self) {
        if let Some(path) = self.path.take() {
        if let Some(mut path) = self.path.take() {
            tracing::warn!("TmpDir - Blocking remove of {path:?}");
            std::fs::remove_dir_all(path).expect("Removed directory");
            std::fs::remove_dir_all(&path).expect("Removed directory");
            if path.pop() {
                // attempt to remove parent directory if it is empty
                let _ = std::fs::remove_dir(path);
            }
        }
    }
}
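TmpDir::init now nests runs under a stable pict-rs directory, optionally wiping leftovers from previous runs, and both cleanup and Drop also try to remove the now-empty parent. A hypothetical call site for the new signature:

// `tmp` would come from configuration; `cleanup` wipes a previous run.
async fn startup(tmp: &std::path::Path, cleanup: bool) -> std::io::Result<()> {
    let tmp_dir = TmpDir::init(tmp, cleanup).await?;

    // ... hand `tmp_dir` to the components that stage files in it ...

    // Consumes the Arc and removes the directory tree; Drop covers the
    // case where this line is never reached.
    tmp_dir.cleanup().await
}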
@@ -1,14 +0,0 @@
use actix_web::web::Bytes;

use crate::{
    exiftool::ExifError,
    process::{Process, ProcessRead},
};

#[tracing::instrument(level = "trace", skip_all)]
pub(crate) fn clear_metadata_bytes_read(
    input: Bytes,
    timeout: u64,
) -> Result<ProcessRead, ExifError> {
    Ok(Process::run("exiftool", &["-all=", "-", "-out", "-"], &[], timeout)?.bytes_read(input))
}