Mirror of https://git.asonix.dog/asonix/relay.git (synced 2025-04-15 04:34:14 +00:00)

Compare commits (339 commits)
Commits included (SHA prefixes only; the author, date, and message columns were empty in this capture):

6ff7b59778 d9da352558 aea64c726a e243bd4600 a452fb91ba 35acc916f2 752067ffb7 b308e080af 6ab37dc06f a23b30cc91
1b58a50d44 308a945283 86cab5d2d9 a70e75665b f1792c8eb3 d918ef1495 2870789e1f cda92e7523 43b03a176c a465d1ae5b
4fa7674a35 8c14d613f7 aff2431681 5aa97212b3 97567cf598 4c663f399e 8a3256f52a 13a2653fe8 8dd9a86d22 5c0c0591dd
04ca4e5401 1de1d76506 dd9225bb89 b577730836 21883c168b 76a0c79369 6444782db9 14aea3256d f4f2aa2025 615271fe80
4aed601664 bf21f05aca e69f6c6edb 1e05eb4fe4 7f09ac3edd 4788ad332a 1fd82915d3 0472082a97 c8250acce7 b074759eb4
ed399f1531 7e39acdcb0 894d096622 05e31254ba 086ca9fbf2 603fcc6e57 6b8f15ee08 53939f8ae8 b53b34c515 6dcdf2fc87
83e5619eb4 9090bb5c62 d862bf8106 417553e643 a2456c3d5f 2b3cb8db92 18f1096221 c640567206 36aa9120ea e377f3988b
8c811710ac e4f665d75f 4383357abe f70af22c6a 8bce3d172f 8540e93469 708e7da301 a0f9827e18 9ebed87cde ae3d19a774
2a5e769afb f4839d688e 206db2079f 6714fe48ed 804d22ee81 5a6fbbcb77 ea926f73c4 53b14c3329 9b1fad0e2e a8ba53fe11
927fb91a5e 4d4093c15a 75df271b58 73b429ab51 2f57c855a4 cdbde9519e 2cbe4864c3 731a831070 795d3238ad 60abec2b96
e63e1f975e 5430da58aa 927f15c4ca ef57576c57 7438b0c5d0 f06316c6b2 f86bbc95ae a500824a7d 433c981a21 f3ff8ae5f7
f24685e700 5de244b848 769f7451f9 fff9bf112d 05c266c23c 2a7fed743f 240eee730c 8071c6ce3f 78dcce5a08 11d81683e3
5d526c60fe 73c7150f97 7cfebd927e d97cc4e5a4 8ff4961ded 970672a392 dfbd5c9035 d365e34f47 de97adc2d6 d1c6f6ff5d
582f311a20 09436746c8 a65ff19f6a bcdef5caa1 4651fcc9d2 fb6d8af1ca 9779518dc1 7a00229508 346664396c 74f35faa22
e005adfcf8 d40db33eb5 246e79b261 8d565a1fbe 18ff2864a0 4b71e56f31 9b4f6b47a6 5fa1d4983a d69a80ebe8 a9a47e8ee2
ab2dbfb439 73bf4d1597 2cb5ad9917 afd4105d0f d644e83733 ae91aa8fa7 73c016d418 a1ea5d676c 667d586160 4a7775b56d
9b809913ad a952b528df b5138fc16d 0e9b88a7ae f9cad61049 96547230bc c11ff17192 e93dd2da56 34dc1a2281 9cdebeae4c
662620be46 5488acb59d 4998cd3a56 f0a8862922 b6a10c4e65 3a14242a91 f5fed2fce1 5faeaf6371 f291b24269 5f5c34640f
d4e51a1afa fafba69258 07b961c28f 30dd16a889 88b0383084 b49eeaf822 943f679a69 37b2afe344 4e5fabce5f 689d85befb
40eb12258d efcec29d7b 62a886d0bf 163e480076 675fddcfeb 359ec68aa0 565a94d756 815c18b899 fbcbf141dd cf7a25f935
b56bddccb4 886c7d0ac6 178d23bcbd 549eb47202 5968cb8953 c5e254dad6 430ebec810 c15f591bc8 5d69eaf2ab 43b70f88a7
a0dc2363f6 9d68ccd834 a8b8325557 6082def854 31021e80e4 f4db90b699 d834537300 c18760d57f 8575439d88 c543e8b4eb
a0fbf9d236 b9dba28207 b5dc3e7c08 4d8e1a7241 89a9e20d4a 39b8b1d3fa 96eb028145 aad0cc990e aa8ddfa637 c6adc9f77b
68a0b7c574 d7adaeb38d 149ec1d14f d7a720b6c4 e2f3727d07 e9f312bed5 1a638f7f8d ed0ea6521e e987149757 01e283a065
ab7d940de9 5cd0b21ae3 b53ec4d980 c3b50bc94e 88329a79e2 a77a4cde22 5043892981 8afc16786d cdaf3b2fa3 37e3b17966
9133dd7688 a0195d94aa d8f3f1d0e9 205e794b9e 73cc4862d9 981a6779bf 5d33dba103 a3eb785b9e efc918a826 13cd308358
df70a28ca3 162dd1cb0e df3063e75f d44db2eab5 7ec56d2af2 9f6e0bc722 3500f85f44 a154fbb504 9ede941ff7 4267f52a7e
9272ba0d4c 8d0d39b1fc 787c8312bc 95f98ec052 8fa24aa243 cecc35ae85 4e1a782bea 9a9d09c0c4 99c3ec0b75 f892a50f2c
c322798ba3 c8b81bb9aa 902ce5d3c2 261805004b 10777c32ab 8f7d8b1f00 9333e9f8fd 2023d7ba54 a0dc917dfe b901322706
350b1c6bd3 07557d31d2 e9303ad9f6 094331a447 959201fa97 4df14c7602 9ac7854081 bfc743354f 547ef6c3e9 ebdc739c84
fe844a807f 08374d0382 04fcc83d29 448a907ab0 2c4901d3fc 2dd1dfe43f 0d42f72f87 f55ea0a550 7e01cbfc41 1a1b10a6ba
ffe9944739 e1137fadd8 debec2875d 25e8b5a673 4509465e9c 0768cb6ac6 6be72a836b 853301297c 5011e05c3d 2df34c9e55
e46c9e230b a4cb7934b1 189bd71276 63dc505e61 6ca6a19178 88cce4e21e bc0bf40551 881654fed9 373072c482 a0afa3f3fa
3358ae0461 cb7187a096 3b0e9fddc0 e37314355e fac40c1853 ea699a7978 64d06f210a 4ae7e435eb d7e9e58de2
81 changed files with 7122 additions and 2897 deletions
(file header missing in capture — deleted file, 2 lines)
@@ -1,2 +0,0 @@
-[build]
-# rustflags = ["--cfg", "tokio_unstable"]

.cargo/config.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
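For context: the `tokio_unstable` cfg is what the optional `console` feature in Cargo.toml (further down this diff) needs, since `console-subscriber` only works when tokio's unstable instrumentation is compiled in. A local debugging session might look like the sketch below; the `tokio-console` install step is an assumption, not something this diff sets up:

```bash
# The cfg now comes from .cargo/config.toml, so a plain build picks it up
cargo build --features console

# Attach the console UI in another terminal
# (assumes the CLI is installed: cargo install tokio-console)
tokio-console
```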
.drone.yml (deleted, 421 lines; YAML indentation reconstructed)
@@ -1,421 +0,0 @@
kind: pipeline
type: docker
name: clippy

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: clippy
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - rustup component add clippy
  - cargo clippy -- -D warnings

trigger:
  event:
  - push
  - pull_request
  - tag

---

kind: pipeline
type: docker
name: tests

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: tests
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo test

trigger:
  event:
  - push
  - pull_request
  - tag

---

kind: pipeline
type: docker
name: check-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---

kind: pipeline
type: docker
name: build-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - $TOOL-strip target/$TARGET/release/relay
  - cp target/$TARGET/release/relay .
  - cp relay relay-linux-amd64

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/relay
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-amd64
    build_args:
    - REPO_ARCH=amd64

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - relay-linux-amd64

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: check-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-arm64v8
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---

kind: pipeline
type: docker
name: build-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-arm64v8
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - $TOOL-strip target/$TARGET/release/relay
  - cp target/$TARGET/release/relay .
  - cp relay relay-linux-arm64v8

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/relay
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-arm64v8
    build_args:
    - REPO_ARCH=arm64v8

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - relay-linux-arm64v8

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: check-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-arm32v7
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---

kind: pipeline
type: docker
name: build-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-arm32v7
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - $TOOL-strip target/$TARGET/release/relay
  - cp target/$TARGET/release/relay .
  - cp relay relay-linux-arm32v7

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/relay
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-arm32v7
    build_args:
    - REPO_ARCH=arm32v7

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - relay-linux-arm32v7

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: manifest

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: manifest
  image: plugins/manifest:1
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    dump: true
    auto_tag: true
    ignore_missing: true
    spec: docker/drone/manifest.tmpl

depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7

trigger:
  event:
  - tag

---

kind: pipeline
type: docker
name: publish-crate

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: publish
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  environment:
    CRATES_IO_TOKEN:
      from_secret: crates_io_token
  commands:
  - cargo publish --token $CRATES_IO_TOKEN

depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7

trigger:
  event:
  - tag
.env (9 changes)
@@ -1,4 +1,13 @@
 HOSTNAME=localhost:8079
 PORT=8079
 HTTPS=false
 DEBUG=true
+RESTRICTED_MODE=true
+VALIDATE_SIGNATURES=false
+API_TOKEN=somesecretpassword
+FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
+LOCAL_DOMAINS="masto.asonix.dog"
+LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
+# OPENTELEMETRY_URL=http://localhost:4317
+PROMETHEUS_ADDR=127.0.0.1
+PROMETHEUS_PORT=9000
.forgejo/workflows/check.yaml (new file, 61 lines; YAML indentation reconstructed)
@@ -0,0 +1,61 @@
on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - main

jobs:
  clippy:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          cargo clippy --no-default-features -- -D warnings

  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test

  check:
    strategy:
      fail-fast: false
      matrix:
        target:
          - x86_64-unknown-linux-musl
          - armv7-unknown-linux-musleabihf
          - aarch64-unknown-linux-musl
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Debug builds
        run: cargo zigbuild --target ${{ matrix.target }}
.forgejo/workflows/publish.yaml (new file, 226 lines; YAML indentation reconstructed)
@@ -0,0 +1,226 @@
on:
  push:
    tags:
      - 'v*.*.*'

env:
  REGISTRY_IMAGE: asonix/relay

jobs:
  clippy:
    runs-on: base-image
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          # cargo clippy --no-default-features -- -D warnings
          cargo clippy --no-default-features

  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test

  build:
    needs:
      - clippy
      - tests
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    strategy:
      fail-fast: false
      matrix:
        info:
          - target: x86_64-unknown-linux-musl
            artifact: linux-amd64
            platform: linux/amd64
          - target: armv7-unknown-linux-musleabihf
            artifact: linux-arm32v7
            platform: linux/arm/v7
          - target: aarch64-unknown-linux-musl
            artifact: linux-arm64v8
            platform: linux/arm64
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Prepare Platform
        run: |
          platform=${{ matrix.info.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
        shell: bash
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
            suffix=-${{ matrix.info.artifact }}
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Set up QEMU
        uses: https://github.com/docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
        uses: https://github.com/docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: https://github.com/docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Compile relay
        run: cargo zigbuild --target ${{ matrix.info.target }} --release
      -
        name: Prepare artifacts
        run: |
          mkdir artifacts
          cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
      -
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: binaries
          path: artifacts/
      -
        name: Prepare binary
        run: |
          cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
      -
        name: Build and push ${{ matrix.info.platform }} docker image
        id: build
        uses: docker/build-push-action@v5
        with:
          context: ./docker/forgejo
          platforms: ${{ matrix.info.platform }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
      -
        name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
          echo "Created /tmp/digests/${digest#sha256:}"
        shell: bash
      -
        name: Upload ${{ matrix.info.platform }} digest
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: digests
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

  publish-docker:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    needs: [build]
    steps:
      -
        name: Download digests
        uses: https://github.com/actions/download-artifact@v3
        with:
          name: digests
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
          images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
          echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
          docker buildx imagetools create ${tags[@]} ${images[@]}
        shell: bash
      -
        name: Inspect Image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}

  publish-forgejo:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      - uses: https://github.com/actions/download-artifact@v3
        with:
          name: binaries
          path: artifacts/
          merge-multiple: true
      - uses: actions/forgejo-release@v1
        with:
          direction: upload
          token: ${{ secrets.GITHUB_TOKEN }}
          release-dir: artifacts/

  publish-crate:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Publish Crate
        run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}
.gitignore (vendored, 3 changes; add/remove markers not captured)
@@ -1,3 +1,6 @@
/target
/artifacts
/sled
/.direnv
/.envrc
/result
Cargo.lock (generated, 3937 changes) — file diff suppressed because it is too large.
Cargo.toml (99 changes; add/remove markers reconstructed from the paired old/new lines)
@@ -1,11 +1,11 @@
 [package]
 name = "ap-relay"
 description = "A simple activitypub relay"
-version = "0.3.31"
+version = "0.3.116"
 authors = ["asonix <asonix@asonix.dog>"]
 license = "AGPL-3.0"
 readme = "README.md"
-repository = "https://git.asonix.dog/asonix/ap-relay"
+repository = "https://git.asonix.dog/asonix/relay"
 keywords = ["activitypub", "relay"]
 edition = "2021"
 build = "src/build.rs"
@@ -14,78 +14,101 @@ build = "src/build.rs"
 name = "relay"
 path = "src/main.rs"
 
+[profile.release]
+strip = true
+
 [features]
-console = ["console-subscriber"]
+console = ["dep:console-subscriber"]
 default = []
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-anyhow = "1.0"
 actix-rt = "2.7.0"
-actix-web = { version = "4.0.1", default-features = false }
-actix-webfinger = "0.4.0"
-activitystreams = "0.7.0-alpha.19"
-activitystreams-ext = "0.1.0-alpha.2"
-ammonia = "3.1.0"
-awc = { version = "3.0.0", default-features = false, features = ["rustls"] }
-base64 = "0.13"
+actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
+actix-webfinger = { version = "0.5.0", default-features = false }
+activitystreams = "0.7.0-alpha.25"
+activitystreams-ext = "0.1.0-alpha.3"
+ammonia = "4.0.0"
+async-cpupool = "0.3.0"
+bcrypt = "0.16"
+base64 = "0.22"
 clap = { version = "4.0.0", features = ["derive"] }
-config = "0.13.0"
-console-subscriber = { version = "0.1", optional = true }
-dashmap = "5.1.0"
+color-eyre = "0.6.2"
+config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
+console-subscriber = { version = "0.4", optional = true }
+dashmap = "6.0.1"
 dotenv = "0.15.0"
-futures-util = "0.3.17"
-lru = "0.8.0"
+futures-core = "0.3.30"
+lru = "0.12.0"
+metrics = "0.23.0"
+metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
+  "http-listener",
+] }
+metrics-util = "0.17.0"
 mime = "0.3.16"
-opentelemetry = { version = "0.18", features = ["rt-tokio"] }
-opentelemetry-otlp = "0.11"
+minify-html = "0.15.0"
+opentelemetry = "0.27.1"
+opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
+opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
 pin-project-lite = "0.2.9"
+# pinned to metrics-util
+quanta = "0.12.0"
 rand = "0.8"
-rsa = "0.7"
-rsa-magic-public-key = "0.6.0"
+reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
+reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
+reqwest-tracing = "0.5.0"
+ring = "0.17.5"
+rsa = "0.9"
+rsa-magic-public-key = "0.8.0"
+rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
+rustls-channel-resolver = "0.3.0"
+rustls-pemfile = "2"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 sha2 = { version = "0.10", features = ["oid"] }
-signature = "1.6.4"
 sled = "0.34.7"
-teloxide = { version = "0.11.1", default-features = false, features = [
+streem = "0.2.0"
+teloxide = { version = "0.13.0", default-features = false, features = [
   "ctrlc_handler",
   "macros",
   "rustls",
 ] }
-thiserror = "1.0"
+thiserror = "2.0"
 time = { version = "0.3.17", features = ["serde"] }
 tracing = "0.1"
-tracing-awc = "0.1.6"
 tracing-error = "0.2"
-tracing-futures = "0.2"
-tracing-log = "0.1"
-tracing-opentelemetry = "0.18"
+tracing-log = "0.2"
+tracing-opentelemetry = "0.28"
 tracing-subscriber = { version = "0.3", features = [
   "ansi",
   "env-filter",
   "fmt",
 ] }
-tokio = { version = "1", features = ["macros", "sync"] }
+tokio = { version = "1", features = ["full", "tracing"] }
 uuid = { version = "1", features = ["v4", "serde"] }
 
 [dependencies.background-jobs]
-version = "0.13.0"
+version = "0.19.0"
 default-features = false
-features = ["background-jobs-actix", "error-logging"]
+features = ["error-logging", "metrics", "tokio"]
 
 [dependencies.http-signature-normalization-actix]
-version = "0.6.0"
+version = "0.11.1"
 default-features = false
-features = ["client", "server", "sha-2"]
+features = ["server", "ring"]
 
+[dependencies.http-signature-normalization-reqwest]
+version = "0.13.0"
+default-features = false
+features = ["middleware", "ring"]
+
 [dependencies.tracing-actix-web]
-version = "0.6.1"
+version = "0.7.9"
 
 [build-dependencies]
-anyhow = "1.0"
+color-eyre = "0.6.2"
 dotenv = "0.15.0"
-ructe = { version = "0.15.0", features = ["sass", "mime03"] }
-toml = "0.5.8"
+ructe = { version = "0.17.0", features = ["sass", "mime03"] }
+toml = "0.8.0"
+
+[profile.dev.package.rsa]
+opt-level = 3
README.md (72 changes; add/remove markers reconstructed where the old/new pairing is unambiguous)
@@ -6,11 +6,11 @@ _A simple and efficient activitypub relay_
 If running docker, you can start the relay with the following command:
 ```
 $ sudo docker run --rm -it \
-    -v "./:/mnt/" \
+    -v "$(pwd):/mnt/" \
     -e ADDR=0.0.0.0 \
     -e SLED_PATH=/mnt/sled/db-0.34 \
     -p 8080:8080 \
-    asonix/relay:0.3.23
+    asonix/relay:0.3.85
 ```
 This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
 #### Cargo
@@ -36,6 +36,9 @@ To simply run the server, the command is as follows
 $ ./relay
 ```
 
+#### Administration
+> **NOTE:** The server _must be running_ in order to update the lists with the following commands
+
 To learn about any other tasks, the `--help` flag can be passed
 ```bash
 An activitypub relay
@@ -90,9 +93,21 @@ HTTPS=true
 PRETTY_LOG=false
 PUBLISH_BLOCKS=true
 SLED_PATH=./sled/db-0.34
 RUST_LOG=warn
 API_TOKEN=somepasswordishtoken
 OPENTELEMETRY_URL=localhost:4317
+TELEGRAM_TOKEN=secret
+TELEGRAM_ADMIN_HANDLE=your_handle
+TLS_KEY=/path/to/key
+TLS_CERT=/path/to/cert
+FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
+LOCAL_DOMAINS=masto.asonix.dog
+LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
+PROMETHEUS_ADDR=0.0.0.0
+PROMETHEUS_PORT=9000
+CLIENT_TIMEOUT=10
+DELIVER_CONCURRENCY=8
+SIGNATURE_THREADS=2
 ```
 
 #### Descriptions
@@ -107,21 +122,57 @@ Whether to print incoming activities to the console when requests hit the /inbox
 ##### `RESTRICTED_MODE`
 This setting enables an 'allowlist' setup where only servers that have been explicitly enabled through the `relay -a` command can join the relay. This is `false` by default. If `RESTRICTED_MODE` is not enabled, then manually allowing domains with `relay -a` has no effect.
 ##### `VALIDATE_SIGNATURES`
-This setting enforces checking HTTP signatures on incoming activities. It defaults to `false` but should be set to `true` in production scenarios
+This setting enforces checking HTTP signatures on incoming activities. It defaults to `true`
 ##### `HTTPS`
-Whether the current server is running on an HTTPS port or not. This is used for generating URLs to the current running relay. By default it is set to `false`, but should be `true` in production scenarios.
+Whether the current server is running on an HTTPS port or not. This is used for generating URLs to the current running relay. By default it is set to `true`
 ##### `PUBLISH_BLOCKS`
 Whether or not to publish a list of blocked domains in the `nodeinfo` metadata for the server. It defaults to `false`.
 ##### `SLED_PATH`
 Where to store the on-disk database of connected servers. This defaults to `./sled/db-0.34`.
 ##### `RUST_LOG`
 The log level to print. Available levels are `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. You can also specify module paths to enable some logs but not others, such as `RUST_LOG=warn,tracing_actix_web=info,relay=info`. This defaults to `warn`
 ##### `SOURCE_REPO`
 The URL to the source code for the relay. This defaults to `https://git.asonix.dog/asonix/relay`, but should be changed if you're running a fork hosted elsewhere.
 ##### `REPOSITORY_COMMIT_BASE`
 The base path of the repository commit hash reference. For example, `/src/commit/` for Gitea, `/tree/` for GitLab.
 ##### `API_TOKEN`
 The secret token used to access the admin APIs. This must be set for the commandline to function
 ##### `OPENTELEMETRY_URL`
 A URL for exporting opentelemetry spans. This is mostly useful for debugging. There is no default, since most people probably don't run an opentelemetry collector.
 ##### `TELEGRAM_TOKEN`
 A Telegram Bot Token for running the relay administration bot. There is no default.
 ##### `TELEGRAM_ADMIN_HANDLE`
 The handle of the telegram user allowed to administer the relay. There is no default.
 ##### `TLS_KEY`
 Optional - This is specified if you are running the relay directly on the internet and have a TLS key to provide HTTPS for your relay
 ##### `TLS_CERT`
 Optional - This is specified if you are running the relay directly on the internet and have a TLS certificate chain to provide HTTPS for your relay
 ##### `FOOTER_BLURB`
 Optional - Add custom notes in the footer of the page
 ##### `LOCAL_DOMAINS`
 Optional - domains of mastodon servers run by the same admin as the relay
 ##### `LOCAL_BLURB`
 Optional - description for the relay
 ##### `PROMETHEUS_ADDR`
 Optional - Address to bind to for serving the prometheus scrape endpoint
 ##### `PROMETHEUS_PORT`
 Optional - Port to bind to for serving the prometheus scrape endpoint
 ##### `CLIENT_TIMEOUT`
 Optional - How long the relay will hold open a connection (in seconds) to a remote server during fetches and deliveries. This defaults to 10
 ##### `DELIVER_CONCURRENCY`
 Optional - How many deliver requests the relay should allow to be in-flight per thread. The default is 8
 ##### `SIGNATURE_THREADS`
 Optional - Override number of threads used for signing and verifying requests. Default is `std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot detect the correct number of cores, it falls back to 1.
 ##### `PROXY_URL`
 Optional - URL of an HTTP proxy to forward outbound requests through
 ##### `PROXY_USERNAME`
 Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
 ##### `PROXY_PASSWORD`
 Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
 
 ### Subscribing
 Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
@@ -141,10 +192,16 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
 - Follow Public, become a listener of the relay
 - Undo Follow {self-actor}, stop listening on the relay, an Undo Follow will be sent back
 - Undo Follow Public, stop listening on the relay
-- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers
+- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers.
+    Note that this activity will likely be rejected by the listening servers unless it has been
+    signed with a JSON-LD signature
-- Update {anything}, the Update {anything} is relayed verbatim to listening servers
+- Update {anything}, the Update {anything} is relayed verbatim to listening servers.
+    Note that this activity will likely be rejected by the listening servers unless it has been
+    signed with a JSON-LD signature
+- Add {anything}, the Add {anything} is relayed verbatim to listening servers.
+    Note that this activity will likely be rejected by the listening servers unless it has been
+    signed with a JSON-LD signature
+- Remove {anything}, the Remove {anything} is relayed verbatim to listening servers.
+    Note that this activity will likely be rejected by the listening servers unless it has been
+    signed with a JSON-LD signature
@@ -152,6 +209,9 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
 - Webfinger
 - NodeInfo
 
+### Known issues
+Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay.
+
 ### Contributing
 Feel free to open issues for anything you find an issue with. Please note that any contributed code will be licensed under the AGPLv3.
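A worked example of the administration flow described in the README above (the relay must be running and `API_TOKEN` must be set). The `-a`/`-b`/`-u`/`-l` flags match the `src/args.rs` diff near the end of this page; the domains are placeholders:

```bash
# Allow a domain to join (only meaningful when RESTRICTED_MODE=true)
API_TOKEN=somepasswordishtoken ./relay -a example.com

# Block a domain, then undo the block
API_TOKEN=somepasswordishtoken ./relay -b spam.example
API_TOKEN=somepasswordishtoken ./relay -u -b spam.example

# List allowed and blocked domains
API_TOKEN=somepasswordishtoken ./relay -l
```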
(file header missing in capture — deleted file, 11 lines; content matches the docker/drone/Dockerfile referenced by the deleted .drone.yml)
@@ -1,11 +0,0 @@
ARG REPO_ARCH

FROM asonix/rust-runner:latest-linux-$REPO_ARCH

COPY relay /usr/local/bin/relay

USER app
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]
(file header missing in capture — deleted file, 25 lines; content matches the docker/drone/manifest.tmpl referenced by the deleted .drone.yml)
@@ -1,25 +0,0 @@
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
  - {{this}}
{{/each}}
{{/if}}
manifests:
  -
    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
    platform:
      architecture: amd64
      os: linux
  -
    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
    platform:
      architecture: arm64
      os: linux
      variant: v8
  -
    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
    platform:
      architecture: arm
      os: linux
      variant: v7
docker/forgejo/Dockerfile (new file, 24 lines)
@@ -0,0 +1,24 @@
FROM alpine:3.19

ARG UID=991
ARG GID=991

ENV \
  UID=${UID} \
  GID=${GID}

USER root
RUN \
  addgroup -g "${GID}" app && \
  adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
  apk add tini && \
  chown -R app:app /mnt

COPY relay /usr/local/bin/relay

USER app
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]
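Note that this Dockerfile only copies in a pre-built `relay` binary — the publish workflow above stages `target/<target>/release/relay` into `docker/forgejo/` before `docker build` runs. Reproducing that locally would look roughly like the following; the `relay:local` tag is an arbitrary example:

```bash
# Build a static musl binary, the way the publish workflow does
cargo zigbuild --target x86_64-unknown-linux-musl --release

# Stage the binary where the Dockerfile's COPY expects it
cp target/x86_64-unknown-linux-musl/release/relay docker/forgejo/relay

# Build the image from the docker/forgejo context
docker build -t relay:local docker/forgejo
```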
(file header missing in capture — deleted file, 41 lines; a cross-build Dockerfile)
@@ -1,41 +0,0 @@
ARG REPO_ARCH=amd64

# cross-build environment
FROM asonix/rust-builder:$REPO_ARCH-latest AS builder

ARG TAG=main
ARG BINARY=relay
ARG PROJECT=relay
ARG GIT_REPOSITORY=https://git.asonix.dog/asonix/$PROJECT

ENV \
  BINARY=${BINARY}

ADD \
  --chown=build:build \
  $GIT_REPOSITORY/archive/$TAG.tar.gz \
  /opt/build/repo.tar.gz

RUN \
  tar zxf repo.tar.gz

WORKDIR /opt/build/$PROJECT

RUN \
  build

# production environment
FROM asonix/rust-runner:$REPO_ARCH-latest

ARG BINARY=relay

ENV \
  BINARY=${BINARY}

COPY \
  --from=builder \
  /opt/build/binary \
  /usr/bin/${BINARY}

ENTRYPOINT ["/sbin/tini", "--"]
CMD /usr/bin/${BINARY}
(file header missing in capture — deleted file, 37 lines; a docker image build helper script)
@@ -1,37 +0,0 @@
#!/usr/bin/env bash

function require() {
  if [ "$1" = "" ]; then
    echo "input '$2' required"
    print_help
    exit 1
  fi
}

function print_help() {
  echo "deploy.sh"
  echo ""
  echo "Usage:"
  echo "	deploy.sh [repo] [tag] [arch]"
  echo ""
  echo "Args:"
  echo "	repo: The docker repository to publish the image"
  echo "	tag: The tag applied to the docker image"
  echo "	arch: The architecture of the docker image"
}

REPO=$1
TAG=$2
ARCH=$3

require "$REPO" repo
require "$TAG" tag
require "$ARCH" arch

sudo docker build \
  --pull \
  --build-arg TAG=$TAG \
  --build-arg REPO_ARCH=$ARCH \
  -t $REPO:$ARCH-$TAG \
  -f Dockerfile \
  .
(file header missing in capture — deleted file, 87 lines; the old release helper script)
@@ -1,87 +0,0 @@
#!/usr/bin/env bash

function require() {
  if [ "$1" = "" ]; then
    echo "input '$2' required"
    print_help
    exit 1
  fi
}

function print_help() {
  echo "deploy.sh"
  echo ""
  echo "Usage:"
  echo "	deploy.sh [tag] [branch] [push]"
  echo ""
  echo "Args:"
  echo "	tag: The git tag to be applied to the repository and docker build"
  echo "	branch: The git branch to use for tagging and publishing"
  echo "	push: Whether or not to push the image"
  echo ""
  echo "Examples:"
  echo "	./deploy.sh v0.3.0-alpha.13 main true"
  echo "	./deploy.sh v0.3.0-alpha.13-shell-out asonix/shell-out false"
}

function build_image() {
  tag=$1
  arch=$2
  push=$3

  ./build-image.sh asonix/relay $tag $arch

  sudo docker tag asonix/relay:$arch-$tag asonix/relay:$arch-latest

  if [ "$push" == "true" ]; then
    sudo docker push asonix/relay:$arch-$tag
    sudo docker push asonix/relay:$arch-latest
  fi
}

# Creating the new tag
new_tag="$1"
branch="$2"
push=$3

require "$new_tag" "tag"
require "$branch" "branch"
require "$push" "push"

if ! sudo docker run --rm -it arm64v8/alpine:3.11 /bin/sh -c 'echo "docker is configured correctly"'
then
  echo "docker is not configured to run on qemu-emulated architectures, fixing will require sudo"
  sudo docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
fi

set -xe

git checkout $branch

# Changing the docker-compose prod
sed -i "s/asonix\/relay:.*/asonix\/relay:$new_tag/" docker-compose.yml
git add ../prod/docker-compose.yml
# The commit
git commit -m"Version $new_tag"
git tag $new_tag

# Push
git push origin $new_tag
git push

# Build for arm64v8, arm32v7 and amd64
build_image $new_tag arm64v8 $push
build_image $new_tag arm32v7 $push
build_image $new_tag amd64 $push

# Build for other archs
# TODO

if [ "$push" == "true" ]; then
  ./manifest.sh relay $new_tag
  ./manifest.sh relay latest

  # pushd ../../
  # cargo publish
  # popd
fi
(file header missing in capture — docker-compose.yml)
@@ -2,7 +2,7 @@ version: '3.3'
 services:
   relay:
-    image: asonix/relay:v0.3.8
+    image: asonix/relay:0.3.115
     ports:
       - "8079:8079"
     restart: always
@@ -14,6 +14,7 @@ services: (add/remove markers not captured for this hunk)
      - RESTRICTED_MODE=false
      - VALIDATE_SIGNATURES=true
      - HTTPS=true
      - DATABASE_URL=postgres://pg_user:pg_pass@pg_host:pg_port/pg_database
      - SLED_PATH=/mnt/sled/db-0.34
      - PRETTY_LOG=false
      - PUBLISH_BLOCKS=true
      - API_TOKEN=somepasswordishtoken
(file header missing in capture — deleted file, 43 lines; the old manifest helper script)
@@ -1,43 +0,0 @@
#!/usr/bin/env bash

function require() {
  if [ "$1" = "" ]; then
    echo "input '$2' required"
    print_help
    exit 1
  fi
}
function print_help() {
  echo "deploy.sh"
  echo ""
  echo "Usage:"
  echo "	manifest.sh [repo] [tag]"
  echo ""
  echo "Args:"
  echo "	repo: The docker repository to update"
  echo "	tag: The git tag to be applied to the image manifest"
}

REPO=$1
TAG=$2

require "$REPO" "repo"
require "$TAG" "tag"

set -xe

sudo docker manifest create asonix/$REPO:$TAG \
  -a asonix/$REPO:arm64v8-$TAG \
  -a asonix/$REPO:arm32v7-$TAG \
  -a asonix/$REPO:amd64-$TAG

sudo docker manifest annotate asonix/$REPO:$TAG \
  asonix/$REPO:arm64v8-$TAG --os linux --arch arm64 --variant v8

sudo docker manifest annotate asonix/$REPO:$TAG \
  asonix/$REPO:arm32v7-$TAG --os linux --arch arm --variant v7

sudo docker manifest annotate asonix/$REPO:$TAG \
  asonix/$REPO:amd64-$TAG --os linux --arch amd64

sudo docker manifest push asonix/$REPO:$TAG --purge
flake.lock (new file, 61 lines)
@@ -0,0 +1,61 @@
{
  "nodes": {
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1710146030,
        "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1733550349,
        "narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-24.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 34 lines)
@@ -0,0 +1,34 @@
{
  description = "relay";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = import nixpkgs {
          inherit system;
        };
      in
      {
        packages = rec {
          relay = pkgs.callPackage ./relay.nix { };

          default = relay;
        };

        apps = rec {
          dev = flake-utils.lib.mkApp { drv = self.packages.${system}.pict-rs-proxy; };
          default = dev;
        };

        devShell = with pkgs; mkShell {
          nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ];

          RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
        };
      });
}
relay.nix (new file, 23 lines)
@@ -0,0 +1,23 @@
{ lib
, nixosTests
, rustPlatform
}:

rustPlatform.buildRustPackage {
  pname = "relay";
  version = "0.3.116";
  src = ./.;
  cargoLock.lockFile = ./Cargo.lock;

  RUSTFLAGS = "--cfg tokio_unstable";

  nativeBuildInputs = [ ];

  passthru.tests = { inherit (nixosTests) relay; };

  meta = with lib; {
    description = "An ActivityPub relay";
    homepage = "https://git.asonix.dog/asonix/relay";
    license = with licenses; [ agpl3Plus ];
  };
}
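Since the flake exposes `packages.relay` (aliased as `default`) and a dev shell, typical usage should be the standard flake commands; these invocations are inferred from the outputs above rather than documented on this page:

```bash
# Build the package defined in relay.nix
# (its RUSTFLAGS already include --cfg tokio_unstable)
nix build .#relay

# Enter a shell with cargo, cargo-zigbuild, clippy, rust-analyzer, etc.
nix develop
```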
(file header missing in capture — SCSS stylesheet; add/remove markers not captured for this file, nesting indentation reconstructed)
@@ -41,7 +41,7 @@ header {
  }
}

section {
  article {
    background-color: #fff;
    color: #333;
    border: 1px solid #e5e5e5;

@@ -51,8 +51,16 @@
    max-width: 700px;
    padding-bottom: 32px;

    > p:first-child {
      margin-top: 0;
    section {
      border-bottom: 1px solid #e5e5e5;

      > h4:first-child,
      > p:first-child {
        margin-top: 0;
      }
      > p:last-child {
        margin-bottom: 0;
      }
    }

    h3 {

@@ -67,13 +75,13 @@
    li {
      padding-top: 36px;
      border-bottom: 1px solid #e5e5e5;
    }

    .padded {
      padding: 0 24px;
    }

    .local-explainer,
    .joining {
      padding: 24px;
    }

@@ -174,9 +182,11 @@ footer {
    li {
      padding: 0;
      border-bottom: none;
    }
  }
  article section {
    border-bottom: none;
  }
}

@@ -226,7 +236,7 @@ footer {
  padding: 24px;
}

section {
  article {
    border-left: none;
    border-right: none;
    border-radius: 0;
src/admin.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
use activitystreams::iri_string::types::IriString;
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;

pub mod client;
pub mod routes;

#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct Domains {
    domains: Vec<String>,
}

#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct AllowedDomains {
    pub(crate) allowed_domains: Vec<String>,
}

#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct BlockedDomains {
    pub(crate) blocked_domains: Vec<String>,
}

#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct ConnectedActors {
    pub(crate) connected_actors: Vec<IriString>,
}

#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct LastSeen {
    pub(crate) last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>>,
    pub(crate) never: Vec<String>,
}
src/admin/client.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
use crate::{
    admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
    collector::Snapshot,
    config::{AdminUrlKind, Config},
    error::{Error, ErrorKind},
    extractors::XApiToken,
};
use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned;

pub(crate) async fn allow(
    client: &ClientWithMiddleware,
    config: &Config,
    domains: Vec<String>,
) -> Result<(), Error> {
    post_domains(client, config, domains, AdminUrlKind::Allow).await
}

pub(crate) async fn disallow(
    client: &ClientWithMiddleware,
    config: &Config,
    domains: Vec<String>,
) -> Result<(), Error> {
    post_domains(client, config, domains, AdminUrlKind::Disallow).await
}

pub(crate) async fn block(
    client: &ClientWithMiddleware,
    config: &Config,
    domains: Vec<String>,
) -> Result<(), Error> {
    post_domains(client, config, domains, AdminUrlKind::Block).await
}

pub(crate) async fn unblock(
    client: &ClientWithMiddleware,
    config: &Config,
    domains: Vec<String>,
) -> Result<(), Error> {
    post_domains(client, config, domains, AdminUrlKind::Unblock).await
}

pub(crate) async fn allowed(
    client: &ClientWithMiddleware,
    config: &Config,
) -> Result<AllowedDomains, Error> {
    get_results(client, config, AdminUrlKind::Allowed).await
}

pub(crate) async fn blocked(
    client: &ClientWithMiddleware,
    config: &Config,
) -> Result<BlockedDomains, Error> {
    get_results(client, config, AdminUrlKind::Blocked).await
}

pub(crate) async fn connected(
    client: &ClientWithMiddleware,
    config: &Config,
) -> Result<ConnectedActors, Error> {
    get_results(client, config, AdminUrlKind::Connected).await
}

pub(crate) async fn stats(
    client: &ClientWithMiddleware,
    config: &Config,
) -> Result<Snapshot, Error> {
    get_results(client, config, AdminUrlKind::Stats).await
}

pub(crate) async fn last_seen(
    client: &ClientWithMiddleware,
    config: &Config,
) -> Result<LastSeen, Error> {
    get_results(client, config, AdminUrlKind::LastSeen).await
}

async fn get_results<T: DeserializeOwned>(
    client: &ClientWithMiddleware,
    config: &Config,
    url_kind: AdminUrlKind,
) -> Result<T, Error> {
    let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;

    let iri = config.generate_admin_url(url_kind);

    let res = client
        .get(iri.as_str())
        .header(XApiToken::http1_name(), x_api_token.to_string())
        .send()
        .await
        .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;

    if !res.status().is_success() {
        return Err(ErrorKind::Status(
            iri.to_string(),
            crate::http1::status_to_http02(res.status()),
        )
        .into());
    }

    let t = res
        .json()
        .await
        .map_err(|e| ErrorKind::ReceiveResponse(iri.to_string(), e.to_string()))?;

    Ok(t)
}

async fn post_domains(
    client: &ClientWithMiddleware,
    config: &Config,
    domains: Vec<String>,
    url_kind: AdminUrlKind,
) -> Result<(), Error> {
    let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;

    let iri = config.generate_admin_url(url_kind);

    let res = client
        .post(iri.as_str())
        .header(XApiToken::http1_name(), x_api_token.to_string())
        .json(&Domains { domains })
        .send()
        .await
        .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;

    if !res.status().is_success() {
        tracing::warn!("Failed to allow domains");
    }

    Ok(())
}
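These helpers are thin wrappers over authenticated HTTP calls, so the same admin operations can be exercised with curl. The `/api/v1/admin/allow` path and the `x-api-token` header name are assumptions — the real values come from `AdminUrlKind` and `XApiToken` in parts of the codebase not shown in this diff:

```bash
# Hypothetical invocation; verify the route and header against src/config.rs
# and src/extractors.rs before relying on it
curl -X POST \
  -H "x-api-token: $API_TOKEN" \
  -H "content-type: application/json" \
  -d '{"domains": ["example.com"]}' \
  "http://localhost:8079/api/v1/admin/allow"
```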
90
src/admin/routes.rs
Normal file
90
src/admin/routes.rs
Normal file
|
@ -0,0 +1,90 @@
|
|||
use crate::{
    admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
    collector::{MemoryCollector, Snapshot},
    error::Error,
    extractors::Admin,
};
use actix_web::{
    web::{Data, Json},
    HttpResponse,
};
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;

pub(crate) async fn allow(
    admin: Admin,
    Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
    admin.db_ref().add_allows(domains).await?;

    Ok(HttpResponse::NoContent().finish())
}

pub(crate) async fn disallow(
    admin: Admin,
    Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
    admin.db_ref().remove_allows(domains).await?;

    Ok(HttpResponse::NoContent().finish())
}

pub(crate) async fn block(
    admin: Admin,
    Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
    admin.db_ref().add_blocks(domains).await?;

    Ok(HttpResponse::NoContent().finish())
}

pub(crate) async fn unblock(
    admin: Admin,
    Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
    admin.db_ref().remove_blocks(domains).await?;

    Ok(HttpResponse::NoContent().finish())
}

pub(crate) async fn allowed(admin: Admin) -> Result<Json<AllowedDomains>, Error> {
    let allowed_domains = admin.db_ref().allows().await?;

    Ok(Json(AllowedDomains { allowed_domains }))
}

pub(crate) async fn blocked(admin: Admin) -> Result<Json<BlockedDomains>, Error> {
    let blocked_domains = admin.db_ref().blocks().await?;

    Ok(Json(BlockedDomains { blocked_domains }))
}

pub(crate) async fn connected(admin: Admin) -> Result<Json<ConnectedActors>, Error> {
    let connected_actors = admin.db_ref().connected_ids().await?;

    Ok(Json(ConnectedActors { connected_actors }))
}

pub(crate) async fn stats(
    _admin: Admin,
    collector: Data<MemoryCollector>,
) -> Result<Json<Snapshot>, Error> {
    Ok(Json(collector.snapshot()))
}

pub(crate) async fn last_seen(admin: Admin) -> Result<Json<LastSeen>, Error> {
    let nodes = admin.db_ref().last_seen().await?;

    let mut last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>> = BTreeMap::new();
    let mut never = Vec::new();

    for (domain, datetime) in nodes {
        if let Some(datetime) = datetime {
            last_seen.entry(datetime).or_default().insert(domain);
        } else {
            never.push(domain);
        }
    }

    Ok(Json(LastSeen { last_seen, never }))
}
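These handlers still need mounting; the /api/v1/admin paths are fixed by do_generate_admin_url in src/config.rs further down this comparison, while the scope wiring itself is a sketch, not code from this diff.

// Sketch: mounting the admin routes under the prefix the client expects.
fn admin_scope() -> actix_web::Scope {
    use actix_web::web;

    web::scope("/api/v1/admin")
        .route("/allow", web::post().to(allow))
        .route("/disallow", web::post().to(disallow))
        .route("/block", web::post().to(block))
        .route("/unblock", web::post().to(unblock))
        .route("/allowed", web::get().to(allowed))
        .route("/blocked", web::get().to(blocked))
        .route("/connected", web::get().to(connected))
        .route("/stats", web::get().to(stats))
        .route("/last_seen", web::get().to(last_seen))
}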
src/apub.rs
@@ -34,11 +34,13 @@ pub struct PublicKey {
#[serde(rename_all = "PascalCase")]
pub enum ValidTypes {
    Accept,
    Add,
    Announce,
    Create,
    Delete,
    Follow,
    Reject,
    Remove,
    Undo,
    Update,
}
src/args.rs (33 lines changed)
@@ -11,9 +11,30 @@ pub(crate) struct Args {

    #[arg(short, long, help = "Undo allowing or blocking domains")]
    undo: bool,

    #[arg(short, long, help = "List allowed and blocked domains")]
    list: bool,

    #[arg(short, long, help = "Get statistics from the server")]
    stats: bool,

    #[arg(
        short,
        long,
        help = "List domains by when they were last successfully contacted"
    )]
    contacted: bool,
}

impl Args {
    pub(crate) fn any(&self) -> bool {
        !self.blocks.is_empty()
            || !self.allowed.is_empty()
            || self.list
            || self.stats
            || self.contacted
    }

    pub(crate) fn new() -> Self {
        Self::parse()
    }

@@ -29,4 +50,16 @@ impl Args {
    pub(crate) fn undo(&self) -> bool {
        self.undo
    }

    pub(crate) fn list(&self) -> bool {
        self.list
    }

    pub(crate) fn stats(&self) -> bool {
        self.stats
    }

    pub(crate) fn contacted(&self) -> bool {
        self.contacted
    }
}
src/build.rs (15 lines changed)
@@ -5,7 +5,8 @@ fn git_info() {
    if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() {
        if output.status.success() {
            let git_hash = String::from_utf8_lossy(&output.stdout);
            println!("cargo:rustc-env=GIT_HASH={}", git_hash);
            println!("cargo:rustc-env=GIT_HASH={git_hash}");
            println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8])
        }
    }

@@ -15,15 +16,15 @@ fn git_info() {
    {
        if output.status.success() {
            let git_branch = String::from_utf8_lossy(&output.stdout);
            println!("cargo:rustc-env=GIT_BRANCH={}", git_branch);
            println!("cargo:rustc-env=GIT_BRANCH={git_branch}");
        }
    }
}

fn version_info() -> Result<(), anyhow::Error> {
fn version_info() -> color_eyre::Result<()> {
    let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");

    let mut file = File::open(&cargo_toml)?;
    let mut file = File::open(cargo_toml)?;

    let mut cargo_data = String::new();
    file.read_to_string(&mut cargo_data)?;

@@ -31,17 +32,17 @@ fn version_info() -> Result<(), anyhow::Error> {
    let data: toml::Value = toml::from_str(&cargo_data)?;

    if let Some(version) = data["package"]["version"].as_str() {
        println!("cargo:rustc-env=PKG_VERSION={}", version);
        println!("cargo:rustc-env=PKG_VERSION={version}");
    }

    if let Some(name) = data["package"]["name"].as_str() {
        println!("cargo:rustc-env=PKG_NAME={}", name);
        println!("cargo:rustc-env=PKG_NAME={name}");
    }

    Ok(())
}

fn main() -> Result<(), anyhow::Error> {
fn main() -> color_eyre::Result<()> {
    dotenv::dotenv().ok();

    git_info();
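The cargo:rustc-env lines above surface as compile-time constants; a small sketch of the consuming side (src/config.rs below does exactly this for the git values):

// Sketch: PKG_VERSION is always emitted by version_info(), so env! is safe;
// the git values depend on a checkout existing, hence option_env!.
const VERSION: &str = env!("PKG_VERSION");

fn short_hash() -> Option<&'static str> {
    option_env!("GIT_SHORT_HASH")
}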
src/collector.rs (new file, 425 lines)
@@ -0,0 +1,425 @@
use metrics::{Key, Metadata, Recorder, SetRecorderError};
use metrics_util::{
    registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
    MetricKindMask, Summary,
};
use quanta::Clock;
use std::{
    collections::{BTreeMap, HashMap},
    sync::{atomic::Ordering, Arc, RwLock},
    time::Duration,
};

const SECONDS: u64 = 1;
const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;

pub(crate) fn recordable(len: usize) -> u32 {
    ((len as u64) % u64::from(u32::MAX)) as u32
}

type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;

#[derive(Clone)]
pub struct MemoryCollector {
    inner: Arc<Inner>,
}

struct Inner {
    descriptions: RwLock<HashMap<String, metrics::SharedString>>,
    distributions: RwLock<HashMap<String, DistributionMap>>,
    recency: Recency<Key>,
    registry: Registry<Key, GenerationalStorage<AtomicStorage>>,
}

#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Counter {
    labels: BTreeMap<String, String>,
    value: u64,
}

impl std::fmt::Display for Counter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let labels = self
            .labels
            .iter()
            .map(|(k, v)| format!("{k}: {v}"))
            .collect::<Vec<_>>()
            .join(", ");

        write!(f, "{labels} - {}", self.value)
    }
}

#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Gauge {
    labels: BTreeMap<String, String>,
    value: f64,
}

impl std::fmt::Display for Gauge {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let labels = self
            .labels
            .iter()
            .map(|(k, v)| format!("{k}: {v}"))
            .collect::<Vec<_>>()
            .join(", ");

        write!(f, "{labels} - {}", self.value)
    }
}

#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Histogram {
    labels: BTreeMap<String, String>,
    value: Vec<(f64, Option<f64>)>,
}

impl std::fmt::Display for Histogram {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let labels = self
            .labels
            .iter()
            .map(|(k, v)| format!("{k}: {v}"))
            .collect::<Vec<_>>()
            .join(", ");

        let value = self
            .value
            .iter()
            .map(|(k, v)| {
                if let Some(v) = v {
                    format!("{k}: {v:.6}")
                } else {
                    format!("{k}: None,")
                }
            })
            .collect::<Vec<_>>()
            .join(", ");

        write!(f, "{labels} - {value}")
    }
}

#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Snapshot {
    counters: HashMap<String, Vec<Counter>>,
    gauges: HashMap<String, Vec<Gauge>>,
    histograms: HashMap<String, Vec<Histogram>>,
}

const PAIRS: [((&str, &str), &str); 2] = [
    (
        (
            "background-jobs.worker.started",
            "background-jobs.worker.finished",
        ),
        "background-jobs.worker.running",
    ),
    (
        (
            "background-jobs.job.started",
            "background-jobs.job.finished",
        ),
        "background-jobs.job.running",
    ),
];

#[derive(Default)]
struct MergeCounter {
    start: Option<Counter>,
    finish: Option<Counter>,
}

impl MergeCounter {
    fn merge(self) -> Option<Counter> {
        match (self.start, self.finish) {
            (Some(start), Some(end)) => Some(Counter {
                labels: start.labels,
                value: start.value.saturating_sub(end.value),
            }),
            (Some(only), None) => Some(only),
            (None, Some(only)) => Some(Counter {
                labels: only.labels,
                value: 0,
            }),
            (None, None) => None,
        }
    }
}

impl Snapshot {
    pub(crate) fn present(self) {
        if !self.counters.is_empty() {
            println!("Counters");
            let mut merging = HashMap::new();
            for (key, counters) in self.counters {
                if let Some(((start, _), name)) = PAIRS
                    .iter()
                    .find(|((start, finish), _)| *start == key || *finish == key)
                {
                    let entry = merging.entry(name).or_insert_with(HashMap::new);

                    for counter in counters {
                        let merge_counter = entry
                            .entry(counter.labels.clone())
                            .or_insert_with(MergeCounter::default);
                        if key == *start {
                            merge_counter.start = Some(counter);
                        } else {
                            merge_counter.finish = Some(counter);
                        }
                    }

                    continue;
                }

                println!("\t{key}");
                for counter in counters {
                    println!("\t\t{counter}");
                }
            }

            for (key, counters) in merging {
                println!("\t{key}");

                for (_, counter) in counters {
                    if let Some(counter) = counter.merge() {
                        println!("\t\t{counter}");
                    }
                }
            }
        }

        if !self.gauges.is_empty() {
            println!("Gauges");
            for (key, gauges) in self.gauges {
                println!("\t{key}");

                for gauge in gauges {
                    println!("\t\t{gauge}");
                }
            }
        }

        if !self.histograms.is_empty() {
            println!("Histograms");
            for (key, histograms) in self.histograms {
                println!("\t{key}");

                for histogram in histograms {
                    println!("\t\t{histogram}");
                }
            }
        }
    }
}

fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
    let labels = key
        .labels()
        .map(|label| (label.key().to_string(), label.value().to_string()))
        .collect();
    let name = key.name().to_string();
    (name, labels)
}

impl Inner {
    fn snapshot_counters(&self) -> HashMap<String, Vec<Counter>> {
        let mut counters = HashMap::new();

        for (key, counter) in self.registry.get_counter_handles() {
            let gen = counter.get_generation();
            if !self.recency.should_store_counter(&key, gen, &self.registry) {
                continue;
            }

            let (name, labels) = key_to_parts(&key);
            let value = counter.get_inner().load(Ordering::Acquire);
            counters.entry(name).or_insert_with(Vec::new).push(Counter {
                labels: labels.into_iter().collect(),
                value,
            });
        }

        counters
    }

    fn snapshot_gauges(&self) -> HashMap<String, Vec<Gauge>> {
        let mut gauges = HashMap::new();

        for (key, gauge) in self.registry.get_gauge_handles() {
            let gen = gauge.get_generation();
            if !self.recency.should_store_gauge(&key, gen, &self.registry) {
                continue;
            }

            let (name, labels) = key_to_parts(&key);
            let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire));
            gauges.entry(name).or_insert_with(Vec::new).push(Gauge {
                labels: labels.into_iter().collect(),
                value,
            })
        }

        gauges
    }

    fn snapshot_histograms(&self) -> HashMap<String, Vec<Histogram>> {
        for (key, histogram) in self.registry.get_histogram_handles() {
            let gen = histogram.get_generation();
            let (name, labels) = key_to_parts(&key);

            if !self
                .recency
                .should_store_histogram(&key, gen, &self.registry)
            {
                let mut d = self.distributions.write().unwrap();
                let delete_by_name = if let Some(by_name) = d.get_mut(&name) {
                    by_name.remove(&labels);
                    by_name.is_empty()
                } else {
                    false
                };
                drop(d);

                if delete_by_name {
                    self.descriptions.write().unwrap().remove(&name);
                }

                continue;
            }

            let mut d = self.distributions.write().unwrap();
            let outer_entry = d.entry(name.clone()).or_default();

            let entry = outer_entry
                .entry(labels)
                .or_insert_with(Summary::with_defaults);

            histogram.get_inner().clear_with(|samples| {
                for sample in samples {
                    entry.add(*sample);
                }
            });

            let mut total_len = 0;
            for dist_map in d.values() {
                total_len += dist_map.len();
            }

            metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
        }

        let d = self.distributions.read().unwrap().clone();
        d.into_iter()
            .map(|(key, value)| {
                (
                    key,
                    value
                        .into_iter()
                        .map(|(labels, summary)| Histogram {
                            labels: labels.into_iter().collect(),
                            value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0]
                                .into_iter()
                                .map(|q| (q, summary.quantile(q)))
                                .collect(),
                        })
                        .collect(),
                )
            })
            .collect()
    }

    fn snapshot(&self) -> Snapshot {
        Snapshot {
            counters: self.snapshot_counters(),
            gauges: self.snapshot_gauges(),
            histograms: self.snapshot_histograms(),
        }
    }
}

impl MemoryCollector {
    pub(crate) fn new() -> Self {
        MemoryCollector {
            inner: Arc::new(Inner {
                descriptions: Default::default(),
                distributions: Default::default(),
                recency: Recency::new(
                    Clock::new(),
                    MetricKindMask::ALL,
                    Some(Duration::from_secs(5 * DAYS)),
                ),
                registry: Registry::new(GenerationalStorage::atomic()),
            }),
        }
    }

    pub(crate) fn snapshot(&self) -> Snapshot {
        self.inner.snapshot()
    }

    fn add_description_if_missing(
        &self,
        key: &metrics::KeyName,
        description: metrics::SharedString,
    ) {
        let mut d = self.inner.descriptions.write().unwrap();
        d.entry(key.as_str().to_owned()).or_insert(description);
        metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
    }

    pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
        metrics::set_global_recorder(self.clone())
    }
}

impl Recorder for MemoryCollector {
    fn describe_counter(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    fn describe_gauge(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    fn describe_histogram(
        &self,
        key: metrics::KeyName,
        _: Option<metrics::Unit>,
        description: metrics::SharedString,
    ) {
        self.add_description_if_missing(&key, description)
    }

    fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
        self.inner
            .registry
            .get_or_create_counter(key, |c| c.clone().into())
    }

    fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
        self.inner
            .registry
            .get_or_create_gauge(key, |c| c.clone().into())
    }

    fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
        self.inner
            .registry
            .get_or_create_histogram(key, |c| c.clone().into())
    }
}
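A short sketch of the intended flow: install the collector as the global recorder, record through the metrics facade, then print a snapshot. The metric name is invented for illustration.

// Sketch only; "relay.example.count" is a made-up metric name.
fn demo_collector() {
    let collector = MemoryCollector::new();
    if collector.install().is_err() {
        eprintln!("a global recorder was already installed");
    }

    metrics::counter!("relay.example.count").increment(1);

    collector.snapshot().present();
}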
src/config.rs (371 lines changed)
@@ -1,20 +1,22 @@
use crate::{
    data::{ActorCache, State},
    error::Error,
    middleware::MyVerify,
    requests::Requests,
    extractors::{AdminConfig, XApiToken},
};
use activitystreams::{
    iri,
    iri_string::{
        format::ToDedicatedString,
        resolve::FixedBaseResolver,
        types::{IriAbsoluteString, IriFragmentStr, IriRelativeStr, IriString},
    },
};
use config::Environment;
use http_signature_normalization_actix::prelude::{VerifyDigest, VerifySignature};
use sha2::{Digest, Sha256};
use std::{net::IpAddr, path::PathBuf};
use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
use rustls::sign::CertifiedKey;
use std::{
    net::{IpAddr, SocketAddr},
    path::PathBuf,
};
use uuid::Uuid;

#[derive(Clone, Debug, serde::Deserialize)]

@@ -29,9 +31,24 @@ pub(crate) struct ParsedConfig {
    publish_blocks: bool,
    sled_path: PathBuf,
    source_repo: IriString,
    repository_commit_base: String,
    opentelemetry_url: Option<IriString>,
    telegram_token: Option<String>,
    telegram_admin_handle: Option<String>,
    api_token: Option<String>,
    tls_key: Option<PathBuf>,
    tls_cert: Option<PathBuf>,
    footer_blurb: Option<String>,
    local_domains: Option<String>,
    local_blurb: Option<String>,
    prometheus_addr: Option<IpAddr>,
    prometheus_port: Option<u16>,
    deliver_concurrency: u64,
    client_timeout: u64,
    proxy_url: Option<IriString>,
    proxy_username: Option<String>,
    proxy_password: Option<String>,
    signature_threads: Option<usize>,
}

#[derive(Clone)]

@@ -49,6 +66,34 @@ pub struct Config {
    opentelemetry_url: Option<IriString>,
    telegram_token: Option<String>,
    telegram_admin_handle: Option<String>,
    api_token: Option<String>,
    tls: Option<TlsConfig>,
    footer_blurb: Option<String>,
    local_domains: Vec<String>,
    local_blurb: Option<String>,
    prometheus_config: Option<PrometheusConfig>,
    deliver_concurrency: u64,
    client_timeout: u64,
    proxy_config: Option<ProxyConfig>,
    signature_threads: Option<usize>,
}

#[derive(Clone)]
struct TlsConfig {
    key: PathBuf,
    cert: PathBuf,
}

#[derive(Clone, Debug)]
struct PrometheusConfig {
    addr: IpAddr,
    port: u16,
}

#[derive(Clone, Debug)]
struct ProxyConfig {
    url: IriString,
    auth: Option<(String, String)>,
}

#[derive(Debug)]

@@ -65,6 +110,19 @@ pub enum UrlKind {
    Outbox,
}

#[derive(Debug)]
pub enum AdminUrlKind {
    Allow,
    Disallow,
    Block,
    Unblock,
    Allowed,
    Blocked,
    Connected,
    Stats,
    LastSeen,
}

impl std::fmt::Debug for Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Config")

@@ -84,6 +142,17 @@ impl std::fmt::Debug for Config {
            )
            .field("telegram_token", &"[redacted]")
            .field("telegram_admin_handle", &self.telegram_admin_handle)
            .field("api_token", &"[redacted]")
            .field("tls_key", &"[redacted]")
            .field("tls_cert", &"[redacted]")
            .field("footer_blurb", &self.footer_blurb)
            .field("local_domains", &self.local_domains)
            .field("local_blurb", &self.local_blurb)
            .field("prometheus_config", &self.prometheus_config)
            .field("deliver_concurrency", &self.deliver_concurrency)
            .field("client_timeout", &self.client_timeout)
            .field("proxy_config", &self.proxy_config)
            .field("signature_threads", &self.signature_threads)
            .finish()
    }
}

@@ -93,24 +162,102 @@ impl Config {
        let config = config::Config::builder()
            .set_default("hostname", "localhost:8080")?
            .set_default("addr", "127.0.0.1")?
            .set_default::<_, u64>("port", 8080)?
            .set_default("port", 8080u64)?
            .set_default("debug", true)?
            .set_default("restricted_mode", false)?
            .set_default("validate_signatures", false)?
            .set_default("https", false)?
            .set_default("validate_signatures", true)?
            .set_default("https", true)?
            .set_default("publish_blocks", false)?
            .set_default("sled_path", "./sled/db-0-34")?
            .set_default("source_repo", "https://git.asonix.dog/asonix/relay")?
            .set_default("repository_commit_base", "/src/commit/")?
            .set_default("opentelemetry_url", None as Option<&str>)?
            .set_default("telegram_token", None as Option<&str>)?
            .set_default("telegram_admin_handle", None as Option<&str>)?
            .set_default("api_token", None as Option<&str>)?
            .set_default("tls_key", None as Option<&str>)?
            .set_default("tls_cert", None as Option<&str>)?
            .set_default("footer_blurb", None as Option<&str>)?
            .set_default("local_domains", None as Option<&str>)?
            .set_default("local_blurb", None as Option<&str>)?
            .set_default("prometheus_addr", None as Option<&str>)?
            .set_default("prometheus_port", None as Option<u16>)?
            .set_default("deliver_concurrency", 8u64)?
            .set_default("client_timeout", 10u64)?
            .set_default("proxy_url", None as Option<&str>)?
            .set_default("proxy_username", None as Option<&str>)?
            .set_default("proxy_password", None as Option<&str>)?
            .set_default("signature_threads", None as Option<u64>)?
            .add_source(Environment::default())
            .build()?;

        let config: ParsedConfig = config.try_deserialize()?;

        let scheme = if config.https { "https" } else { "http" };
        let base_uri = iri!(format!("{}://{}", scheme, config.hostname)).into_absolute();
        let base_uri = iri!(format!("{scheme}://{}", config.hostname)).into_absolute();

        let tls = match (config.tls_key, config.tls_cert) {
            (Some(key), Some(cert)) => Some(TlsConfig { key, cert }),
            (Some(_), None) => {
                tracing::warn!("TLS_KEY is set but TLS_CERT isn't, not building TLS config");
                None
            }
            (None, Some(_)) => {
                tracing::warn!("TLS_CERT is set but TLS_KEY isn't, not building TLS config");
                None
            }
            (None, None) => None,
        };

        let local_domains = config
            .local_domains
            .iter()
            .flat_map(|s| s.split(','))
            .map(|d| d.to_string())
            .collect();

        let prometheus_config = match (config.prometheus_addr, config.prometheus_port) {
            (Some(addr), Some(port)) => Some(PrometheusConfig { addr, port }),
            (Some(_), None) => {
                tracing::warn!("PROMETHEUS_ADDR is set but PROMETHEUS_PORT is not set, not building Prometheus config");
                None
            }
            (None, Some(_)) => {
                tracing::warn!("PROMETHEUS_PORT is set but PROMETHEUS_ADDR is not set, not building Prometheus config");
                None
            }
            (None, None) => None,
        };

        let proxy_config = match (config.proxy_username, config.proxy_password) {
            (Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
                url,
                auth: Some((username, password)),
            }),
            (Some(_), None) => {
                tracing::warn!(
                    "PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
                );
                config.proxy_url.map(|url| ProxyConfig { url, auth: None })
            }
            (None, Some(_)) => {
                tracing::warn!(
                    "PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
                );
                config.proxy_url.map(|url| ProxyConfig { url, auth: None })
            }
            (None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
        };

        let source_url = match Self::git_hash() {
            Some(hash) => format!(
                "{}{}{hash}",
                config.source_repo, config.repository_commit_base
            )
            .parse()
            .expect("constructed source URL is valid"),
            None => config.source_repo.clone(),
        };

        Ok(Config {
            hostname: config.hostname,

@@ -122,13 +269,118 @@ impl Config {
            publish_blocks: config.publish_blocks,
            base_uri,
            sled_path: config.sled_path,
            source_repo: config.source_repo,
            source_repo: source_url,
            opentelemetry_url: config.opentelemetry_url,
            telegram_token: config.telegram_token,
            telegram_admin_handle: config.telegram_admin_handle,
            api_token: config.api_token,
            tls,
            footer_blurb: config.footer_blurb,
            local_domains,
            local_blurb: config.local_blurb,
            prometheus_config,
            deliver_concurrency: config.deliver_concurrency,
            client_timeout: config.client_timeout,
            proxy_config,
            signature_threads: config.signature_threads,
        })
    }

    pub(crate) fn signature_threads(&self) -> usize {
        self.signature_threads
            .unwrap_or_else(|| {
                std::thread::available_parallelism()
                    .map(usize::from)
                    .map_err(|e| tracing::warn!("Failed to get parallelism, {e}"))
                    .unwrap_or(1)
            })
            .max(1)
    }

    pub(crate) fn client_timeout(&self) -> u64 {
        self.client_timeout
    }

    pub(crate) fn deliver_concurrency(&self) -> u64 {
        self.deliver_concurrency
    }

    pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> {
        let config = self.prometheus_config.as_ref()?;

        Some((config.addr, config.port).into())
    }

    pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
        let tls = if let Some(tls) = &self.tls {
            tls
        } else {
            tracing::info!("No TLS config present");
            return Ok(None);
        };

        let certs_bytes = tokio::fs::read(&tls.cert).await?;
        let certs =
            rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;

        if certs.is_empty() {
            tracing::warn!("No certs read from certificate file");
            return Ok(None);
        }

        let key_bytes = tokio::fs::read(&tls.key).await?;
        let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
            key
        } else {
            tracing::warn!("Failed to read private key");
            return Ok(None);
        };

        let key = rustls::crypto::ring::sign::any_supported_type(&key)?;

        Ok(Some(CertifiedKey::new(certs, key)))
    }

    pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
        if let Some(blurb) = &self.footer_blurb {
            if !blurb.is_empty() {
                return Some(crate::templates::Html(
                    ammonia::Builder::new()
                        .add_tag_attributes("a", &["rel"])
                        .add_tag_attributes("area", &["rel"])
                        .add_tag_attributes("link", &["rel"])
                        .link_rel(None)
                        .clean(blurb)
                        .to_string(),
                ));
            }
        }

        None
    }

    pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> {
        if let Some(blurb) = &self.local_blurb {
            if !blurb.is_empty() {
                return Some(crate::templates::Html(
                    ammonia::Builder::new()
                        .add_tag_attributes("a", &["rel"])
                        .add_tag_attributes("area", &["rel"])
                        .add_tag_attributes("link", &["rel"])
                        .link_rel(None)
                        .clean(blurb)
                        .to_string(),
                ));
            }
        }

        None
    }

    pub(crate) fn local_domains(&self) -> &[String] {
        &self.local_domains
    }

    pub(crate) fn sled_path(&self) -> &PathBuf {
        &self.sled_path
    }

@@ -145,16 +397,21 @@ impl Config {
        }
    }

    pub(crate) fn signature_middleware(
        &self,
        requests: Requests,
        actors: ActorCache,
        state: State,
    ) -> VerifySignature<MyVerify> {
        if self.validate_signatures {
            VerifySignature::new(MyVerify(requests, actors, state), Default::default())
    pub(crate) fn x_api_token(&self) -> Option<XApiToken> {
        self.api_token.clone().map(XApiToken::new)
    }

    pub(crate) fn admin_config(&self) -> Option<actix_web::web::Data<AdminConfig>> {
        if let Some(api_token) = &self.api_token {
            match AdminConfig::build(api_token) {
                Ok(conf) => Some(actix_web::web::Data::new(conf)),
                Err(e) => {
                    tracing::error!("Error creating admin config: {e}");
                    None
                }
            }
        } else {
            VerifySignature::new(MyVerify(requests, actors, state), Default::default()).optional()
            None
        }
    }

@@ -188,7 +445,7 @@ impl Config {

    pub(crate) fn software_version() -> String {
        if let Some(git) = Self::git_version() {
            return format!("v{}-{}", Self::version(), git);
            return format!("v{}-{git}", Self::version());
        }

        format!("v{}", Self::version())

@@ -196,9 +453,9 @@ impl Config {

    fn git_version() -> Option<String> {
        let branch = Self::git_branch()?;
        let hash = Self::git_hash()?;
        let hash = Self::git_short_hash()?;

        Some(format!("{}-{}", branch, hash))
        Some(format!("{branch}-{hash}"))
    }

    fn name() -> &'static str {

@@ -217,6 +474,10 @@ impl Config {
        option_env!("GIT_HASH")
    }

    fn git_short_hash() -> Option<&'static str> {
        option_env!("GIT_SHORT_HASH")
    }

    pub(crate) fn user_agent(&self) -> String {
        format!(
            "{} ({}/{}; +{})",

@@ -227,6 +488,12 @@ impl Config {
        )
    }

    pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
        self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
            (url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
        })
    }

    pub(crate) fn source_code(&self) -> &IriString {
        &self.source_repo
    }

@@ -246,39 +513,71 @@ impl Config {
        self.do_generate_url(kind).expect("Generated valid IRI")
    }

    #[tracing::instrument(level = "debug", skip_all, fields(base_uri = tracing::field::debug(&self.base_uri), kind = tracing::field::debug(&kind)))]
    fn do_generate_url(&self, kind: UrlKind) -> Result<IriString, Error> {
        let iri = match kind {
            UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref()).try_resolve(
                IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref(),
            )?,
            UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref())
                .resolve(IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::Actor => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new("actor")?.as_ref())?,
                .resolve(IriRelativeStr::new("actor")?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::Followers => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new("followers")?.as_ref())?,
                .resolve(IriRelativeStr::new("followers")?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::Following => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new("following")?.as_ref())?,
                .resolve(IriRelativeStr::new("following")?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::Inbox => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new("inbox")?.as_ref())?,
                .resolve(IriRelativeStr::new("inbox")?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::Index => self.base_uri.clone().into(),
            UrlKind::MainKey => {
                let actor = IriRelativeStr::new("actor")?;
                let fragment = IriFragmentStr::new("main-key")?;

                let mut resolved =
                    FixedBaseResolver::new(self.base_uri.as_ref()).try_resolve(actor.as_ref())?;
                let mut resolved = FixedBaseResolver::new(self.base_uri.as_ref())
                    .resolve(actor.as_ref())
                    .try_to_dedicated_string()?;

                resolved.set_fragment(Some(fragment));
                resolved
            }
            UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new(&format!("media/{}", uuid))?.as_ref())?,
                .resolve(IriRelativeStr::new(&format!("media/{uuid}"))?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())?,
                .resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())
                .try_to_dedicated_string()?,
            UrlKind::Outbox => FixedBaseResolver::new(self.base_uri.as_ref())
                .try_resolve(IriRelativeStr::new("outbox")?.as_ref())?,
                .resolve(IriRelativeStr::new("outbox")?.as_ref())
                .try_to_dedicated_string()?,
        };

        Ok(iri)
    }

    pub(crate) fn generate_admin_url(&self, kind: AdminUrlKind) -> IriString {
        self.do_generate_admin_url(kind)
            .expect("Generated valid IRI")
    }

    fn do_generate_admin_url(&self, kind: AdminUrlKind) -> Result<IriString, Error> {
        let path = match kind {
            AdminUrlKind::Allow => "api/v1/admin/allow",
            AdminUrlKind::Disallow => "api/v1/admin/disallow",
            AdminUrlKind::Block => "api/v1/admin/block",
            AdminUrlKind::Unblock => "api/v1/admin/unblock",
            AdminUrlKind::Allowed => "api/v1/admin/allowed",
            AdminUrlKind::Blocked => "api/v1/admin/blocked",
            AdminUrlKind::Connected => "api/v1/admin/connected",
            AdminUrlKind::Stats => "api/v1/admin/stats",
            AdminUrlKind::LastSeen => "api/v1/admin/last_seen",
        };

        let iri = FixedBaseResolver::new(self.base_uri.as_ref())
            .resolve(IriRelativeStr::new(path)?.as_ref())
            .try_to_dedicated_string()?;

        Ok(iri)
    }
}
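Since the builder layers Environment::default() over the defaults, every ParsedConfig field maps to an upper-case environment variable. A sketch of a minimal deployment configuration; the constructor name Config::build() is assumed from the hunk context, and the values are examples only.

// Sketch: env-driven configuration as config::Environment reads it.
fn example_env() -> Result<Config, Error> {
    std::env::set_var("HOSTNAME", "relay.example.com");
    std::env::set_var("API_TOKEN", "some-secret"); // enables the admin API
    std::env::set_var("PROMETHEUS_ADDR", "127.0.0.1"); // both vars required,
    std::env::set_var("PROMETHEUS_PORT", "9000"); // or the pair is ignored

    Config::build()
}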
src/data/mod.rs
@@ -1,9 +1,11 @@
mod actor;
mod last_online;
mod media;
mod node;
mod state;

pub(crate) use actor::ActorCache;
pub(crate) use last_online::LastOnline;
pub(crate) use media::MediaCache;
pub(crate) use node::{Node, NodeCache};
pub(crate) use state::State;
src/data/actor.rs
@@ -2,7 +2,7 @@ use crate::{
    apub::AcceptedActors,
    db::{Actor, Db},
    error::{Error, ErrorKind},
    requests::Requests,
    requests::{BreakerStrategy, Requests},
};
use activitystreams::{iri_string::types::IriString, prelude::*};
use std::time::{Duration, SystemTime};

@@ -37,7 +37,7 @@ impl ActorCache {
        ActorCache { db }
    }

    #[tracing::instrument(name = "Get Actor", skip_all, fields(id = id.to_string().as_str(), requests))]
    #[tracing::instrument(level = "debug", name = "Get Actor", skip_all, fields(id = id.to_string().as_str()))]
    pub(crate) async fn get(
        &self,
        id: &IriString,

@@ -54,28 +54,26 @@ impl ActorCache {
            .map(MaybeCached::Fetched)
    }

    #[tracing::instrument(name = "Add Connection", skip(self))]
    #[tracing::instrument(level = "debug", name = "Add Connection", skip(self))]
    pub(crate) async fn add_connection(&self, actor: Actor) -> Result<(), Error> {
        let add_connection = self.db.add_connection(actor.id.clone());
        let save_actor = self.db.save_actor(actor);

        tokio::try_join!(add_connection, save_actor)?;

        Ok(())
        self.db.add_connection(actor.id.clone()).await?;
        self.db.save_actor(actor).await
    }

    #[tracing::instrument(name = "Remove Connection", skip(self))]
    #[tracing::instrument(level = "debug", name = "Remove Connection", skip(self))]
    pub(crate) async fn remove_connection(&self, actor: &Actor) -> Result<(), Error> {
        self.db.remove_connection(actor.id.clone()).await
    }

    #[tracing::instrument(name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str(), requests))]
    #[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str()))]
    pub(crate) async fn get_no_cache(
        &self,
        id: &IriString,
        requests: &Requests,
    ) -> Result<Actor, Error> {
        let accepted_actor = requests.fetch::<AcceptedActors>(id.as_str()).await?;
        let accepted_actor = requests
            .fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
            .await?;

        let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
        let accepted_actor_id = accepted_actor

@@ -101,6 +99,6 @@ impl ActorCache {
fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
    Ok(actor
        .endpoints()?
        .and_then(|e| e.shared_inbox)
        .and_then(|e| e.shared_inbox.as_ref())
        .unwrap_or(actor.inbox()?))
}
src/data/last_online.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
use activitystreams::iri_string::types::IriStr;
use std::{collections::HashMap, sync::Mutex};
use time::OffsetDateTime;

pub(crate) struct LastOnline {
    domains: Mutex<HashMap<String, OffsetDateTime>>,
}

impl LastOnline {
    pub(crate) fn mark_seen(&self, iri: &IriStr) {
        if let Some(authority) = iri.authority_str() {
            let mut guard = self.domains.lock().unwrap();
            guard.insert(authority.to_string(), OffsetDateTime::now_utc());
            metrics::gauge!("relay.last-online.size")
                .set(crate::collector::recordable(guard.len()));
        }
    }

    pub(crate) fn take(&self) -> HashMap<String, OffsetDateTime> {
        std::mem::take(&mut *self.domains.lock().unwrap())
    }

    pub(crate) fn empty() -> Self {
        Self {
            domains: Mutex::new(HashMap::default()),
        }
    }
}
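The Mutex-plus-take shape above lets hot request paths record cheaply while a background task drains the map in one swap. A sketch of the draining side; the task itself is assumed, but take() and Db::mark_last_seen() both appear in this comparison.

// Sketch: periodically persist LastOnline into the last-seen tree.
async fn flush_last_online(state: &State) -> Result<(), Error> {
    let seen = state.last_online.take();
    if !seen.is_empty() {
        state.db.mark_last_seen(seen).await?;
    }
    Ok(())
}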
src/data/media.rs
@@ -1,14 +1,7 @@
use crate::{
    db::{Db, MediaMeta},
    error::Error,
};
use crate::{db::Db, error::Error};
use activitystreams::iri_string::types::IriString;
use actix_web::web::Bytes;
use std::time::{Duration, SystemTime};
use uuid::Uuid;

static MEDIA_DURATION: Duration = Duration::from_secs(60 * 60 * 24 * 2);

#[derive(Clone, Debug)]
pub struct MediaCache {
    db: Db,

@@ -19,42 +12,16 @@ impl MediaCache {
        MediaCache { db }
    }

    #[tracing::instrument(name = "Get media uuid", skip_all, fields(url = url.to_string().as_str()))]
    #[tracing::instrument(level = "debug", name = "Get media uuid", skip_all, fields(url = url.to_string().as_str()))]
    pub(crate) async fn get_uuid(&self, url: IriString) -> Result<Option<Uuid>, Error> {
        self.db.media_id(url).await
    }

    #[tracing::instrument(name = "Get media url", skip(self))]
    #[tracing::instrument(level = "debug", name = "Get media url", skip(self))]
    pub(crate) async fn get_url(&self, uuid: Uuid) -> Result<Option<IriString>, Error> {
        self.db.media_url(uuid).await
    }

    #[tracing::instrument(name = "Is media outdated", skip(self))]
    pub(crate) async fn is_outdated(&self, uuid: Uuid) -> Result<bool, Error> {
        if let Some(meta) = self.db.media_meta(uuid).await? {
            if meta.saved_at + MEDIA_DURATION > SystemTime::now() {
                return Ok(false);
            }
        }

        Ok(true)
    }

    #[tracing::instrument(name = "Get media bytes", skip(self))]
    pub(crate) async fn get_bytes(&self, uuid: Uuid) -> Result<Option<(String, Bytes)>, Error> {
        if let Some(meta) = self.db.media_meta(uuid).await? {
            if meta.saved_at + MEDIA_DURATION > SystemTime::now() {
                return self
                    .db
                    .media_bytes(uuid)
                    .await
                    .map(|opt| opt.map(|bytes| (meta.media_type, bytes)));
            }
        }

        Ok(None)
    }

    #[tracing::instrument(name = "Store media url", skip_all, fields(url = url.to_string().as_str()))]
    pub(crate) async fn store_url(&self, url: IriString) -> Result<Uuid, Error> {
        let uuid = Uuid::new_v4();

@@ -63,23 +30,4 @@ impl MediaCache {

        Ok(uuid)
    }

    #[tracing::instrument(name = "store media bytes", skip(self, bytes))]
    pub(crate) async fn store_bytes(
        &self,
        uuid: Uuid,
        media_type: String,
        bytes: Bytes,
    ) -> Result<(), Error> {
        self.db
            .save_bytes(
                uuid,
                MediaMeta {
                    media_type,
                    saved_at: SystemTime::now(),
                },
                bytes,
            )
            .await
    }
}
src/data/node.rs
@@ -34,13 +34,11 @@ impl NodeCache {
        NodeCache { db }
    }

    #[tracing::instrument(name = "Get nodes", skip(self))]
    #[tracing::instrument(level = "debug", name = "Get nodes", skip(self))]
    pub(crate) async fn nodes(&self) -> Result<Vec<Node>, Error> {
        let infos = self.db.connected_info();
        let instances = self.db.connected_instance();
        let contacts = self.db.connected_contact();

        let (infos, instances, contacts) = tokio::try_join!(infos, instances, contacts)?;
        let infos = self.db.connected_info().await?;
        let instances = self.db.connected_instance().await?;
        let contacts = self.db.connected_contact().await?;

        let vec = self
            .db

@@ -59,7 +57,7 @@ impl NodeCache {
        Ok(vec)
    }

    #[tracing::instrument(name = "Is NodeInfo Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
    #[tracing::instrument(level = "debug", name = "Is NodeInfo Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
    pub(crate) async fn is_nodeinfo_outdated(&self, actor_id: IriString) -> bool {
        self.db
            .info(actor_id)

@@ -68,7 +66,7 @@ impl NodeCache {
            .unwrap_or(true)
    }

    #[tracing::instrument(name = "Is Contact Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
    #[tracing::instrument(level = "debug", name = "Is Contact Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
    pub(crate) async fn is_contact_outdated(&self, actor_id: IriString) -> bool {
        self.db
            .contact(actor_id)

@@ -77,7 +75,7 @@ impl NodeCache {
            .unwrap_or(true)
    }

    #[tracing::instrument(name = "Is Instance Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
    #[tracing::instrument(level = "debug", name = "Is Instance Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
    pub(crate) async fn is_instance_outdated(&self, actor_id: IriString) -> bool {
        self.db
            .instance(actor_id)

@@ -86,7 +84,7 @@ impl NodeCache {
            .unwrap_or(true)
    }

    #[tracing::instrument(name = "Save node info", skip_all, fields(actor_id = actor_id.to_string().as_str(), software, version, reg))]
    #[tracing::instrument(level = "debug", name = "Save node info", skip_all, fields(actor_id = actor_id.to_string().as_str(), software, version, reg))]
    pub(crate) async fn set_info(
        &self,
        actor_id: IriString,

@@ -108,6 +106,7 @@ impl NodeCache {
    }

    #[tracing::instrument(
        level = "debug",
        name = "Save instance info",
        skip_all,
        fields(

@@ -144,6 +143,7 @@ impl NodeCache {
    }

    #[tracing::instrument(
        level = "debug",
        name = "Save contact info",
        skip_all,
        fields(

@@ -182,7 +182,7 @@ impl Node {
        let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?;
        let scheme = url.scheme_str();

        let base = iri!(format!("{}://{}", scheme, authority));
        let base = iri!(format!("{scheme}://{authority}"));

        Ok(Node {
            base,
src/data/state.rs
@@ -1,25 +1,28 @@
use crate::{
    config::{Config, UrlKind},
    data::NodeCache,
    db::Db,
    error::Error,
    requests::{Breakers, Requests},
    spawner::Spawner,
};
use activitystreams::iri_string::types::IriString;
use actix_web::web;
use lru::LruCache;
use rand::thread_rng;
use reqwest_middleware::ClientWithMiddleware;
use rsa::{RsaPrivateKey, RsaPublicKey};
use std::sync::Arc;
use tokio::sync::RwLock;
use std::sync::{Arc, RwLock};

use super::LastOnline;

#[derive(Clone)]
pub struct State {
    pub(crate) requests: Requests,
    pub(crate) public_key: RsaPublicKey,
    private_key: RsaPrivateKey,
    object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
    node_cache: NodeCache,
    pub(crate) node_cache: NodeCache,
    breakers: Breakers,
    pub(crate) last_online: Arc<LastOnline>,
    pub(crate) db: Db,
}

@@ -34,20 +37,8 @@ impl std::fmt::Debug for State {
}

impl State {
    pub(crate) fn node_cache(&self) -> NodeCache {
        self.node_cache.clone()
    }

    pub(crate) fn requests(&self, config: &Config) -> Requests {
        Requests::new(
            config.generate_url(UrlKind::MainKey).to_string(),
            self.private_key.clone(),
            config.user_agent(),
            self.breakers.clone(),
        )
    }

    #[tracing::instrument(
        level = "debug",
        name = "Get inboxes for other domains",
        skip_all,
        fields(

@@ -77,18 +68,29 @@ impl State {
            .collect())
    }

    pub(crate) async fn is_cached(&self, object_id: &IriString) -> bool {
        self.object_cache.read().await.contains(object_id)
    pub(crate) fn is_cached(&self, object_id: &IriString) -> bool {
        self.object_cache.read().unwrap().contains(object_id)
    }

    pub(crate) async fn cache(&self, object_id: IriString, actor_id: IriString) {
        self.object_cache.write().await.put(object_id, actor_id);
    pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
        let mut guard = self.object_cache.write().unwrap();
        guard.put(object_id, actor_id);
        metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
    }

    #[tracing::instrument(name = "Building state", skip_all)]
    pub(crate) async fn build(db: Db) -> Result<Self, Error> {
    pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
        self.breakers.should_try(iri)
    }

    #[tracing::instrument(level = "debug", name = "Building state", skip_all)]
    pub(crate) async fn build(
        db: Db,
        key_id: String,
        spawner: Spawner,
        client: ClientWithMiddleware,
    ) -> Result<Self, Error> {
        let private_key = if let Ok(Some(key)) = db.private_key().await {
            tracing::info!("Using existing key");
            tracing::debug!("Using existing key");
            key
        } else {
            tracing::info!("Generating new keys");

@@ -105,15 +107,28 @@ impl State {

        let public_key = private_key.to_public_key();

        let state = State {
            public_key,
        let breakers = Breakers::default();
        let last_online = Arc::new(LastOnline::empty());

        let requests = Requests::new(
            key_id,
            private_key,
            breakers.clone(),
            last_online.clone(),
            spawner,
            client,
        );

        let state = State {
            requests,
            public_key,
            object_cache: Arc::new(RwLock::new(LruCache::new(
                (1024 * 8).try_into().expect("nonzero"),
            ))),
            node_cache: NodeCache::new(db.clone()),
            breakers: Breakers::default(),
            breakers,
            db,
            last_online,
        };

        Ok(state)
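With the switch to std::sync::RwLock, cache checks are synchronous and the lock is held only for the LRU operation. A sketch of the dedup flow the is_cached/cache pair implies; the surrounding activity handling is assumed.

// Sketch: record an object once, reporting whether it was new.
fn record_once(state: &State, object_id: IriString, actor_id: IriString) -> bool {
    if state.is_cached(&object_id) {
        return false; // already seen, skip re-delivery
    }
    state.cache(object_id, actor_id);
    true
}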
src/db.rs (361 lines changed)
@@ -3,13 +3,20 @@ use crate::{
error::{Error, ErrorKind},
|
||||
};
|
||||
use activitystreams::iri_string::types::IriString;
|
||||
use actix_web::web::Bytes;
|
||||
use rsa::{
|
||||
pkcs8::{DecodePrivateKey, EncodePrivateKey},
|
||||
RsaPrivateKey,
|
||||
};
|
||||
use sled::Tree;
|
||||
use std::{collections::HashMap, sync::Arc, time::SystemTime};
|
||||
use sled::{transaction::TransactionError, Batch, Transactional, Tree};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::SystemTime,
|
||||
};
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
|
@ -18,6 +25,8 @@ pub(crate) struct Db {
|
|||
}
|
||||
|
||||
struct Inner {
|
||||
healthz: Tree,
|
||||
healthz_counter: Arc<AtomicU64>,
|
||||
actor_id_actor: Tree,
|
||||
public_key_id_actor_id: Tree,
|
||||
connected_actor_ids: Tree,
|
||||
|
@ -26,11 +35,10 @@ struct Inner {
|
|||
settings: Tree,
|
||||
media_url_media_id: Tree,
|
||||
media_id_media_url: Tree,
|
||||
media_id_media_bytes: Tree,
|
||||
media_id_media_meta: Tree,
|
||||
actor_id_info: Tree,
|
||||
actor_id_instance: Tree,
|
||||
actor_id_contact: Tree,
|
||||
last_seen: Tree,
|
||||
restricted_mode: bool,
|
||||
}
|
||||
|
||||
|
@ -63,12 +71,6 @@ impl std::fmt::Debug for Actor {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
|
||||
pub(crate) struct MediaMeta {
|
||||
pub(crate) media_type: String,
|
||||
pub(crate) saved_at: SystemTime,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
|
||||
pub struct Info {
|
||||
pub(crate) software: String,
|
||||
|
@ -142,6 +144,14 @@ impl Inner {
|
|||
.map(|s| String::from_utf8_lossy(&s).to_string())
|
||||
}
|
||||
|
||||
fn allowed(&self) -> impl DoubleEndedIterator<Item = String> {
|
||||
self.allowed_domains
|
||||
.iter()
|
||||
.values()
|
||||
.filter_map(|res| res.ok())
|
||||
.map(|s| String::from_utf8_lossy(&s).to_string())
|
||||
}
|
||||
|
||||
fn connected(&self) -> impl DoubleEndedIterator<Item = IriString> {
|
||||
self.connected_actor_ids
|
||||
.iter()
|
||||
|
@ -237,6 +247,8 @@ impl Db {
|
|||
fn build_inner(restricted_mode: bool, db: sled::Db) -> Result<Self, Error> {
|
||||
Ok(Db {
|
||||
inner: Arc::new(Inner {
|
||||
healthz: db.open_tree("healthz")?,
|
||||
healthz_counter: Arc::new(AtomicU64::new(0)),
|
||||
actor_id_actor: db.open_tree("actor-id-actor")?,
|
||||
public_key_id_actor_id: db.open_tree("public-key-id-actor-id")?,
|
||||
connected_actor_ids: db.open_tree("connected-actor-ids")?,
|
||||
|
@ -245,11 +257,10 @@ impl Db {
|
|||
settings: db.open_tree("settings")?,
|
||||
media_url_media_id: db.open_tree("media-url-media-id")?,
|
||||
media_id_media_url: db.open_tree("media-id-media-url")?,
|
||||
media_id_media_bytes: db.open_tree("media-id-media-bytes")?,
|
||||
media_id_media_meta: db.open_tree("media-id-media-meta")?,
|
||||
actor_id_info: db.open_tree("actor-id-info")?,
|
||||
actor_id_instance: db.open_tree("actor-id-instance")?,
|
||||
actor_id_contact: db.open_tree("actor-id-contact")?,
|
||||
last_seen: db.open_tree("last-seen")?,
|
||||
restricted_mode,
|
||||
}),
|
||||
})
|
||||
|
@ -257,7 +268,7 @@ impl Db {
|
|||
|
||||
async fn unblock<T>(
|
||||
&self,
|
||||
f: impl Fn(&Inner) -> Result<T, Error> + Send + 'static,
|
||||
f: impl FnOnce(&Inner) -> Result<T, Error> + Send + 'static,
|
||||
) -> Result<T, Error>
|
||||
where
|
||||
T: Send + 'static,
|
||||
|
@ -269,6 +280,68 @@ impl Db {
|
|||
Ok(t)
|
||||
}
|
||||
|
||||
pub(crate) async fn check_health(&self) -> Result<(), Error> {
|
||||
let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
|
||||
self.unblock(move |inner| {
|
||||
let res = inner
|
||||
.healthz
|
||||
.insert("healthz", &next.to_be_bytes()[..])
|
||||
.map_err(Error::from);
|
||||
|
||||
metrics::gauge!("relay.db.healthz.size")
|
||||
.set(crate::collector::recordable(inner.healthz.len()));
|
||||
|
||||
res
|
||||
})
|
||||
.await?;
|
||||
self.inner.healthz.flush_async().await?;
|
||||
self.unblock(move |inner| inner.healthz.get("healthz").map_err(Error::from))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn mark_last_seen(
|
||||
&self,
|
||||
nodes: HashMap<String, OffsetDateTime>,
|
||||
) -> Result<(), Error> {
|
||||
let mut batch = Batch::default();
|
||||
|
||||
for (domain, datetime) in nodes {
|
||||
let datetime_string = serde_json::to_vec(&datetime)?;
|
||||
|
||||
batch.insert(domain.as_bytes(), datetime_string);
|
||||
}
|
||||
|
||||
self.unblock(move |inner| inner.last_seen.apply_batch(batch).map_err(Error::from))
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn last_seen(
|
||||
&self,
|
||||
) -> Result<BTreeMap<String, Option<OffsetDateTime>>, Error> {
|
||||
self.unblock(|inner| {
|
||||
let mut map = BTreeMap::new();
|
||||
|
||||
for iri in inner.connected() {
|
||||
let Some(authority_str) = iri.authority_str() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if let Some(datetime) = inner.last_seen.get(authority_str)? {
|
||||
map.insert(
|
||||
authority_str.to_string(),
|
||||
Some(serde_json::from_slice(&datetime)?),
|
||||
);
|
||||
} else {
|
||||
map.insert(authority_str.to_string(), None);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn connected_ids(&self) -> Result<Vec<IriString>, Error> {
|
||||
self.unblock(|inner| Ok(inner.connected().collect())).await
|
||||
}
|
||||
|
@ -281,6 +354,9 @@ impl Db {
|
|||
.actor_id_info
|
||||
.insert(actor_id.as_str().as_bytes(), vec)?;
|
||||
|
||||
metrics::gauge!("relay.db.actor-id-info.size")
|
||||
.set(crate::collector::recordable(inner.actor_id_info.len()));
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
|
@ -288,12 +364,12 @@ impl Db {
|
|||
|
||||
pub(crate) async fn info(&self, actor_id: IriString) -> Result<Option<Info>, Error> {
|
||||
self.unblock(move |inner| {
|
||||
if let Some(ivec) = inner.actor_id_info.get(actor_id.as_str().as_bytes())? {
|
||||
let info = serde_json::from_slice(&ivec)?;
|
||||
Ok(Some(info))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
inner
|
||||
.actor_id_info
|
||||
.get(actor_id.as_str().as_bytes())?
|
||||
.map(|ivec| serde_json::from_slice(&ivec))
|
||||
.transpose()
|
||||
.map_err(Error::from)
|
||||
})
|
||||
.await
|
||||
}
@@ -315,6 +391,9 @@ impl Db {
                .actor_id_instance
                .insert(actor_id.as_str().as_bytes(), vec)?;

            metrics::gauge!("relay.db.actor-id-instance.size")
                .set(crate::collector::recordable(inner.actor_id_instance.len()));

            Ok(())
        })
        .await
@@ -322,12 +401,12 @@ impl Db {

    pub(crate) async fn instance(&self, actor_id: IriString) -> Result<Option<Instance>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.actor_id_instance.get(actor_id.as_str().as_bytes())? {
                let instance = serde_json::from_slice(&ivec)?;
                Ok(Some(instance))
            } else {
                Ok(None)
            }
            inner
                .actor_id_instance
                .get(actor_id.as_str().as_bytes())?
                .map(|ivec| serde_json::from_slice(&ivec))
                .transpose()
                .map_err(Error::from)
        })
        .await
    }
@@ -349,6 +428,9 @@ impl Db {
                .actor_id_contact
                .insert(actor_id.as_str().as_bytes(), vec)?;

            metrics::gauge!("relay.db.actor-id-contact.size")
                .set(crate::collector::recordable(inner.actor_id_contact.len()));

            Ok(())
        })
        .await
@@ -356,12 +438,12 @@ impl Db {

    pub(crate) async fn contact(&self, actor_id: IriString) -> Result<Option<Contact>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.actor_id_contact.get(actor_id.as_str().as_bytes())? {
                let contact = serde_json::from_slice(&ivec)?;
                Ok(Some(contact))
            } else {
                Ok(None)
            }
            inner
                .actor_id_contact
                .get(actor_id.as_str().as_bytes())?
                .map(|ivec| serde_json::from_slice(&ivec))
                .transpose()
                .map_err(Error::from)
        })
        .await
    }
@@ -379,24 +461,11 @@ impl Db {
            inner
                .media_url_media_id
                .insert(url.as_str().as_bytes(), id.as_bytes())?;
            Ok(())
        })
        .await
    }

    pub(crate) async fn save_bytes(
        &self,
        id: Uuid,
        meta: MediaMeta,
        bytes: Bytes,
    ) -> Result<(), Error> {
        self.unblock(move |inner| {
            let vec = serde_json::to_vec(&meta)?;

            inner
                .media_id_media_bytes
                .insert(id.as_bytes(), bytes.as_ref())?;
            inner.media_id_media_meta.insert(id.as_bytes(), vec)?;
            metrics::gauge!("relay.db.media-id-media-url.size")
                .set(crate::collector::recordable(inner.media_id_media_url.len()));
            metrics::gauge!("relay.db.media-url-media-id.size")
                .set(crate::collector::recordable(inner.media_url_media_id.len()));

            Ok(())
        })
@@ -405,45 +474,20 @@ impl Db {

    pub(crate) async fn media_id(&self, url: IriString) -> Result<Option<Uuid>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.media_url_media_id.get(url.as_str().as_bytes())? {
                Ok(uuid_from_ivec(ivec))
            } else {
                Ok(None)
            }
            Ok(inner
                .media_url_media_id
                .get(url.as_str().as_bytes())?
                .and_then(uuid_from_ivec))
        })
        .await
    }

    pub(crate) async fn media_url(&self, id: Uuid) -> Result<Option<IriString>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.media_id_media_url.get(id.as_bytes())? {
                Ok(url_from_ivec(ivec))
            } else {
                Ok(None)
            }
        })
        .await
    }

    pub(crate) async fn media_bytes(&self, id: Uuid) -> Result<Option<Bytes>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.media_id_media_bytes.get(id.as_bytes())? {
                Ok(Some(Bytes::copy_from_slice(&ivec)))
            } else {
                Ok(None)
            }
        })
        .await
    }

    pub(crate) async fn media_meta(&self, id: Uuid) -> Result<Option<MediaMeta>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.media_id_media_meta.get(id.as_bytes())? {
                let meta = serde_json::from_slice(&ivec)?;
                Ok(Some(meta))
            } else {
                Ok(None)
            }
            Ok(inner
                .media_id_media_url
                .get(id.as_bytes())?
                .and_then(url_from_ivec))
        })
        .await
    }
@@ -452,6 +496,10 @@ impl Db {
        self.unblock(|inner| Ok(inner.blocks().collect())).await
    }

    pub(crate) async fn allows(&self) -> Result<Vec<String>, Error> {
        self.unblock(|inner| Ok(inner.allowed().collect())).await
    }

    pub(crate) async fn inboxes(&self) -> Result<Vec<IriString>, Error> {
        self.unblock(|inner| Ok(inner.connected_actors().map(|actor| actor.inbox).collect()))
            .await
@@ -460,7 +508,7 @@ impl Db {
    pub(crate) async fn is_connected(&self, base_id: IriString) -> Result<bool, Error> {
        let scheme = base_id.scheme_str();
        let authority = base_id.authority_str().ok_or(ErrorKind::MissingDomain)?;
        let prefix = format!("{}://{}", scheme, authority);
        let prefix = format!("{scheme}://{authority}");

        self.unblock(move |inner| {
            let connected = inner
@@ -479,26 +527,22 @@ impl Db {
        public_key_id: IriString,
    ) -> Result<Option<IriString>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner
            Ok(inner
                .public_key_id_actor_id
                .get(public_key_id.as_str().as_bytes())?
            {
                Ok(url_from_ivec(ivec))
            } else {
                Ok(None)
            }
                .and_then(url_from_ivec))
        })
        .await
    }

    pub(crate) async fn actor(&self, actor_id: IriString) -> Result<Option<Actor>, Error> {
        self.unblock(move |inner| {
            if let Some(ivec) = inner.actor_id_actor.get(actor_id.as_str().as_bytes())? {
                let actor = serde_json::from_slice(&ivec)?;
                Ok(Some(actor))
            } else {
                Ok(None)
            }
            inner
                .actor_id_actor
                .get(actor_id.as_str().as_bytes())?
                .map(|ivec| serde_json::from_slice(&ivec))
                .transpose()
                .map_err(Error::from)
        })
        .await
    }
@@ -514,30 +558,46 @@ impl Db {
            inner
                .actor_id_actor
                .insert(actor.id.as_str().as_bytes(), vec)?;

            metrics::gauge!("relay.db.public-key-actor-id.size").set(crate::collector::recordable(
                inner.public_key_id_actor_id.len(),
            ));

            metrics::gauge!("relay.db.actor-id-actor.size").set(crate::collector::recordable(
                inner.actor_id_actor.len(),
            ));
            Ok(())
        })
        .await
    }
    pub(crate) async fn remove_connection(&self, actor_id: IriString) -> Result<(), Error> {
        tracing::debug!("Removing Connection: {}", actor_id);
        tracing::debug!("Removing Connection: {actor_id}");
        self.unblock(move |inner| {
            inner
                .connected_actor_ids
                .remove(actor_id.as_str().as_bytes())?;

            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
                inner.connected_actor_ids.len(),
            ));

            Ok(())
        })
        .await
    }

    pub(crate) async fn add_connection(&self, actor_id: IriString) -> Result<(), Error> {
        tracing::debug!("Adding Connection: {}", actor_id);
        tracing::debug!("Adding Connection: {actor_id}");
        self.unblock(move |inner| {
            inner
                .connected_actor_ids
                .insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?;

            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
                inner.connected_actor_ids.len(),
            ));

            Ok(())
        })
        .await
@@ -545,30 +605,64 @@ impl Db {

    pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
        self.unblock(move |inner| {
            for connected in inner.connected_by_domain(&domains) {
                inner
                    .connected_actor_ids
                    .remove(connected.as_str().as_bytes())?;
            }
            let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();

            for authority in &domains {
                inner
                    .blocked_domains
                    .insert(domain_key(authority), authority.as_bytes())?;
                inner.allowed_domains.remove(domain_key(authority))?;
            }
            let res = (
                &inner.connected_actor_ids,
                &inner.blocked_domains,
                &inner.allowed_domains,
            )
                .transaction(|(connected, blocked, allowed)| {
                    let mut connected_batch = Batch::default();
                    let mut blocked_batch = Batch::default();
                    let mut allowed_batch = Batch::default();

            Ok(())
                    for connected in &connected_by_domain {
                        connected_batch.remove(connected.as_str().as_bytes());
                    }

                    for authority in &domains {
                        blocked_batch
                            .insert(domain_key(authority).as_bytes(), authority.as_bytes());
                        allowed_batch.remove(domain_key(authority).as_bytes());
                    }

                    connected.apply_batch(&connected_batch)?;
                    blocked.apply_batch(&blocked_batch)?;
                    allowed.apply_batch(&allowed_batch)?;

                    Ok(())
                });

            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
                inner.connected_actor_ids.len(),
            ));
            metrics::gauge!("relay.db.blocked-domains.size")
                .set(crate::collector::recordable(inner.blocked_domains.len()));
            metrics::gauge!("relay.db.allowed-domains.size")
                .set(crate::collector::recordable(inner.allowed_domains.len()));

            match res {
                Ok(()) => Ok(()),
                Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e.into()),
            }
        })
        .await
    }
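`add_blocks` now groups the connected-actor removals and the block/allow updates into one multi-tree sled transaction, so a crash cannot leave a domain half-blocked. A minimal sketch of the same pattern (helper name and key hypothetical):

use sled::{transaction::TransactionError, Transactional};

// Atomically move a key from one tree to another: both writes commit or neither does.
fn move_key(src: &sled::Tree, dst: &sled::Tree, key: &[u8]) -> Result<(), sled::Error> {
    let res: Result<(), TransactionError<sled::Error>> =
        (src, dst).transaction(|(src, dst)| {
            if let Some(value) = src.remove(key)? {
                dst.insert(key, value)?;
            }
            Ok(())
        });

    match res {
        Ok(()) => Ok(()),
        Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e),
    }
}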
    pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
        self.unblock(move |inner| {
            let mut blocked_batch = Batch::default();

            for authority in &domains {
                inner.blocked_domains.remove(domain_key(authority))?;
                blocked_batch.remove(domain_key(authority).as_bytes());
            }

            inner.blocked_domains.apply_batch(blocked_batch)?;

            metrics::gauge!("relay.db.blocked-domains.size")
                .set(crate::collector::recordable(inner.blocked_domains.len()));

            Ok(())
        })
        .await
@@ -576,12 +670,17 @@ impl Db {

    pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> {
        self.unblock(move |inner| {
            let mut allowed_batch = Batch::default();

            for authority in &domains {
                inner
                    .allowed_domains
                    .insert(domain_key(authority), authority.as_bytes())?;
                allowed_batch.insert(domain_key(authority).as_bytes(), authority.as_bytes());
            }

            inner.allowed_domains.apply_batch(allowed_batch)?;

            metrics::gauge!("relay.db.allowed-domains.size")
                .set(crate::collector::recordable(inner.allowed_domains.len()));

            Ok(())
        })
        .await
@@ -590,17 +689,32 @@ impl Db {
    pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> {
        self.unblock(move |inner| {
            if inner.restricted_mode {
                for connected in inner.connected_by_domain(&domains) {
                    inner
                        .connected_actor_ids
                        .remove(connected.as_str().as_bytes())?;
                let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();

                let mut connected_batch = Batch::default();

                for connected in &connected_by_domain {
                    connected_batch.remove(connected.as_str().as_bytes());
                }

                inner.connected_actor_ids.apply_batch(connected_batch)?;

                metrics::gauge!("relay.db.connected-actor-ids.size").set(
                    crate::collector::recordable(inner.connected_actor_ids.len()),
                );
            }

            let mut allowed_batch = Batch::default();

            for authority in &domains {
                inner.allowed_domains.remove(domain_key(authority))?;
                allowed_batch.remove(domain_key(authority).as_bytes());
            }

            inner.allowed_domains.apply_batch(allowed_batch)?;

            metrics::gauge!("relay.db.allowed-domains.size")
                .set(crate::collector::recordable(inner.allowed_domains.len()));

            Ok(())
        })
        .await
@@ -641,6 +755,10 @@ impl Db {
            inner
                .settings
                .insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?;

            metrics::gauge!("relay.db.settings.size")
                .set(crate::collector::recordable(inner.settings.len()));

            Ok(())
        })
        .await
@@ -726,6 +844,11 @@ mod tests {
    {
        let db =
            Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap();
        actix_rt::System::new().block_on((f)(db));

        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap()
            .block_on((f)(db));
    }
}
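The test harness switches from `actix_rt::System` to a plain current-thread Tokio runtime. The same pattern in isolation:

// Drive an async test body to completion on a single-threaded runtime.
fn run_async_test<F: std::future::Future<Output = ()>>(fut: F) {
    tokio::runtime::Builder::new_current_thread()
        .enable_all() // enable IO and time drivers
        .build()
        .unwrap()
        .block_on(fut);
}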
169 src/error.rs
@@ -1,36 +1,85 @@
use activitystreams::checked::CheckError;
use actix_rt::task::JoinError;
use actix_web::{
    error::{BlockingError, ResponseError},
    http::StatusCode,
    HttpResponse,
};
use http_signature_normalization_actix::PrepareSignError;
use std::{convert::Infallible, fmt::Debug, io};
use tracing::error;
use tracing_error::SpanTrace;
use background_jobs::BoxError;
use color_eyre::eyre::Error as Report;
use http_signature_normalization_reqwest::SignError;
use std::{convert::Infallible, io, sync::Arc};
use tokio::task::JoinError;

#[derive(Clone)]
struct ArcKind {
    kind: Arc<ErrorKind>,
}

impl std::fmt::Debug for ArcKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.kind.fmt(f)
    }
}

impl std::fmt::Display for ArcKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.kind.fmt(f)
    }
}

impl std::error::Error for ArcKind {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        self.kind.source()
    }
}

pub(crate) struct Error {
    context: SpanTrace,
    kind: ErrorKind,
    kind: ArcKind,
    display: Box<str>,
    debug: Box<str>,
}

impl Error {
    fn kind(&self) -> &ErrorKind {
        &self.kind.kind
    }

    pub(crate) fn is_breaker(&self) -> bool {
        matches!(self.kind(), ErrorKind::Breaker)
    }

    pub(crate) fn is_not_found(&self) -> bool {
        matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
    }

    pub(crate) fn is_bad_request(&self) -> bool {
        matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
    }

    pub(crate) fn is_gone(&self) -> bool {
        matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
    }

    pub(crate) fn is_malformed_json(&self) -> bool {
        matches!(self.kind(), ErrorKind::Json(_))
    }
}

impl std::fmt::Debug for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "{:?}", self.kind)
        f.write_str(&self.debug)
    }
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "{}", self.kind)?;
        std::fmt::Display::fmt(&self.context, f)
        f.write_str(&self.display)
    }
}

impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        self.kind.source()
        self.kind().source()
    }
}
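The rewritten `Error` pre-renders its report once at construction and stores the strings, so `Display` and `Debug` become cheap copies instead of re-walking the error chain. The caching idea in isolation (type name hypothetical):

// Render an error's Display and Debug output once, then serve the cache.
struct Rendered {
    display: Box<str>,
    debug: Box<str>,
}

impl Rendered {
    fn new(e: &(impl std::fmt::Display + std::fmt::Debug)) -> Self {
        Rendered {
            display: format!("{e}").into_boxed_str(),
            debug: format!("{e:?}").into_boxed_str(),
        }
    }
}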
@@ -39,53 +88,82 @@ where
    ErrorKind: From<T>,
{
    fn from(error: T) -> Self {
        let kind = ArcKind {
            kind: Arc::new(ErrorKind::from(error)),
        };
        let report = Report::new(kind.clone());
        let display = format!("{report}");
        let debug = format!("{report:?}");

        Error {
            context: SpanTrace::capture(),
            kind: error.into(),
            kind,
            display: Box::from(display),
            debug: Box::from(debug),
        }
    }
}

#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
    #[error("Error queueing job, {0}")]
    Queue(anyhow::Error),
    #[error("Error in extractor")]
    Extractor(#[from] crate::extractors::ErrorKind),

    #[error("Error in configuration, {0}")]
    #[error("Error queueing job")]
    Queue(#[from] BoxError),

    #[error("Error in configuration")]
    Config(#[from] config::ConfigError),

    #[error("Couldn't parse key, {0}")]
    #[error("Couldn't parse key")]
    Pkcs8(#[from] rsa::pkcs8::Error),

    #[error("Couldn't encode public key, {0}")]
    #[error("Couldn't encode public key")]
    Spki(#[from] rsa::pkcs8::spki::Error),

    #[error("Couldn't parse IRI, {0}")]
    #[error("Couldn't sign request")]
    SignRequest,

    #[error("Response body from server exceeded limits")]
    BodyTooLarge,

    #[error("Couldn't make request")]
    Reqwest(#[from] reqwest::Error),

    #[error("Couldn't make request")]
    ReqwestMiddleware(#[from] reqwest_middleware::Error),

    #[error("Couldn't parse IRI")]
    ParseIri(#[from] activitystreams::iri_string::validate::Error),

    #[error("Couldn't normalize IRI, {0}")]
    NormalizeIri(
        #[from]
        activitystreams::iri_string::task::Error<activitystreams::iri_string::normalize::Error>,
    ),
    #[error("Couldn't normalize IRI")]
    NormalizeIri(#[from] std::collections::TryReserveError),

    #[error("Couldn't perform IO, {0}")]
    #[error("Couldn't perform IO")]
    Io(#[from] io::Error),

    #[error("Couldn't sign string, {0}")]
    Rsa(rsa::errors::Error),

    #[error("Couldn't use db, {0}")]
    #[error("Couldn't use db")]
    Sled(#[from] sled::Error),

    #[error("Couldn't do the json thing, {0}")]
    #[error("Couldn't do the json thing")]
    Json(#[from] serde_json::Error),

    #[error("Couldn't build signing string, {0}")]
    PrepareSign(#[from] PrepareSignError),
    #[error("Couldn't sign request")]
    Sign(#[from] SignError),

    #[error("Couldn't sign digest")]
    Signature(#[from] signature::Error),
    Signature(#[from] rsa::signature::Error),

    #[error("Couldn't prepare TLS private key")]
    PrepareKey(#[from] rustls::Error),

    #[error("Couldn't verify signature")]
    VerifySignature,

    #[error("Failed to encode key der")]
    DerEncode,

    #[error("Couldn't parse the signature header")]
    HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@@ -106,20 +184,17 @@ pub(crate) enum ErrorKind {
    BadActor(String, String),

    #[error("Signature verification is required, but no signature was given")]
    NoSignature(String),
    NoSignature(Option<String>),

    #[error("Wrong ActivityPub kind, {0}")]
    Kind(String),

    #[error("Too many CPUs, {0}")]
    #[error("Too many CPUs")]
    CpuCount(#[from] std::num::TryFromIntError),

    #[error("{0}")]
    #[error("Host mismatch")]
    HostMismatch(#[from] CheckError),

    #[error("Invalid or missing content type")]
    ContentType,

    #[error("Couldn't flush buffer")]
    FlushBuffer,
@@ -164,11 +239,14 @@ pub(crate) enum ErrorKind {

    #[error("Failed to extract fields from {0}")]
    Extract(&'static str),

    #[error("No API Token supplied")]
    MissingApiToken,
}

impl ResponseError for Error {
    fn status_code(&self) -> StatusCode {
        match self.kind {
        match self.kind() {
            ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
                StatusCode::FORBIDDEN
            }
@@ -177,7 +255,8 @@ impl ResponseError for Error {
            ErrorKind::Kind(_)
            | ErrorKind::MissingKind
            | ErrorKind::MissingId
            | ErrorKind::ObjectCount => StatusCode::BAD_REQUEST,
            | ErrorKind::ObjectCount
            | ErrorKind::NoSignature(_) => StatusCode::BAD_REQUEST,
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
@@ -187,7 +266,7 @@ impl ResponseError for Error {
            .insert_header(("Content-Type", "application/activity+json"))
            .body(
                serde_json::to_string(&serde_json::json!({
                    "error": self.kind.to_string(),
                    "error": self.kind().to_string(),
                }))
                .unwrap_or_else(|_| "{}".to_string()),
            )
@@ -217,3 +296,15 @@ impl From<rsa::errors::Error> for ErrorKind {
        ErrorKind::Rsa(e)
    }
}

impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
    fn from(_: http_signature_normalization_actix::Canceled) -> Self {
        Self::Canceled
    }
}

impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
    fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
        Self::Canceled
    }
}
202 src/extractors.rs Normal file
@@ -0,0 +1,202 @@
use actix_web::{
    dev::Payload,
    error::ParseError,
    http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
    web::Data,
    FromRequest, HttpMessage, HttpRequest,
};
use bcrypt::{BcryptError, DEFAULT_COST};
use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
use std::{convert::Infallible, str::FromStr, time::Instant};

use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};

#[derive(Clone)]
pub(crate) struct AdminConfig {
    hashed_api_token: String,
}

impl AdminConfig {
    pub(crate) fn build(api_token: &str) -> Result<Self, Error> {
        Ok(AdminConfig {
            hashed_api_token: bcrypt::hash(api_token, DEFAULT_COST).map_err(Error::bcrypt_hash)?,
        })
    }

    fn verify(&self, token: XApiToken) -> Result<bool, Error> {
        bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
    }
}

pub(crate) struct Admin {
    db: Data<Db>,
}

type PrepareTuple = (Data<Db>, Data<AdminConfig>, Data<Spawner>, XApiToken);

impl Admin {
    fn prepare_verify(req: &HttpRequest) -> Result<PrepareTuple, Error> {
        let hashed_api_token = req
            .app_data::<Data<AdminConfig>>()
            .ok_or_else(Error::missing_config)?
            .clone();

        let x_api_token = XApiToken::parse(req).map_err(Error::parse_header)?;

        let db = req
            .app_data::<Data<Db>>()
            .ok_or_else(Error::missing_db)?
            .clone();

        let spawner = req
            .app_data::<Data<Spawner>>()
            .ok_or_else(Error::missing_spawner)?
            .clone();

        Ok((db, hashed_api_token, spawner, x_api_token))
    }

    #[tracing::instrument(level = "debug", skip_all)]
    async fn verify(
        hashed_api_token: Data<AdminConfig>,
        spawner: Data<Spawner>,
        x_api_token: XApiToken,
    ) -> Result<(), Error> {
        let span = tracing::Span::current();
        if spawner
            .spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
            .await
            .map_err(Error::canceled)??
        {
            return Ok(());
        }

        Err(Error::invalid())
    }

    pub(crate) fn db_ref(&self) -> &Db {
        &self.db
    }
}

impl Error {
    fn invalid() -> Self {
        Error::from(ErrorKind::Invalid)
    }

    fn missing_config() -> Self {
        Error::from(ErrorKind::MissingConfig)
    }

    fn missing_db() -> Self {
        Error::from(ErrorKind::MissingDb)
    }

    fn missing_spawner() -> Self {
        Error::from(ErrorKind::MissingSpawner)
    }

    fn bcrypt_verify(e: BcryptError) -> Self {
        Error::from(ErrorKind::BCryptVerify(e))
    }

    fn bcrypt_hash(e: BcryptError) -> Self {
        Error::from(ErrorKind::BCryptHash(e))
    }

    fn parse_header(e: ParseError) -> Self {
        Error::from(ErrorKind::ParseHeader(e))
    }

    fn canceled(_: Canceled) -> Self {
        Error::from(ErrorKind::Canceled)
    }
}

#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
    #[error("Invalid API Token")]
    Invalid,

    #[error("Missing Config")]
    MissingConfig,

    #[error("Missing Db")]
    MissingDb,

    #[error("Missing Spawner")]
    MissingSpawner,

    #[error("Panic in verify")]
    Canceled,

    #[error("Verifying")]
    BCryptVerify(#[source] BcryptError),

    #[error("Hashing")]
    BCryptHash(#[source] BcryptError),

    #[error("Parse Header")]
    ParseHeader(#[source] ParseError),
}

impl FromRequest for Admin {
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;

    fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
        let now = Instant::now();
        let res = Self::prepare_verify(req);
        Box::pin(async move {
            let (db, c, s, t) = res?;
            Self::verify(c, s, t).await?;
            metrics::histogram!("relay.admin.verify")
                .record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
            Ok(Admin { db })
        })
    }
}

pub(crate) struct XApiToken(String);

impl XApiToken {
    pub(crate) fn new(token: String) -> Self {
        Self(token)
    }

    pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
        reqwest::header::HeaderName::from_static("x-api-token")
    }
}

impl Header for XApiToken {
    fn name() -> HeaderName {
        HeaderName::from_static("x-api-token")
    }

    fn parse<M: HttpMessage>(msg: &M) -> Result<Self, ParseError> {
        from_one_raw_str(msg.headers().get(Self::name()))
    }
}

impl TryIntoHeaderValue for XApiToken {
    type Error = InvalidHeaderValue;

    fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
        HeaderValue::from_str(&self.0)
    }
}

impl FromStr for XApiToken {
    type Err = Infallible;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(XApiToken(s.to_string()))
    }
}

impl std::fmt::Display for XApiToken {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}
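Client-side, the admin endpoints expect this header verbatim. A hedged sketch of a call (URL and endpoint are illustrative, not the relay's actual routes):

// Attach the X-Api-Token header to an admin request with reqwest.
async fn call_admin(client: &reqwest::Client, token: &str) -> Result<(), reqwest::Error> {
    client
        .post("https://relay.example/api/v1/admin/example")
        .header("x-api-token", token)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}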
4 src/future.rs Normal file
@@ -0,0 +1,4 @@
use std::{future::Future, pin::Pin};

pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
18 src/http1.rs Normal file
@@ -0,0 +1,18 @@
pub(crate) fn name_to_http02(
    name: &reqwest::header::HeaderName,
) -> actix_web::http::header::HeaderName {
    actix_web::http::header::HeaderName::from_bytes(name.as_ref())
        .expect("headername conversions always work")
}

pub(crate) fn value_to_http02(
    value: &reqwest::header::HeaderValue,
) -> actix_web::http::header::HeaderValue {
    actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
        .expect("headervalue conversions always work")
}

pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
    actix_web::http::StatusCode::from_u16(status.as_u16())
        .expect("statuscode conversions always work")
}
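These helpers re-parse names, values, and status codes byte-for-byte between the reqwest and actix-web header types, which differ only by crate version. A quick round-trip check under that assumption:

fn demo() {
    let name = reqwest::header::HeaderName::from_static("x-api-token");
    // Convert via raw bytes; any name valid on one side is valid on the other.
    let converted = actix_web::http::header::HeaderName::from_bytes(name.as_ref())
        .expect("valid header names convert cleanly");
    assert_eq!(converted.as_str(), "x-api-token");
}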
201 src/jobs.rs Normal file
@@ -0,0 +1,201 @@
pub mod apub;
mod contact;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod record_last_online;

pub(crate) use self::{
    contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
    nodeinfo::QueryNodeinfo,
};

use crate::{
    config::Config,
    data::{ActorCache, MediaCache, State},
    error::{Error, ErrorKind},
    jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
};
use background_jobs::{
    memory_storage::{Storage, TokioTimer},
    metrics::MetricsStorage,
    tokio::{QueueHandle, WorkerConfig},
    Job,
};
use std::time::Duration;

fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
    let mut object = &activity["object"]["type"];

    if object.is_null() {
        object = &activity["object"]["id"];
    }

    if object.is_null() {
        object = &activity["object"];
    }

    object
}
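A small illustration of the fallback order (`object.type`, then `object.id`, then the object itself), assuming `debug_object` above is in scope:

fn main() {
    let activity = serde_json::json!({
        "type": "Announce",
        "object": { "id": "https://example.com/notes/1" }
    });
    // `object.type` is absent, so the second probe wins.
    assert_eq!(
        debug_object(&activity),
        &serde_json::json!("https://example.com/notes/1")
    );
}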
pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
    MetricsStorage::wrap(Storage::new(TokioTimer))
}

pub(crate) fn create_workers(
    storage: MetricsStorage<Storage<TokioTimer>>,
    state: State,
    actors: ActorCache,
    media: MediaCache,
    config: Config,
) -> std::io::Result<JobServer> {
    let deliver_concurrency = config.deliver_concurrency();

    let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
        JobState::new(
            state.clone(),
            actors.clone(),
            JobServer::new(queue_handle),
            media.clone(),
            config.clone(),
        )
    })
    .register::<Deliver>()
    .register::<DeliverMany>()
    .register::<QueryNodeinfo>()
    .register::<QueryInstance>()
    .register::<Listeners>()
    .register::<QueryContact>()
    .register::<RecordLastOnline>()
    .register::<apub::Announce>()
    .register::<apub::Follow>()
    .register::<apub::Forward>()
    .register::<apub::Reject>()
    .register::<apub::Undo>()
    .set_worker_count("maintenance", 2)
    .set_worker_count("apub", 2)
    .set_worker_count("deliver", deliver_concurrency)
    .start()?;

    queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
    queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;

    Ok(JobServer::new(queue_handle))
}
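Every type registered above implements the `background_jobs::Job` trait in the shape this diff migrates to. A minimal no-op job sketched under that same trait shape (names hypothetical):

use background_jobs::{Backoff, Job};
use std::{future::Future, pin::Pin};

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
struct Heartbeat;

impl Job for Heartbeat {
    type State = ();
    type Error = std::convert::Infallible;
    type Future = Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>>;

    const NAME: &'static str = "example::Heartbeat";
    const QUEUE: &'static str = "maintenance";
    const BACKOFF: Backoff = Backoff::Linear(1);

    // Jobs return their future directly; no anyhow wrapping needed anymore.
    fn run(self, _: Self::State) -> Self::Future {
        Box::pin(async { Ok(()) })
    }
}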
#[derive(Clone, Debug)]
pub(crate) struct JobState {
    state: State,
    actors: ActorCache,
    config: Config,
    media: MediaCache,
    job_server: JobServer,
}

#[derive(Clone)]
pub(crate) struct JobServer {
    remote: QueueHandle,
}

impl std::fmt::Debug for JobServer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("JobServer")
            .field("queue_handle", &"QueueHandle")
            .finish()
    }
}

impl JobState {
    fn new(
        state: State,
        actors: ActorCache,
        job_server: JobServer,
        media: MediaCache,
        config: Config,
    ) -> Self {
        JobState {
            state,
            actors,
            config,
            media,
            job_server,
        }
    }
}

impl JobServer {
    fn new(remote_handle: QueueHandle) -> Self {
        JobServer {
            remote: remote_handle,
        }
    }

    pub(crate) async fn queue<J>(&self, job: J) -> Result<(), Error>
    where
        J: Job,
    {
        self.remote
            .queue(job)
            .await
            .map_err(ErrorKind::Queue)
            .map_err(Into::into)
    }
}

struct Boolish {
    inner: bool,
}

impl std::ops::Deref for Boolish {
    type Target = bool;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<'de> serde::Deserialize<'de> for Boolish {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        #[derive(serde::Deserialize)]
        #[serde(untagged)]
        enum BoolThing {
            Bool(bool),
            String(String),
        }

        let thing: BoolThing = serde::Deserialize::deserialize(deserializer)?;

        match thing {
            BoolThing::Bool(inner) => Ok(Boolish { inner }),
            BoolThing::String(s) if s.to_lowercase() == "false" => Ok(Boolish { inner: false }),
            BoolThing::String(_) => Ok(Boolish { inner: true }),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Boolish;

    #[test]
    fn boolish_works() {
        const CASES: &[(&str, bool)] = &[
            ("false", false),
            ("\"false\"", false),
            ("\"FALSE\"", false),
            ("true", true),
            ("\"true\"", true),
            ("\"anything else\"", true),
        ];

        for (case, output) in CASES {
            let b: Boolish = serde_json::from_str(case).unwrap();
            assert_eq!(*b, *output);
        }
    }
}
@@ -36,13 +36,13 @@ async fn get_inboxes(
    state.inboxes_without(&actor.inbox, &authority).await
}

fn prepare_activity<T, U, V, Kind>(
fn prepare_activity<T, U, V>(
    mut t: T,
    id: impl TryInto<IriString, Error = U>,
    to: impl TryInto<IriString, Error = V>,
) -> Result<T, Error>
where
    T: ObjectExt<Kind> + BaseExt<Kind>,
    T: ObjectExt + BaseExt,
    Error: From<U> + From<V>,
{
    t.set_id(id.try_into()?)
@@ -2,14 +2,14 @@ use crate::{
    config::{Config, UrlKind},
    db::Actor,
    error::Error,
    future::BoxFuture,
    jobs::{
        apub::{get_inboxes, prepare_activity},
        DeliverMany, JobState,
    },
};
use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;

#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Announce {
@@ -21,7 +21,7 @@ impl std::fmt::Debug for Announce {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Announce")
            .field("object_id", &self.object_id.to_string())
            .field("actor", &self.actor)
            .field("actor_id", &self.actor.id)
            .finish()
    }
}
@@ -42,7 +42,7 @@ impl Announce {
            .queue(DeliverMany::new(inboxes, announce)?)
            .await?;

        state.state.cache(self.object_id, activity_id).await;
        state.state.cache(self.object_id, activity_id);
        Ok(())
    }
}
@@ -62,13 +62,15 @@ fn generate_announce(
    )
}

impl ActixJob for Announce {
impl Job for Announce {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::apub::Announce";
    const QUEUE: &'static str = "apub";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -3,6 +3,7 @@ use crate::{
    config::{Config, UrlKind},
    db::Actor,
    error::{Error, ErrorKind},
    future::BoxFuture,
    jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
};
use activitystreams::{
@@ -10,15 +11,23 @@ use activitystreams::{
    iri_string::types::IriString,
    prelude::*,
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Follow {
    input: AcceptedActivities,
    actor: Actor,
}

impl std::fmt::Debug for Follow {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Follow")
            .field("input", &self.input.id_unchecked())
            .field("actor", &self.actor.id)
            .finish()
    }
}

impl Follow {
    pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
        Follow { input, actor }
@@ -102,13 +111,15 @@ fn generate_accept_follow(
    )
}

impl ActixJob for Follow {
impl Job for Follow {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::apub::Follow";
    const QUEUE: &'static str = "apub";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -2,18 +2,27 @@ use crate::{
    apub::AcceptedActivities,
    db::Actor,
    error::{Error, ErrorKind},
    future::BoxFuture,
    jobs::{apub::get_inboxes, DeliverMany, JobState},
};
use activitystreams::prelude::*;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Forward {
    input: AcceptedActivities,
    actor: Actor,
}

impl std::fmt::Debug for Forward {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Forward")
            .field("input", &self.input.id_unchecked())
            .field("actor", &self.actor.id)
            .finish()
    }
}

impl Forward {
    pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
        Forward { input, actor }
@@ -38,13 +47,15 @@ impl Forward {
    }
}

impl ActixJob for Forward {
impl Job for Forward {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::apub::Forward";
    const QUEUE: &'static str = "apub";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -2,14 +2,20 @@ use crate::{
    config::UrlKind,
    db::Actor,
    error::Error,
    future::BoxFuture,
    jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Reject(pub(crate) Actor);

impl std::fmt::Debug for Reject {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Reject").field("actor", &self.0.id).finish()
    }
}

impl Reject {
    #[tracing::instrument(name = "Reject", skip(state))]
    async fn perform(self, state: JobState) -> Result<(), Error> {
@@ -27,13 +33,15 @@ impl Reject {
    }
}

impl ActixJob for Reject {
impl Job for Reject {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::apub::Reject";
    const QUEUE: &'static str = "apub";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -3,17 +3,27 @@ use crate::{
    config::UrlKind,
    db::Actor,
    error::Error,
    future::BoxFuture,
    jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use activitystreams::prelude::BaseExt;
use background_jobs::Job;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Undo {
    input: AcceptedActivities,
    actor: Actor,
}

impl std::fmt::Debug for Undo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Undo")
            .field("input", &self.input.id_unchecked())
            .field("actor", &self.actor.id)
            .finish()
    }
}

impl Undo {
    pub(crate) fn new(input: AcceptedActivities, actor: Actor) -> Self {
        Undo { input, actor }
@@ -38,13 +48,15 @@ impl Undo {
    }
}

impl ActixJob for Undo {
impl Job for Undo {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::apub::Undo";
    const QUEUE: &'static str = "apub";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -1,44 +0,0 @@
use crate::{error::Error, jobs::JobState};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use uuid::Uuid;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct CacheMedia {
    uuid: Uuid,
}

impl CacheMedia {
    pub(crate) fn new(uuid: Uuid) -> Self {
        CacheMedia { uuid }
    }

    #[tracing::instrument(name = "Cache media", skip(state))]
    async fn perform(self, state: JobState) -> Result<(), Error> {
        if !state.media.is_outdated(self.uuid).await? {
            return Ok(());
        }

        if let Some(url) = state.media.get_url(self.uuid).await? {
            let (content_type, bytes) = state.requests.fetch_bytes(url.as_str()).await?;

            state
                .media
                .store_bytes(self.uuid, content_type, bytes)
                .await?;
        }

        Ok(())
    }
}

impl ActixJob for CacheMedia {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;

    const NAME: &'static str = "relay::jobs::CacheMedia";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
    }
}
@@ -1,11 +1,12 @@
use crate::{
    apub::AcceptedActors,
    error::{Error, ErrorKind},
    future::BoxFuture,
    jobs::JobState,
    requests::BreakerStrategy,
};
use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;

#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryContact {
@@ -32,6 +33,7 @@ impl QueryContact {

    async fn perform(self, state: JobState) -> Result<(), Error> {
        let contact_outdated = state
            .state
            .node_cache
            .is_contact_outdated(self.actor_id.clone())
            .await;
@@ -40,15 +42,25 @@ impl QueryContact {
            return Ok(());
        }

        let contact = state
        let contact = match state
            .state
            .requests
            .fetch::<AcceptedActors>(self.contact_id.as_str())
            .await?;
            .fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow)
            .await
        {
            Ok(contact) => contact,
            Err(e) if e.is_breaker() => {
                tracing::debug!("Not retrying due to failed breaker");
                return Ok(());
            }
            Err(e) => return Err(e),
        };

        let (username, display_name, url, avatar) =
            to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;

        state
            .state
            .node_cache
            .set_contact(self.actor_id, username, display_name, url, avatar)
            .await?;
@@ -73,14 +85,16 @@ fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, Iri
    Some((username, display_name, url, avatar))
}

impl ActixJob for QueryContact {
impl Job for QueryContact {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::QueryContact";
    const QUEUE: &'static str = "maintenance";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -1,7 +1,11 @@
use crate::{error::Error, jobs::JobState};
use crate::{
    error::Error,
    future::BoxFuture,
    jobs::{debug_object, JobState},
    requests::BreakerStrategy,
};
use activitystreams::iri_string::types::IriString;
use background_jobs::{ActixJob, Backoff};
use std::{future::Future, pin::Pin};
use background_jobs::{Backoff, Job};

#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Deliver {
@@ -13,7 +17,8 @@ impl std::fmt::Debug for Deliver {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Deliver")
            .field("to", &self.to.to_string())
            .field("data", &self.data)
            .field("activity", &self.data["type"])
            .field("object", debug_object(&self.data))
            .finish()
    }
}
@@ -30,20 +35,37 @@ impl Deliver {
    }

    #[tracing::instrument(name = "Deliver", skip(state))]
    async fn permform(self, state: JobState) -> Result<(), Error> {
        state.requests.deliver(self.to, &self.data).await?;
    async fn perform(self, state: JobState) -> Result<(), Error> {
        if let Err(e) = state
            .state
            .requests
            .deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
            .await
        {
            if e.is_breaker() {
                tracing::debug!("Not trying due to failed breaker");
                return Ok(());
            }
            if e.is_bad_request() {
                tracing::debug!("Server didn't understand the activity");
                return Ok(());
            }
            return Err(e);
        }
        Ok(())
    }
}

impl ActixJob for Deliver {
impl Job for Deliver {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::Deliver";
    const QUEUE: &'static str = "deliver";
    const BACKOFF: Backoff = Backoff::Exponential(8);

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.permform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
@@ -1,10 +1,10 @@
use crate::{
    error::Error,
    jobs::{Deliver, JobState},
    future::BoxFuture,
    jobs::{debug_object, Deliver, JobState},
};
use activitystreams::iri_string::types::IriString;
use background_jobs::ActixJob;
use futures_util::future::LocalBoxFuture;
use background_jobs::Job;

#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct DeliverMany {
@@ -14,17 +14,9 @@ pub(crate) struct DeliverMany {

impl std::fmt::Debug for DeliverMany {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let to = format!(
            "[{}]",
            self.to
                .iter()
                .map(|u| u.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        );
        f.debug_struct("DeliverMany")
            .field("to", &to)
            .field("data", &self.data)
            .field("activity", &self.data["type"])
            .field("object", debug_object(&self.data))
            .finish()
    }
}
@@ -53,13 +45,15 @@ impl DeliverMany {
    }
}

impl ActixJob for DeliverMany {
impl Job for DeliverMany {
    type State = JobState;
    type Future = LocalBoxFuture<'static, Result<(), anyhow::Error>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::DeliverMany";
    const QUEUE: &'static str = "deliver";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
File diff suppressed because one or more lines are too long
126 src/jobs/mod.rs
@@ -1,126 +0,0 @@
pub mod apub;
mod cache_media;
mod contact;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;

pub(crate) use self::{
    cache_media::CacheMedia, contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany,
    instance::QueryInstance, nodeinfo::QueryNodeinfo,
};

use crate::{
    config::Config,
    data::{ActorCache, MediaCache, NodeCache, State},
    error::{Error, ErrorKind},
    jobs::process_listeners::Listeners,
    requests::Requests,
};
use background_jobs::{
    memory_storage::{ActixTimer, Storage},
    Job, Manager, QueueHandle, WorkerConfig,
};
use std::time::Duration;

pub(crate) fn create_workers(
    state: State,
    actors: ActorCache,
    media: MediaCache,
    config: Config,
) -> (Manager, JobServer) {
    let shared = WorkerConfig::new_managed(Storage::new(ActixTimer), move |queue_handle| {
        JobState::new(
            state.clone(),
            actors.clone(),
            JobServer::new(queue_handle),
            media.clone(),
            config.clone(),
        )
    })
    .register::<Deliver>()
    .register::<DeliverMany>()
    .register::<QueryNodeinfo>()
    .register::<QueryInstance>()
    .register::<Listeners>()
    .register::<CacheMedia>()
    .register::<QueryContact>()
    .register::<apub::Announce>()
    .register::<apub::Follow>()
    .register::<apub::Forward>()
    .register::<apub::Reject>()
    .register::<apub::Undo>()
    .set_worker_count("default", 16)
    .start();

    shared.every(Duration::from_secs(60 * 5), Listeners);

    let job_server = JobServer::new(shared.queue_handle().clone());

    (shared, job_server)
}

#[derive(Clone, Debug)]
pub(crate) struct JobState {
    requests: Requests,
    state: State,
    actors: ActorCache,
    config: Config,
    media: MediaCache,
    node_cache: NodeCache,
    job_server: JobServer,
}

#[derive(Clone)]
pub(crate) struct JobServer {
    remote: QueueHandle,
}

impl std::fmt::Debug for JobServer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("JobServer")
            .field("queue_handle", &"QueueHandle")
            .finish()
    }
}

impl JobState {
    fn new(
        state: State,
        actors: ActorCache,
        job_server: JobServer,
        media: MediaCache,
        config: Config,
    ) -> Self {
        JobState {
            requests: state.requests(&config),
            node_cache: state.node_cache(),
            actors,
            config,
            media,
            state,
            job_server,
        }
    }
}

impl JobServer {
    fn new(remote_handle: QueueHandle) -> Self {
        JobServer {
            remote: remote_handle,
        }
    }

    pub(crate) async fn queue<J>(&self, job: J) -> Result<(), Error>
    where
        J: Job,
    {
        self.remote
            .queue(job)
            .await
            .map_err(ErrorKind::Queue)
            .map_err(Into::into)
    }
}
File diff suppressed because one or more lines are too long
@@ -1,9 +1,9 @@
use crate::{
    error::Error,
    future::BoxFuture,
    jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Listeners;
@@ -23,13 +23,15 @@ impl Listeners {
    }
}

impl ActixJob for Listeners {
impl Job for Listeners {
    type State = JobState;
    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::Listeners";
    const QUEUE: &'static str = "maintenance";

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
        Box::pin(self.perform(state))
    }
}
28 src/jobs/record_last_online.rs Normal file
@@ -0,0 +1,28 @@
use crate::{error::Error, future::BoxFuture, jobs::JobState};
use background_jobs::{Backoff, Job};

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct RecordLastOnline;

impl RecordLastOnline {
    #[tracing::instrument(skip(state))]
    async fn perform(self, state: JobState) -> Result<(), Error> {
        let nodes = state.state.last_online.take();

        state.state.db.mark_last_seen(nodes).await
    }
}

impl Job for RecordLastOnline {
    type State = JobState;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::RecordLastOnline";
    const QUEUE: &'static str = "maintenance";
    const BACKOFF: Backoff = Backoff::Linear(1);

    fn run(self, state: Self::State) -> Self::Future {
        Box::pin(self.perform(state))
    }
}
380
src/main.rs
380
src/main.rs
|
@@ -1,52 +1,72 @@
// need this for ructe
#![allow(clippy::needless_borrow)]

use std::time::Duration;

use activitystreams::iri_string::types::IriString;
use actix_web::{web, App, HttpServer};
use actix_web::{middleware::Compress, web, App, HttpServer};
use collector::MemoryCollector;
#[cfg(feature = "console")]
use console_subscriber::ConsoleLayer;
use opentelemetry::{sdk::Resource, KeyValue};
use error::Error;
use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder;
use metrics_util::layers::FanoutBuilder;
use opentelemetry::{trace::TracerProvider, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
use rustls::ServerConfig;
use tokio::task::JoinHandle;
use tracing_actix_web::TracingLogger;
use tracing_error::ErrorLayer;
use tracing_log::LogTracer;
use tracing_subscriber::{filter::Targets, fmt::format::FmtSpan, layer::SubscriberExt, Layer};
use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};

mod admin;
mod apub;
mod args;
mod collector;
mod config;
mod data;
mod db;
mod error;
mod extractors;
mod future;
mod http1;
mod jobs;
mod middleware;
mod requests;
mod routes;
mod spawner;
mod stream;
mod telegram;

use crate::config::UrlKind;

use self::{
args::Args,
config::Config,
data::{ActorCache, MediaCache, State},
db::Db,
jobs::create_workers,
middleware::{DebugPayload, RelayResolver},
routes::{actor, inbox, index, nodeinfo, nodeinfo_meta, statics},
middleware::{DebugPayload, MyVerify, RelayResolver, Timings},
routes::{actor, healthz, inbox, index, nodeinfo, nodeinfo_meta, statics},
spawner::Spawner,
};

fn init_subscriber(
software_name: &'static str,
opentelemetry_url: Option<&IriString>,
) -> Result<(), anyhow::Error> {
) -> color_eyre::Result<()> {
LogTracer::init()?;
color_eyre::install()?;

let targets: Targets = std::env::var("RUST_LOG")
.unwrap_or_else(|_| "info".into())
.parse()?;

let format_layer = tracing_subscriber::fmt::layer()
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(targets.clone());
let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());

#[cfg(feature = "console")]
let console_layer = ConsoleLayer::builder()

@@ -63,21 +83,21 @@ fn init_subscriber(
let subscriber = subscriber.with(console_layer);

if let Some(url) = opentelemetry_url {
let tracer =
opentelemetry_otlp::new_pipeline()
.tracing()
.with_trace_config(opentelemetry::sdk::trace::config().with_resource(
Resource::new(vec![KeyValue::new("service.name", software_name)]),
))
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(url.as_str()),
)
.install_batch(opentelemetry::runtime::Tokio)?;
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(url.as_str())
.build()?;

let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
software_name,
)]))
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
.build();

let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer)
.with_tracer(tracer_provider.tracer(software_name))
.with_filter(targets);

let subscriber = subscriber.with(otel_layer);

@@ -89,60 +109,266 @@ fn init_subscriber(
Ok(())
}

#[actix_rt::main]
async fn main() -> Result<(), anyhow::Error> {
fn build_client(
user_agent: &str,
timeout_seconds: u64,
proxy: Option<(&IriString, Option<(&str, &str)>)>,
) -> Result<ClientWithMiddleware, Error> {
let builder = reqwest::Client::builder().user_agent(user_agent.to_string());

let builder = if let Some((url, auth)) = proxy {
let proxy = reqwest::Proxy::all(url.as_str())?;

let proxy = if let Some((username, password)) = auth {
proxy.basic_auth(username, password)
} else {
proxy
};

builder.proxy(proxy)
} else {
builder
};

let client = builder
.timeout(Duration::from_secs(timeout_seconds))
.build()?;

let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
.with(reqwest_tracing::TracingMiddleware::default())
.build();

Ok(client_with_middleware)
}

#[tokio::main]
async fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok();

let config = Config::build()?;

init_subscriber(Config::software_name(), config.opentelemetry_url())?;

let db = Db::build(&config)?;

let args = Args::new();

if !args.blocks().is_empty() || !args.allowed().is_empty() {
if args.undo() {
db.remove_blocks(args.blocks().to_vec()).await?;
db.remove_allows(args.allowed().to_vec()).await?;
} else {
db.add_blocks(args.blocks().to_vec()).await?;
db.add_allows(args.allowed().to_vec()).await?;
}
if args.any() {
client_main(config, args).await??;
return Ok(());
}

let media = MediaCache::new(db.clone());
let state = State::build(db.clone()).await?;
let actors = ActorCache::new(db.clone());
let collector = MemoryCollector::new();

let (manager, job_server) =
create_workers(state.clone(), actors.clone(), media.clone(), config.clone());
if let Some(bind_addr) = config.prometheus_bind_address() {
let (recorder, exporter) = PrometheusBuilder::new()
.with_http_listener(bind_addr)
.build()?;

tokio::spawn(exporter);
let recorder = FanoutBuilder::default()
.add_recorder(recorder)
.add_recorder(collector.clone())
.build();
metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
} else {
collector.install()?;
}

tracing::info!("Opening DB");
let db = Db::build(&config)?;

tracing::info!("Building caches");
let actors = ActorCache::new(db.clone());
let media = MediaCache::new(db.clone());

server_main(db, actors, media, collector, config).await?;

tracing::info!("Application exit");

Ok(())
}

fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
tokio::spawn(do_client_main(config, args))
}

async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;

if !args.blocks().is_empty() || !args.allowed().is_empty() {
if args.undo() {
admin::client::unblock(&client, &config, args.blocks().to_vec()).await?;
admin::client::disallow(&client, &config, args.allowed().to_vec()).await?;
} else {
admin::client::block(&client, &config, args.blocks().to_vec()).await?;
admin::client::allow(&client, &config, args.allowed().to_vec()).await?;
}
println!("Updated lists");
}

if args.contacted() {
let last_seen = admin::client::last_seen(&client, &config).await?;

let mut report = String::from("Contacted:");

if !last_seen.never.is_empty() {
report += "\nNever seen:\n";
}

for domain in last_seen.never {
report += "\t";
report += &domain;
report += "\n";
}

if !last_seen.last_seen.is_empty() {
report += "\nSeen:\n";
}

for (datetime, domains) in last_seen.last_seen {
for domain in domains {
report += "\t";
report += &datetime.to_string();
report += " - ";
report += &domain;
report += "\n";
}
}

report += "\n";
println!("{report}");
}

if args.list() {
let (blocked, allowed, connected) = tokio::try_join!(
admin::client::blocked(&client, &config),
admin::client::allowed(&client, &config),
admin::client::connected(&client, &config)
)?;

let mut report = String::from("Report:\n");
if !allowed.allowed_domains.is_empty() {
report += "\nAllowed\n\t";
report += &allowed.allowed_domains.join("\n\t");
}
if !blocked.blocked_domains.is_empty() {
report += "\n\nBlocked\n\t";
report += &blocked.blocked_domains.join("\n\t");
}
if !connected.connected_actors.is_empty() {
report += "\n\nConnected\n\t";
report += &connected.connected_actors.join("\n\t");
}
report += "\n";
println!("{report}");
}

if args.stats() {
let stats = admin::client::stats(&client, &config).await?;
stats.present();
}

Ok(())
}

const VERIFY_RATIO: usize = 7;

async fn server_main(
db: Db,
actors: ActorCache,
media: MediaCache,
collector: MemoryCollector,
config: Config,
) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;

tracing::info!("Creating state");

let (signature_threads, verify_threads) = match config.signature_threads() {
0 | 1 => (1, 1),
n if n <= VERIFY_RATIO => (n, 1),
n => {
let verify_threads = (n / VERIFY_RATIO).max(1);
let signature_threads = n.saturating_sub(verify_threads).max(VERIFY_RATIO);

(signature_threads, verify_threads)
}
};

let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;

let key_id = config.generate_url(UrlKind::MainKey).to_string();
let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;

if let Some((token, admin_handle)) = config.telegram_info() {
tracing::info!("Creating telegram handler");
telegram::start(admin_handle.to_owned(), db.clone(), token);
}

let cert_resolver = config
.open_keys()
.await?
.map(rustls_channel_resolver::channel::<32>);

let bind_address = config.bind_address();
HttpServer::new(move || {
App::new()
.wrap(TracingLogger::default())
let sign_spawner2 = sign_spawner.clone();
let verify_spawner2 = verify_spawner.clone();
let config2 = config.clone();
let job_store = jobs::build_storage();
let server = HttpServer::new(move || {
let job_server = create_workers(
job_store.clone(),
state.clone(),
actors.clone(),
media.clone(),
config.clone(),
)
.expect("Failed to create job server");

let app = App::new()
.app_data(web::Data::new(db.clone()))
.app_data(web::Data::new(state.clone()))
.app_data(web::Data::new(state.requests(&config)))
.app_data(web::Data::new(
state.requests.clone().spawner(verify_spawner.clone()),
))
.app_data(web::Data::new(actors.clone()))
.app_data(web::Data::new(config.clone()))
.app_data(web::Data::new(job_server.clone()))
.app_data(web::Data::new(job_server))
.app_data(web::Data::new(media.clone()))
.app_data(web::Data::new(collector.clone()))
.app_data(web::Data::new(verify_spawner.clone()));

let app = if let Some(data) = config.admin_config() {
app.app_data(data)
} else {
app
};

app.wrap(Compress::default())
.wrap(TracingLogger::default())
.wrap(Timings)
.route("/healthz", web::get().to(healthz))
.service(web::resource("/").route(web::get().to(index)))
.service(web::resource("/media/{path}").route(web::get().to(routes::media)))
.service(
web::resource("/inbox")
.wrap(config.digest_middleware())
.wrap(config.signature_middleware(
state.requests(&config),
actors.clone(),
state.clone(),
.wrap(config.digest_middleware().spawner(verify_spawner.clone()))
.wrap(VerifySignature::new(
MyVerify(
state.requests.clone().spawner(verify_spawner.clone()),
actors.clone(),
state.clone(),
verify_spawner.clone(),
),
http_signature_normalization_actix::Config::new(),
))
.wrap(DebugPayload(config.debug()))
.route(web::post().to(inbox)),

@@ -155,12 +381,58 @@ async fn main() -> Result<(), anyhow::Error> {
.service(web::resource("/nodeinfo").route(web::get().to(nodeinfo_meta))),
)
.service(web::resource("/static/{filename}").route(web::get().to(statics)))
})
.bind(bind_address)?
.run()
.await?;
.service(
web::scope("/api/v1").service(
web::scope("/admin")
.route("/allow", web::post().to(admin::routes::allow))
.route("/disallow", web::post().to(admin::routes::disallow))
.route("/block", web::post().to(admin::routes::block))
.route("/unblock", web::post().to(admin::routes::unblock))
.route("/allowed", web::get().to(admin::routes::allowed))
.route("/blocked", web::get().to(admin::routes::blocked))
.route("/connected", web::get().to(admin::routes::connected))
.route("/stats", web::get().to(admin::routes::stats))
.route("/last_seen", web::get().to(admin::routes::last_seen)),
),
)
});

drop(manager);
if let Some((cert_tx, cert_rx)) = cert_resolver {
let handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
interval.tick().await;

loop {
interval.tick().await;

match config2.open_keys().await {
Ok(Some(key)) => cert_tx.update(key),
Ok(None) => tracing::warn!("Missing TLS keys"),
Err(e) => tracing::error!("Failed to read TLS keys {e}"),
}
}
});

tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
let server_config = ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(cert_rx);
server
.bind_rustls_0_23(bind_address, server_config)?
.run()
.await?;

handle.abort();
let _ = handle.await;
} else {
tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
server.bind(bind_address)?.run().await?;
}

sign_spawner2.close().await;
verify_spawner2.close().await;

tracing::info!("Server closed");

Ok(())
}
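For clarity, the signature/verify thread split introduced above works out as follows. A standalone sketch of the same arithmetic (the sample values are illustrative):

const VERIFY_RATIO: usize = 7;

fn split_threads(n: usize) -> (usize, usize) {
    match n {
        0 | 1 => (1, 1),
        n if n <= VERIFY_RATIO => (n, 1),
        n => {
            let verify = (n / VERIFY_RATIO).max(1);
            (n.saturating_sub(verify).max(VERIFY_RATIO), verify)
        }
    }
}

fn main() {
    assert_eq!(split_threads(4), (4, 1));   // small pools keep a single verify thread
    assert_eq!(split_threads(8), (7, 1));   // just past the ratio: 8/7 = 1 verifier
    assert_eq!(split_threads(16), (14, 2)); // 16/7 = 2 verifiers, the rest sign
}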
src/middleware/mod.rs
@@ -1,7 +1,9 @@
mod payload;
mod timings;
mod verifier;
mod webfinger;

pub(crate) use payload::DebugPayload;
pub(crate) use timings::Timings;
pub(crate) use verifier::MyVerify;
pub(crate) use webfinger::RelayResolver;
src/middleware/payload.rs
@@ -4,14 +4,11 @@ use actix_web::{
web::BytesMut,
HttpMessage,
};
use futures_util::{
future::{LocalBoxFuture, TryFutureExt},
stream::{once, TryStreamExt},
};
use std::{
future::{ready, Ready},
task::{Context, Poll},
};
use streem::IntoStreamer;

#[derive(Clone, Debug)]
pub(crate) struct DebugPayload(pub bool);

@@ -45,7 +42,7 @@ where
{
type Response = S::Response;
type Error = S::Error;
type Future = LocalBoxFuture<'static, Result<S::Response, S::Error>>;
type Future = S::Future;

fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.1.poll_ready(cx)

@@ -53,28 +50,28 @@ where

fn call(&self, mut req: ServiceRequest) -> Self::Future {
if self.0 && req.method() == Method::POST {
let pl = req.take_payload();
let mut pl = req.take_payload().into_streamer();

req.set_payload(Payload::Stream {
payload: Box::pin(once(
pl.try_fold(BytesMut::new(), |mut acc, bytes| async {
acc.extend(bytes);
Ok(acc)
})
.map_ok(|bytes| {
let bytes = bytes.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes));
bytes
}),
)),
payload: Box::pin(streem::try_from_fn(|yielder| async move {
let mut buf = BytesMut::new();

while let Some(bytes) = pl.try_next().await? {
buf.extend(bytes);
}

let bytes = buf.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes));

yielder.yield_ok(bytes).await;

Ok(())
})),
});

let fut = self.1.call(req);

Box::pin(async move { fut.await })
self.1.call(req)
} else {
let fut = self.1.call(req);

Box::pin(async move { fut.await })
self.1.call(req)
}
}
}
src/middleware/timings.rs (new file, 143 lines)
@@ -0,0 +1,143 @@
use actix_web::{
body::MessageBody,
dev::{Service, ServiceRequest, ServiceResponse, Transform},
http::StatusCode,
};
use std::{
future::{ready, Future, Ready},
time::Instant,
};

pub(crate) struct Timings;
pub(crate) struct TimingsMiddleware<S>(S);

struct LogOnDrop {
begin: Instant,
path: String,
method: String,
arm: bool,
}

pin_project_lite::pin_project! {
pub(crate) struct TimingsFuture<F> {
#[pin]
future: F,

log_on_drop: Option<LogOnDrop>,
}
}

pin_project_lite::pin_project! {
pub(crate) struct TimingsBody<B> {
#[pin]
body: B,

log_on_drop: LogOnDrop,
}
}

impl Drop for LogOnDrop {
fn drop(&mut self) {
if self.arm {
let duration = self.begin.elapsed();
metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
}
}
}

impl<S, B> Transform<S, ServiceRequest> for Timings
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type InitError = ();
type Transform = TimingsMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;

fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(TimingsMiddleware(service)))
}
}

impl<S, B> Service<ServiceRequest> for TimingsMiddleware<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type Future = TimingsFuture<S::Future>;

fn poll_ready(
&self,
ctx: &mut core::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}

fn call(&self, req: ServiceRequest) -> Self::Future {
let log_on_drop = LogOnDrop {
begin: Instant::now(),
path: format!("{:?}", req.match_pattern()),
method: req.method().to_string(),
arm: false,
};

let future = self.0.call(req);

TimingsFuture {
future,
log_on_drop: Some(log_on_drop),
}
}
}

impl<F, B> Future for TimingsFuture<F>
where
F: Future<Output = Result<ServiceResponse<B>, actix_web::Error>>,
{
type Output = Result<ServiceResponse<TimingsBody<B>>, actix_web::Error>;

fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
let this = self.project();

let res = std::task::ready!(this.future.poll(cx));

let mut log_on_drop = this
.log_on_drop
.take()
.expect("TimingsFuture polled after completion");

let status = match &res {
Ok(res) => res.status(),
Err(e) => e.as_response_error().status_code(),
};

log_on_drop.arm =
status != StatusCode::NOT_FOUND && status != StatusCode::METHOD_NOT_ALLOWED;

let res = res.map(|r| r.map_body(|_, body| TimingsBody { body, log_on_drop }));

std::task::Poll::Ready(res)
}
}

impl<B: MessageBody> MessageBody for TimingsBody<B> {
type Error = B::Error;

fn size(&self) -> actix_web::body::BodySize {
self.body.size()
}

fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<actix_web::web::Bytes, Self::Error>>> {
self.project().body.poll_next(cx)
}
}
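The middleware above relies on a drop-guard: the timing histogram is recorded when the response body is dropped, not when the handler returns, so the measurement covers body streaming and every exit path. A minimal standalone sketch of the pattern (logging stands in for the metrics call):

use std::time::Instant;

struct LogOnDrop {
    begin: Instant,
    arm: bool, // only record if the request reached a real handler
}

impl Drop for LogOnDrop {
    fn drop(&mut self) {
        if self.arm {
            // Recording in Drop fires even on errors or client disconnects.
            println!("request took {:?}", self.begin.elapsed());
        }
    }
}

fn main() {
    let mut guard = LogOnDrop { begin: Instant::now(), arm: false };
    // ... handle the request ...
    guard.arm = true; // left false for 404/405 in the middleware above
} // guard drops here and logs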
src/middleware/verifier.rs
@@ -2,21 +2,20 @@ use crate::{
apub::AcceptedActors,
data::{ActorCache, State},
error::{Error, ErrorKind},
requests::Requests,
requests::{BreakerStrategy, Requests},
spawner::Spawner,
};
use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
use actix_web::web;
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm};
use rsa::{pkcs1v15::VerifyingKey, pkcs8::DecodePublicKey, RsaPublicKey};
use sha2::{Digest, Sha256};
use signature::{DigestVerifier, Signature};
use base64::{engine::general_purpose::STANDARD, Engine};
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
use std::{future::Future, pin::Pin};

#[derive(Clone, Debug)]
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State);
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner);

impl MyVerify {
#[tracing::instrument("Verify signature", skip(self))]
#[tracing::instrument("Verify request", skip(self, signature, signing_string))]
async fn verify(
&self,
algorithm: Option<Algorithm>,

@@ -26,6 +25,9 @@ impl MyVerify {
) -> Result<bool, Error> {
let public_key_id = iri!(key_id);

// receiving an activity from a domain indicates it is probably online
self.0.reset_breaker(&public_key_id);

let actor_id = if let Some(mut actor_id) = self
.2
.db

@@ -50,7 +52,13 @@ impl MyVerify {
None => (),
};

let res = do_verify(&actor.public_key, signature.clone(), signing_string.clone()).await;
let res = do_verify(
&self.3,
&actor.public_key,
signature.clone(),
signing_string.clone(),
)
.await;

if let Err(e) = res {
if !was_cached {

@@ -62,11 +70,21 @@ impl MyVerify {

actor_id
} else {
self.0
.fetch::<PublicKeyResponse>(public_key_id.as_str())
.await?
.actor_id()
.ok_or(ErrorKind::MissingId)?
match self
.0
.fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
.await
{
Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
Err(e) => {
if e.is_gone() {
tracing::warn!("Actor gone: {public_key_id}");
return Ok(false);
} else {
return Err(e);
}
}
}?
};

// Previously we verified the sig from an actor's local cache

@@ -74,7 +92,7 @@ impl MyVerify {
// Now we make sure we fetch an updated actor
let actor = self.1.get_no_cache(&actor_id, &self.0).await?;

do_verify(&actor.public_key, signature, signing_string).await?;
do_verify(&self.3, &actor.public_key, signature, signing_string).await?;

Ok(true)
}

@@ -103,24 +121,36 @@ impl PublicKeyResponse {
}
}

#[tracing::instrument("Verify signature")]
async fn do_verify(
spawner: &Spawner,
public_key: &str,
signature: String,
signing_string: String,
) -> Result<(), Error> {
let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
let public_key_der = public_key
.to_pkcs1_der()
.map_err(|_| ErrorKind::DerEncode)?;
let public_key = ring::signature::UnparsedPublicKey::new(
&ring::signature::RSA_PKCS1_2048_8192_SHA256,
public_key_der,
);

web::block(move || {
let decoded = base64::decode(signature)?;
let signature = Signature::from_bytes(&decoded)?;
let hashed = Sha256::new_with_prefix(signing_string.as_bytes());
let span = tracing::Span::current();
spawner
.spawn_blocking(move || {
span.in_scope(|| {
let decoded = STANDARD.decode(signature)?;

let verifying_key = VerifyingKey::new_with_prefix(public_key);
verifying_key.verify_digest(hashed, &signature)?;
public_key
.verify(signing_string.as_bytes(), decoded.as_slice())
.map_err(|_| ErrorKind::VerifySignature)?;

Ok(()) as Result<(), Error>
})
.await??;
Ok(()) as Result<(), Error>
})
})
.await??;

Ok(())
}

@@ -150,20 +180,20 @@ mod tests {
use crate::apub::AcceptedActors;
use rsa::{pkcs8::DecodePublicKey, RsaPublicKey};

|
|
|
|
|
|
|
|

#[test]
fn handles_masto_keys() {
println!("{}", ASONIX_DOG_KEY);
println!("{ASONIX_DOG_KEY}");
let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap();
}

#[test]
fn handles_pleromo_keys() {
println!("{}", KARJALAZET_KEY);
println!("{KARJALAZET_KEY}");
let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap();
}

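The verifier now converts the actor's PEM key to PKCS#1 DER and hands verification to ring instead of the rsa/sha2/signature stack. A condensed sketch of that flow, with error handling simplified relative to the diff:

use base64::{engine::general_purpose::STANDARD, Engine};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};

fn verify(pem: &str, signing_string: &str, signature_b64: &str) -> Result<(), Box<dyn std::error::Error>> {
    // PEM (SPKI) -> RsaPublicKey -> PKCS#1 DER, the encoding ring expects
    let der = RsaPublicKey::from_public_key_pem(pem.trim())?.to_pkcs1_der()?;
    let key = ring::signature::UnparsedPublicKey::new(
        &ring::signature::RSA_PKCS1_2048_8192_SHA256,
        der,
    );
    let sig = STANDARD.decode(signature_b64)?;
    key.verify(signing_string.as_bytes(), &sig)
        .map_err(|_| "bad signature")?;
    Ok(())
}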
src/middleware/webfinger.rs
@@ -1,10 +1,10 @@
use crate::{
config::{Config, UrlKind},
data::State,
future::LocalBoxFuture,
};
use actix_web::web::Data;
use actix_webfinger::{Resolver, Webfinger};
use futures_util::future::LocalBoxFuture;
use rsa_magic_public_key::AsMagicPublicKey;

pub(crate) struct RelayResolver;
src/requests.rs (461 lines changed)
@@ -1,30 +1,43 @@
use crate::error::{Error, ErrorKind};
use crate::{
data::LastOnline,
error::{Error, ErrorKind},
spawner::Spawner,
stream::{aggregate, limit_stream},
};
use activitystreams::iri_string::types::IriString;
use actix_web::{http::header::Date, web::Bytes};
use awc::Client;
use actix_web::http::header::Date;
use base64::{engine::general_purpose::STANDARD, Engine};
use dashmap::DashMap;
use http_signature_normalization_actix::prelude::*;
use rand::thread_rng;
use rsa::{pkcs1v15::SigningKey, RsaPrivateKey};
use sha2::{Digest, Sha256};
use signature::RandomizedSigner;
use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
use reqwest_middleware::ClientWithMiddleware;
use ring::{
rand::SystemRandom,
signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};
use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
use std::{
cell::RefCell,
rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
sync::Arc,
time::{Duration, SystemTime},
};
use tracing::{debug, info, warn};
use tracing_awc::Tracing;

const ONE_SECOND: u64 = 1;
const ONE_MINUTE: u64 = 60 * ONE_SECOND;
const ONE_HOUR: u64 = 60 * ONE_MINUTE;
const ONE_DAY: u64 = 24 * ONE_HOUR;

// 20 KB
const JSON_SIZE_LIMIT: usize = 20 * 1024;

#[derive(Debug)]
pub(crate) enum BreakerStrategy {
// Requires a successful response
Require2XX,
// Allows HTTP 2xx-401
Allow401AndBelow,
// Allows HTTP 2xx-404
Allow404AndBelow,
}

#[derive(Clone)]
pub(crate) struct Breakers {
inner: Arc<DashMap<String, Breaker>>,

@@ -37,7 +50,7 @@ impl std::fmt::Debug for Breakers {
}

impl Breakers {
fn should_try(&self, url: &IriString) -> bool {
pub(crate) fn should_try(&self, url: &IriString) -> bool {
if let Some(authority) = url.authority_str() {
if let Some(breaker) = self.inner.get(authority) {
breaker.should_try()

@@ -54,6 +67,9 @@ impl Breakers {
let should_write = {
if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.fail();
if !breaker.should_try() {
tracing::warn!("Failed breaker for {authority}");
}
false
} else {
true

@@ -102,17 +118,12 @@ struct Breaker {
}

impl Breaker {
const fn failure_threshold() -> usize {
10
}

fn failure_wait() -> Duration {
Duration::from_secs(ONE_DAY)
}
const FAILURE_WAIT: Duration = Duration::from_secs(ONE_DAY);
const FAILURE_THRESHOLD: usize = 10;

fn should_try(&self) -> bool {
self.failures < Self::failure_threshold()
|| self.last_attempt + Self::failure_wait() < SystemTime::now()
self.failures < Self::FAILURE_THRESHOLD
|| self.last_attempt + Self::FAILURE_WAIT < SystemTime::now()
}

fn fail(&mut self) {

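With the thresholds expressed as associated constants, the breaker semantics read directly from the code: a host is skipped after 10 consecutive failures, then retried again once a full day has passed since the last attempt. An illustrative standalone check of the same rule:

use std::time::{Duration, SystemTime};

const FAILURE_THRESHOLD: usize = 10;
const FAILURE_WAIT: Duration = Duration::from_secs(60 * 60 * 24);

fn should_try(failures: usize, last_attempt: SystemTime) -> bool {
    failures < FAILURE_THRESHOLD || last_attempt + FAILURE_WAIT < SystemTime::now()
}

fn main() {
    let now = SystemTime::now();
    assert!(should_try(3, now));   // under the threshold: keep trying
    assert!(!should_try(12, now)); // tripped, and attempted just now
    assert!(should_try(12, now - Duration::from_secs(2 * 60 * 60 * 24))); // cooled off
}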
@@ -141,22 +152,19 @@ impl Default for Breaker {

#[derive(Clone)]
pub(crate) struct Requests {
client: Rc<RefCell<Client>>,
consecutive_errors: Rc<AtomicUsize>,
error_limit: usize,
client: ClientWithMiddleware,
key_id: String,
user_agent: String,
private_key: RsaPrivateKey,
config: Config,
private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
config: Config<Spawner>,
breakers: Breakers,
last_online: Arc<LastOnline>,
}

impl std::fmt::Debug for Requests {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Requests")
.field("error_limit", &self.error_limit)
.field("key_id", &self.key_id)
.field("user_agent", &self.user_agent)
.field("config", &self.config)
.field("breakers", &self.breakers)
.finish()

@@ -164,196 +172,225 @@ impl std::fmt::Debug for Requests {
}

impl Requests {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
key_id: String,
private_key: RsaPrivateKey,
user_agent: String,
breakers: Breakers,
last_online: Arc<LastOnline>,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Self {
let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
.expect("Key is valid");
Requests {
client: Rc::new(RefCell::new(
Client::builder()
.wrap(Tracing)
.add_default_header(("User-Agent", user_agent.clone()))
.finish(),
)),
consecutive_errors: Rc::new(AtomicUsize::new(0)),
error_limit: 3,
client,
key_id,
user_agent,
private_key,
config: Config::default().mastodon_compat(),
private_key: Arc::new(private_key),
rng: SystemRandom::new(),
config: Config::new_with_spawner(spawner).mastodon_compat(),
breakers,
last_online,
}
}

fn count_err(&self) {
let count = self.consecutive_errors.fetch_add(1, Ordering::Relaxed);
if count + 1 >= self.error_limit {
warn!("{} consecutive errors, rebuilding http client", count);
*self.client.borrow_mut() = Client::builder()
.wrap(Tracing)
.add_default_header(("User-Agent", self.user_agent.clone()))
.finish();
self.reset_err();
}
pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
self.config = self.config.set_spawner(spawner);
self
}

fn reset_err(&self) {
self.consecutive_errors.swap(0, Ordering::Relaxed);
pub(crate) fn reset_breaker(&self, iri: &IriString) {
self.breakers.succeed(iri);
}

#[tracing::instrument(name = "Fetch Json", skip(self))]
pub(crate) async fn fetch_json<T>(&self, url: &str) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/json").await
}

#[tracing::instrument(name = "Fetch Activity+Json", skip(self))]
pub(crate) async fn fetch<T>(&self, url: &str) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/activity+json").await
}

async fn do_fetch<T>(&self, url: &str, accept: &str) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let parsed_url = url.parse::<IriString>()?;

if !self.breakers.should_try(&parsed_url) {
return Err(ErrorKind::Breaker.into());
}

let signer = self.signer();

let client: Client = self.client.borrow().clone();
let res = client
.get(url)
.insert_header(("Accept", accept))
.insert_header(Date(SystemTime::now().into()))
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| signer.sign(signing_string),
)
.await?
.send()
.await;

async fn check_response(
&self,
parsed_url: &IriString,
strategy: BreakerStrategy,
res: Result<reqwest::Response, reqwest_middleware::Error>,
) -> Result<reqwest::Response, Error> {
if res.is_err() {
self.count_err();
self.breakers.fail(&parsed_url);
}

let mut res = res.map_err(|e| ErrorKind::SendRequest(url.to_string(), e.to_string()))?;
let res = res?;

self.reset_err();
let status = res.status();

if !res.status().is_success() {
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
debug!("Response from {}, {}", url, s);
}
let success = match strategy {
BreakerStrategy::Require2XX => status.is_success(),
BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
};

if !success {
self.breakers.fail(&parsed_url);

if let Ok(s) = res.text().await {
if !s.is_empty() {
tracing::debug!("Response from {parsed_url}, {s}");
}
}

self.breakers.fail(&parsed_url);

return Err(ErrorKind::Status(url.to_string(), res.status()).into());
return Err(ErrorKind::Status(
parsed_url.to_string(),
crate::http1::status_to_http02(status),
)
.into());
}

self.breakers.succeed(&parsed_url);
// only actually succeed a breaker on 2xx response
if status.is_success() {
self.last_online.mark_seen(&parsed_url);
self.breakers.succeed(&parsed_url);
}

let body = res
.body()
Ok(res)
}

#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/json", strategy).await
}

#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json_msky<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_deliver(
url,
&serde_json::json!({}),
"application/json",
"application/json",
strategy,
)
.await?
.bytes_stream();

let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;

Ok(serde_json::from_slice(&body)?)
}

#[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/activity+json", strategy)
.await
.map_err(|e| ErrorKind::ReceiveResponse(url.to_string(), e.to_string()))?;

Ok(serde_json::from_slice(body.as_ref())?)
}

#[tracing::instrument(name = "Fetch Bytes", skip(self))]
pub(crate) async fn fetch_bytes(&self, url: &str) -> Result<(String, Bytes), Error> {
let parsed_url = url.parse::<IriString>()?;
async fn do_fetch<T>(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_fetch_response(url, accept, strategy)
.await?
.bytes_stream();

if !self.breakers.should_try(&parsed_url) {
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;

Ok(serde_json::from_slice(&body)?)
}

#[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
pub(crate) async fn fetch_response(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
self.do_fetch_response(url, "*/*", strategy).await
}

pub(crate) async fn do_fetch_response(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
if !self.breakers.should_try(url) {
return Err(ErrorKind::Breaker.into());
}

info!("Fetching bytes for {}", url);
let signer = self.signer();
let span = tracing::Span::current();

let client: Client = self.client.borrow().clone();
let res = client
.get(url)
.insert_header(("Accept", "*/*"))
.insert_header(Date(SystemTime::now().into()))
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| signer.sign(signing_string),
)
.await?
.send()
.await;
let request = self
.client
.get(url.as_str())
.header("Accept", accept)
.header("Date", Date(SystemTime::now().into()).to_string())
.signature(&self.config, self.key_id.clone(), move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
})
.await?;

if res.is_err() {
self.breakers.fail(&parsed_url);
self.count_err();
}
let res = self.client.execute(request).await;

let mut res = res.map_err(|e| ErrorKind::SendRequest(url.to_string(), e.to_string()))?;
let res = self.check_response(url, strategy, res).await?;

self.reset_err();

let content_type = if let Some(content_type) = res.headers().get("content-type") {
if let Ok(s) = content_type.to_str() {
s.to_owned()
} else {
return Err(ErrorKind::ContentType.into());
}
} else {
return Err(ErrorKind::ContentType.into());
};

if !res.status().is_success() {
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
debug!("Response from {}, {}", url, s);
}
}
}

self.breakers.fail(&parsed_url);

return Err(ErrorKind::Status(url.to_string(), res.status()).into());
}

self.breakers.succeed(&parsed_url);

let bytes = match res.body().limit(1024 * 1024 * 4).await {
Err(e) => {
return Err(ErrorKind::ReceiveResponse(url.to_string(), e.to_string()).into());
}
Ok(bytes) => bytes,
};

Ok((content_type, bytes))
Ok(res)
}

#[tracing::instrument(
"Deliver to Inbox",
skip_all,
fields(inbox = inbox.to_string().as_str(), item)
fields(inbox = inbox.to_string().as_str(), signing_string)
)]
pub(crate) async fn deliver<T>(&self, inbox: IriString, item: &T) -> Result<(), Error>
pub(crate) async fn deliver<T>(
&self,
inbox: &IriString,
item: &T,
strategy: BreakerStrategy,
) -> Result<(), Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
self.do_deliver(
inbox,
item,
"application/activity+json",
"application/activity+json",
strategy,
)
.await?;
Ok(())
}

async fn do_deliver<T>(
&self,
inbox: &IriString,
item: &T,
content_type: &str,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{

@@ -362,68 +399,60 @@ impl Requests {
}

let signer = self.signer();
let span = tracing::Span::current();
let item_string = serde_json::to_string(item)?;

let client: Client = self.client.borrow().clone();
let (req, body) = client
let request = self
.client
.post(inbox.as_str())
.insert_header(("Accept", "application/activity+json"))
.insert_header(("Content-Type", "application/activity+json"))
.insert_header(Date(SystemTime::now().into()))
.header("Accept", accept)
.header("Content-Type", content_type)
.header("Date", Date(SystemTime::now().into()).to_string())
.signature_with_digest(
self.config.clone(),
self.key_id.clone(),
Sha256::new(),
item_string,
move |signing_string| signer.sign(signing_string),
move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
},
)
.await?
.split();
.await?;

let res = req.send_body(body).await;
let res = self.client.execute(request).await;

if res.is_err() {
self.count_err();
self.breakers.fail(&inbox);
}
let res = self.check_response(inbox, strategy, res).await?;

let mut res = res.map_err(|e| ErrorKind::SendRequest(inbox.to_string(), e.to_string()))?;

self.reset_err();

if !res.status().is_success() {
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
debug!("Response from {}, {}", inbox.as_str(), s);
}
}
}

self.breakers.fail(&inbox);
return Err(ErrorKind::Status(inbox.to_string(), res.status()).into());
}

self.breakers.succeed(&inbox);

Ok(())
Ok(res)
}

fn signer(&self) -> Signer {
Signer {
private_key: self.private_key.clone(),
rng: self.rng.clone(),
}
}
}

struct Signer {
private_key: RsaPrivateKey,
private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
}

impl Signer {
fn sign(&self, signing_string: &str) -> Result<String, Error> {
let signing_key = SigningKey::<Sha256>::new_with_prefix(self.private_key.clone());
let signature = signing_key.try_sign_with_rng(thread_rng(), signing_string.as_bytes())?;
Ok(base64::encode(signature.as_ref()))
let mut signature = vec![0; self.private_key.public().modulus_len()];

self.private_key
.sign(
&RSA_PKCS1_SHA256,
&self.rng,
signing_string.as_bytes(),
&mut signature,
)
.map_err(|_| ErrorKind::SignRequest)?;

Ok(STANDARD.encode(&signature))
}
}

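Callers of Requests now pick how strict the circuit breaker should be per request. A hypothetical call site, assuming a `Requests` handle and a parsed IriString as used throughout this diff (names are illustrative, not from the source):

async fn fetch_actor(
    requests: &Requests,
    actor_iri: &IriString,
) -> Result<AcceptedActors, Error> {
    // Anything outside 2xx should count against this host's breaker.
    requests.fetch(actor_iri, BreakerStrategy::Require2XX).await
}

For endpoints where a 404 is an expected, non-fatal answer (probing for optional documents, for example), Allow404AndBelow keeps the breaker from tripping on routine misses.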
src/routes/mod.rs
@@ -1,4 +1,5 @@
mod actor;
mod healthz;
mod inbox;
mod index;
mod media;

@@ -7,6 +8,7 @@ mod statics;

pub(crate) use self::{
actor::route as actor,
healthz::route as healthz,
inbox::route as inbox,
index::route as index,
media::route as media,
src/routes/healthz.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
use crate::{data::State, error::Error};
use actix_web::{web, HttpResponse};

pub(crate) async fn route(state: web::Data<State>) -> Result<HttpResponse, Error> {
state.db.check_health().await?;
Ok(HttpResponse::Ok().finish())
}
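A sketch of exercising an endpoint like this with actix-web's test harness. A stub handler stands in for the real route, since the real one needs database state; everything here is illustrative rather than taken from the repo's tests:

#[actix_web::test]
async fn healthz_returns_200() {
    let app = actix_web::test::init_service(
        actix_web::App::new().route(
            "/healthz",
            actix_web::web::get().to(|| async { actix_web::HttpResponse::Ok().finish() }),
        ),
    )
    .await;

    let req = actix_web::test::TestRequest::get().uri("/healthz").to_request();
    let res = actix_web::test::call_service(&app, req).await;
    assert!(res.status().is_success());
}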
src/routes/inbox.rs
@@ -16,7 +16,8 @@ use activitystreams::{
 use actix_web::{web, HttpResponse};
 use http_signature_normalization_actix::prelude::{DigestVerified, SignatureVerified};
 
-#[tracing::instrument(name = "Inbox", skip(actors, client, jobs, config, state))]
+#[tracing::instrument(name = "Inbox", skip_all, fields(id = tracing::field::debug(&input.id_unchecked()), kind = tracing::field::debug(&input.kind())))]
+#[allow(clippy::too_many_arguments)]
 pub(crate) async fn route(
     state: web::Data<State>,
     actors: web::Data<ActorCache>,
@@ -24,22 +25,48 @@ pub(crate) async fn route(
     client: web::Data<Requests>,
     jobs: web::Data<JobServer>,
     input: web::Json<AcceptedActivities>,
-    verified: Option<(SignatureVerified, DigestVerified)>,
+    digest_verified: Option<DigestVerified>,
+    signature_verified: Option<SignatureVerified>,
 ) -> Result<HttpResponse, Error> {
     let input = input.into_inner();
 
-    let actor = actors
-        .get(
-            input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?,
-            &client,
-        )
-        .await?
-        .into_inner();
+    let kind = input.kind().ok_or(ErrorKind::MissingKind)?;
 
-    let is_allowed = state.db.is_allowed(actor.id.clone());
-    let is_connected = state.db.is_connected(actor.id.clone());
+    if digest_verified.is_some() && signature_verified.is_none() && *kind == ValidTypes::Delete {
+        return Ok(accepted(serde_json::json!({})));
+    } else if config.validate_signatures()
+        && (digest_verified.is_none() || signature_verified.is_none())
+    {
+        return Err(ErrorKind::NoSignature(None).into());
+    }
 
-    let (is_allowed, is_connected) = tokio::try_join!(is_allowed, is_connected)?;
+    let actor_id = if input.id_unchecked().is_some() {
+        input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?
+    } else {
+        input
+            .actor_unchecked()
+            .as_single_id()
+            .ok_or(ErrorKind::MissingId)?
+    };
+
+    let actor = actors.get(actor_id, &client).await?.into_inner();
+
+    if let Some(verified) = signature_verified {
+        if actor.public_key_id.as_str() != verified.key_id() {
+            tracing::error!("Actor signed with wrong key");
+            return Err(ErrorKind::BadActor(
+                actor.public_key_id.to_string(),
+                verified.key_id().to_owned(),
+            )
+            .into());
+        }
+    } else if config.validate_signatures() {
+        tracing::error!("This case should never be reachable, since I handle signature checks earlier in the flow. If you see this in a log it means I did it wrong");
+        return Err(ErrorKind::NoSignature(Some(actor.public_key_id.to_string())).into());
+    }
+
+    let is_allowed = state.db.is_allowed(actor.id.clone()).await?;
+    let is_connected = state.db.is_connected(actor.id.clone()).await?;
 
     if !is_allowed {
         return Err(ErrorKind::NotAllowed(actor.id.to_string()).into());
@@ -49,29 +76,16 @@
         return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
     }
 
-    if config.validate_signatures() && verified.is_none() {
-        return Err(ErrorKind::NoSignature(actor.public_key_id.to_string()).into());
-    } else if config.validate_signatures() {
-        if let Some((verified, _)) = verified {
-            if actor.public_key_id.as_str() != verified.key_id() {
-                tracing::error!("Bad actor, more info: {:?}", input);
-                return Err(ErrorKind::BadActor(
-                    actor.public_key_id.to_string(),
-                    verified.key_id().to_owned(),
-                )
-                .into());
-            }
-        }
-    }
-
-    match input.kind().ok_or(ErrorKind::MissingKind)? {
+    match kind {
         ValidTypes::Accept => handle_accept(&config, input).await?,
         ValidTypes::Reject => handle_reject(&config, &jobs, input, actor).await?,
         ValidTypes::Announce | ValidTypes::Create => {
            handle_announce(&state, &jobs, input, actor).await?
        }
        ValidTypes::Follow => handle_follow(&config, &jobs, input, actor).await?,
-        ValidTypes::Delete | ValidTypes::Update => handle_forward(&jobs, input, actor).await?,
+        ValidTypes::Add | ValidTypes::Delete | ValidTypes::Remove | ValidTypes::Update => {
+            handle_forward(&jobs, input, actor).await?
+        }
        ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_connected).await?,
    };
 
@@ -203,7 +217,7 @@ async fn handle_announce(
         .as_single_id()
         .ok_or(ErrorKind::MissingId)?;
 
-    if state.is_cached(object_id).await {
+    if state.is_cached(object_id) {
         return Err(ErrorKind::Duplicate.into());
     }
 
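Worth spelling out: the rewritten flow accepts (with an empty 202) a Delete that carries a valid digest but no verifiable signature, likely because a deleted actor's key can no longer be fetched to verify against; everything else unsigned is rejected when signature validation is on. An illustrative restatement of that admission policy (the names here are made up, not the relay's API):

// Illustrative-only distillation of the checks at the top of the inbox route.
enum Admission {
    AcceptAndDrop, // respond 202 without processing (deleted actors)
    Reject,        // missing digest or signature while validation is on
    Process,       // hand off to the matching handler
}

fn admit(digest_ok: bool, signature_ok: bool, is_delete: bool, validate: bool) -> Admission {
    if digest_ok && !signature_ok && is_delete {
        Admission::AcceptAndDrop
    } else if validate && (!digest_ok || !signature_ok) {
        Admission::Reject
    } else {
        Admission::Process
    }
}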
src/routes/index.rs
@@ -1,26 +1,91 @@
 use crate::{
     config::Config,
-    data::State,
+    data::{Node, State},
     error::{Error, ErrorKind},
 };
 use actix_web::{web, HttpResponse};
 use rand::{seq::SliceRandom, thread_rng};
 use std::io::BufWriter;
 
+const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
+    do_not_minify_doctype: true,
+    ensure_spec_compliant_unquoted_attribute_values: true,
+    keep_closing_tags: true,
+    keep_html_and_head_opening_tags: false,
+    keep_spaces_between_attributes: true,
+    keep_comments: false,
+    keep_input_type_text_attr: true,
+    keep_ssi_comments: false,
+    preserve_brace_template_syntax: false,
+    preserve_chevron_percent_template_syntax: false,
+    minify_css: true,
+    minify_js: true,
+    remove_bangs: true,
+    remove_processing_instructions: true,
+};
+
+fn open_reg(node: &Node) -> bool {
+    node.instance
+        .as_ref()
+        .map(|i| i.reg)
+        .or_else(|| node.info.as_ref().map(|i| i.reg))
+        .unwrap_or(false)
+}
+
 #[tracing::instrument(name = "Index", skip(config, state))]
 pub(crate) async fn route(
     state: web::Data<State>,
     config: web::Data<Config>,
 ) -> Result<HttpResponse, Error> {
-    let mut nodes = state.node_cache().nodes().await?;
-    nodes.shuffle(&mut thread_rng());
+    let all_nodes = state.node_cache.nodes().await?;
+
+    let mut nodes = Vec::new();
+    let mut local = Vec::new();
+
+    for node in all_nodes {
+        if !state.is_connected(&node.base) {
+            continue;
+        }
+
+        if node
+            .base
+            .authority_str()
+            .map(|authority| {
+                config
+                    .local_domains()
+                    .iter()
+                    .any(|domain| domain.as_str() == authority)
+            })
+            .unwrap_or(false)
+        {
+            local.push(node);
+        } else {
+            nodes.push(node);
+        }
+    }
+
+    nodes.sort_by(|lhs, rhs| match (open_reg(lhs), open_reg(rhs)) {
+        (true, true) | (false, false) => std::cmp::Ordering::Equal,
+        (true, false) => std::cmp::Ordering::Less,
+        (false, true) => std::cmp::Ordering::Greater,
+    });
+
+    if let Some((i, _)) = nodes.iter().enumerate().find(|(_, node)| !open_reg(node)) {
+        nodes[..i].shuffle(&mut thread_rng());
+        nodes[i..].shuffle(&mut thread_rng());
+    } else {
+        nodes.shuffle(&mut thread_rng());
+    }
 
     let mut buf = BufWriter::new(Vec::new());
 
-    crate::templates::index(&mut buf, &nodes, &config)?;
-    let buf = buf.into_inner().map_err(|e| {
+    crate::templates::index_html(&mut buf, &local, &nodes, &config)?;
+    let html = buf.into_inner().map_err(|e| {
         tracing::error!("Error rendering template, {}", e.error());
         ErrorKind::FlushBuffer
     })?;
 
-    Ok(HttpResponse::Ok().content_type("text/html").body(buf))
+    let html = minify_html::minify(&html, &MINIFY_CONFIG);
+
+    Ok(HttpResponse::Ok().content_type("text/html").body(html))
 }
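The listing order here is doing two jobs at once: a stable sort floats open-registration instances to the top, then each partition is shuffled separately so the page stays fair within the two groups. A condensed sketch of the same pattern, generic over any predicate:

use rand::{seq::SliceRandom, thread_rng};

// Sort-then-shuffle-partitions, as the index route does with open_reg above.
fn order_with_partitions<T>(items: &mut [T], open: impl Fn(&T) -> bool) {
    // stable sort: items matching `open` first, ties keep their relative order
    items.sort_by_key(|item| !open(item));

    // shuffle each partition independently so the split point is preserved
    let split = items.iter().position(|item| !open(item)).unwrap_or(items.len());
    let (front, back) = items.split_at_mut(split);
    front.shuffle(&mut thread_rng());
    back.shuffle(&mut thread_rng());
}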
src/routes/media.rs
@@ -1,10 +1,15 @@
-use crate::{data::MediaCache, error::Error, requests::Requests};
-use actix_web::{
-    http::header::{CacheControl, CacheDirective},
-    web, HttpResponse,
+use crate::{
+    data::MediaCache,
+    error::Error,
+    requests::{BreakerStrategy, Requests},
+    stream::limit_stream,
 };
+use actix_web::{body::BodyStream, web, HttpResponse};
 use uuid::Uuid;
 
+// 16 MB
+const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
+
 #[tracing::instrument(name = "Media", skip(media, requests))]
 pub(crate) async fn route(
     media: web::Data<MediaCache>,
@@ -13,30 +18,25 @@ pub(crate) async fn route(
 ) -> Result<HttpResponse, Error> {
     let uuid = uuid.into_inner();
 
-    if let Some((content_type, bytes)) = media.get_bytes(uuid).await? {
-        return Ok(cached(content_type, bytes));
-    }
-
     if let Some(url) = media.get_url(uuid).await? {
-        let (content_type, bytes) = requests.fetch_bytes(url.as_str()).await?;
-
-        media
-            .store_bytes(uuid, content_type.clone(), bytes.clone())
+        let res = requests
+            .fetch_response(&url, BreakerStrategy::Allow404AndBelow)
             .await?;
 
-        return Ok(cached(content_type, bytes));
+        let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));
+
+        for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
+            response.insert_header((
+                crate::http1::name_to_http02(name),
+                crate::http1::value_to_http02(value),
+            ));
+        }
+
+        return Ok(response.body(BodyStream::new(limit_stream(
+            res.bytes_stream(),
+            IMAGE_SIZE_LIMIT,
+        ))));
     }
 
     Ok(HttpResponse::NotFound().finish())
 }
-
-fn cached(content_type: String, bytes: web::Bytes) -> HttpResponse {
-    HttpResponse::Ok()
-        .insert_header(CacheControl(vec![
-            CacheDirective::Public,
-            CacheDirective::MaxAge(60 * 60 * 24),
-            CacheDirective::Extension("immutable".to_owned(), None),
-        ]))
-        .content_type(content_type)
-        .body(bytes)
-}
src/routes/nodeinfo.rs
@@ -24,18 +24,18 @@ struct Links {
     links: Vec<Link>,
 }
 
-#[tracing::instrument(name = "NodeInfo")]
+#[tracing::instrument(name = "NodeInfo", skip_all)]
 pub(crate) async fn route(
     config: web::Data<Config>,
     state: web::Data<State>,
 ) -> web::Json<NodeInfo> {
-    let (inboxes, blocks) = tokio::join!(state.db.inboxes(), async {
-        if config.publish_blocks() {
-            Some(state.db.blocks().await.unwrap_or_default())
-        } else {
-            None
-        }
-    });
+    let inboxes = state.db.inboxes().await;
+
+    let blocks = if config.publish_blocks() {
+        Some(state.db.blocks().await.unwrap_or_default())
+    } else {
+        None
+    };
 
     let peers = inboxes
         .unwrap_or_default()
@@ -44,6 +44,8 @@ pub(crate) async fn route(
         .map(|s| s.to_owned())
         .collect();
 
+    let open_registrations = !config.restricted_mode();
+
     web::Json(NodeInfo {
         version: NodeInfoVersion,
         software: Software {
@@ -55,7 +57,7 @@ pub(crate) async fn route(
             inbound: vec![],
             outbound: vec![],
         },
-        open_registrations: false,
+        open_registrations,
         usage: Usage {
             users: Users {
                 total: 1,
src/routes/statics.rs
@@ -5,7 +5,7 @@ use actix_web::{
 };
 
 #[allow(clippy::async_yields_async)]
-#[tracing::instrument(name = "Statistics")]
+#[tracing::instrument(name = "Statics")]
 pub(crate) async fn route(filename: web::Path<String>) -> HttpResponse {
     if let Some(data) = StaticFile::get(&filename.into_inner()) {
         HttpResponse::Ok()
src/spawner.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
+use async_cpupool::CpuPool;
+use http_signature_normalization_actix::{Canceled, Spawn};
+use std::time::Duration;
+
+#[derive(Clone)]
+pub(crate) struct Spawner {
+    pool: CpuPool,
+}
+
+impl Spawner {
+    pub(crate) fn build(name: &'static str, threads: u16) -> color_eyre::Result<Self> {
+        let pool = CpuPool::configure()
+            .name(name)
+            .max_threads(threads)
+            .build()?;
+
+        Ok(Spawner { pool })
+    }
+
+    pub(crate) async fn close(self) {
+        self.pool.close().await;
+    }
+}
+
+impl std::fmt::Debug for Spawner {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("Spawner").finish()
+    }
+}
+
+async fn timer<Fut>(fut: Fut) -> Fut::Output
+where
+    Fut: std::future::Future,
+{
+    let id = uuid::Uuid::new_v4();
+
+    metrics::counter!("relay.spawner.wait-timer.start").increment(1);
+
+    let mut interval = tokio::time::interval(Duration::from_secs(5));
+
+    // pass the first tick (instant)
+    interval.tick().await;
+
+    let mut fut = std::pin::pin!(fut);
+
+    let mut counter = 0;
+    loop {
+        tokio::select! {
+            out = &mut fut => {
+                metrics::counter!("relay.spawner.wait-timer.end").increment(1);
+                return out;
+            }
+            _ = interval.tick() => {
+                counter += 1;
+                metrics::counter!("relay.spawner.wait-timer.pending").increment(1);
+                tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5);
+            }
+        }
+    }
+}
+
+impl Spawn for Spawner {
+    type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, Canceled>>>>;
+
+    fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
+    where
+        Func: FnOnce() -> Out + Send + 'static,
+        Out: Send + 'static,
+    {
+        let pool = self.pool.clone();
+
+        Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) })
+    }
+}
+
+impl http_signature_normalization_reqwest::Spawn for Spawner {
+    type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, http_signature_normalization_reqwest::Canceled>> + Send>> where T: Send;
+
+    fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
+    where
+        Func: FnOnce() -> Out + Send + 'static,
+        Out: Send + 'static,
+    {
+        let pool = self.pool.clone();
+
+        Box::pin(async move {
+            timer(pool.spawn(func))
+                .await
+                .map_err(|_| http_signature_normalization_reqwest::Canceled)
+        })
+    }
+}
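The point of the wrapper: signature math is CPU-bound, so both Spawn impls push closures onto the dedicated pool instead of stalling async executor threads, and timer() surfaces metrics plus a warning every five seconds while a closure is still pending. A hedged usage sketch (pool name and thread count invented for illustration):

// Hypothetical call site, using the Spawner defined above.
async fn example() -> color_eyre::Result<()> {
    let spawner = Spawner::build("signature-threads", 4)?;

    let answer = http_signature_normalization_actix::Spawn::spawn_blocking(&spawner, || {
        2 + 2 // stand-in for an expensive RSA verification
    })
    .await
    .map_err(|_| color_eyre::eyre::eyre!("pool already closed"))?;

    assert_eq!(answer, 4);

    spawner.close().await;
    Ok(())
}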
src/stream.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
+use crate::error::{Error, ErrorKind};
+use actix_web::web::{Bytes, BytesMut};
+use futures_core::Stream;
+use streem::IntoStreamer;
+
+pub(crate) fn limit_stream<'a, S>(
+    input: S,
+    limit: usize,
+) -> impl Stream<Item = Result<Bytes, Error>> + Send + 'a
+where
+    S: Stream<Item = reqwest::Result<Bytes>> + Send + 'a,
+{
+    streem::try_from_fn(move |yielder| async move {
+        let stream = std::pin::pin!(input);
+        let mut stream = stream.into_streamer();
+
+        let mut count = 0;
+
+        while let Some(bytes) = stream.try_next().await? {
+            count += bytes.len();
+
+            if count > limit {
+                return Err(ErrorKind::BodyTooLarge.into());
+            }
+
+            yielder.yield_ok(bytes).await;
+        }
+
+        Ok(())
+    })
+}
+
+pub(crate) async fn aggregate<S>(input: S) -> Result<Bytes, Error>
+where
+    S: Stream<Item = Result<Bytes, Error>>,
+{
+    let stream = std::pin::pin!(input);
+    let mut streamer = stream.into_streamer();
+
+    let mut buf = Vec::new();
+
+    while let Some(bytes) = streamer.try_next().await? {
+        buf.push(bytes);
+    }
+
+    if buf.len() == 1 {
+        return Ok(buf.pop().expect("buf has exactly one element"));
+    }
+
+    let total_size: usize = buf.iter().map(|b| b.len()).sum();
+
+    let mut bytes_mut = BytesMut::with_capacity(total_size);
+
+    for bytes in &buf {
+        bytes_mut.extend_from_slice(&bytes);
+    }
+
+    Ok(bytes_mut.freeze())
+}
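The two helpers are meant to compose: limit_stream turns an oversized body into a BodyTooLarge error mid-stream, and aggregate collects whatever survives, skipping the copy when the body arrived as a single chunk. A hedged sketch of fetching a capped body (the limit value is illustrative):

// Hypothetical composition of limit_stream and aggregate from above.
async fn capped_body(res: reqwest::Response) -> Result<actix_web::web::Bytes, Error> {
    const LIMIT: usize = 1024 * 1024; // 1 MB, illustrative

    // fails with ErrorKind::BodyTooLarge as soon as LIMIT is exceeded
    aggregate(limit_stream(res.bytes_stream(), LIMIT)).await
}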
src/telegram.rs
@@ -31,13 +31,22 @@ enum Command {
 
     #[command(description = "Disallow a domain to connect to the relay (for RESTRICTED_MODE)")]
     Disallow { domain: String },
+
+    #[command(description = "List blocked domains")]
+    ListBlocks,
+
+    #[command(description = "List allowed domains")]
+    ListAllowed,
+
+    #[command(description = "List connected domains")]
+    ListConnected,
 }
 
 pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
     let bot = Bot::new(token);
     let admin_handle = Arc::new(admin_handle);
 
-    actix_rt::spawn(async move {
+    tokio::spawn(async move {
         let command_handler = teloxide::filter_command::<Command, _>().endpoint(
             move |bot: Bot, msg: Message, cmd: Command| {
                 let admin_handle = admin_handle.clone();
@@ -66,7 +75,8 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
 
 fn is_admin(admin_handle: &str, message: &Message) -> bool {
     message
-        .from()
+        .from
+        .as_ref()
         .and_then(|user| user.username.as_deref())
         .map(|username| username == admin_handle)
         .unwrap_or(false)
@@ -79,29 +89,40 @@ async fn answer(bot: Bot, msg: Message, cmd: Command, db: Db) -> ResponseResult<
             bot.send_message(msg.chat.id, Command::descriptions().to_string())
                 .await?;
         }
-        Command::Block { domain } => {
-            if db.add_blocks(vec![domain.clone()]).await.is_ok() {
-                bot.send_message(msg.chat.id, format!("{} has been blocked", domain))
-                    .await?;
-            }
-        }
-        Command::Unblock { domain } => {
-            if db.remove_blocks(vec![domain.clone()]).await.is_ok() {
-                bot.send_message(msg.chat.id, format!("{} has been unblocked", domain))
-                    .await?;
-            }
-        }
-        Command::Allow { domain } => {
-            if db.add_allows(vec![domain.clone()]).await.is_ok() {
-                bot.send_message(msg.chat.id, format!("{} has been allowed", domain))
-                    .await?;
-            }
-        }
-        Command::Disallow { domain } => {
-            if db.remove_allows(vec![domain.clone()]).await.is_ok() {
-                bot.send_message(msg.chat.id, format!("{} has been disallowed", domain))
-                    .await?;
-            }
-        }
+        Command::Block { domain } if db.add_blocks(vec![domain.clone()]).await.is_ok() => {
+            bot.send_message(msg.chat.id, format!("{domain} has been blocked"))
+                .await?;
+        }
+        Command::Unblock { domain } if db.remove_blocks(vec![domain.clone()]).await.is_ok() => {
+            bot.send_message(msg.chat.id, format!("{domain} has been unblocked"))
+                .await?;
+        }
+        Command::Allow { domain } if db.add_allows(vec![domain.clone()]).await.is_ok() => {
+            bot.send_message(msg.chat.id, format!("{domain} has been allowed"))
+                .await?;
+        }
+        Command::Disallow { domain } if db.remove_allows(vec![domain.clone()]).await.is_ok() => {
+            bot.send_message(msg.chat.id, format!("{domain} has been disallowed"))
+                .await?;
+        }
+        Command::ListAllowed => {
+            if let Ok(allowed) = db.allows().await {
+                bot.send_message(msg.chat.id, allowed.join("\n")).await?;
+            }
+        }
+        Command::ListBlocks => {
+            if let Ok(blocks) = db.blocks().await {
+                bot.send_message(msg.chat.id, blocks.join("\n")).await?;
+            }
+        }
+        Command::ListConnected => {
+            if let Ok(connected) = db.connected_ids().await {
+                bot.send_message(msg.chat.id, connected.join("\n")).await?;
+            }
+        }
+        _ => {
+            bot.send_message(msg.chat.id, "Internal server error")
+                .await?;
+        }
     }
 
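Note that start() is fire-and-forget: it spawns the teloxide dispatcher onto the runtime and returns immediately, so the relay boots whether or not the bot connects. A hedged sketch of the call site (the config accessor shown is an assumption):

// Hypothetical wiring in main, assuming config exposes the telegram settings.
if let Some((token, admin_handle)) = config.telegram_info() {
    telegram::start(admin_handle.to_owned(), db.clone(), token);
}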
systemd/example-relay.service (new file, 15 lines)
@@ -0,0 +1,15 @@
+[Unit]
+Description=Activitypub Relay
+Documentation=https://git.asonix.dog/asonix/relay
+Wants=network.target
+After=network.target
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/systemd/system/example-relay.service.env
+ExecStart=/path/to/relay
+Restart=always
+
systemd/example-relay.service.env (new file, 19 lines)
@@ -0,0 +1,19 @@
+HOSTNAME='relay.example.com'
+ADDR='0.0.0.0'
+PORT='8080'
+RESTRICTED_MODE='true'
+VALIDATE_SIGNATURES='true'
+HTTPS='true'
+PRETTY_LOG='false'
+PUBLISH_BLOCKS='true'
+DEBUG='false'
+SLED_PATH='/opt/sled'
+TELEGRAM_ADMIN_HANDLE='myhandle'
+RUST_BACKTRACE='full'
+FOOTER_BLURB='Contact <a href="https://masto.example.com/@example">@example</a> for inquiries.'
+LOCAL_DOMAINS='masto.example.com'
+LOCAL_BLURB='<p>An ActivityPub relay for servers. Currently running somewhere. Let me know if you want to join!</p>'
+OPENTELEMETRY_URL='http://otel.example.com:4317'
+API_TOKEN='blahblahblahblahblahblahblah'
+TELEGRAM_TOKEN='blahblahblahblahblahblahblah'
+
systemd/example-relay.socket (new file, 11 lines)
@@ -0,0 +1,11 @@
+[Unit]
+Description=Activitypub Relay Socket
+Before=multi-user.target
+After=network.target
+
+[Socket]
+Service=example-relay.service
+ListenStream=8080
+
+[Install]
+WantedBy=sockets.target
templates/admin.rs.html
@@ -4,15 +4,15 @@
 @(contact: &Contact, base: &IriString)
 
 <div class="admin">
-    <div class="left">
-        <figure class="avatar">
-            <img src="@contact.avatar" alt="@contact.display_name's avatar">
-        </figure>
-    </div>
-    <div class="right">
-        <p class="display-name"><a href="@contact.url">@contact.display_name</a></p>
-        <p class="username">
-            @@@contact.username@if let Some(authority) = base.authority_str() {@@@authority}
-        </p>
-    </div>
+    <div class="left">
+        <figure class="avatar">
+            <img loading="lazy" src="@contact.avatar" alt="@contact.display_name's avatar">
+        </figure>
+    </div>
+    <div class="right">
+        <p class="display-name"><a href="@contact.url">@contact.display_name</a></p>
+        <p class="username">
+            @@@contact.username@if let Some(authority) = base.authority_str() {@@@authority}
+        </p>
+    </div>
 </div>
templates/index.rs.html
@@ -1,10 +1,10 @@
 @use crate::{
 config::{Config, UrlKind},
 data::Node,
-templates::{info, instance, statics::index_css},
+templates::{info_html, instance_html, statics::index_css},
 };
 
-@(nodes: &[Node], config: &Config)
+@(local: &[Node], nodes: &[Node], config: &Config)
 
 <!doctype html>
 <html>
@@ -24,31 +24,39 @@ templates::{info, instance, statics::index_css},
         </div>
     </header>
     <main>
-        <section>
-            <h3>Connected Servers</h3>
-            @if nodes.is_empty() {
-                <p>There are no connected servers at this time.</p>
-            } else {
+        @if !local.is_empty() || config.local_blurb().is_some() {
+            <article>
+                <h3>About</h3>
+                <section class="local-explainer">
+                    @if let Some(blurb) = config.local_blurb() {
+                        @blurb
+                    } else {
+                        <p>These domains are run by the same admins as this relay.</p>
+                    }
+                </section>
+                @if !local.is_empty() {
                 <ul>
-                    @for node in nodes {
+                    @for node in local {
                         @if let Some(inst) = node.instance.as_ref() {
                             <li>
-                                @:instance(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(), &node.base)
+                                @:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
+                                &node.base)
                             </li>
                         } else {
                             @if let Some(inf) = node.info.as_ref() {
                                 <li>
-                                    @:info(inf, &node.base)
+                                    @:info_html(inf, &node.base)
                                 </li>
                             }
                         }
                     }
                 </ul>
-            }
-        </section>
-        <section>
+                }
+            </article>
+        }
+        <article>
             <h3>Joining</h3>
-            <article class="joining">
+            <section class="joining">
                 @if config.restricted_mode() {
                     <h4>
                         This relay is Restricted
@@ -71,19 +79,41 @@ templates::{info, instance, statics::index_css},
                 <h4>Pleroma</h4>
                 <p>
                     Pleroma admins can add this relay by adding
-                    <pre>@config.generate_url(UrlKind::Actor)</pre>
-                    to their relay settings (I don't actually know how pleroma handles adding
-                    relays, is it still a mix command?).
+                    <pre>@config.generate_url(UrlKind::Actor)</pre> to their relay settings.
                 </p>
                 <h4>Others</h4>
                 <p>
                     Consult the documentation for your server. It's likely that it follows either
                     Mastodon or Pleroma's relay formatting.
                 </p>
-            </article>
-        </section>
+            </section>
+        </article>
+        @if !nodes.is_empty() {
+            <article>
+                <h3>@nodes.len() Connected Servers</h3>
+                <ul>
+                    @for node in nodes {
+                        @if let Some(inst) = node.instance.as_ref() {
+                            <li>
+                                @:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
+                                &node.base)
+                            </li>
+                        } else {
+                            @if let Some(inf) = node.info.as_ref() {
+                                <li>
+                                    @:info_html(inf, &node.base)
+                                </li>
+                            }
+                        }
+                    }
+                </ul>
+            </article>
+        }
     </main>
     <footer>
+        @if let Some(blurb) = config.footer_blurb() {
+            <div>@blurb</div>
+        }
         <p>
             The source code for this project can be found at
             <a href="@config.source_code()">@config.source_code()</a>
templates/info.rs.html
@@ -3,14 +3,14 @@
 
 @(info: &Info, base: &IriString)
 
-<article class="info">
-    @if let Some(authority) = base.authority_str() {
-        <h4 class="padded"><a href="@base">@authority</a></h4>
+<section class="info">
+    @if let Some(authority) = base.authority_str() {
+        <h4 class="padded"><a href="@base">@authority</a></h4>
     }
-    <p class="padded">
-        Running @info.software, version @info.version.
-        @if info.reg {
-            Registration is open
-        }
-    </p>
-</article>
+    <p class="padded">
+        Running @info.software, version @info.version.
+        @if info.reg {
+            Registration is open
+        }
+    </p>
+</section>
templates/instance.rs.html
@@ -1,37 +1,37 @@
-@use crate::{db::{Contact, Instance}, templates::admin};
+@use crate::{db::{Contact, Instance}, templates::admin_html};
 @use activitystreams::iri_string::types::IriString;
 
 @(instance: &Instance, software: Option<&str>, contact: Option<&Contact>, base: &IriString)
 
-<article class="instance">
-    <h4 class="padded"><a href="@base">@instance.title</a></h4>
-    <p class="padded">
+<section class="instance">
+    <h4 class="padded"><a href="@base">@instance.title</a></h4>
+    <p class="padded">
         @if let Some(software) = software {
-            Running @software, version @instance.version.
+            Running @software, version @instance.version.
         }
         @if instance.reg {
-            <br>Registration is open.
-            @if instance.requires_approval {
-                Accounts must be approved by an admin.
-            }
-        } else{
-            Registration is closed
+            <br>Registration is open.
+            @if instance.requires_approval {
+                Accounts must be approved by an admin.
+            }
+        } else{
+            Registration is closed
         }
-    </p>
-    @if !instance.description.trim().is_empty() || contact.is_some() {
-        <div class="instance-info">
-            @if !instance.description.trim().is_empty() {
-                <h5 class="instance-description">@instance.title's description:</h5>
-                <div class="description">
-                    <div class="please-stay">
-                        @Html(instance.description.trim())
-                    </div>
-                </div>
-            }
-            @if let Some(contact) = contact {
-                <h5 class="instance-admin">@instance.title's admin:</h5>
-                @:admin(contact, base)
-            }
-        </div>
-    }
-</article>
+    </p>
+    @if !instance.description.trim().is_empty() || contact.is_some() {
+        <div class="instance-info">
+            @if !instance.description.trim().is_empty() {
+                <h5 class="instance-description">@instance.title's description:</h5>
+                <div class="description">
+                    <div class="please-stay">
+                        @Html(instance.description.trim())
+                    </div>
+                </div>
+            }
+            @if let Some(contact) = contact {
+                <h5 class="instance-admin">@instance.title's admin:</h5>
+                @:admin_html(contact, base)
+            }
+        </div>
+    }
+</section>