Add back activity send queue as optional feature (#94)

* Add back activity send queue as optional feature

* fix port collision in tests

* improve docs

* serialize fn

* deduplicate

* more dedup

* more dedup

* dedupupup

* test cleanup

* remove fn
Nutomic 2024-03-04 14:53:33 +01:00 committed by GitHub
parent a859db05bb
commit 636b47c8b2
GPG key ID: B5690EEEBB952194
9 changed files with 687 additions and 92 deletions


@@ -4,6 +4,46 @@ To send an activity we need to initialize our previously defined struct, and pic
```
# use activitypub_federation::config::FederationConfig;
# use activitypub_federation::activity_queue::queue_activity;
# use activitypub_federation::http_signatures::generate_actor_keypair;
# use activitypub_federation::traits::Actor;
# use activitypub_federation::fetch::object_id::ObjectId;
# use activitypub_federation::traits::tests::{DB_USER, DbConnection, Follow};
# tokio::runtime::Runtime::new().unwrap().block_on(async {
# let db_connection = DbConnection;
# let config = FederationConfig::builder()
# .domain("example.com")
# .app_data(db_connection)
# .build().await?;
# let data = config.to_request_data();
# let sender = DB_USER.clone();
# let recipient = DB_USER.clone();
let activity = Follow {
actor: ObjectId::parse("https://lemmy.ml/u/nutomic")?,
object: recipient.federation_id.clone().into(),
kind: Default::default(),
id: "https://lemmy.ml/activities/321".try_into()?
};
let inboxes = vec![recipient.shared_inbox_or_inbox()];
queue_activity(&activity, &sender, inboxes, &data).await?;
# Ok::<(), anyhow::Error>(())
# }).unwrap()
```
The list of inboxes gets deduplicated (important for shared inbox). All inboxes on the local domain and those which fail the [crate::config::UrlVerifier] check are excluded from delivery. For each remaining inbox a background task is created. It signs the HTTP header with the given private key. Finally the activity is delivered to the inbox.
It is possible that delivery fails because the target instance is temporarily unreachable. In this case the task is scheduled for retry after a certain waiting time. For each task, delivery is retried up to 3 times after the initial attempt. The retry intervals are as follows:
- one minute, in case of service restart
- one hour, in case of instance maintenance
- 2.5 days, in case of major incident with rebuild from backup
In case [crate::config::FederationConfigBuilder::debug] is enabled, no background task is used; activities are sent directly in the foreground. This makes it easier to catch delivery errors and avoids complicated steps to await delivery in tests.
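For instance, a test setup might enable this mode via the config builder. The following is an illustrative sketch (not part of this commit) that assumes the builder setter is named `debug`, matching the field referenced above, and reuses the `DbConnection` test helper from the crate's doc examples:

```rust
use activitypub_federation::config::FederationConfig;
use activitypub_federation::traits::tests::DbConnection;

fn main() -> Result<(), anyhow::Error> {
    tokio::runtime::Runtime::new()?.block_on(async {
        let config = FederationConfig::builder()
            .domain("example.com")
            .app_data(DbConnection)
            // Send activities in the foreground so delivery errors surface immediately
            .debug(true)
            .build()
            .await?;
        // Requests made with this config now bypass the background queue
        let _data = config.to_request_data();
        Ok(())
    })
}
```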
In some cases you may want to bypass the built-in activity queue and implement your own, for example to specify different retry intervals or to persist retries across application restarts. You can do so with the following code:
```rust
# use activitypub_federation::config::FederationConfig;
# use activitypub_federation::activity_sending::SendActivityTask;
# use activitypub_federation::http_signatures::generate_actor_keypair;
# use activitypub_federation::traits::Actor;
@@ -33,18 +73,3 @@ for send in sends {
# Ok::<(), anyhow::Error>(())
# }).unwrap()
```
-The list of inboxes gets deduplicated (important for shared inbox). All inboxes on the local
-domain and those which fail the [crate::config::UrlVerifier] check are excluded from delivery.
-For each remaining inbox a background tasks is created. It signs the HTTP header with the given
-private key. Finally the activity is delivered to the inbox.
-It is possible that delivery fails because the target instance is temporarily unreachable. In
-this case the task is scheduled for retry after a certain waiting time. For each task delivery
-is retried up to 3 times after the initial attempt. The retry intervals are as follows:
-- one minute, in case of service restart
-- one hour, in case of instance maintenance
-- 2.5 days, in case of major incident with rebuild from backup
-In case [crate::config::FederationConfigBuilder::debug] is enabled, no background thread is used but activities are sent directly on the foreground. This makes it easier to catch delivery errors and avoids complicated steps to await delivery in tests.


@@ -67,7 +67,7 @@ impl ActivityHandler for Follow {
let id = generate_object_id(data.domain())?;
let accept = Accept::new(local_user.ap_id.clone(), self, id.clone());
local_user
-.send(accept, vec![follower.shared_inbox_or_inbox()], data)
+.send(accept, vec![follower.shared_inbox_or_inbox()], false, data)
.await?;
Ok(())
}


@@ -6,6 +6,7 @@ use crate::{
utils::generate_object_id,
};
use activitypub_federation::{
+activity_queue::queue_activity,
activity_sending::SendActivityTask,
config::Data,
fetch::{object_id::ObjectId, webfinger::webfinger_resolve_actor},
@@ -85,7 +86,7 @@ impl DbUser {
let other: DbUser = webfinger_resolve_actor(other, data).await?;
let id = generate_object_id(data.domain())?;
let follow = Follow::new(self.ap_id.clone(), other.ap_id.clone(), id.clone());
-self.send(follow, vec![other.shared_inbox_or_inbox()], data)
+self.send(follow, vec![other.shared_inbox_or_inbox()], false, data)
.await?;
Ok(())
}
@@ -98,7 +99,7 @@ impl DbUser {
let user: DbUser = ObjectId::from(f).dereference(data).await?;
inboxes.push(user.shared_inbox_or_inbox());
}
-self.send(create, inboxes, data).await?;
+self.send(create, inboxes, true, data).await?;
Ok(())
}
@@ -106,6 +107,7 @@ impl DbUser {
&self,
activity: Activity,
recipients: Vec<Url>,
+use_queue: bool,
data: &Data<DatabaseHandle>,
) -> Result<(), Error>
where
@@ -113,10 +115,15 @@
<Activity as ActivityHandler>::Error: From<anyhow::Error> + From<serde_json::Error>,
{
let activity = WithContext::new_default(activity);
+// Send through queue in some cases and bypass it in others to test both code paths
+if use_queue {
+queue_activity(&activity, self, recipients, data).await?;
+} else {
let sends = SendActivityTask::prepare(&activity, self, recipients, data).await?;
for send in sends {
send.sign_and_send(data).await?;
}
+}
Ok(())
}
}

src/activity_queue.rs (new file, 520 lines)

@@ -0,0 +1,520 @@
//! Queue for signing and sending outgoing activities with retry
//!
#![doc = include_str!("../docs/09_sending_activities.md")]
use crate::{
activity_sending::{build_tasks, SendActivityTask},
config::Data,
error::Error,
traits::{ActivityHandler, Actor},
};
use futures_core::Future;
use reqwest_middleware::ClientWithMiddleware;
use serde::Serialize;
use std::{
fmt::{Debug, Display},
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::Duration,
};
use tokio::{
sync::mpsc::{unbounded_channel, UnboundedSender},
task::{JoinHandle, JoinSet},
};
use tracing::{info, warn};
use url::Url;
/// Send a new activity to the given inboxes with automatic retry on failure. Alternatively you
/// can implement your own queue and then send activities using [crate::activity_sending::SendActivityTask].
///
/// - `activity`: The activity to be sent, gets converted to json
/// - `actor`: The actor sending the activity, whose private key is used to sign the HTTP
/// signature. Keypairs are generated with [crate::http_signatures::generate_actor_keypair].
/// - `inboxes`: List of remote actor inboxes that should receive the activity. Ignores local actor
/// inboxes. Should be built by calling [crate::traits::Actor::shared_inbox_or_inbox]
/// for each target actor.
pub async fn queue_activity<Activity, Datatype, ActorType>(
activity: &Activity,
actor: &ActorType,
inboxes: Vec<Url>,
data: &Data<Datatype>,
) -> Result<(), Error>
where
Activity: ActivityHandler + Serialize + Debug,
Datatype: Clone,
ActorType: Actor,
{
let config = &data.config;
let tasks = build_tasks(activity, actor, inboxes, data).await?;
for task in tasks {
// Don't use the activity queue if this is in debug mode, send and wait directly
if config.debug {
if let Err(err) = sign_and_send(
&task,
&config.client,
config.request_timeout,
Default::default(),
)
.await
{
warn!("{err}");
}
} else {
// This field is only optional to make the builder work; it's always present at this point
let activity_queue = config
.activity_queue
.as_ref()
.expect("Config has activity queue");
activity_queue.queue(task).await?;
let stats = activity_queue.get_stats();
let running = stats.running.load(Ordering::Relaxed);
if running == config.queue_worker_count && config.queue_worker_count != 0 {
warn!("Reached max number of send activity workers ({}). Consider increasing worker count to avoid federation delays", config.queue_worker_count);
warn!("{:?}", stats);
} else {
info!("{:?}", stats);
}
}
}
Ok(())
}
async fn sign_and_send(
task: &SendActivityTask,
client: &ClientWithMiddleware,
timeout: Duration,
retry_strategy: RetryStrategy,
) -> Result<(), Error> {
retry(
|| task.sign_and_send_internal(client, timeout),
retry_strategy,
)
.await
}
/// A simple activity queue which spawns tokio workers to send out requests
/// When creating a queue, it will spawn a task per worker thread
/// Uses an unbounded mpsc queue for communication (i.e., all messages are in memory)
pub(crate) struct ActivityQueue {
// Stats shared between the queue and workers
stats: Arc<Stats>,
sender: UnboundedSender<SendActivityTask>,
sender_task: JoinHandle<()>,
retry_sender_task: JoinHandle<()>,
}
/// Simple stat counter to show where we're up to with sending messages
/// This is a lock-free way to share things between tasks
/// When reading these values it's possible (but extremely unlikely) to get stale data if a worker task is in the middle of transitioning
#[derive(Default)]
pub(crate) struct Stats {
pending: AtomicUsize,
running: AtomicUsize,
retries: AtomicUsize,
dead_last_hour: AtomicUsize,
completed_last_hour: AtomicUsize,
}
impl Debug for Stats {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Activity queue stats: pending: {}, running: {}, retries: {}, dead: {}, complete: {}",
self.pending.load(Ordering::Relaxed),
self.running.load(Ordering::Relaxed),
self.retries.load(Ordering::Relaxed),
self.dead_last_hour.load(Ordering::Relaxed),
self.completed_last_hour.load(Ordering::Relaxed)
)
}
}
#[derive(Clone, Copy, Default)]
struct RetryStrategy {
/// Amount of time in seconds to back off
backoff: usize,
/// Amount of times to retry
retries: usize,
/// If this particular request has already been retried, you can add an offset here to increase the starting retry count
offset: usize,
/// Number of seconds to sleep before trying
initial_sleep: usize,
}
/// A tokio spawned worker which is responsible for submitting requests to federated servers
/// This will retry up to one time with the same signature, and if it fails, will move it to the retry queue.
/// We need to retry activity sending in case the target instance is temporarily unreachable.
/// In this case, the task is stored and resent when the instance is hopefully back up. This
/// list shows the retry intervals, and which events of the target instance can be covered:
/// - 60s (one minute, service restart) -- happens in the worker w/ same signature
/// - 60min (one hour, instance maintenance) --- happens in the retry worker
/// - 60h (2.5 days, major incident with rebuild from backup) --- happens in the retry worker
async fn worker(
client: ClientWithMiddleware,
timeout: Duration,
message: SendActivityTask,
retry_queue: UnboundedSender<SendActivityTask>,
stats: Arc<Stats>,
strategy: RetryStrategy,
) {
stats.pending.fetch_sub(1, Ordering::Relaxed);
stats.running.fetch_add(1, Ordering::Relaxed);
let outcome = sign_and_send(&message, &client, timeout, strategy).await;
// "Running" has finished, check the outcome
stats.running.fetch_sub(1, Ordering::Relaxed);
match outcome {
Ok(_) => {
stats.completed_last_hour.fetch_add(1, Ordering::Relaxed);
}
Err(_err) => {
stats.retries.fetch_add(1, Ordering::Relaxed);
warn!(
"Sending activity {} to {} to the retry queue to be tried again later",
message.activity_id, message.inbox
);
// Send to the retry queue. Ignoring whether it succeeds or not
retry_queue.send(message).ok();
}
}
}
async fn retry_worker(
client: ClientWithMiddleware,
timeout: Duration,
message: SendActivityTask,
stats: Arc<Stats>,
strategy: RetryStrategy,
) {
// Because the times are pretty extravagant between retries, we have to re-sign each time
let outcome = retry(
|| {
sign_and_send(
&message,
&client,
timeout,
RetryStrategy {
backoff: 0,
retries: 0,
offset: 0,
initial_sleep: 0,
},
)
},
strategy,
)
.await;
stats.retries.fetch_sub(1, Ordering::Relaxed);
match outcome {
Ok(_) => {
stats.completed_last_hour.fetch_add(1, Ordering::Relaxed);
}
Err(_err) => {
stats.dead_last_hour.fetch_add(1, Ordering::Relaxed);
}
}
}
impl ActivityQueue {
fn new(
client: ClientWithMiddleware,
worker_count: usize,
retry_count: usize,
timeout: Duration,
backoff: usize, // This should be 60 seconds by default or 1 second in tests
) -> Self {
let stats: Arc<Stats> = Default::default();
// This task clears the dead/completed stats every hour
let hour_stats = stats.clone();
tokio::spawn(async move {
let duration = Duration::from_secs(3600);
loop {
tokio::time::sleep(duration).await;
hour_stats.completed_last_hour.store(0, Ordering::Relaxed);
hour_stats.dead_last_hour.store(0, Ordering::Relaxed);
}
});
let (retry_sender, mut retry_receiver) = unbounded_channel();
let retry_stats = stats.clone();
let retry_client = client.clone();
// The "fast path" retry
// The backoff should be < 5 mins for this to work otherwise signatures may expire
// This strategy is the one that is used with the *same* signature
let strategy = RetryStrategy {
backoff,
retries: 1,
offset: 0,
initial_sleep: 0,
};
// The "retry path" strategy
// After the fast path fails, a task will sleep up to backoff ^ 2 and then retry again
let retry_strategy = RetryStrategy {
backoff,
retries: 3,
offset: 2,
initial_sleep: backoff.pow(2), // wait 60 mins before even trying
};
let retry_sender_task = tokio::spawn(async move {
let mut join_set = JoinSet::new();
while let Some(message) = retry_receiver.recv().await {
let retry_task = retry_worker(
retry_client.clone(),
timeout,
message,
retry_stats.clone(),
retry_strategy,
);
if retry_count > 0 {
// If we're over the limit of retries, wait for them to finish before spawning
while join_set.len() >= retry_count {
join_set.join_next().await;
}
join_set.spawn(retry_task);
} else {
// If the retry worker count is `0` then just spawn and don't use the join_set
tokio::spawn(retry_task);
}
}
while !join_set.is_empty() {
join_set.join_next().await;
}
});
let (sender, mut receiver) = unbounded_channel();
let sender_stats = stats.clone();
let sender_task = tokio::spawn(async move {
let mut join_set = JoinSet::new();
while let Some(message) = receiver.recv().await {
let task = worker(
client.clone(),
timeout,
message,
retry_sender.clone(),
sender_stats.clone(),
strategy,
);
if worker_count > 0 {
// If we're over the limit of workers, wait for them to finish before spawning
while join_set.len() >= worker_count {
join_set.join_next().await;
}
join_set.spawn(task);
} else {
// If the worker count is `0` then just spawn and don't use the join_set
tokio::spawn(task);
}
}
drop(retry_sender);
while !join_set.is_empty() {
join_set.join_next().await;
}
});
Self {
stats,
sender,
sender_task,
retry_sender_task,
}
}
async fn queue(&self, message: SendActivityTask) -> Result<(), Error> {
self.stats.pending.fetch_add(1, Ordering::Relaxed);
self.sender
.send(message)
.map_err(|e| Error::ActivityQueueError(e.0.activity_id))?;
Ok(())
}
fn get_stats(&self) -> &Stats {
&self.stats
}
#[allow(unused)]
// Drops all the senders and shuts down the workers
pub(crate) async fn shutdown(self, wait_for_retries: bool) -> Result<Arc<Stats>, Error> {
drop(self.sender);
self.sender_task.await?;
if wait_for_retries {
self.retry_sender_task.await?;
}
Ok(self.stats)
}
}
/// Creates an activity queue using tokio spawned tasks
/// Note: requires a tokio runtime
pub(crate) fn create_activity_queue(
client: ClientWithMiddleware,
worker_count: usize,
retry_count: usize,
request_timeout: Duration,
) -> ActivityQueue {
ActivityQueue::new(client, worker_count, retry_count, request_timeout, 60)
}
/// Retries a future action factory function up to `strategy.retries` times with an exponential backoff timer between tries
async fn retry<T, E: Display + Debug, F: Future<Output = Result<T, E>>, A: FnMut() -> F>(
mut action: A,
strategy: RetryStrategy,
) -> Result<T, E> {
let mut count = strategy.offset;
// Do an initial sleep if it's called for
if strategy.initial_sleep > 0 {
let sleep_dur = Duration::from_secs(strategy.initial_sleep as u64);
tokio::time::sleep(sleep_dur).await;
}
loop {
match action().await {
Ok(val) => return Ok(val),
Err(err) => {
if count < strategy.retries {
count += 1;
let sleep_amt = strategy.backoff.pow(count as u32) as u64;
let sleep_dur = Duration::from_secs(sleep_amt);
warn!("{err:?}. Sleeping for {sleep_dur:?} and trying again");
tokio::time::sleep(sleep_dur).await;
continue;
} else {
return Err(err);
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::http_signatures::generate_actor_keypair;
use axum::extract::State;
use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use std::time::Instant;
use tracing::debug;
// This will periodically send back internal errors to test the retry
async fn dodgy_handler(
State(state): State<Arc<AtomicUsize>>,
headers: HeaderMap,
body: Bytes,
) -> Result<(), StatusCode> {
debug!("Headers:{:?}", headers);
debug!("Body len:{}", body.len());
if state.fetch_add(1, Ordering::Relaxed) % 20 == 0 {
return Err(StatusCode::INTERNAL_SERVER_ERROR);
}
Ok(())
}
async fn test_server() {
use axum::{routing::post, Router};
// We should break every now and then ;)
let state = Arc::new(AtomicUsize::new(0));
let app = Router::new()
.route("/", post(dodgy_handler))
.with_state(state);
axum::Server::bind(&"0.0.0.0:8002".parse().unwrap())
.serve(app.into_make_service())
.await
.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
// Queues 100 messages and then asserts that the worker runs them
async fn test_activity_queue_workers() {
let num_workers = 64;
let num_messages: usize = 100;
tokio::spawn(test_server());
/*
// uncomment for debug logs & stats
use tracing::log::LevelFilter;
env_logger::builder()
.filter_level(LevelFilter::Warn)
.filter_module("activitypub_federation", LevelFilter::Info)
.format_timestamp(None)
.init();
*/
let activity_queue = ActivityQueue::new(
reqwest::Client::default().into(),
num_workers,
num_workers,
Duration::from_secs(10),
1,
);
let keypair = generate_actor_keypair().unwrap();
let message = SendActivityTask {
actor_id: "http://localhost:8002".parse().unwrap(),
activity_id: "http://localhost:8002/activity".parse().unwrap(),
activity: "{}".into(),
inbox: "http://localhost:8002".parse().unwrap(),
private_key: keypair.private_key().unwrap(),
http_signature_compat: true,
};
let start = Instant::now();
for _ in 0..num_messages {
activity_queue.queue(message.clone()).await.unwrap();
}
info!("Queue Sent: {:?}", start.elapsed());
let stats = activity_queue.shutdown(true).await.unwrap();
info!(
"Queue Finished. Num msgs: {}, Time {:?}, msg/s: {:0.0}",
num_messages,
start.elapsed(),
num_messages as f64 / start.elapsed().as_secs_f64()
);
assert_eq!(
stats.completed_last_hour.load(Ordering::Relaxed),
num_messages
);
}
}
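As a quick sanity check, the documented retry intervals follow directly from the exponential backoff in `retry` and the two `RetryStrategy` values built in `ActivityQueue::new` with the default `backoff` of 60 seconds. A standalone sketch (not part of the crate) of that arithmetic:

```rust
fn main() {
    let backoff: u64 = 60;
    // Fast path (RetryStrategy { backoff: 60, retries: 1, .. }): one retry after backoff^1
    assert_eq!(backoff.pow(1), 60); // one minute
    // Retry worker: initial_sleep of backoff^2 before its first attempt
    assert_eq!(backoff.pow(2), 3_600); // one hour
    // With offset 2 and retries 3, the final attempt sleeps backoff^3
    assert_eq!(backoff.pow(3), 216_000); // 60 hours, i.e. 2.5 days
    println!("retry delays: 60 s, 3600 s, 216000 s");
}
```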


@@ -10,98 +10,80 @@ use crate::{
traits::{ActivityHandler, Actor},
FEDERATION_CONTENT_TYPE,
};
use bytes::Bytes;
use futures::StreamExt;
use httpdate::fmt_http_date;
use itertools::Itertools;
use openssl::pkey::{PKey, Private};
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
+use reqwest_middleware::ClientWithMiddleware;
use serde::Serialize;
use std::{
self,
fmt::{Debug, Display},
-time::SystemTime,
+time::{Duration, SystemTime},
};
use tracing::debug;
use url::Url;
#[derive(Clone, Debug)]
-/// all info needed to send one activity to one inbox
-pub struct SendActivityTask<'a> {
-actor_id: &'a Url,
-activity_id: &'a Url,
-activity: Bytes,
-inbox: Url,
-private_key: PKey<Private>,
-http_signature_compat: bool,
+/// All info needed to sign and send one activity to one inbox. You should generally use
+/// [[crate::activity_queue::queue_activity]] unless you want implement your own queue.
+pub struct SendActivityTask {
+pub(crate) actor_id: Url,
+pub(crate) activity_id: Url,
+pub(crate) activity: Bytes,
+pub(crate) inbox: Url,
+pub(crate) private_key: PKey<Private>,
+pub(crate) http_signature_compat: bool,
}
-impl Display for SendActivityTask<'_> {
+impl Display for SendActivityTask {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} to {}", self.activity_id, self.inbox)
}
}
-impl SendActivityTask<'_> {
-/// prepare an activity for sending
+impl SendActivityTask {
+/// Prepare an activity for sending
///
/// - `activity`: The activity to be sent, gets converted to json
/// - `inboxes`: List of remote actor inboxes that should receive the activity. Ignores local actor
/// inboxes. Should be built by calling [crate::traits::Actor::shared_inbox_or_inbox]
/// for each target actor.
-pub async fn prepare<'a, Activity, Datatype, ActorType>(
-activity: &'a Activity,
+pub async fn prepare<Activity, Datatype, ActorType>(
+activity: &Activity,
actor: &ActorType,
inboxes: Vec<Url>,
data: &Data<Datatype>,
-) -> Result<Vec<SendActivityTask<'a>>, Error>
+) -> Result<Vec<SendActivityTask>, Error>
where
Activity: ActivityHandler + Serialize + Debug,
Datatype: Clone,
ActorType: Actor,
{
-let config = &data.config;
-let actor_id = activity.actor();
-let activity_id = activity.id();
-let activity_serialized: Bytes = serde_json::to_vec(&activity)
-.map_err(|e| Error::SerializeOutgoingActivity(e, format!("{:?}", activity)))?
-.into();
-let private_key = get_pkey_cached(data, actor).await?;
-Ok(futures::stream::iter(
-inboxes
-.into_iter()
-.unique()
-.filter(|i| !config.is_local_url(i)),
-)
-.filter_map(|inbox| async {
-if let Err(err) = config.verify_url_valid(&inbox).await {
-debug!("inbox url invalid, skipping: {inbox}: {err}");
-return None;
-};
-Some(SendActivityTask {
-actor_id,
-activity_id,
-inbox,
-activity: activity_serialized.clone(),
-private_key: private_key.clone(),
-http_signature_compat: config.http_signature_compat,
-})
-})
-.collect()
-.await)
+build_tasks(activity, actor, inboxes, data).await
}
/// convert a sendactivitydata to a request, signing and sending it
pub async fn sign_and_send<Datatype: Clone>(&self, data: &Data<Datatype>) -> Result<(), Error> {
-let client = &data.config.client;
+self.sign_and_send_internal(&data.config.client, data.config.request_timeout)
+.await
+}
+pub(crate) async fn sign_and_send_internal(
+&self,
+client: &ClientWithMiddleware,
+timeout: Duration,
+) -> Result<(), Error> {
+debug!("Sending {} to {}", self.activity_id, self.inbox,);
let request_builder = client
.post(self.inbox.to_string())
-.timeout(data.config.request_timeout)
+.timeout(timeout)
.headers(generate_request_headers(&self.inbox));
let request = sign_request(
request_builder,
-self.actor_id,
+&self.actor_id,
self.activity.clone(),
self.private_key.clone(),
self.http_signature_compat,
@@ -131,7 +113,50 @@ impl SendActivityTask<'_> {
}
}
-async fn get_pkey_cached<ActorType>(
+pub(crate) async fn build_tasks<'a, Activity, Datatype, ActorType>(
+activity: &'a Activity,
+actor: &ActorType,
+inboxes: Vec<Url>,
+data: &Data<Datatype>,
+) -> Result<Vec<SendActivityTask>, Error>
+where
+Activity: ActivityHandler + Serialize + Debug,
+Datatype: Clone,
+ActorType: Actor,
+{
+let config = &data.config;
+let actor_id = activity.actor();
+let activity_id = activity.id();
+let activity_serialized: Bytes = serde_json::to_vec(activity)
+.map_err(|e| Error::SerializeOutgoingActivity(e, format!("{:?}", activity)))?
+.into();
+let private_key = get_pkey_cached(data, actor).await?;
+Ok(futures::stream::iter(
+inboxes
+.into_iter()
+.unique()
+.filter(|i| !config.is_local_url(i)),
+)
+.filter_map(|inbox| async {
+if let Err(err) = config.verify_url_valid(&inbox).await {
+debug!("inbox url invalid, skipping: {inbox}: {err}");
+return None;
+};
+Some(SendActivityTask {
+actor_id: actor_id.clone(),
+activity_id: activity_id.clone(),
+inbox,
+activity: activity_serialized.clone(),
+private_key: private_key.clone(),
+http_signature_compat: config.http_signature_compat,
+})
+})
+.collect()
+.await)
+}
+pub(crate) async fn get_pkey_cached<ActorType>(
data: &Data<impl Clone>,
actor: &ActorType,
) -> Result<PKey<Private>, Error>
@@ -187,7 +212,8 @@ pub(crate) fn generate_request_headers(inbox_url: &Url) -> HeaderMap {
#[cfg(test)]
mod tests {
-use axum::extract::State;
+use super::*;
+use crate::{config::FederationConfig, http_signatures::generate_actor_keypair};
use bytes::Bytes;
use http::StatusCode;
use std::{
@@ -196,23 +222,10 @@
};
use tracing::info;
-use crate::{config::FederationConfig, http_signatures::generate_actor_keypair};
-use super::*;
+#[allow(unused)]
// This will periodically send back internal errors to test the retry
-async fn dodgy_handler(
-State(state): State<Arc<AtomicUsize>>,
-headers: HeaderMap,
-body: Bytes,
-) -> Result<(), StatusCode> {
+async fn dodgy_handler(headers: HeaderMap, body: Bytes) -> Result<(), StatusCode> {
debug!("Headers:{:?}", headers);
debug!("Body len:{}", body.len());
-/*if state.fetch_add(1, Ordering::Relaxed) % 20 == 0 {
-return Err(StatusCode::INTERNAL_SERVER_ERROR);
-}*/
Ok(())
}
@@ -253,8 +266,8 @@ mod tests {
let keypair = generate_actor_keypair().unwrap();
let message = SendActivityTask {
-actor_id: &"http://localhost:8001".parse().unwrap(),
-activity_id: &"http://localhost:8001/activity".parse().unwrap(),
+actor_id: "http://localhost:8001".parse().unwrap(),
+activity_id: "http://localhost:8001/activity".parse().unwrap(),
activity: "{}".into(),
inbox: "http://localhost:8001".parse().unwrap(),
private_key: keypair.private_key().unwrap(),


@@ -15,6 +15,7 @@
//! ```
use crate::{
+activity_queue::{create_activity_queue, ActivityQueue},
error::Error,
protocol::verification::verify_domains_match,
traits::{ActivityHandler, Actor},
@@ -85,6 +86,20 @@ pub struct FederationConfig<T: Clone> {
setter(custom)
)]
pub(crate) actor_pkey_cache: Cache<Url, PKey<Private>>,
+/// Queue for sending outgoing activities. Only optional to make builder work, its always
+/// present once constructed.
+#[builder(setter(skip))]
+pub(crate) activity_queue: Option<Arc<ActivityQueue>>,
+/// When sending with activity queue: Number of tasks that can be in-flight concurrently.
+/// Tasks are retried once after a minute, then put into the retry queue.
+/// Setting this count to `0` means that there is no limit to concurrency
+#[builder(default = "0")]
+pub(crate) queue_worker_count: usize,
+/// When sending with activity queue: Number of concurrent tasks that are being retried
+/// in-flight concurrently. Tasks are retried after an hour, then again in 60 hours.
+/// Setting this count to `0` means that there is no limit to concurrency
+#[builder(default = "0")]
+pub(crate) queue_retry_count: usize,
}
impl<T: Clone> FederationConfig<T> {
@@ -197,7 +212,14 @@ impl<T: Clone> FederationConfigBuilder<T> {
/// queue for outgoing activities, which is stored internally in the config struct.
/// Requires a tokio runtime for the background queue.
pub async fn build(&mut self) -> Result<FederationConfig<T>, FederationConfigBuilderError> {
-let config = self.partial_build()?;
+let mut config = self.partial_build()?;
+let queue = create_activity_queue(
+config.client.clone(),
+config.queue_worker_count,
+config.queue_retry_count,
+config.request_timeout,
+);
+config.activity_queue = Some(Arc::new(queue));
Ok(config)
}
}
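For downstream applications, the two new count fields become setters on `FederationConfigBuilder` (derive_builder generates them from the field names). The following is a rough sketch of capping send concurrency; the setter names are assumed to mirror the fields above, in the same way as `domain` and `app_data`, and `DbConnection` is the crate's doc-test helper type:

```rust
use activitypub_federation::config::FederationConfig;
use activitypub_federation::traits::tests::DbConnection;

// Build a config with bounded send/retry concurrency.
async fn build_config() -> Result<FederationConfig<DbConnection>, anyhow::Error> {
    let config = FederationConfig::builder()
        .domain("example.com")
        .app_data(DbConnection)
        // Cap concurrent deliveries at 64; the default of 0 means unlimited
        .queue_worker_count(64)
        // Cap concurrent retry tasks at 16; retries fire after 1h and again after 60h
        .queue_retry_count(16)
        .build()
        .await?;
    Ok(config)
}
```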


@@ -4,6 +4,7 @@ use crate::fetch::webfinger::WebFingerError;
use http_signature_normalization_reqwest::SignError;
use openssl::error::ErrorStack;
use std::string::FromUtf8Error;
+use tokio::task::JoinError;
use url::Url;
/// Error messages returned by this library
@@ -60,6 +61,12 @@ pub enum Error {
/// Signing errors
#[error(transparent)]
SignError(#[from] SignError),
+/// Failed to queue activity for sending
+#[error("Failed to queue activity {0} for sending")]
+ActivityQueueError(Url),
+/// Stop activity queue
+#[error(transparent)]
+StopActivityQueue(#[from] JoinError),
/// Attempted to fetch object which doesn't have valid ActivityPub Content-Type
#[error(
"Attempted to fetch object from {0} which doesn't have valid ActivityPub Content-Type"


@@ -1,7 +1,7 @@
//! Generating keypairs, creating and verifying signatures
//!
//! Signature creation and verification is handled internally in the library. See
-//! [send_activity](crate::activity_sending::send_activity) and
+//! [send_activity](crate::activity_sending::SendActivityTask::sign_and_send) and
//! [receive_activity (actix-web)](crate::actix_web::inbox::receive_activity) /
//! [receive_activity (axum)](crate::axum::inbox::receive_activity).


@@ -10,6 +10,7 @@
#![doc = include_str!("../docs/10_fetching_objects_with_unknown_type.md")]
#![deny(missing_docs)]
+pub mod activity_queue;
pub mod activity_sending;
#[cfg(feature = "actix-web")]
pub mod actix_web;