//! Actix integration for the background-jobs core crate: job server,
//! workers, and a clonable handle for queueing jobs.

use std::{collections::BTreeMap, sync::Arc};
use actix::{Actor, Addr, SyncArbiter};
use background_jobs_core::{Job, Processor, ProcessorMap, Stats, Storage};
use failure::Error;
use futures::Future;
mod pinger;
mod server;
mod storage;
mod worker;
pub use self::{server::Server, worker::LocalWorker};
use self::{
pinger::Pinger,
server::{CheckDb, GetStats, NewJob, RequestJob, ReturningJob},
storage::{ActixStorage, StorageWrapper},
worker::Worker,
};
/// Configuration for the background-jobs server.
///
/// Bundles the storage backend with the number of threads the storage
/// server will run on; build with [`ServerConfig::new`] and launch with
/// `start`.
pub struct ServerConfig<S> {
    /// Backend used to persist and hand out jobs.
    storage: S,

    /// How many SyncArbiter threads service the job store.
    threads: usize,
}
impl<S> ServerConfig<S>
where
S: Storage + Sync + 'static,
{
2019-05-27 17:29:11 +00:00
/// Create a new ServerConfig
pub fn new(storage: S) -> Self {
2019-05-27 17:29:11 +00:00
ServerConfig {
storage,
threads: num_cpus::get(),
}
2018-12-16 18:43:44 +00:00
}
2019-05-27 17:29:11 +00:00
/// Set the number of threads to use for the server.
///
/// This is not related to the number of workers or the number of worker threads. This is
/// purely how many threads will be used to manage access to the job store.
///
/// By default, this is the number of processor cores available to the application. On systems
/// with logical cores (such as Intel hyperthreads), this will be the total number of logical
/// cores.
///
/// In certain cases, it may be beneficial to limit the server process count to 1.
///
/// When using actix-web, any configuration performed inside `HttpServer::new` closure will
/// happen on each thread started by the web server. In order to reduce the number of running
/// threads, one job server can be started per web server thread.
///
/// Another case to use a single server is if your job store has not locking guarantee, and you
/// want to enforce that no job can be requested more than once. The default storage
/// implementation does provide this guarantee, but other implementations may not.
pub fn thread_count(mut self, threads: usize) -> Self {
self.threads = threads;
self
}
2018-12-16 18:43:44 +00:00
2019-05-27 17:29:11 +00:00
/// Spin up the server processes
pub fn start(self) -> QueueHandle {
let ServerConfig { storage, threads } = self;
let server = SyncArbiter::start(threads, move || {
Server::new(StorageWrapper(storage.clone()))
});
2018-12-16 18:43:44 +00:00
2019-05-28 00:01:21 +00:00
Pinger::new(server.clone(), threads).start();
2018-12-16 18:43:44 +00:00
QueueHandle { inner: server }
}
}
pub struct WorkerConfig<State>
2018-12-16 18:43:44 +00:00
where
State: Clone + 'static,
2018-12-16 18:43:44 +00:00
{
processors: ProcessorMap<State>,
queues: BTreeMap<String, u64>,
2018-12-16 18:43:44 +00:00
}
impl<State> WorkerConfig<State>
2018-12-16 18:43:44 +00:00
where
State: Clone + 'static,
2018-12-16 18:43:44 +00:00
{
pub fn new(state_fn: impl Fn() -> State + Send + Sync + 'static) -> Self {
2018-12-16 18:43:44 +00:00
WorkerConfig {
2019-05-24 03:41:34 +00:00
processors: ProcessorMap::new(Box::new(state_fn)),
2018-12-16 18:43:44 +00:00
queues: BTreeMap::new(),
}
}
2019-05-28 00:01:21 +00:00
pub fn register(
mut self,
processor: impl Processor<Job = impl Job<State = State> + Send + 'static> + Send + 'static,
) -> Self {
self.queues.insert(processor.queue().to_owned(), 4);
2018-12-16 18:43:44 +00:00
self.processors.register_processor(processor);
2019-05-27 17:29:11 +00:00
self
2018-12-16 18:43:44 +00:00
}
2019-05-27 17:29:11 +00:00
pub fn set_processor_count(mut self, queue: &str, count: u64) -> Self {
2018-12-16 18:43:44 +00:00
self.queues.insert(queue.to_owned(), count);
2019-05-27 17:29:11 +00:00
self
2018-12-16 18:43:44 +00:00
}
2019-05-27 17:29:11 +00:00
pub fn start(self, queue_handle: QueueHandle) {
2018-12-16 18:43:44 +00:00
let processors = Arc::new(self.processors);
self.queues.into_iter().fold(0, |acc, (key, count)| {
(0..count).for_each(|i| {
LocalWorker::new(
acc + i + 1000,
key.clone(),
processors.clone(),
queue_handle.inner.clone(),
)
.start();
});
acc + count
});
}
}
#[derive(Clone)]
2019-05-27 17:29:11 +00:00
pub struct QueueHandle {
inner: Addr<Server>,
2018-12-16 18:43:44 +00:00
}
2019-05-27 17:29:11 +00:00
impl QueueHandle {
2019-05-28 00:01:21 +00:00
pub fn queue<J>(&self, job: J) -> Result<(), Error>
2018-12-16 18:43:44 +00:00
where
2019-05-28 00:01:21 +00:00
J: Job,
2018-12-16 18:43:44 +00:00
{
2019-05-28 00:01:21 +00:00
self.inner.do_send(NewJob(J::Processor::new_job(job)?));
2018-12-16 18:43:44 +00:00
Ok(())
}
pub fn get_stats(&self) -> Box<dyn Future<Item = Stats, Error = Error> + Send> {
Box::new(self.inner.send(GetStats).then(coerce))
}
}
/// Flatten a doubly-wrapped `Result`, converting the outer (mailbox)
/// error into the inner error type.
fn coerce<I, E, F>(res: Result<Result<I, E>, F>) -> Result<I, E>
where
    E: From<F>,
{
    res.unwrap_or_else(|outer| Err(outer.into()))
}