Make actix handler and frontend

This commit is contained in:
asonix 2018-11-07 19:50:21 -06:00
parent ba758b19ab
commit 5c8b50643d
19 changed files with 527 additions and 145 deletions

View file

@ -6,22 +6,23 @@ edition = "2018"
[workspace] [workspace]
members = [ members = [
"jobs-actix",
"jobs-core", "jobs-core",
"jobs-executor",
"jobs-tokio", "jobs-tokio",
"examples/process-jobs", "examples/tokio-jobs-example",
"examples/actix-jobs-example",
] ]
[features] [features]
default = ["jobs-tokio"] default = ["jobs-tokio", "jobs-actix"]
[dependencies.jobs-core] [dependencies.jobs-core]
version = "0.1" version = "0.1"
path = "jobs-core" path = "jobs-core"
[dependencies.jobs-executor] [dependencies.jobs-actix]
version = "0.1" version = "0.1"
path = "jobs-executor" path = "jobs-actix"
optional = true optional = true
[dependencies.jobs-tokio] [dependencies.jobs-tokio]

View file

@ -0,0 +1 @@
RUST_LOG=actix_jobs_example,jobs_actix=trace

View file

@ -0,0 +1,21 @@
[package]
name = "actix-jobs-example"
version = "0.1.0"
authors = ["asonix <asonix@asonix.dog>"]
edition = "2018"
[dependencies]
actix = "0.7"
dotenv = "0.13"
env_logger = "0.5"
failure = "0.1"
futures = "0.1"
log = "0.4"
serde = "1.0"
serde_derive = "1.0"
[dependencies.jobs]
version = "0.1"
path = "../.."
default-features = false
features = ["jobs-actix"]

View file

@ -0,0 +1,99 @@
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
use failure::Error;
use futures::{future::IntoFuture, Future};
use jobs::{Backoff, JobsBuilder, MaxRetries, Processor, QueueJob};
/// Payload handed to `MyProcessor` for each queued job.
/// Serde-derived so the job store can serialize it for persistence.
#[derive(Clone, Debug, Deserialize, Serialize)]
struct MyJobArguments {
    some_usize: usize,
    other_usize: usize,
}
/// Unit-struct processor: all per-job state travels in `MyJobArguments`.
/// `Clone` is required because the builder registers a copy with each
/// processor actor.
#[derive(Clone, Debug)]
struct MyProcessor;

impl Processor for MyProcessor {
    type Arguments = MyJobArguments;

    /// Registry key for this processor; stored jobs are dispatched by name.
    fn name() -> &'static str {
        "MyProcessor"
    }

    /// Retry a failed job at most once.
    fn max_retries() -> MaxRetries {
        MaxRetries::Count(1)
    }

    /// Exponential backoff between retries (the `2` is presumably the
    /// base/multiplier — exact semantics defined in jobs-core).
    fn backoff_strategy() -> Backoff {
        Backoff::Exponential(2)
    }

    /// The actual work: log the arguments and report immediate success.
    fn process(&self, args: Self::Arguments) -> Box<dyn Future<Item = (), Error = Error> + Send> {
        info!("args: {:?}", args);
        Box::new(Ok(()).into_future())
    }
}
/// Example entry point: boot an actix System, build the jobs actor tree
/// with `MyProcessor` registered, queue a batch of demo jobs, then run
/// the system until it is stopped externally.
fn main() -> Result<(), Error> {
    dotenv::dotenv().ok();
    env_logger::init();

    let sys = actix::System::new("jobs-system");

    // runner id 1234, 4 processor actors, db files under "example-db"
    // (parameter semantics defined by jobs-actix's JobsBuilder::new).
    let mut builder = JobsBuilder::new(1234, 4, "example-db");

    builder.register_processor(MyProcessor);

    // Starts the storage and processor actors; returns the address used
    // to queue jobs.
    let jobs_actor = builder.build()?;

    // Consecutive Fibonacci pairs used as arbitrary demo payloads.
    let jobs = vec![
        MyJobArguments {
            some_usize: 0,
            other_usize: 1,
        },
        MyJobArguments {
            some_usize: 1,
            other_usize: 2,
        },
        MyJobArguments {
            some_usize: 3,
            other_usize: 5,
        },
        MyJobArguments {
            some_usize: 8,
            other_usize: 13,
        },
        MyJobArguments {
            some_usize: 21,
            other_usize: 34,
        },
        MyJobArguments {
            some_usize: 55,
            other_usize: 89,
        },
        MyJobArguments {
            some_usize: 144,
            other_usize: 233,
        },
        MyJobArguments {
            some_usize: 377,
            other_usize: 610,
        },
        MyJobArguments {
            some_usize: 987,
            other_usize: 1597,
        },
    ];

    for job in jobs {
        // The two `None`s are presumably optional queue/scheduling options
        // for new_job — TODO confirm against jobs-core's Processor::new_job.
        jobs_actor.do_send(QueueJob(MyProcessor::new_job(job, None, None)?));
    }

    // Blocks until the actix System shuts down; exit code intentionally ignored.
    let _ = sys.run();

    Ok(())
}

View file

@ -1 +0,0 @@
RUST_LOG=jobs_tokio,process_jobs=trace

View file

@ -1,115 +0,0 @@
#[macro_use]
extern crate serde_derive;
use std::time::Duration;
use failure::Error;
use futures::{
future::{lazy, IntoFuture},
Future,
};
use jobs::{Backoff, JobRunner, MaxRetries, Processor};
#[derive(Clone, Debug, Deserialize, Serialize)]
struct MyJobArguments {
some_usize: usize,
other_usize: usize,
}
struct MyProcessor;
impl Processor for MyProcessor {
type Arguments = MyJobArguments;
fn name() -> &'static str {
"MyProcessor"
}
fn max_retries() -> MaxRetries {
MaxRetries::Count(1)
}
fn backoff_strategy() -> Backoff {
Backoff::Exponential(2)
}
fn process(&self, args: Self::Arguments) -> Box<dyn Future<Item = (), Error = Error> + Send> {
println!("args: {:?}", args);
Box::new(Ok(()).into_future())
}
}
fn main() {
dotenv::dotenv().ok();
env_logger::init();
tokio::run(
lazy(|| {
let mut runner = JobRunner::new(1234, 4, "example-db");
runner.register_processor(MyProcessor);
let handle = runner.spawn();
let jobs = vec![
MyJobArguments {
some_usize: 0,
other_usize: 1,
},
MyJobArguments {
some_usize: 1,
other_usize: 2,
},
MyJobArguments {
some_usize: 3,
other_usize: 5,
},
MyJobArguments {
some_usize: 8,
other_usize: 13,
},
MyJobArguments {
some_usize: 21,
other_usize: 34,
},
MyJobArguments {
some_usize: 55,
other_usize: 89,
},
MyJobArguments {
some_usize: 144,
other_usize: 233,
},
MyJobArguments {
some_usize: 377,
other_usize: 610,
},
MyJobArguments {
some_usize: 987,
other_usize: 1597,
},
];
let _: Vec<_> = jobs
.into_iter()
.map(|job| {
tokio::spawn(
handle
.queue(MyProcessor::new_job(job, None, None).unwrap())
.then(|_| Ok(())),
);
})
.collect();
Ok(handle)
})
.and_then(|handle| {
tokio::timer::Delay::new(tokio::clock::now() + Duration::from_secs(2))
.map(move |_| {
let _ = handle;
()
})
.map_err(|_| ())
}),
);
}

View file

@ -0,0 +1 @@
RUST_LOG=tokio_jobs_example,jobs_tokio=trace

View file

@ -0,0 +1 @@
example-db

View file

@ -1,5 +1,5 @@
[package] [package]
name = "process-jobs" name = "tokio-jobs-example"
version = "0.1.0" version = "0.1.0"
authors = ["asonix <asonix@asonix.dog>"] authors = ["asonix <asonix@asonix.dog>"]
edition = "2018" edition = "2018"
@ -17,4 +17,5 @@ tokio = "0.1"
[dependencies.jobs] [dependencies.jobs]
version = "0.1" version = "0.1"
path = "../.." path = "../.."
default-features = false
features = ["jobs-tokio"] features = ["jobs-tokio"]

View file

@ -0,0 +1,110 @@
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
use std::time::Duration;
use failure::Error;
use futures::{
future::{lazy, IntoFuture},
Future,
};
use jobs::{Backoff, JobRunner, MaxRetries, Processor};
/// Payload handed to `MyProcessor` for each queued job.
/// Serde-derived so the job store can serialize it for persistence.
#[derive(Clone, Debug, Deserialize, Serialize)]
struct MyJobArguments {
    some_usize: usize,
    other_usize: usize,
}
/// Unit-struct processor: all per-job state travels in `MyJobArguments`.
/// `Clone` is required by the `Processor: Clone` bound in jobs-core.
#[derive(Clone, Debug)]
struct MyProcessor;

impl Processor for MyProcessor {
    type Arguments = MyJobArguments;

    /// Registry key for this processor; stored jobs are dispatched by name.
    fn name() -> &'static str {
        "MyProcessor"
    }

    /// Retry a failed job at most once.
    fn max_retries() -> MaxRetries {
        MaxRetries::Count(1)
    }

    /// Exponential backoff between retries (the `2` is presumably the
    /// base/multiplier — exact semantics defined in jobs-core).
    fn backoff_strategy() -> Backoff {
        Backoff::Exponential(2)
    }

    /// The actual work: log the arguments and report immediate success.
    fn process(&self, args: Self::Arguments) -> Box<dyn Future<Item = (), Error = Error> + Send> {
        info!("args: {:?}", args);
        Box::new(Ok(()).into_future())
    }
}
/// Example entry point for the tokio-based runner: spawn the JobRunner,
/// queue a batch of demo jobs, and keep the runtime alive briefly so the
/// background processors can drain them.
fn main() {
    dotenv::dotenv().ok();
    env_logger::init();

    tokio::run(lazy(|| {
        // runner id 1234, 4 workers, db files under "example-db"
        // (parameter semantics defined by jobs-tokio's JobRunner::new).
        let mut runner = JobRunner::new(1234, 4, "example-db");
        runner.register_processor(MyProcessor);

        // Starts the runner's background tasks; the handle is how jobs
        // get queued into it.
        let handle = runner.spawn();

        // Consecutive Fibonacci pairs used as arbitrary demo payloads.
        let jobs = vec![
            MyJobArguments {
                some_usize: 0,
                other_usize: 1,
            },
            MyJobArguments {
                some_usize: 1,
                other_usize: 2,
            },
            MyJobArguments {
                some_usize: 3,
                other_usize: 5,
            },
            MyJobArguments {
                some_usize: 8,
                other_usize: 13,
            },
            MyJobArguments {
                some_usize: 21,
                other_usize: 34,
            },
            MyJobArguments {
                some_usize: 55,
                other_usize: 89,
            },
            MyJobArguments {
                some_usize: 144,
                other_usize: 233,
            },
            MyJobArguments {
                some_usize: 377,
                other_usize: 610,
            },
            MyJobArguments {
                some_usize: 987,
                other_usize: 1597,
            },
        ];

        for job in jobs {
            // Queue each job; its result is discarded (`then(|_| Ok(()))`)
            // so a failed enqueue doesn't tear down the runtime.
            tokio::spawn(
                handle
                    .queue(MyProcessor::new_job(job, None, None).unwrap())
                    .then(|_| Ok(())),
            );
        }

        // Keep `handle` alive for one second so queued jobs get processed,
        // then drop it (which presumably lets the runner shut down).
        tokio::timer::Delay::new(tokio::clock::now() + Duration::from_secs(1))
            .map(move |_| {
                let _ = handle;
                ()
            })
            .map_err(|_| ())
    }));
}

15
jobs-actix/Cargo.toml Normal file
View file

@ -0,0 +1,15 @@
[package]
name = "jobs-actix"
version = "0.1.0"
authors = ["asonix <asonix@asonix.dog>"]
edition = "2018"
[dependencies]
actix = "0.7"
failure = "0.1"
futures = "0.1"
log = "0.4"
[dependencies.jobs-core]
version = "0.1"
path = "../jobs-core"

267
jobs-actix/src/lib.rs Normal file
View file

@ -0,0 +1,267 @@
#[macro_use]
extern crate failure;
#[macro_use]
extern crate log;
use std::{
fs::create_dir_all,
path::{Path, PathBuf},
time::Duration,
};
use actix::{
fut::wrap_future, utils::IntervalFunc, Actor, ActorFuture, ActorStream, Addr, AsyncContext,
Context, ContextFutureSpawner, Handler, Message, ResponseFuture, SyncArbiter, SyncContext,
};
use failure::Error;
use futures::Future;
use jobs_core::{storage::Storage, JobInfo, Processor, Processors};
/// Flatten a nested `Result`, converting the outer error type into the
/// inner one so callers only deal with a single error type.
///
/// Used below to collapse actor mailbox responses of the form
/// `Result<Result<I, E>, MailboxError>` into a plain `Result<I, E>`.
fn coerce<I, E, F>(res: Result<Result<I, E>, F>) -> Result<I, E>
where
    F: Into<E>,
{
    res.map_err(Into::into).and_then(|inner| inner)
}
/// Synchronous wrapper around the on-disk job `Storage`.
/// Cloneable so a SyncArbiter can hand one copy to each worker thread.
#[derive(Clone)]
pub struct KvActor {
    storage: Storage,
}

impl KvActor {
    /// Create the backing directory (if missing) and open the storage
    /// for the given runner id.
    pub fn init(runner_id: usize, db_path: PathBuf) -> Result<Self, Error> {
        create_dir_all(db_path.clone())?;

        let storage = Storage::init(runner_id, db_path)?;

        let actor = KvActor { storage };

        Ok(actor)
    }

    /// Persist a job — both newly queued jobs and jobs written back
    /// after processing go through here.
    pub fn store_job(&self, job: JobInfo) -> Result<(), Error> {
        self.storage.store_job(job)?;

        Ok(())
    }

    /// Pull up to `limit` jobs out of storage for processing.
    pub fn dequeue_jobs(&self, limit: usize) -> Result<Vec<JobInfo>, Error> {
        let jobs = self.storage.dequeue_job(limit)?;

        Ok(jobs)
    }
}
// SyncContext: KvActor instances run on SyncArbiter worker threads, so
// their blocking storage calls stay off the async event loops.
impl Actor for KvActor {
    type Context = SyncContext<Self>;
}

impl Handler<StoreJob> for KvActor {
    type Result = Result<(), Error>;

    /// Persist the job carried by the message.
    fn handle(&mut self, msg: StoreJob, _: &mut Self::Context) -> Self::Result {
        self.store_job(msg.0)
    }
}

impl Handler<DequeueJobs> for KvActor {
    type Result = Result<Vec<JobInfo>, Error>;

    /// Dequeue up to `msg.0` jobs from storage.
    fn handle(&mut self, msg: DequeueJobs, _: &mut Self::Context) -> Self::Result {
        self.dequeue_jobs(msg.0)
    }
}
/// Message to `KvActor`: persist this job.
#[derive(Debug)]
pub struct StoreJob(JobInfo);

impl Message for StoreJob {
    type Result = Result<(), Error>;
}

/// Message to `KvActor`: dequeue up to this many jobs.
#[derive(Debug)]
pub struct DequeueJobs(usize);

impl Message for DequeueJobs {
    type Result = Result<Vec<JobInfo>, Error>;
}
/// Public-facing actor: accepts `QueueJob` messages and forwards the
/// contained `JobInfo` to the storage actor pool.
pub struct JobsActor {
    store: Addr<KvActor>,
}

impl JobsActor {
    fn new(store: Addr<KvActor>) -> Self {
        JobsActor { store }
    }

    /// Send the job to the store, flattening the mailbox-level error
    /// into the storage error type via `coerce`.
    fn store_job(&mut self, job: JobInfo) -> impl Future<Item = (), Error = Error> {
        self.store.send(StoreJob(job)).then(coerce)
    }
}

impl Actor for JobsActor {
    type Context = Context<Self>;
}

impl Handler<QueueJob> for JobsActor {
    type Result = ResponseFuture<(), Error>;

    /// Queue a job; resolution of the future signals whether it was stored.
    fn handle(&mut self, msg: QueueJob, _: &mut Self::Context) -> Self::Result {
        Box::new(self.store_job(msg.0))
    }
}

/// Public message: queue this job for eventual processing.
#[derive(Debug)]
pub struct QueueJob(pub JobInfo);

impl Message for QueueJob {
    type Result = Result<(), Error>;
}
/// Actor that drives job execution: it repeatedly pulls a job from the
/// store, runs it through its `Processors` registry, and writes the
/// (updated) job back.
pub struct ProcessorActor {
    processors: Processors,
    store: Addr<KvActor>,
}

impl ProcessorActor {
    fn new(processors: Processors, store: Addr<KvActor>) -> Self {
        ProcessorActor { processors, store }
    }

    /// One fetch → run → return cycle, chained as ActorFutures and spawned
    /// onto this actor's context. On full success, `call_process_job`
    /// messages the actor to immediately try for another job, draining
    /// the queue; any error halts the chain until the next interval tick.
    fn process_job(&mut self, ctx: &mut Context<Self>) {
        Self::fetch_job((), self, ctx)
            .and_then(Self::run_job)
            .and_then(Self::return_job)
            .map(Self::call_process_job)
            .spawn(ctx);
    }

    /// Ask the store for a single job. Resolves to `Err(())` — aborting
    /// the chain — when the mailbox call fails or no job is available.
    fn fetch_job(
        _: (),
        actor: &mut Self,
        _: &mut Context<Self>,
    ) -> impl ActorFuture<Item = JobInfo, Error = (), Actor = Self> {
        wrap_future(
            actor
                .store
                .send(DequeueJobs(1))
                .then(coerce)
                .map_err(|e| error!("Error fetching jobs, {}", e))
                .and_then(|jobs| jobs.into_iter().next().ok_or(())),
        )
    }

    /// Execute the job via the `Processors` registry.
    /// NOTE(review): if this future errors, the chain aborts before
    /// `return_job`, so the job is not written back here — confirm that
    /// jobs-core's `process_job` handles the failure/retry path itself.
    fn run_job(
        job: JobInfo,
        actor: &mut Self,
        _: &mut Context<Self>,
    ) -> impl ActorFuture<Item = JobInfo, Error = (), Actor = Self> {
        wrap_future(actor.processors.process_job(job))
    }

    /// Write the processed job back to the store.
    fn return_job(
        job: JobInfo,
        actor: &mut Self,
        _: &mut Context<Self>,
    ) -> impl ActorFuture<Item = (), Error = (), Actor = Self> {
        wrap_future(
            actor
                .store
                .send(StoreJob(job))
                .then(coerce)
                .map_err(|e| error!("Error returning jobs, {}", e)),
        )
    }

    /// Tail of a successful cycle: message ourselves to process the next job.
    fn call_process_job(_: (), _: &mut Self, ctx: &mut Context<Self>) {
        ctx.address().do_send(ProcessJob);
    }
}
impl Actor for ProcessorActor {
    type Context = Context<Self>;

    /// Poll for work every 500ms for the lifetime of the actor; each tick
    /// kicks off a fetch → run → return cycle.
    fn started(&mut self, ctx: &mut Self::Context) {
        IntervalFunc::new(Duration::from_millis(500), Self::process_job)
            .finish()
            .spawn(ctx);
    }
}

impl Handler<ProcessJob> for ProcessorActor {
    type Result = ();

    /// Self-triggered follow-up: immediately attempt another cycle.
    fn handle(&mut self, _: ProcessJob, ctx: &mut Self::Context) -> Self::Result {
        self.process_job(ctx)
    }
}

/// Internal self-message: "try to process another job now".
struct ProcessJob;

impl Message for ProcessJob {
    type Result = ();
}
// NOTE(review): none of these error types are referenced elsewhere in
// this file — presumably reserved for downstream callers or future use.

/// No jobs were available to process.
#[derive(Clone, Debug, Fail)]
#[fail(display = "No jobs to process")]
pub struct NoJobs;

/// A job failed during processing.
#[derive(Clone, Debug, Fail)]
#[fail(display = "Error processing jobs")]
pub struct ProcessError;

/// The polling interval itself failed.
#[derive(Clone, Debug, Fail)]
#[fail(display = "Error in Interval")]
pub struct IntervalError;
/// Builder that wires the whole system together: the storage actor pool,
/// one `ProcessorActor` per requested processor, and the public `JobsActor`.
pub struct JobsBuilder {
    num_processors: usize,
    runner_id: usize,
    db_path: PathBuf,
    processors: Vec<Processors>,
}

impl JobsBuilder {
    pub fn new<P: AsRef<Path>>(runner_id: usize, num_processors: usize, db_path: P) -> Self {
        JobsBuilder {
            num_processors,
            runner_id,
            db_path: db_path.as_ref().to_owned(),
            // One independent Processors registry per future ProcessorActor.
            processors: (0..num_processors).map(|_| Processors::new()).collect(),
        }
    }

    /// Register `processor` with every per-actor registry — hence the
    /// `Processor: Clone` requirement.
    pub fn register_processor<P>(&mut self, processor: P)
    where
        P: Processor + Send + Sync + 'static,
    {
        for processors in self.processors.iter_mut() {
            processors.register_processor(processor.clone());
        }
    }

    /// Consume the builder and start all actors. Returns the `JobsActor`
    /// address that callers use to queue jobs.
    pub fn build(self) -> Result<Addr<JobsActor>, Error> {
        let JobsBuilder {
            num_processors,
            runner_id,
            db_path,
            processors,
        } = self;

        let kv_actor = KvActor::init(runner_id, db_path)?;
        // num_processors + 1 storage threads: presumably one extra so the
        // queueing path isn't starved by the processors — TODO confirm intent.
        let store = SyncArbiter::start(num_processors + 1, move || kv_actor.clone());

        for processors in processors {
            ProcessorActor::new(processors, store.clone()).start();
        }

        let actor = JobsActor::new(store).start();

        Ok(actor)
    }
}

View file

@ -28,7 +28,7 @@ pub enum JobError {
/// The Processor trait /// The Processor trait
/// ///
/// Processors define the logic for executing jobs /// Processors define the logic for executing jobs
pub trait Processor { pub trait Processor: Clone {
type Arguments: Serialize + DeserializeOwned; type Arguments: Serialize + DeserializeOwned;
/// The name of the processor /// The name of the processor

View file

@ -1,7 +0,0 @@
[package]
name = "jobs-executor"
version = "0.1.0"
authors = ["asonix <asonix@asonix.dog>"]
edition = "2018"
[dependencies]

View file

@ -1,7 +0,0 @@
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

View file

@ -242,11 +242,3 @@ impl JobRunner {
}) })
} }
} }
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

View file

@ -5,3 +5,6 @@ pub use jobs_core::{
#[cfg(feature = "jobs-tokio")] #[cfg(feature = "jobs-tokio")]
pub use jobs_tokio::{JobRunner, ProcessorHandle}; pub use jobs_tokio::{JobRunner, ProcessorHandle};
#[cfg(feature = "jobs-actix")]
pub use jobs_actix::{JobsActor, JobsBuilder, QueueJob};