Use Push and Pull to transmit jobs. No more req/rep issues
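
REQ/REP forces a strict send/recv lockstep on every socket: if a reply
is lost or a peer dies mid-exchange, the socket is wedged in the wrong
state and the whole loop stalls. PUSH/PULL replaces that with two
one-way pipes: the server pushes jobs out on the job port and pulls
queued or completed jobs in on the queue port, so neither side ever
blocks waiting for an acknowledgement, and a PUSH socket round-robins
its messages across however many workers are connected.

A rough sketch of the new wiring, using the same tokio-zmq builder
calls as the code below (the ports are illustrative, and `ctx` is
assumed to be an `Arc<zmq::Context>`):

    // Server side: bind both one-way sockets.
    let pusher = Push::builder(ctx.clone())
        .bind("tcp://127.0.0.1:5555") // job_port: jobs flow out to workers
        .build()?;
    let puller = Pull::builder(ctx.clone())
        .bind("tcp://127.0.0.1:5556") // queue_port: jobs flow back in
        .build()?;

    // Worker side: the mirror image. Receive work on one socket and
    // return results on the other; neither direction waits for a reply.
    let pull = Pull::builder(ctx.clone())
        .connect("tcp://127.0.0.1:5555")
        .build()?;
    let push = Push::builder(ctx.clone())
        .connect("tcp://127.0.0.1:5556")
        .build()?;

The queue port doubles as the submission path: spawners and workers
both push onto it, and the server stores anything it pulls from there.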

asonix 2018-11-10 15:58:19 -06:00
parent 87db89b35a
commit dbb8144673
10 changed files with 318 additions and 309 deletions

View file

@@ -5,7 +5,7 @@ fn main() -> Result<(), Error> {
     dotenv::dotenv().ok();
     env_logger::init();
-    let config = ServerConfig::init("127.0.0.1", 5555, 1234, 1, "example-db")?;
+    let config = ServerConfig::init("127.0.0.1", 5555, 5556, 1, "example-db")?;
     tokio::run(config.run());

View file

@@ -9,7 +9,7 @@ fn main() {
         (y, x + y, acc)
     });
-    let spawner = SpawnerConfig::new("localhost", 5555);
+    let spawner = SpawnerConfig::new("localhost", 5556);
     tokio::run(lazy(move || {
         for job in jobs {

View file

@@ -1,13 +1,13 @@
 use failure::Error;
-use jobs::ClientConfig;
+use jobs::WorkerConfig;
 use server_jobs_example::MyProcessor;
 fn main() -> Result<(), Error> {
-    let mut client = ClientConfig::init(16, "localhost", 5555)?;
+    let mut worker = WorkerConfig::init(16, "localhost", 5555, 5556)?;
-    client.register_processor(MyProcessor);
+    worker.register_processor(MyProcessor);
-    tokio::run(client.run());
+    tokio::run(worker.run());
     Ok(())
 }

View file

@@ -9,7 +9,6 @@ failure = "0.1"
 futures = "0.1"
 log = "0.4"
 serde = "1.0"
-serde_derive = "1.0"
 serde_json = "1.0"
 tokio = "0.1"
 tokio-threadpool = "0.1"

View file

@@ -1,166 +0,0 @@
-use std::{sync::Arc, time::Duration};
-use failure::Error;
-use futures::{
-    future::{lazy, Either, IntoFuture},
-    Future, Stream,
-};
-use jobs_core::{Processor, Processors};
-use tokio::timer::Delay;
-use tokio_zmq::{prelude::*, Multipart, Req};
-use zmq::{Context, Message};
-use crate::{ServerRequest, ServerResponse};
-pub struct ClientConfig {
-    processors: Vec<Processors>,
-    clients: Vec<Req>,
-}
-impl ClientConfig {
-    pub fn init(
-        num_processors: usize,
-        server_host: &str,
-        server_port: usize,
-    ) -> Result<Self, Error> {
-        let ctx = Arc::new(Context::new());
-        let mut clients = Vec::new();
-        let processors = (0..num_processors).map(|_| Processors::new()).collect();
-        for _ in 0..num_processors {
-            clients.push(
-                Req::builder(ctx.clone())
-                    .connect(&format!("tcp://{}:{}", server_host, server_port))
-                    .build()?,
-            );
-        }
-        let cfg = ClientConfig {
-            processors,
-            clients,
-        };
-        Ok(cfg)
-    }
-    pub fn register_processor<P>(&mut self, processor: P)
-    where
-        P: Processor + Send + Sync + 'static,
-    {
-        for processors in self.processors.iter_mut() {
-            processors.register_processor(processor.clone());
-        }
-    }
-    pub fn run(self) -> impl Future<Item = (), Error = ()> {
-        let ClientConfig {
-            processors,
-            clients,
-        } = self;
-        lazy(|| {
-            for (client, processors) in clients.into_iter().zip(processors) {
-                tokio::spawn(client_future(client, processors));
-            }
-            Ok(())
-        })
-    }
-}
-fn client_future(req: Req, processors: Processors) -> impl Future<Item = (), Error = ()> {
-    request_one_job()
-        .into_future()
-        .and_then(|multipart| req.send(multipart).from_err())
-        .and_then(|req| {
-            let (sink, stream) = req.sink_stream().split();
-            stream
-                .from_err()
-                .and_then(move |multipart| wrap_response(multipart, &processors))
-                .forward(sink)
-        })
-        .map_err(|e| error!("Error in client, {}", e))
-        .map(|_| ())
-}
-fn request_one_job() -> Result<Multipart, Error> {
-    serialize_request(ServerRequest::FetchJobs(1))
-}
-fn serialize_request(request: ServerRequest) -> Result<Multipart, Error> {
-    let request = serde_json::to_string(&request)?;
-    let msg = Message::from_slice(request.as_ref())?;
-    Ok(msg.into())
-}
-fn parse_multipart(mut multipart: Multipart) -> Result<ServerResponse, Error> {
-    let message = multipart.pop_front().ok_or(ParseError)?;
-    let parsed = serde_json::from_slice(&message)?;
-    Ok(parsed)
-}
-fn wrap_response(
-    multipart: Multipart,
-    processors: &Processors,
-) -> impl Future<Item = Multipart, Error = Error> {
-    let default_request = Either::A(request_one_job().into_future());
-    let msg = match parse_multipart(multipart) {
-        Ok(msg) => msg,
-        Err(e) => {
-            error!("Error parsing response, {}", e);
-            return default_request;
-        }
-    };
-    let fut = process_response(msg, processors).then(move |res| match res {
-        Ok(request) => serialize_request(request),
-        Err(e) => {
-            error!("Error processing response, {}", e);
-            request_one_job()
-        }
-    });
-    Either::B(fut)
-}
-fn process_response(
-    response: ServerResponse,
-    processors: &Processors,
-) -> impl Future<Item = ServerRequest, Error = Error> {
-    let either_a = Either::A(
-        Delay::new(tokio::clock::now() + Duration::from_millis(500))
-            .from_err()
-            .and_then(|_| Ok(ServerRequest::FetchJobs(1))),
-    );
-    match response {
-        ServerResponse::FetchJobs(jobs) => {
-            let job = match jobs.into_iter().next() {
-                Some(job) => job,
-                None => return either_a,
-            };
-            let fut = processors
-                .process_job(job)
-                .map(ServerRequest::ReturnJob)
-                .or_else(|_| Ok(ServerRequest::FetchJobs(1)));
-            Either::B(fut)
-        }
-        e => {
-            error!("Error from server, {:?}", e);
-            return either_a;
-        }
-    }
-}
-#[derive(Clone, Debug, Fail)]
-#[fail(display = "Error parsing response")]
-struct ParseError;

View file

@@ -2,20 +2,14 @@
 extern crate failure;
 #[macro_use]
 extern crate log;
-#[macro_use]
-extern crate serde_derive;
 use failure::Error;
-mod client;
 mod server;
 mod spawner;
+mod worker;
-pub use crate::{
-    client::ClientConfig,
-    server::{ServerConfig, ServerRequest, ServerResponse},
-    spawner::SpawnerConfig,
-};
+pub use crate::{server::ServerConfig, spawner::SpawnerConfig, worker::WorkerConfig};
 fn coerce<T, F>(res: Result<Result<T, Error>, F>) -> Result<T, Error>
 where

View file

@@ -1,169 +1,192 @@
-use std::{path::Path, sync::Arc};
+use std::{
+    path::{Path, PathBuf},
+    sync::Arc,
+    time::Duration,
+};
 use failure::Error;
 use futures::{
     future::{lazy, poll_fn},
+    stream::iter_ok,
     Future, Stream,
 };
 use jobs_core::{JobInfo, Storage};
+use tokio::timer::Interval;
 use tokio_threadpool::blocking;
-use tokio_zmq::{prelude::*, Dealer, Multipart, Rep, Router};
+use tokio_zmq::{prelude::*, Multipart, Pull, Push};
 use zmq::{Context, Message};
 use crate::coerce;
-/// Messages from the client to the server
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub enum ServerRequest {
-    /// Request a number of jobs from the server
-    FetchJobs(usize),
-    /// Return a processed job to the server
-    ReturnJob(JobInfo),
-}
-/// How the server responds to the client
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub enum ServerResponse {
-    /// Send a list of jobs to the client
-    FetchJobs(Vec<JobInfo>),
-    /// Send an OK to the client after a job is returned
-    JobReturned,
-    /// Could not parse the client's message
-    Unparsable,
-    /// Server experienced error
-    InternalServerError,
-}
+#[derive(Clone)]
+struct Config {
+    ip: String,
+    job_port: usize,
+    queue_port: usize,
+    runner_id: usize,
+    db_path: PathBuf,
+    context: Arc<Context>,
+}
+impl Config {
+    fn create_server(&self) -> Result<ServerConfig, Error> {
+        let pusher = Push::builder(self.context.clone())
+            .bind(&format!("tcp://{}:{}", self.ip, self.job_port))
+            .build()?;
+        let puller = Pull::builder(self.context.clone())
+            .bind(&format!("tcp://{}:{}", self.ip, self.queue_port))
+            .build()?;
+        let storage = Storage::init(self.runner_id, self.db_path.clone())?;
+        let server = ServerConfig {
+            pusher,
+            puller,
+            storage,
+            config: self.clone(),
+        };
+        Ok(server)
+    }
+}
 pub struct ServerConfig {
-    servers: Vec<Rep>,
-    dealer: Dealer,
-    router: Router,
+    pusher: Push,
+    puller: Pull,
     storage: Storage,
+    // TODO: Recover from failure
+    #[allow(dead_code)]
+    config: Config,
 }
 impl ServerConfig {
     pub fn init<P: AsRef<Path>>(
         ip: &str,
-        port: usize,
+        job_port: usize,
+        queue_port: usize,
         runner_id: usize,
-        server_count: usize,
         db_path: P,
     ) -> Result<Self, Error> {
         let context = Arc::new(Context::new());
-        let inproc_name = "inproc://jobs-server-tokio";
-        let dealer = Dealer::builder(context.clone()).bind(inproc_name).build()?;
-        let router = Router::builder(context.clone())
-            .bind(&format!("tcp://{}:{}", ip, port))
-            .build()?;
-        let mut servers = Vec::new();
-        for _ in 0..server_count {
-            servers.push(Rep::builder(context.clone()).connect(inproc_name).build()?);
-        }
-        let storage = Storage::init(runner_id, db_path.as_ref().to_owned())?;
-        let cfg = ServerConfig {
-            servers,
-            dealer,
-            router,
-            storage,
-        };
-        Ok(cfg)
+        Self::init_with_context(ip, job_port, queue_port, runner_id, db_path, context)
     }
+    pub fn init_with_context<P: AsRef<Path>>(
+        ip: &str,
+        job_port: usize,
+        queue_port: usize,
+        runner_id: usize,
+        db_path: P,
+        context: Arc<Context>,
+    ) -> Result<Self, Error> {
+        let config = Config {
+            ip: ip.to_owned(),
+            job_port,
+            queue_port,
+            runner_id,
+            db_path: db_path.as_ref().to_owned(),
+            context,
+        };
+        config.create_server()
+    }
     pub fn run(self) -> impl Future<Item = (), Error = ()> {
         lazy(|| {
             let ServerConfig {
-                servers,
-                dealer,
-                router,
+                pusher,
+                puller,
                 storage,
+                config: _,
             } = self;
-            for server in servers {
-                let (sink, stream) = server.sink_stream().split();
-                let storage = storage.clone();
-                let fut = stream
-                    .from_err()
-                    .and_then(move |multipart| {
-                        let storage = storage.clone();
-                        let res = parse_multipart(multipart);
-                        poll_fn(move || {
-                            let res = res.clone();
-                            let storage = storage.clone();
-                            blocking(move || wrap_request(res, storage))
-                        })
-                        .then(coerce)
-                    })
-                    .forward(sink);
-                tokio::spawn(
-                    fut.map(|_| ())
-                        .map_err(|e| error!("Error in server, {}", e)),
-                );
-            }
-            let (deal_sink, deal_stream) = dealer.sink_stream().split();
-            let (rout_sink, rout_stream) = router.sink_stream().split();
-            deal_stream
-                .forward(rout_sink)
-                .join(rout_stream.forward(deal_sink))
-                .map_err(|e| error!("Error in broker, {}", e))
-                .map(|_| ())
+            let storage2 = storage.clone();
+            let fut = Interval::new(tokio::clock::now(), Duration::from_millis(250))
+                .from_err()
+                .and_then(move |_| dequeue_jobs(storage.clone()))
+                .flatten()
+                .fold(pusher, move |pusher, multipart| {
+                    Box::new(push_job(pusher, multipart))
+                });
+            tokio::spawn(
+                fut.map(|_| ())
+                    .map_err(move |e| error!("Error in server, {}", e)),
+            );
+            puller
+                .stream()
+                .from_err()
+                .and_then(parse_job)
+                .and_then(move |job| store_job(job, storage2.clone()))
+                .or_else(|e| Ok(error!("Error storing job, {}", e)))
+                .for_each(|_| Ok(()))
         })
     }
 }
-fn wrap_request(
-    res: Result<ServerRequest, ServerResponse>,
-    storage: Storage,
-) -> Result<Multipart, Error> {
-    let res = res.map(move |msg| process_request(msg, storage));
-    let response = match res {
-        Ok(response) => response,
-        Err(response) => response,
-    };
-    Ok(Message::from_slice(serde_json::to_string(&response)?.as_ref())?.into())
-}
-fn parse_multipart(mut multipart: Multipart) -> Result<ServerRequest, ServerResponse> {
-    let unparsed_msg = match multipart.pop_front() {
-        Some(msg) => msg,
-        None => return Err(ServerResponse::Unparsable),
-    };
-    match serde_json::from_slice(&unparsed_msg) {
-        Ok(msg) => Ok(msg),
-        Err(_) => Err(ServerResponse::Unparsable),
-    }
-}
-fn process_request(request: ServerRequest, storage: Storage) -> ServerResponse {
-    match request {
-        ServerRequest::FetchJobs(limit) => storage
-            .dequeue_job(limit)
-            .map(ServerResponse::FetchJobs)
-            .map_err(|e| error!("Error fetching jobs, {}", e))
-            .unwrap_or(ServerResponse::InternalServerError),
-        ServerRequest::ReturnJob(job) => storage
-            .store_job(job)
-            .map(|_| ServerResponse::JobReturned)
-            .map_err(|e| error!("Error returning job, {}", e))
-            .unwrap_or(ServerResponse::InternalServerError),
-    }
-}
+fn dequeue_jobs(
+    storage: Storage,
+) -> impl Future<Item = impl Stream<Item = Multipart, Error = Error>, Error = Error> {
+    poll_fn(move || {
+        let storage = storage.clone();
+        blocking(move || wrap_fetch_queue(storage))
+    })
+    .then(coerce)
+    .map(|jobs| iter_ok(jobs))
+    .or_else(|e| {
+        error!("Error fetching jobs, {}", e);
+        Ok(iter_ok(vec![]))
+    })
+}
+fn push_job(pusher: Push, message: Multipart) -> impl Future<Item = Push, Error = Error> {
+    pusher.send(message).map_err(Error::from)
+}
+fn store_job(job: JobInfo, storage: Storage) -> impl Future<Item = (), Error = Error> {
+    let storage = storage.clone();
+    poll_fn(move || {
+        let job = job.clone();
+        let storage = storage.clone();
+        blocking(move || storage.store_job(job).map_err(Error::from)).map_err(Error::from)
+    })
+    .then(coerce)
+}
+fn wrap_fetch_queue(storage: Storage) -> Result<Vec<Multipart>, Error> {
+    let response = fetch_queue(storage)?;
+    let jobs = response
+        .into_iter()
+        .map(|job| {
+            serde_json::to_string(&job)
+                .map_err(Error::from)
+                .and_then(|json| Message::from_slice(json.as_ref()).map_err(Error::from))
+                .map(Multipart::from)
+        })
+        .collect::<Result<Vec<_>, Error>>()?;
+    Ok(jobs)
+}
+fn fetch_queue(storage: Storage) -> Result<Vec<JobInfo>, Error> {
+    storage.dequeue_job(100).map_err(Error::from)
+}
+fn parse_job(mut multipart: Multipart) -> Result<JobInfo, Error> {
+    let unparsed_msg = multipart.pop_front().ok_or(EmptyMessage)?;
+    let parsed = serde_json::from_slice(&unparsed_msg)?;
+    Ok(parsed)
+}
+#[derive(Clone, Debug, Fail)]
+#[fail(display = "Message was empty")]
+pub struct EmptyMessage;

View file

@@ -3,28 +3,26 @@ use std::sync::Arc
 use failure::Error;
 use futures::{future::IntoFuture, Future};
 use jobs_core::JobInfo;
-use tokio_zmq::{prelude::*, Req};
+use tokio_zmq::{prelude::*, Push};
 use zmq::{Context, Message};
-use crate::ServerRequest;
 pub struct SpawnerConfig {
     server: String,
     ctx: Arc<Context>,
 }
 impl SpawnerConfig {
-    pub fn new(server_host: &str, server_port: usize) -> Self {
+    pub fn new(server_host: &str, queue_port: usize) -> Self {
         let ctx = Arc::new(Context::new());
         SpawnerConfig {
-            server: format!("tcp://{}:{}", server_host, server_port),
+            server: format!("tcp://{}:{}", server_host, queue_port),
             ctx,
         }
     }
     pub fn queue(&self, job: JobInfo) -> impl Future<Item = (), Error = Error> {
-        let msg = serde_json::to_string(&ServerRequest::ReturnJob(job))
+        let msg = serde_json::to_string(&job)
             .map_err(Error::from)
             .and_then(|s| {
                 Message::from_slice(s.as_ref())
@@ -33,17 +31,12 @@ impl SpawnerConfig {
             })
             .into_future();
-        Req::builder(self.ctx.clone())
+        Push::builder(self.ctx.clone())
             .connect(&self.server)
             .build()
             .into_future()
             .from_err()
             .join(msg)
-            .and_then(move |(req, msg)| {
-                req.send(msg)
-                    .from_err()
-                    .and_then(|req| req.recv().from_err())
-                    .map(|_| ())
-            })
+            .and_then(move |(req, msg)| req.send(msg).from_err().map(|_| ()))
     }
 }

View file

@@ -0,0 +1,166 @@
+use std::sync::Arc;
+use failure::Error;
+use futures::{
+    future::{lazy, Either, IntoFuture},
+    Future, Stream,
+};
+use jobs_core::{JobInfo, Processor, Processors};
+use tokio_zmq::{prelude::*, Multipart, Pull, Push};
+use zmq::{Context, Message};
+struct Worker {
+    processors: Processors,
+    pull: Pull,
+    push: Push,
+}
+impl Worker {
+    pub fn init(
+        server_host: &str,
+        job_port: usize,
+        queue_port: usize,
+        ctx: Arc<Context>,
+    ) -> Result<Self, Error> {
+        let pull = Pull::builder(ctx.clone())
+            .connect(&format!("tcp://{}:{}", server_host, job_port))
+            .build()?;
+        let push = Push::builder(ctx.clone())
+            .connect(&format!("tcp://{}:{}", server_host, queue_port))
+            .build()?;
+        let processors = Processors::new();
+        let worker = Worker {
+            processors,
+            push,
+            pull,
+        };
+        Ok(worker)
+    }
+    fn register_processor<P>(&mut self, processor: P)
+    where
+        P: Processor + Send + Sync + 'static,
+    {
+        self.processors.register_processor(processor);
+    }
+}
+pub struct WorkerConfig {
+    workers: Vec<Worker>,
+}
+impl WorkerConfig {
+    pub fn init(
+        num_processors: usize,
+        server_host: &str,
+        job_port: usize,
+        queue_port: usize,
+    ) -> Result<Self, Error> {
+        let ctx = Arc::new(Context::new());
+        let mut workers = Vec::new();
+        for _ in 0..num_processors {
+            let worker = Worker::init(server_host, job_port, queue_port, ctx.clone())?;
+            workers.push(worker);
+        }
+        let cfg = WorkerConfig { workers };
+        Ok(cfg)
+    }
+    pub fn register_processor<P>(&mut self, processor: P)
+    where
+        P: Processor + Send + Sync + 'static,
+    {
+        for worker in self.workers.iter_mut() {
+            worker.register_processor(processor.clone());
+        }
+    }
+    pub fn run(self) -> impl Future<Item = (), Error = ()> {
+        let WorkerConfig { workers } = self;
+        lazy(|| {
+            for worker in workers.into_iter() {
+                tokio::spawn(worker_future(worker));
+            }
+            Ok(())
+        })
+    }
+}
+fn worker_future(worker: Worker) -> impl Future<Item = (), Error = ()> {
+    let Worker {
+        push,
+        pull,
+        processors,
+    } = worker;
+    pull.stream()
+        .from_err()
+        .and_then(move |multipart| wrap_processing(multipart, &processors))
+        .map(Some)
+        .or_else(|e| {
+            error!("Error processing job, {}", e);
+            Ok(None)
+        })
+        .filter_map(|item| item)
+        .forward(push.sink())
+        .map_err(|e: Error| error!("Error pushing job, {}", e))
+        .map(|_| ())
+}
+fn serialize_request(job: JobInfo) -> Result<Multipart, Error> {
+    let request = serde_json::to_string(&job)?;
+    let msg = Message::from_slice(request.as_ref())?;
+    Ok(msg.into())
+}
+fn parse_multipart(mut multipart: Multipart) -> Result<JobInfo, Error> {
+    let message = multipart.pop_front().ok_or(ParseError)?;
+    let parsed = serde_json::from_slice(&message)?;
+    Ok(parsed)
+}
+fn wrap_processing(
+    multipart: Multipart,
+    processors: &Processors,
+) -> impl Future<Item = Multipart, Error = Error> {
+    let msg = match parse_multipart(multipart) {
+        Ok(msg) => msg,
+        Err(e) => return Either::A(Err(e).into_future()),
+    };
+    let fut = process_job(msg, processors).and_then(serialize_request);
+    Either::B(fut)
+}
+fn process_job(
+    job: JobInfo,
+    processors: &Processors,
+) -> impl Future<Item = JobInfo, Error = Error> {
+    processors
+        .process_job(job.clone())
+        .map_err(|_| ProcessError)
+        .from_err()
+}
+#[derive(Clone, Debug, Fail)]
+#[fail(display = "Error parsing job")]
+struct ParseError;
+#[derive(Clone, Debug, Fail)]
+#[fail(display = "Error processing job")]
+struct ProcessError;

View file

@@ -9,4 +9,4 @@ pub use jobs_tokio::{JobRunner, ProcessorHandle};
 pub use jobs_actix::{JobsActor, JobsBuilder, QueueJob};
 #[cfg(feature = "jobs-server-tokio")]
-pub use jobs_server_tokio::{ClientConfig, ServerConfig, SpawnerConfig};
+pub use jobs_server_tokio::{ServerConfig, SpawnerConfig, WorkerConfig};