2018-03-15 18:52:38 +00:00
|
|
|
// Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
|
|
|
|
//
|
|
|
|
// This library is free software; you can redistribute it and/or
|
|
|
|
// modify it under the terms of the GNU Library General Public
|
|
|
|
// License as published by the Free Software Foundation; either
|
|
|
|
// version 2 of the License, or (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
// Library General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Library General Public
|
|
|
|
// License along with this library; if not, write to the
|
|
|
|
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
|
|
|
|
// Boston, MA 02110-1335, USA.
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
//! The `Executor` for the `threadshare` GStreamer plugins framework.
|
|
|
|
//!
|
|
|
|
//! The [`threadshare`]'s `Executor` consists of a set of [`Context`]s. Each [`Context`] is
|
|
|
|
//! identified by a `name` and runs a loop in a dedicated `thread`. Users can use the [`Context`]
|
|
|
|
//! to spawn `Future`s. A `Future` is an asynchronous processing unit that allows waiting for
|
|
|
|
//! in a non-blocking way. Examples of non-blocking operations are:
|
|
|
|
//!
|
|
|
|
//! * Waiting for an incoming packet on a Socket.
|
|
|
|
//! * Waiting for an asynchronous `Mutex` `lock` to succeed.
|
|
|
|
//! * Waiting for a `Timeout` to be elapsed.
|
|
|
|
//!
|
|
|
|
//! [`Context`]s instantiators define the minimum time between two iterations of the [`Context`]
|
|
|
|
//! loop, which acts as a throttle, saving CPU usage when no operations are to be executed.
|
|
|
|
//!
|
|
|
|
//! `Element` implementations should use [`PadSrc`] & [`PadSink`] which provide high-level features.
|
|
|
|
//!
|
|
|
|
//! [`threadshare`]: ../index.html
|
|
|
|
//! [`Context`]: struct.Context.html
|
|
|
|
//! [`PadSrc`]: struct.PadSrc.html
|
|
|
|
//! [`PadSink`]: struct.PadSink.html
|
|
|
|
|
|
|
|
use futures::channel::mpsc as future_mpsc;
|
|
|
|
use futures::future::BoxFuture;
|
2019-11-24 20:12:40 +00:00
|
|
|
use futures::prelude::*;
|
|
|
|
use futures::ready;
|
|
|
|
use futures::stream::futures_unordered::FuturesUnordered;
|
|
|
|
|
|
|
|
use glib;
|
|
|
|
use glib::{glib_boxed_derive_traits, glib_boxed_type};
|
|
|
|
|
|
|
|
use gst;
|
|
|
|
use gst::{gst_debug, gst_log, gst_trace};
|
|
|
|
|
|
|
|
use lazy_static::lazy_static;
|
|
|
|
|
2019-04-18 10:38:10 +00:00
|
|
|
use std::cmp;
|
|
|
|
use std::collections::{BinaryHeap, HashMap};
|
2018-03-16 18:24:36 +00:00
|
|
|
use std::io;
|
2018-03-26 14:49:42 +00:00
|
|
|
use std::mem;
|
2019-11-24 20:12:40 +00:00
|
|
|
use std::pin::Pin;
|
2019-12-02 09:30:07 +00:00
|
|
|
use std::sync::mpsc as sync_mpsc;
|
|
|
|
use std::sync::{atomic, Arc, Mutex, Weak};
|
|
|
|
use std::task::Poll;
|
2018-04-02 07:53:40 +00:00
|
|
|
use std::thread;
|
2019-12-02 09:30:07 +00:00
|
|
|
use std::time::{Duration, Instant};
|
2018-03-15 18:52:38 +00:00
|
|
|
|
2019-11-24 20:12:40 +00:00
|
|
|
use tokio_executor::current_thread as tokio_current_thread;
|
2019-12-02 09:30:07 +00:00
|
|
|
use tokio_executor::park::Unpark;
|
2018-03-15 18:52:38 +00:00
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
use super::RUNTIME_CAT;
|
|
|
|
|
|
|
|
// We are bound to using `sync` for the `runtime` `Mutex`es. Attempts to use `async` `Mutex`es
|
|
|
|
// lead to the following issues:
|
|
|
|
//
|
|
|
|
// * `CONTEXTS`: can't `spawn` a `Future` when called from a `Context` thread via `ffi`.
|
|
|
|
// * `timers`: can't automatically `remove` the timer from `BinaryHeap` because `async drop`
|
|
|
|
// is not available.
|
|
|
|
// * `task_queues`: can't `add` a pending task when called from a `Context` thread via `ffi`.
|
|
|
|
//
|
|
|
|
// Also, we want to be able to `acquire` a `Context` outside of an `async` context.
|
|
|
|
// These `Mutex`es must be `lock`ed for a short period.
|
2018-03-26 14:49:42 +00:00
|
|
|
lazy_static! {
    // Global registry of the existing `Context`s, keyed by name.
    //
    // Entries are `Weak` so a `Context` (and its thread) is torn down as soon
    // as the last external strong reference is dropped; `ContextInner::drop`
    // removes the corresponding entry from this map.
    static ref CONTEXTS: Mutex<HashMap<String, Weak<ContextInner>>> = Mutex::new(HashMap::new());
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// State owned by the dedicated thread running a `Context` loop.
struct ContextThread {
    // `Context` name, used in log messages.
    name: String,
    // Shared with `ContextShutdown`; polled at each loop iteration so the
    // thread exits when shutdown is requested.
    shutdown: Arc<atomic::AtomicBool>,
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
impl ContextThread {
    /// Spawns the dedicated thread for a new `Context` and blocks until it is ready.
    ///
    /// * `name` — `Context` name, used for logging.
    /// * `wait` — minimum duration (in ms) between two iterations of the loop.
    /// * `reactor` — the I/O driver the loop will turn.
    /// * `timers` — heap of pending timers, shared with `Context::add_timer`.
    ///
    /// Returns the executor handle (to spawn futures on the thread) and the
    /// `ContextShutdown` guard that terminates the thread when dropped.
    fn start(
        name: &str,
        wait: u32,
        reactor: tokio_net::driver::Reactor,
        timers: Arc<Mutex<BinaryHeap<TimerEntry>>>,
    ) -> (tokio_current_thread::Handle, ContextShutdown) {
        // Keep a reactor handle around so the shutdown guard can unpark the loop.
        let handle = reactor.handle();
        let shutdown = Arc::new(atomic::AtomicBool::new(false));
        let shutdown_clone = shutdown.clone();
        let name_clone = name.into();

        let mut context_thread = ContextThread {
            shutdown: shutdown_clone,
            name: name_clone,
        };

        // Rendezvous channel: the new thread sends back its executor handle
        // once the `CurrentThread` executor is constructed.
        let (sender, receiver) = sync_mpsc::channel();

        let join = thread::spawn(move || {
            context_thread.spawn(wait, reactor, sender, timers);
        });

        let shutdown = ContextShutdown {
            name: name.into(),
            shutdown,
            handle,
            join: Some(join),
        };

        // Blocks until the thread has initialized its executor.
        let thread_handle = receiver.recv().expect("Context thread init failed");

        (thread_handle, shutdown)
    }

    /// Body of the `Context` thread: a throttled event loop.
    ///
    /// Each iteration fires due timers, turns the executor until no future
    /// made progress, then sleeps so iterations are at least `wait` ms apart
    /// (or, when `wait` is 0, parks on the reactor until the next timer/event).
    fn spawn(
        &mut self,
        wait: u32,
        reactor: tokio_net::driver::Reactor,
        sender: sync_mpsc::Sender<tokio_current_thread::Handle>,
        timers: Arc<Mutex<BinaryHeap<TimerEntry>>>,
    ) {
        gst_debug!(RUNTIME_CAT, "Started context thread '{}'", self.name);

        let wait = Duration::from_millis(wait as u64);

        let handle = reactor.handle();
        // The reactor is wrapped in the timer so parking drives both.
        let timer = tokio_timer::Timer::new(reactor);
        let timer_handle = timer.handle();

        let mut current_thread = tokio_current_thread::CurrentThread::new_with_park(timer);

        // Hand the executor handle back to `start`.
        sender
            .send(current_thread.handle())
            .expect("Couldn't send context thread handle");

        // Make the timer and reactor the defaults for this thread while the
        // guards are alive.
        let _timer_guard = tokio_timer::set_default(&timer_handle);
        let _reactor_guard = tokio_net::driver::set_default(&handle);

        let mut now = Instant::now();

        loop {
            if self.shutdown.load(atomic::Ordering::SeqCst) {
                gst_debug!(RUNTIME_CAT, "Shutting down loop");
                break;
            }

            gst_trace!(RUNTIME_CAT, "Elapsed {:?} since last loop", now.elapsed());

            // Handle timers
            {
                // Trigger all timers that would be expired before the middle of the loop wait
                // time
                let timer_threshold = now + wait / 2;
                let mut timers = timers.lock().unwrap();
                while timers
                    .peek()
                    .and_then(|entry| {
                        if entry.time < timer_threshold {
                            Some(())
                        } else {
                            None
                        }
                    })
                    .is_some()
                {
                    let TimerEntry {
                        time,
                        interval,
                        sender,
                        ..
                    } = timers.pop().unwrap();

                    // Receiver side was dropped: the timer is cancelled.
                    if sender.is_closed() {
                        continue;
                    }

                    let _ = sender.unbounded_send(());
                    // Repeating timers are re-armed relative to their previous
                    // deadline (not `now`) to avoid drift.
                    if let Some(interval) = interval {
                        timers.push(TimerEntry {
                            time: time + interval,
                            id: TIMER_ENTRY_ID.fetch_add(1, atomic::Ordering::Relaxed),
                            interval: Some(interval),
                            sender,
                        });
                    }
                }
            }

            gst_trace!(RUNTIME_CAT, "Turning thread '{}'", self.name);
            // Turn without blocking until no future made progress.
            while current_thread
                .turn(Some(Duration::from_millis(0)))
                .unwrap()
                .has_polled()
            {}
            gst_trace!(RUNTIME_CAT, "Turned thread '{}'", self.name);

            // We have to check again after turning in case we're supposed to shut down now
            // and already handled the unpark above
            if self.shutdown.load(atomic::Ordering::SeqCst) {
                gst_debug!(RUNTIME_CAT, "Shutting down loop");
                break;
            }

            let elapsed = now.elapsed();
            gst_trace!(RUNTIME_CAT, "Elapsed {:?} after handling futures", elapsed);

            if wait == Duration::from_millis(0) {
                // Unthrottled mode: park until the earliest pending timer is
                // due (or indefinitely when there is none) — `unpark` on the
                // reactor handle wakes us early.
                let timers = timers.lock().unwrap();
                let wait = match timers.peek().map(|entry| entry.time) {
                    None => None,
                    Some(time) => Some({
                        let tmp = Instant::now();

                        if time < tmp {
                            Duration::from_millis(0)
                        } else {
                            time.duration_since(tmp)
                        }
                    }),
                };
                // Release the lock before parking so timers can still be added.
                drop(timers);

                gst_trace!(RUNTIME_CAT, "Sleeping for up to {:?}", wait);
                current_thread.turn(wait).unwrap();
                gst_trace!(RUNTIME_CAT, "Slept for {:?}", now.elapsed());
                now = Instant::now();
            } else {
                // Throttled mode: sleep for the remainder of the `wait` window.
                if elapsed < wait {
                    gst_trace!(
                        RUNTIME_CAT,
                        "Waiting for {:?} before polling again",
                        wait - elapsed
                    );
                    thread::sleep(wait - elapsed);
                    gst_trace!(RUNTIME_CAT, "Slept for {:?}", now.elapsed());
                }

                // Advance by exactly one window so iterations stay on a fixed
                // cadence even if an iteration overran.
                now += wait;
            }
        }
    }
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
impl Drop for ContextThread {
|
2018-03-15 18:52:38 +00:00
|
|
|
fn drop(&mut self) {
|
2019-12-02 09:30:07 +00:00
|
|
|
gst_debug!(RUNTIME_CAT, "Terminated: context thread '{}'", self.name);
|
2018-03-15 18:52:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// Guard that terminates a `ContextThread` when dropped.
#[derive(Debug)]
struct ContextShutdown {
    // `Context` name, used in log messages.
    name: String,
    // Shared with the `ContextThread`; setting it makes the loop exit.
    shutdown: Arc<atomic::AtomicBool>,
    // Reactor handle used to unpark the loop so it notices the flag promptly.
    handle: tokio_net::driver::Handle,
    // Taken and joined on drop; `Option` so `drop(&mut self)` can move it out.
    join: Option<thread::JoinHandle<()>>,
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
impl Drop for ContextShutdown {
|
2018-03-15 18:52:38 +00:00
|
|
|
fn drop(&mut self) {
|
2019-12-02 09:30:07 +00:00
|
|
|
gst_debug!(
|
|
|
|
RUNTIME_CAT,
|
|
|
|
"Shutting down context thread thread '{}'",
|
|
|
|
self.name
|
|
|
|
);
|
|
|
|
self.shutdown.store(true, atomic::Ordering::SeqCst);
|
|
|
|
gst_trace!(
|
|
|
|
RUNTIME_CAT,
|
|
|
|
"Waiting for context thread '{}' to shutdown",
|
|
|
|
self.name
|
|
|
|
);
|
2018-03-15 18:52:38 +00:00
|
|
|
// After being unparked, the next turn() is guaranteed to finish immediately,
|
|
|
|
// as such there is no race condition between checking for shutdown and setting
|
|
|
|
// shutdown.
|
|
|
|
self.handle.unpark();
|
|
|
|
let _ = self.join.take().unwrap().join();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// Identifier of a queue of pending tasks on a [`Context`].
///
/// Obtained from [`Context::acquire_task_queue_id`].
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)]
pub struct TaskQueueId(u64);
|
2018-03-15 18:52:38 +00:00
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
// Registers `TaskQueueId` as a GLib boxed type so it can travel through
// GObject/GStreamer APIs (e.g. stored in a `gst::Structure`).
impl glib::subclass::boxed::BoxedType for TaskQueueId {
    // GType name under which the boxed type is registered.
    const NAME: &'static str = "TsTaskQueueId";

    glib_boxed_type!();
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
// Derives the `glib::Value` conversion traits for the boxed type above.
glib_boxed_derive_traits!(TaskQueueId);
|
2018-11-29 19:01:02 +00:00
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// Result produced by the tasks added to a `Context` task queue.
pub type TaskOutput = Result<(), gst::FlowError>;
// A queue of pending boxed tasks, polled concurrently when drained.
type TaskQueue = FuturesUnordered<BoxFuture<'static, TaskOutput>>;
|
2019-09-06 17:19:54 +00:00
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// Shared state behind a [`Context`] / [`ContextWeak`].
#[derive(Debug)]
struct ContextInner {
    // `Context` name; also the key in the global `CONTEXTS` registry.
    name: String,
    // Executor handle of the dedicated thread, used by `Context::spawn`.
    thread_handle: Mutex<tokio_current_thread::Handle>,
    // Handle to the thread's reactor, used to unpark the loop (e.g. on new timers).
    reactor_handle: tokio_net::driver::Handle,
    // Pending timers, shared with the `ContextThread` loop.
    timers: Arc<Mutex<BinaryHeap<TimerEntry>>>,
    // Only used for dropping
    _shutdown: ContextShutdown,
    // (next id, id -> queue): pending task queues handed out via
    // `acquire_task_queue_id`.
    task_queues: Mutex<(u64, HashMap<u64, TaskQueue>)>,
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
impl Drop for ContextInner {
|
2018-03-15 18:52:38 +00:00
|
|
|
fn drop(&mut self) {
|
|
|
|
let mut contexts = CONTEXTS.lock().unwrap();
|
2019-12-02 09:30:07 +00:00
|
|
|
gst_debug!(RUNTIME_CAT, "Finalizing context '{}'", self.name);
|
2018-03-15 18:52:38 +00:00
|
|
|
contexts.remove(&self.name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// A weak reference to a [`Context`]; does not keep the `Context` thread alive.
#[derive(Clone, Debug)]
pub struct ContextWeak(Weak<ContextInner>);
|
|
|
|
|
|
|
|
impl ContextWeak {
|
|
|
|
pub fn upgrade(&self) -> Option<Context> {
|
|
|
|
self.0.upgrade().map(Context)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// See the [module-level documentation](index.html) for more.
///
/// [`PadSrc`]: ../struct.PadSrc.html
/// [`PadSink`]: ../struct.PadSink.html
// Cheaply cloneable: all clones share the same `ContextInner` / thread.
#[derive(Clone, Debug)]
pub struct Context(Arc<ContextInner>);
|
|
|
|
|
|
|
|
impl Context {
    /// Acquires the `Context` named `context_name`, creating it if needed.
    ///
    /// If a `Context` with that name is still alive, it is joined and the
    /// `wait` argument is ignored. Otherwise a new `Context` is created with
    /// `wait` ms as the minimum duration between two loop iterations.
    ///
    /// # Errors
    ///
    /// Returns an `io::Error` if the I/O reactor can't be created.
    pub fn acquire(context_name: &str, wait: u32) -> Result<Self, io::Error> {
        let mut contexts = CONTEXTS.lock().unwrap();

        if let Some(inner_weak) = contexts.get(context_name) {
            if let Some(inner_strong) = inner_weak.upgrade() {
                gst_debug!(RUNTIME_CAT, "Joining Context '{}'", inner_strong.name);
                return Ok(Context(inner_strong));
            }
        }

        let reactor = tokio_net::driver::Reactor::new()?;
        let reactor_handle = reactor.handle();

        let timers = Arc::new(Mutex::new(BinaryHeap::new()));

        let (thread_handle, shutdown) =
            ContextThread::start(context_name, wait, reactor, timers.clone());

        let context = Context(Arc::new(ContextInner {
            name: context_name.into(),
            thread_handle: Mutex::new(thread_handle),
            reactor_handle,
            timers,
            _shutdown: shutdown,
            task_queues: Mutex::new((0, HashMap::new())),
        }));
        // Only a `Weak` is kept in the registry: the `Context` lives as long
        // as external strong references do.
        contexts.insert(context_name.into(), Arc::downgrade(&context.0));

        gst_debug!(RUNTIME_CAT, "New Context '{}'", context.0.name);
        Ok(context)
    }

    /// Returns a weak reference to this `Context`.
    pub fn downgrade(&self) -> ContextWeak {
        ContextWeak(Arc::downgrade(&self.0))
    }

    /// Allocates a new task queue on this `Context` and returns its id.
    pub fn acquire_task_queue_id(&self) -> TaskQueueId {
        let mut task_queues = self.0.task_queues.lock().unwrap();
        let id = task_queues.0;
        task_queues.0 += 1;
        task_queues.1.insert(id, FuturesUnordered::new());

        TaskQueueId(id)
    }

    /// The name this `Context` was acquired with.
    pub fn name(&self) -> &str {
        self.0.name.as_str()
    }

    /// Handle to this `Context`'s I/O reactor.
    pub fn reactor_handle(&self) -> &tokio_net::driver::Handle {
        &self.0.reactor_handle
    }

    /// Spawns `future` on this `Context`'s thread.
    ///
    /// # Panics
    ///
    /// Panics if the `Context` thread has already shut down.
    pub fn spawn<Fut>(&self, future: Fut)
    where
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.0.thread_handle.lock().unwrap().spawn(future).unwrap();
    }

    /// Removes the task queue `id`, returning its pending tasks, if any.
    pub fn release_task_queue(&self, id: TaskQueueId) -> Option<TaskQueue> {
        let mut task_queues = self.0.task_queues.lock().unwrap();
        task_queues.1.remove(&id.0)
    }

    /// Adds `task` to the pending queue `id`.
    ///
    /// The task is not executed until the queue is drained
    /// (see [`drain_task_queue`](Context::drain_task_queue)).
    ///
    /// # Errors
    ///
    /// Returns `Err(())` if the queue was already released.
    pub fn add_task<T>(&self, id: TaskQueueId, task: T) -> Result<(), ()>
    where
        T: Future<Output = TaskOutput> + Send + 'static,
    {
        let mut task_queues = self.0.task_queues.lock().unwrap();
        match task_queues.1.get_mut(&id.0) {
            Some(task_queue) => {
                task_queue.push(task.boxed());
                Ok(())
            }
            None => Err(()),
        }
    }

    /// Discards all pending tasks in queue `id` without executing them.
    ///
    /// # Panics
    ///
    /// Panics if the queue was already released.
    pub fn clear_task_queue(&self, id: TaskQueueId) {
        let mut task_queues = self.0.task_queues.lock().unwrap();
        let task_queue = task_queues.1.get_mut(&id.0).unwrap();

        *task_queue = FuturesUnordered::new();
    }

    /// Takes the pending tasks of queue `id` and returns a `Future` which
    /// executes them all, or `None` when the queue is empty.
    ///
    /// Execution stops at the first task returning an error.
    ///
    /// # Panics
    ///
    /// Panics if the queue was already released.
    pub fn drain_task_queue(&self, id: TaskQueueId) -> Option<impl Future<Output = TaskOutput>> {
        let task_queue = {
            let mut task_queues = self.0.task_queues.lock().unwrap();
            let task_queue = task_queues.1.get_mut(&id.0).unwrap();

            // Swap in a fresh queue so new tasks can be added immediately.
            mem::replace(task_queue, FuturesUnordered::new())
        };

        if !task_queue.is_empty() {
            gst_log!(
                RUNTIME_CAT,
                "Scheduling {} tasks from {:?} on '{}'",
                task_queue.len(),
                id,
                self.0.name,
            );

            Some(task_queue.try_for_each(|_| future::ok(())))
        } else {
            None
        }
    }

    /// Registers a timer firing at `time` and then, if `interval` is set,
    /// repeatedly every `interval`.
    ///
    /// Returns the receiver on which a `()` is sent at each tick. Dropping
    /// the receiver cancels the timer.
    pub fn add_timer(
        &self,
        time: Instant,
        interval: Option<Duration>,
    ) -> future_mpsc::UnboundedReceiver<()> {
        let (sender, receiver) = future_mpsc::unbounded();

        let mut timers = self.0.timers.lock().unwrap();
        let entry = TimerEntry {
            time,
            id: TIMER_ENTRY_ID.fetch_add(1, atomic::Ordering::Relaxed),
            interval,
            sender,
        };

        timers.push(entry);
        // Wake the loop so it takes the new timer into account right away.
        self.0.reactor_handle.unpark();

        receiver
    }

    /// Builds a [`Interval`] `Stream` ticking every `interval` on this `Context`.
    pub fn new_interval(&self, interval: Duration) -> Interval {
        Interval::new(&self, interval)
    }

    /// Builds a `Future` to execute an `action` at [`Interval`]s.
    ///
    /// The `Future` completes when `f` returns an `Err`.
    ///
    /// [`Interval`]: struct.Interval.html
    pub fn interval<F, Fut>(&self, interval: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: Fn() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = Result<(), ()>> + Send + 'static,
    {
        let f = Arc::new(f);
        self.new_interval(interval).try_for_each(move |_| {
            let f = Arc::clone(&f);
            f()
        })
    }

    /// Builds a [`Timeout`] `Future` completing after `timeout` on this `Context`.
    pub fn new_timeout(&self, timeout: Duration) -> Timeout {
        Timeout::new(&self, timeout)
    }

    /// Builds a `Future` to execute an action after the given `delay` has elapsed.
    pub fn delay_for<F, Fut>(&self, delay: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: FnOnce() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.new_timeout(delay).then(move |_| f())
    }
}
|
|
|
|
|
2019-04-18 10:38:10 +00:00
|
|
|
// Monotonically increasing id giving `TimerEntry`s a total order (tie-breaker
// for entries sharing the same deadline).
static TIMER_ENTRY_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
|
|
|
|
|
|
|
|
// Ad-hoc interval timer implementation for our throttled event loop above
#[derive(Debug)]
struct TimerEntry {
    // Next deadline of this timer.
    time: Instant,
    id: usize, // for producing a total order
    // `Some(d)` for repeating timers rescheduled every `d`; `None` for one-shots.
    interval: Option<Duration>,
    // Receives a `()` per tick; a closed sender means the timer was cancelled.
    sender: future_mpsc::UnboundedSender<()>,
}
|
|
|
|
|
|
|
|
impl PartialEq for TimerEntry {
|
|
|
|
fn eq(&self, other: &Self) -> bool {
|
|
|
|
self.time.eq(&other.time) && self.id.eq(&other.id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Eq for TimerEntry {}
|
|
|
|
|
|
|
|
impl PartialOrd for TimerEntry {
|
|
|
|
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
|
|
|
|
Some(self.cmp(&other))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Ord for TimerEntry {
|
|
|
|
fn cmp(&self, other: &Self) -> cmp::Ordering {
|
|
|
|
other
|
|
|
|
.time
|
|
|
|
.cmp(&self.time)
|
|
|
|
.then_with(|| other.id.cmp(&self.id))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// A `Stream` that yields a tick at `interval`s.
#[derive(Debug)]
pub struct Interval {
    // One `()` is received per tick from the `Context` timer machinery.
    receiver: future_mpsc::UnboundedReceiver<()>,
}
|
|
|
|
|
|
|
|
impl Interval {
|
2019-12-02 09:30:07 +00:00
|
|
|
fn new(context: &Context, interval: Duration) -> Self {
|
|
|
|
Self {
|
|
|
|
receiver: context.add_timer(Instant::now(), Some(interval)),
|
|
|
|
}
|
2019-04-18 10:38:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Stream for Interval {
|
2019-12-02 09:30:07 +00:00
|
|
|
type Item = Result<(), ()>;
|
|
|
|
|
|
|
|
fn poll_next(
|
|
|
|
mut self: Pin<&mut Self>,
|
|
|
|
cx: &mut std::task::Context,
|
|
|
|
) -> Poll<Option<Self::Item>> {
|
|
|
|
self.receiver
|
|
|
|
.poll_next_unpin(cx)
|
|
|
|
.map(|item_opt| item_opt.map(Ok))
|
2019-04-18 10:38:10 +00:00
|
|
|
}
|
|
|
|
}
|
2019-08-22 23:04:14 +00:00
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
/// A `Future` that completes after a `timeout` is elapsed.
#[derive(Debug)]
pub struct Timeout {
    // Receives exactly one `()` when the one-shot timer fires.
    receiver: future_mpsc::UnboundedReceiver<()>,
}
|
|
|
|
|
|
|
|
impl Timeout {
|
2019-12-02 09:30:07 +00:00
|
|
|
fn new(context: &Context, timeout: Duration) -> Self {
|
|
|
|
Self {
|
|
|
|
receiver: context.add_timer(Instant::now() + timeout, None),
|
|
|
|
}
|
2019-08-22 23:04:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Future for Timeout {
|
2019-11-24 20:12:40 +00:00
|
|
|
type Output = ();
|
2019-08-22 23:04:14 +00:00
|
|
|
|
2019-12-02 09:30:07 +00:00
|
|
|
fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll<Self::Output> {
|
2019-11-24 20:12:40 +00:00
|
|
|
match ready!(self.receiver.poll_next_unpin(cx)) {
|
|
|
|
Some(_) => Poll::Ready(()),
|
|
|
|
None => unreachable!(),
|
2019-08-22 23:04:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-12-02 09:30:07 +00:00
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use futures::channel::{mpsc, oneshot};
    use futures::future::Aborted;
    use futures::lock::Mutex;

    use gst;

    use std::sync::Arc;
    use std::time::{Duration, Instant};

    use crate::block_on;
    use crate::runtime::future::abortable_waitable;

    use super::*;

    type Item = i32;

    // Throttling period (ms) for the test `Context`s.
    const SLEEP_DURATION: u32 = 2;
    // Timer period, much larger than the throttling so assertions on elapsed
    // time have headroom.
    const INTERVAL: Duration = Duration::from_millis(100 * SLEEP_DURATION as u64);

    // Tasks added to a queue must not run until the user drains the queue.
    #[test]
    fn user_drain_pending_tasks() {
        // Setup
        gst::init().unwrap();

        let context = Context::acquire("user_drain_task_queue", SLEEP_DURATION).unwrap();
        let queue_id = context.acquire_task_queue_id();

        let (sender, mut receiver) = mpsc::channel(1);
        let sender: Arc<Mutex<mpsc::Sender<Item>>> = Arc::new(Mutex::new(sender));

        let ctx_weak = context.downgrade();
        let queue_id_clone = queue_id.clone();
        // Queues a task which sends `item` when executed.
        let add_task = move |item| {
            let sender_task = Arc::clone(&sender);
            let context = ctx_weak.upgrade().unwrap();
            context.add_task(queue_id_clone, async move {
                sender_task
                    .lock()
                    .await
                    .send(item)
                    .await
                    .map_err(|_| gst::FlowError::Error)
            })
        };

        // Tests
        // Draining an empty queue yields no future.
        assert!(context.drain_task_queue(queue_id).is_none());

        add_task(0).unwrap();
        // Not executed yet: nothing received.
        receiver.try_next().unwrap_err();

        let drain = context.drain_task_queue(queue_id).unwrap();

        // User triggered drain
        // The drain future is lazy: still nothing until it is awaited.
        receiver.try_next().unwrap_err();

        block_on!(drain).unwrap();
        assert_eq!(receiver.try_next().unwrap(), Some(0));

        // The queue is reusable after a drain.
        add_task(1).unwrap();
        receiver.try_next().unwrap_err();
    }

    // `delay_for` runs the action once, after the delay and not too late.
    #[test]
    fn delay_for() {
        gst::init().unwrap();

        let context = Context::acquire("delay_for", SLEEP_DURATION).unwrap();

        let (sender, receiver) = oneshot::channel();

        let start = Instant::now();
        let delayed_by_fut = context.delay_for(INTERVAL, move || async {
            sender.send(42).unwrap();
        });
        context.spawn(delayed_by_fut);

        let _ = block_on!(receiver).unwrap();
        let delta = Instant::now() - start;
        assert!(delta >= INTERVAL);
        assert!(delta < INTERVAL * 2);
    }

    // Aborting a `delay_for` before it fires prevents the action from running.
    #[test]
    fn delay_for_abort() {
        gst::init().unwrap();

        let context = Context::acquire("delay_for_abort", SLEEP_DURATION).unwrap();

        let (sender, receiver) = oneshot::channel();

        let delay_for_fut = context.delay_for(INTERVAL, move || async {
            sender.send(42).unwrap();
        });
        let (abortable_delay_for, abort_handle) = abortable_waitable(delay_for_fut);
        context.spawn(abortable_delay_for.map(move |res| {
            if let Err(Aborted) = res {
                gst_debug!(RUNTIME_CAT, "Aborted delay_for");
            }
        }));

        block_on!(abort_handle.abort_and_wait()).unwrap();
        // The sender was dropped without sending: the action never ran.
        block_on!(receiver).unwrap_err();
    }

    // `interval` keeps ticking while the action returns `Ok`.
    #[test]
    fn interval_ok() {
        gst::init().unwrap();

        let context = Context::acquire("interval_ok", SLEEP_DURATION).unwrap();

        let (sender, mut receiver) = mpsc::channel(1);
        let sender: Arc<Mutex<mpsc::Sender<Instant>>> = Arc::new(Mutex::new(sender));

        let interval_fut = context.interval(INTERVAL, move || {
            let sender = Arc::clone(&sender);
            async move {
                let instant = Instant::now();
                sender.lock().await.send(instant).await.map_err(drop)
            }
        });
        context.spawn(interval_fut.map(drop));

        block_on!(async {
            let mut idx: u32 = 0;
            let mut first = Instant::now();
            while let Some(instant) = receiver.next().await {
                if idx > 0 {
                    // Each tick must land within one period of its deadline.
                    let delta = instant - first;
                    assert!(delta > INTERVAL * (idx - 1));
                    assert!(delta < INTERVAL * (idx + 1));
                } else {
                    first = instant;
                }
                if idx == 3 {
                    break;
                }

                idx += 1;
            }
        });
    }

    // `interval` stops ticking once the action returns `Err`.
    #[test]
    fn interval_err() {
        gst::init().unwrap();

        let context = Context::acquire("interval_err", SLEEP_DURATION).unwrap();

        let (sender, mut receiver) = mpsc::channel(1);
        let sender: Arc<Mutex<mpsc::Sender<Instant>>> = Arc::new(Mutex::new(sender));
        let interval_idx: Arc<Mutex<Item>> = Arc::new(Mutex::new(0));

        let interval_fut = context.interval(INTERVAL, move || {
            let sender = Arc::clone(&sender);
            let interval_idx = Arc::clone(&interval_idx);
            async move {
                let instant = Instant::now();
                let mut idx = interval_idx.lock().await;
                sender.lock().await.send(instant).await.unwrap();
                *idx += 1;
                // Fail after the third tick to terminate the interval.
                if *idx < 3 {
                    Ok(())
                } else {
                    Err(())
                }
            }
        });
        context.spawn(interval_fut.map(drop));

        block_on!(async {
            let mut idx: u32 = 0;
            let mut first = Instant::now();
            while let Some(instant) = receiver.next().await {
                if idx > 0 {
                    let delta = instant - first;
                    assert!(delta > INTERVAL * (idx - 1));
                    assert!(delta < INTERVAL * (idx + 1));
                } else {
                    first = instant;
                }

                idx += 1;
            }

            // Exactly 3 ticks were delivered before the `Err` stopped it.
            assert_eq!(idx, 3);
        });
    }

    // Aborting an `interval` closes its tick channel.
    #[test]
    fn interval_abort() {
        gst::init().unwrap();

        let context = Context::acquire("interval_abort", SLEEP_DURATION).unwrap();

        let (sender, mut receiver) = mpsc::channel(1);
        let sender: Arc<Mutex<mpsc::Sender<Instant>>> = Arc::new(Mutex::new(sender));

        let interval_fut = context.interval(INTERVAL, move || {
            let sender = Arc::clone(&sender);
            async move {
                let instant = Instant::now();
                sender.lock().await.send(instant).await.map_err(drop)
            }
        });
        let (abortable_interval, abort_handle) = abortable_waitable(interval_fut);
        context.spawn(abortable_interval.map(move |res| {
            if let Err(Aborted) = res {
                gst_debug!(RUNTIME_CAT, "Aborted timeout");
            }
        }));

        block_on!(async {
            let mut idx: u32 = 0;
            let mut first = Instant::now();
            while let Some(instant) = receiver.next().await {
                if idx > 0 {
                    let delta = instant - first;
                    assert!(delta > INTERVAL * (idx - 1));
                    assert!(delta < INTERVAL * (idx + 1));
                } else {
                    first = instant;
                }
                if idx == 3 {
                    abort_handle.abort_and_wait().await.unwrap();
                    break;
                }

                idx += 1;
            }

            // Abort dropped the sender: the stream ends.
            assert_eq!(receiver.next().await, None);
        });
    }
}
|