gst-plugins-rs/generic/threadshare/src/runtime/executor/scheduler.rs
François Laignel 61c62ee1e8 ts/timers: multiple improvements
This commit improves threadshare timers predictability
by better making use of current time slice.

Added a dedicated timer BTreeMap for after timers (those
that are guaranteed to fire no sooner than the expected
instant) so as to avoid previous workaround which added
half the max throttling duration. These timers can now
be checked against the reactor processing instant.

Oneshot timers only need to be polled as `Future`s when
intervals are `Stream`s. This also reduces the size for
oneshot timers and makes users call `next` on intervals.
Intervals can also implement `FusedStream`, which can help
when used in features such as `select!`.

Also drop the `time` module, which was kept for
compatibility when the `executor` was migrated from tokio
based to smol-like.
2022-09-13 07:29:50 +00:00

503 lines
14 KiB
Rust

// Copyright (C) 2018-2020 Sebastian Dröge <sebastian@centricular.com>
// Copyright (C) 2019-2022 François Laignel <fengalin@free.fr>
//
// Take a look at the license at the top of the repository in the LICENSE file.
use futures::channel::oneshot;
use futures::pin_mut;
use gio::glib::clone::Downgrade;
use std::cell::RefCell;
use std::future::Future;
use std::panic;
#[cfg(feature = "tuning")]
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::mpsc as sync_mpsc;
use std::sync::{Arc, Condvar, Mutex, Weak};
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
use waker_fn::waker_fn;
use super::task::{SubTaskOutput, TaskId, TaskQueue};
use super::{CallOnDrop, JoinHandle, Reactor};
use crate::runtime::RUNTIME_CAT;
thread_local! {
    /// Weak handle to the `Scheduler` running on the current thread, if any.
    static CURRENT_SCHEDULER: RefCell<Option<HandleWeak>> = RefCell::new(None);
}
#[derive(Debug)]
pub(super) struct Scheduler {
    /// Name of the `Context` this `Scheduler` runs for.
    context_name: Arc<str>,
    /// Minimum duration of a scheduler iteration: the thread parks until
    /// this much time has elapsed since the iteration started.
    max_throttling: Duration,
    /// Queue of the tasks spawned on this scheduler.
    tasks: TaskQueue,
    /// Flag + condvar pair used by `unpark` to interrupt the throttling park.
    must_unpark: Mutex<bool>,
    must_unpark_cvar: Condvar,
    /// Cumulated parked time in nanoseconds (tuning instrumentation).
    #[cfg(feature = "tuning")]
    parked_duration: AtomicU64,
}
impl Scheduler {
    /// Name of the ad hoc `Scheduler` used by [`Scheduler::block_on`].
    pub const DUMMY_NAME: &'static str = "DUMMY";

    /// Starts a new `Scheduler` thread for the `Context` named `context_name`
    /// and returns a [`Handle`] to it once the thread is initialized.
    ///
    /// # Panics
    ///
    /// Panics if the thread can't be spawned or if its initialization fails.
    pub fn start(context_name: &str, max_throttling: Duration) -> Handle {
        // Name the thread so that it appears in panic messages.
        let thread = thread::Builder::new().name(context_name.to_string());

        let (handle_sender, handle_receiver) = sync_mpsc::channel();
        let (shutdown_sender, shutdown_receiver) = oneshot::channel();

        let context_name = Arc::from(context_name);
        let thread_ctx_name = Arc::clone(&context_name);
        let join = thread
            .spawn(move || {
                gst::debug!(
                    RUNTIME_CAT,
                    "Started Scheduler thread for Context {}",
                    thread_ctx_name
                );

                let handle = Scheduler::init(Arc::clone(&thread_ctx_name), max_throttling);
                let this = Arc::clone(&handle.0.scheduler);
                // Hand a Handle back to `start` before entering the scheduler loop.
                handle_sender.send(handle.clone()).unwrap();

                // Run until the shutdown sender is dropped (see
                // `SchedulerShutdown::drop`) or until a task panics.
                match this.block_on_priv(shutdown_receiver) {
                    Ok(_) => {
                        gst::debug!(
                            RUNTIME_CAT,
                            "Scheduler thread shut down for Context {}",
                            thread_ctx_name
                        );
                    }
                    Err(e) => {
                        gst::error!(
                            RUNTIME_CAT,
                            "Scheduler thread shut down due to an error within Context {}",
                            thread_ctx_name
                        );

                        // We are shutting down on our own initiative:
                        // drop the shutdown channel & join handle so the
                        // `Handle` doesn't try to join this thread again.
                        if let Ok(mut shutdown) = handle.0.shutdown.lock() {
                            shutdown.clear();
                        }

                        panic::resume_unwind(e);
                    }
                }
            })
            .expect("Failed to spawn Scheduler thread");

        let handle = handle_receiver.recv().expect("Context thread init failed");
        handle.set_shutdown(shutdown_sender, join);

        handle
    }

    /// Initializes a `Scheduler` on the current thread and registers it in
    /// the `CURRENT_SCHEDULER` thread local. Also initializes the `Reactor`.
    ///
    /// # Panics
    ///
    /// Panics if a `Scheduler` is already running on this thread.
    fn init(context_name: Arc<str>, max_throttling: Duration) -> Handle {
        let handle = CURRENT_SCHEDULER.with(|cur_scheduler| {
            let mut cur_scheduler = cur_scheduler.borrow_mut();
            if cur_scheduler.is_some() {
                panic!("Attempt to initialize a Scheduler on thread where another Scheduler is running.");
            }

            let handle = Handle::new(Arc::new(Scheduler {
                context_name: context_name.clone(),
                max_throttling,
                tasks: TaskQueue::new(context_name),
                must_unpark: Mutex::new(false),
                must_unpark_cvar: Condvar::new(),
                #[cfg(feature = "tuning")]
                parked_duration: AtomicU64::new(0),
            }));

            *cur_scheduler = Some(handle.downgrade());

            handle
        });

        Reactor::init(handle.max_throttling());

        handle
    }

    /// Blocks the current thread on `future` using a one-shot dummy
    /// `Scheduler` with no throttling.
    ///
    /// Sub tasks registered by `future` are drained before returning.
    ///
    /// # Panics
    ///
    /// Panics if called from a thread which is already running a
    /// `Scheduler`, or resumes the unwind if `future` panics.
    pub fn block_on<F>(future: F) -> F::Output
    where
        F: Future + Send + 'static,
        F::Output: Send + 'static,
    {
        assert!(
            !Scheduler::is_scheduler_thread(),
            "Attempt to block within an existing Scheduler thread."
        );
        let handle = Scheduler::init(Scheduler::DUMMY_NAME.into(), Duration::ZERO);
        let this = Arc::clone(&handle.0.scheduler);

        let (task_id, task) = this.tasks.add(async move {
            let res = future.await;

            // Make sure the sub tasks spawned by `future` complete before
            // the main task is considered done.
            let task_id = TaskId::current().unwrap();
            let _ = handle.drain_sub_tasks(task_id).await;

            res
        });

        gst::trace!(RUNTIME_CAT, "Blocking on current thread with {:?}", task_id);

        let _guard = CallOnDrop::new(|| {
            gst::trace!(
                RUNTIME_CAT,
                "Blocking on current thread with {:?} done",
                task_id,
            );
        });

        match this.block_on_priv(task) {
            Ok(res) => res,
            Err(e) => {
                gst::error!(
                    RUNTIME_CAT,
                    "Panic blocking on Context {}",
                    &Scheduler::DUMMY_NAME
                );
                panic::resume_unwind(e);
            }
        }
    }

    /// Runs the scheduler loop on the current thread until `future` resolves.
    ///
    /// Each iteration reacts to the [`Reactor`] (I/O & timers), polls
    /// `future`, runs all currently runnable tasks, then parks the thread
    /// for the remainder of the `max_throttling` time slice.
    ///
    /// Returns `Err` with the panic payload if one of the tasks panicked.
    fn block_on_priv<F>(&self, future: F) -> std::thread::Result<F::Output>
    where
        F: Future + Send + 'static,
        F::Output: Send + 'static,
    {
        // Everything is polled again on each iteration, so a no-op waker
        // is sufficient.
        let waker = waker_fn(|| ());
        let cx = &mut std::task::Context::from_waker(&waker);
        pin_mut!(future);

        let _guard = CallOnDrop::new(|| Scheduler::close(Arc::clone(&self.context_name)));

        if let Poll::Ready(t) = future.as_mut().poll(cx) {
            return Ok(t);
        }

        // Start of the current throttling time slice.
        let mut last;
        loop {
            last = Instant::now();
            Reactor::with_mut(|reactor| reactor.react(last).ok());

            if let Poll::Ready(t) = future.as_mut().poll(cx) {
                return Ok(t);
            }

            while let Ok(runnable) = self.tasks.pop_runnable() {
                panic::catch_unwind(|| runnable.run()).map_err(|err| {
                    gst::error!(
                        RUNTIME_CAT,
                        "A task has panicked within Context {}",
                        self.context_name
                    );

                    err
                })?;
            }

            // Park for whatever remains of the time slice, unless an
            // unpark was (or is) requested in the meantime.
            let mut must_unpark = self.must_unpark.lock().unwrap();
            loop {
                if *must_unpark {
                    *must_unpark = false;
                    break;
                }

                if let Some(parking_duration) = self.max_throttling.checked_sub(last.elapsed()) {
                    let result = self
                        .must_unpark_cvar
                        .wait_timeout(must_unpark, parking_duration)
                        .unwrap();

                    // Account for the whole requested duration — `subsec_nanos`
                    // would silently drop full seconds for slices >= 1s.
                    // NOTE(review): this records the requested parking duration;
                    // the actual wait may be shorter if `unpark` was notified.
                    #[cfg(feature = "tuning")]
                    self.parked_duration
                        .fetch_add(parking_duration.as_nanos() as u64, Ordering::Relaxed);

                    must_unpark = result.0;
                } else {
                    *must_unpark = false;
                    break;
                }
            }
        }
    }

    /// Interrupts the throttling park so the scheduler starts a new
    /// iteration immediately.
    fn unpark(&self) {
        let mut must_unpark = self.must_unpark.lock().unwrap();
        *must_unpark = true;
        self.must_unpark_cvar.notify_one();
    }

    /// Tears down the `Reactor` and unregisters the current thread's
    /// `Scheduler` from the thread local.
    fn close(context_name: Arc<str>) {
        gst::trace!(
            RUNTIME_CAT,
            "Closing Scheduler for Context {}",
            context_name,
        );

        Reactor::close();

        // `try_with`: the thread local may already be destroyed during
        // thread teardown.
        let _ = CURRENT_SCHEDULER.try_with(|cur_scheduler| {
            *cur_scheduler.borrow_mut() = None;
        });
    }

    /// Returns `true` if a `Scheduler` is running on the current thread.
    pub fn is_scheduler_thread() -> bool {
        CURRENT_SCHEDULER.with(|cur_scheduler| cur_scheduler.borrow().is_some())
    }

    /// Returns a `Handle` to the `Scheduler` running on the current thread,
    /// if any.
    pub fn current() -> Option<Handle> {
        CURRENT_SCHEDULER.with(|cur_scheduler| {
            cur_scheduler
                .borrow()
                .as_ref()
                .and_then(HandleWeak::upgrade)
        })
    }

    /// Returns `true` if `self` is the `Scheduler` running on the current
    /// thread.
    pub fn is_current(&self) -> bool {
        CURRENT_SCHEDULER.with(|cur_scheduler| {
            cur_scheduler
                .borrow()
                .as_ref()
                .and_then(HandleWeak::upgrade)
                .map_or(false, |cur| {
                    std::ptr::eq(self, Arc::as_ptr(&cur.0.scheduler))
                })
        })
    }
}
impl Drop for Scheduler {
    /// Logs the termination of this `Scheduler` once its last strong
    /// reference goes away.
    fn drop(&mut self) {
        let context_name = &self.context_name;
        gst::debug!(RUNTIME_CAT, "Terminated: Scheduler for Context {}", context_name);
    }
}
#[derive(Debug)]
struct SchedulerShutdown {
    /// The `Scheduler` this shutdown controller applies to.
    scheduler: Arc<Scheduler>,
    /// Dropping this sender resolves the shutdown receiver the scheduler
    /// thread blocks on, requesting the thread to stop.
    sender: Option<oneshot::Sender<()>>,
    /// Join handle of the scheduler thread.
    join: Option<thread::JoinHandle<()>>,
}
impl SchedulerShutdown {
fn new(scheduler: Arc<Scheduler>) -> Self {
SchedulerShutdown {
scheduler,
sender: None,
join: None,
}
}
fn clear(&mut self) {
self.sender = None;
self.join = None;
}
}
impl Drop for SchedulerShutdown {
    fn drop(&mut self) {
        if let Some(sender) = self.sender.take() {
            gst::debug!(
                RUNTIME_CAT,
                "Shutting down Scheduler thread for Context {}",
                self.scheduler.context_name
            );
            // Dropping the sender resolves the shutdown receiver the
            // scheduler thread is blocked on, requesting it to stop.
            drop(sender);

            // Don't block shutting down itself
            if !self.scheduler.is_current() {
                if let Some(join_handler) = self.join.take() {
                    gst::trace!(
                        RUNTIME_CAT,
                        "Waiting for Scheduler thread to shutdown for Context {}",
                        self.scheduler.context_name
                    );
                    // Join errors (thread panic) are deliberately ignored:
                    // we are shutting down anyway.
                    let _ = join_handler.join();
                }
            }
        }
    }
}
#[derive(Debug)]
struct HandleInner {
    scheduler: Arc<Scheduler>,
    /// Triggers the scheduler thread shutdown when the last `Handle`
    /// is dropped.
    shutdown: Mutex<SchedulerShutdown>,
}
/// Weak counterpart of [`Handle`]: doesn't keep the `Scheduler` alive.
#[derive(Clone, Debug)]
pub(super) struct HandleWeak(Weak<HandleInner>);
impl HandleWeak {
pub(super) fn upgrade(&self) -> Option<Handle> {
self.0.upgrade().map(Handle)
}
}
/// Strong, cloneable reference to a [`Scheduler`] and its shutdown state.
#[derive(Clone, Debug)]
pub(super) struct Handle(Arc<HandleInner>);
impl Handle {
    fn new(scheduler: Arc<Scheduler>) -> Self {
        Handle(Arc::new(HandleInner {
            shutdown: Mutex::new(SchedulerShutdown::new(Arc::clone(&scheduler))),
            scheduler,
        }))
    }

    /// Attaches the shutdown channel sender and the scheduler thread
    /// join handle to this `Handle`'s shutdown controller.
    fn set_shutdown(&self, sender: oneshot::Sender<()>, join: thread::JoinHandle<()>) {
        let mut shutdown = self.0.shutdown.lock().unwrap();
        shutdown.sender = Some(sender);
        shutdown.join = Some(join);
    }

    /// Returns the name of the `Context` the `Scheduler` runs for.
    pub fn context_name(&self) -> &str {
        &self.0.scheduler.context_name
    }

    /// Returns the maximum throttling duration of the `Scheduler`.
    pub fn max_throttling(&self) -> Duration {
        self.0.scheduler.max_throttling
    }

    /// Returns the cumulated duration the `Scheduler` spent parked
    /// (tuning instrumentation).
    #[cfg(feature = "tuning")]
    pub fn parked_duration(&self) -> Duration {
        Duration::from_nanos(self.0.scheduler.parked_duration.load(Ordering::Relaxed))
    }

    /// Executes the provided function relatively to this [`Scheduler`]'s [`Reactor`].
    ///
    /// Useful to initialize i/o sources and timers from outside
    /// of a [`Scheduler`].
    ///
    /// # Panic
    ///
    /// This will block current thread and would panic if run
    /// from the [`Scheduler`].
    pub fn enter<'a, F, O>(&'a self, f: F) -> O
    where
        F: FnOnce() -> O + Send + 'a,
        O: Send + 'a,
    {
        assert!(!self.0.scheduler.is_current());

        // Safety: bounding `self` to `'a` and blocking on the task
        // ensures that the lifetime bounds satisfy the safety
        // requirements for `TaskQueue::add_sync`.
        let task = unsafe { self.0.scheduler.tasks.add_sync(f) };
        // Wake the scheduler so the task runs within the current time slice.
        self.0.scheduler.unpark();
        futures::executor::block_on(task)
    }

    /// Spawns `future` on the `Scheduler` without waking it up.
    pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
    where
        F: Future + Send + 'static,
        F::Output: Send + 'static,
    {
        let (task_id, task) = self.0.scheduler.tasks.add(future);
        JoinHandle::new(task_id, task, self)
    }

    /// Spawns `future` on the `Scheduler` and wakes it up so the task
    /// can start immediately.
    pub fn spawn_and_unpark<F>(&self, future: F) -> JoinHandle<F::Output>
    where
        F: Future + Send + 'static,
        F::Output: Send + 'static,
    {
        let (task_id, task) = self.0.scheduler.tasks.add(future);
        self.0.scheduler.unpark();
        JoinHandle::new(task_id, task, self)
    }

    /// Interrupts the `Scheduler`'s throttling park.
    pub(super) fn unpark(&self) {
        self.0.scheduler.unpark();
    }

    /// Registers `sub_task` to be drained by the task `task_id`.
    ///
    /// Returns `sub_task` back as an `Err` if `task_id` is not active.
    pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
    where
        T: Future<Output = SubTaskOutput> + Send + 'static,
    {
        self.0.scheduler.tasks.add_sub_task(task_id, sub_task)
    }

    /// Returns a weak reference which doesn't keep the `Scheduler` alive.
    pub fn downgrade(&self) -> HandleWeak {
        HandleWeak(self.0.downgrade())
    }

    /// Awaits completion of all the sub tasks registered for `task_id`.
    pub async fn drain_sub_tasks(&self, task_id: TaskId) -> SubTaskOutput {
        let sub_tasks_fut = self.0.scheduler.tasks.drain_sub_tasks(task_id);
        sub_tasks_fut.await
    }
}
impl PartialEq for Handle {
    /// Two `Handle`s are equal when they point to the same `HandleInner`
    /// allocation, i.e. the same `Scheduler`.
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(Arc::as_ptr(&self.0), Arc::as_ptr(&other.0))
    }
}
#[cfg(test)]
mod tests {
    use super::super::*;

    use std::time::Duration;

    // Checks that a JoinHandle produced by one Scheduler can be awaited
    // via `Scheduler::block_on` from another thread.
    #[test]
    fn block_on_task_join_handle() {
        use futures::channel::oneshot;
        use std::sync::mpsc;

        let (join_sender, join_receiver) = mpsc::channel();
        let (shutdown_sender, shutdown_receiver) = oneshot::channel();

        std::thread::spawn(move || {
            let handle =
                Scheduler::init("block_on_task_join_handle".into(), Duration::from_millis(2));
            let join_handle = handle.spawn(async {
                timer::delay_for(Duration::from_millis(5)).await;
                42
            });
            let _ = join_sender.send(join_handle);
            // Keep the scheduler thread alive until the test is done.
            let _ = handle.0.scheduler.block_on_priv(shutdown_receiver);
        });

        let task_join_handle = join_receiver.recv().unwrap();
        let res = Scheduler::block_on(task_join_handle).unwrap();

        let _ = shutdown_sender.send(());

        assert_eq!(res, 42);
    }

    // Checks that timers resolve within a `Scheduler::block_on` context.
    #[test]
    fn block_on_timer() {
        let res = Scheduler::block_on(async {
            timer::delay_for(Duration::from_millis(5)).await;
            42
        });

        assert_eq!(res, 42);
    }

    // Checks that `Handle::enter` can borrow non-'static local data.
    #[test]
    fn enter_non_static() {
        let handle = Scheduler::start("enter_non_static", Duration::from_millis(2));

        let mut flag = false;
        handle.enter(|| flag = true);
        assert!(flag);
    }
}