threadshare: Refactor

Commit e269e51524 by Sebastian Dröge, 2018-03-15 20:52:38 +02:00
Parent commit: f53efc6e6f
5 changed files with 616 additions and 540 deletions

View file

@@ -19,3 +19,9 @@ either = "1.0"
name = "gstthreadshare"
crate-type = ["cdylib"]
path = "src/lib.rs"
[profile.release]
lto = true
opt-level = 3
debug = true
panic = 'unwind'

View file

@@ -0,0 +1,301 @@
// Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};
use std::sync::atomic;
use std::thread;
use futures::Future;
use tokio::executor::thread_pool;
use tokio::reactor;
use gst;
use either::Either;
// Global registry of contexts: elements requesting the same context name share
// one reactor/executor. Entries are Weak so a context is torn down as soon as
// the last strong reference (IOContext) is dropped (see Drop for IOContextInner).
lazy_static!{
    static ref CONTEXTS: Mutex<HashMap<String, Weak<IOContextInner>>> = Mutex::new(HashMap::new());
    // Debug category for all context-related logging.
    static ref CONTEXT_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-context",
        gst::DebugColorFlags::empty(),
        "Thread-sharing Context",
    );
}
// Our own simplified implementation of reactor::Background to allow hooking into its internals
// Values for IOContextRunner::shutdown: the runner thread keeps looping while
// the flag equals RUNNING and exits as soon as it observes anything greater.
const RUNNING: usize = 0;
const SHUTDOWN_NOW: usize = 1;
// State owned by the reactor thread itself.
struct IOContextRunner {
    // Context name, only used for log output.
    name: String,
    // Shared shutdown flag, set to SHUTDOWN_NOW by IOContextShutdown::drop.
    shutdown: Arc<atomic::AtomicUsize>,
    // Single-threaded mode only: futures queued by IOContextExecutor::spawn
    // that the runner moves onto its CurrentThread executor on each turn.
    // None when futures run on a separate thread pool instead.
    pending_futures: Option<Arc<Mutex<Vec<Box<Future<Item = (), Error = ()> + Send + 'static>>>>>,
}
impl IOContextRunner {
    // Spawns the reactor thread for a single-threaded context (n_threads < 0).
    //
    // The thread drives both the reactor and a CurrentThread executor, so
    // futures handed to the returned IOContextExecutor run on that very
    // thread. Also returns the shutdown handle that stops and joins the
    // thread when dropped.
    fn start_single_threaded(
        name: &str,
        wait: u32,
        reactor: reactor::Reactor,
    ) -> (IOContextExecutor, IOContextShutdown) {
        // Two handle clones: one for the executor, one for the shutdown handle.
        let handle = reactor.handle().clone();
        let handle2 = reactor.handle().clone();
        let shutdown = Arc::new(atomic::AtomicUsize::new(RUNNING));
        let shutdown_clone = shutdown.clone();
        let name_clone = name.into();
        let pending_futures = Arc::new(Mutex::new(Vec::new()));
        let pending_futures_clone = pending_futures.clone();
        let mut runner = IOContextRunner {
            shutdown: shutdown_clone,
            name: name_clone,
            pending_futures: Some(pending_futures),
        };
        // The runner (and the reactor it owns) moves onto the new thread.
        let join = thread::spawn(move || {
            runner.run(wait, reactor);
        });
        let executor = IOContextExecutor {
            handle: handle,
            pending_futures: pending_futures_clone,
        };
        let shutdown = IOContextShutdown {
            name: name.into(),
            shutdown: shutdown,
            handle: handle2,
            join: Some(join),
        };
        (executor, shutdown)
    }
    // Spawns the reactor thread for a thread-pool context (n_threads >= 0).
    // Here the thread only turns the reactor; futures run on the separate
    // thread pool, so no pending-futures queue is needed.
    fn start(name: &str, wait: u32, reactor: reactor::Reactor) -> IOContextShutdown {
        let handle = reactor.handle().clone();
        let shutdown = Arc::new(atomic::AtomicUsize::new(RUNNING));
        let shutdown_clone = shutdown.clone();
        let name_clone = name.into();
        let mut runner = IOContextRunner {
            shutdown: shutdown_clone,
            name: name_clone,
            pending_futures: None,
        };
        let join = thread::spawn(move || {
            runner.run(wait, reactor);
        });
        let shutdown = IOContextShutdown {
            name: name.into(),
            shutdown: shutdown,
            handle: handle,
            join: Some(join),
        };
        shutdown
    }
    // Thread body: turn the reactor until the shutdown flag is raised,
    // throttling so that each loop iteration takes at least `wait` ms.
    fn run(&mut self, wait: u32, reactor: reactor::Reactor) {
        use std::time;
        let wait = time::Duration::from_millis(wait as u64);
        gst_debug!(CONTEXT_CAT, "Started reactor thread '{}'", self.name);
        if let Some(ref pending_futures) = self.pending_futures {
            use tokio::executor::current_thread;
            // Single-threaded mode: the reactor doubles as the executor's
            // park handle, and is installed as fallback + thread default so
            // tokio I/O objects created here bind to it.
            reactor.set_fallback().unwrap();
            let handle = reactor.handle();
            let mut enter = ::tokio_executor::enter().unwrap();
            let mut current_thread = current_thread::CurrentThread::new_with_park(reactor);
            ::tokio_reactor::with_default(&handle, &mut enter, |enter| loop {
                let now = time::Instant::now();
                // `> RUNNING` covers SHUTDOWN_NOW (and any stronger value).
                if self.shutdown.load(atomic::Ordering::SeqCst) > RUNNING {
                    break;
                }
                {
                    // Drain newly spawned futures onto the executor; scope
                    // releases the lock before turning so spawn() callers
                    // are never blocked for a whole turn.
                    let mut pending_futures = pending_futures.lock().unwrap();
                    while let Some(future) = pending_futures.pop() {
                        current_thread.spawn(future);
                    }
                }
                gst_trace!(CONTEXT_CAT, "Turning current thread '{}'", self.name);
                current_thread.enter(enter).turn(None).unwrap();
                gst_trace!(CONTEXT_CAT, "Turned current thread '{}'", self.name);
                // Throttle: sleep out the remainder of the `wait` budget.
                let elapsed = now.elapsed();
                if elapsed < wait {
                    gst_trace!(
                        CONTEXT_CAT,
                        "Waiting for {:?} before polling again",
                        wait - elapsed
                    );
                    thread::sleep(wait - elapsed);
                }
            });
        } else {
            // Pool mode: only turn the reactor, throttled the same way.
            let mut reactor = reactor;
            loop {
                let now = time::Instant::now();
                if self.shutdown.load(atomic::Ordering::SeqCst) > RUNNING {
                    break;
                }
                gst_trace!(CONTEXT_CAT, "Turning reactor '{}'", self.name);
                reactor.turn(None).unwrap();
                gst_trace!(CONTEXT_CAT, "Turned reactor '{}'", self.name);
                let elapsed = now.elapsed();
                if elapsed < wait {
                    gst_trace!(
                        CONTEXT_CAT,
                        "Waiting for {:?} before polling again",
                        wait - elapsed
                    );
                    thread::sleep(wait - elapsed);
                }
            }
        }
    }
}
impl Drop for IOContextRunner {
    // Runs on the reactor thread when run() returns; purely diagnostic.
    fn drop(&mut self) {
        gst_debug!(CONTEXT_CAT, "Shut down reactor thread '{}'", self.name);
    }
}
// Handle that stops and joins the reactor thread when dropped.
struct IOContextShutdown {
    name: String,
    // Flag shared with the runner; see RUNNING / SHUTDOWN_NOW.
    shutdown: Arc<atomic::AtomicUsize>,
    // Reactor handle used to unpark the (possibly blocked) runner thread.
    handle: reactor::Handle,
    // Join handle; Option so drop() can take() and join it.
    join: Option<thread::JoinHandle<()>>,
}
impl Drop for IOContextShutdown {
    fn drop(&mut self) {
        use tokio_executor::park::Unpark;
        gst_debug!(CONTEXT_CAT, "Shutting down reactor thread '{}'", self.name);
        // Raise the flag first, then unpark: the runner re-checks the flag
        // at the top of every loop iteration.
        self.shutdown.store(SHUTDOWN_NOW, atomic::Ordering::SeqCst);
        gst_trace!(CONTEXT_CAT, "Waiting for reactor '{}' shutdown", self.name);
        // After being unparked, the next turn() is guaranteed to finish immediately,
        // as such there is no race condition between checking for shutdown and setting
        // shutdown.
        self.handle.unpark();
        // Join errors (thread panic) are deliberately ignored during teardown.
        let _ = self.join.take().unwrap().join();
    }
}
// Executor for single-threaded contexts: hands futures to the runner thread.
struct IOContextExecutor {
    // Used to unpark the reactor so queued futures are picked up promptly.
    handle: reactor::Handle,
    // Queue shared with IOContextRunner::run, drained once per turn.
    pending_futures: Arc<Mutex<Vec<Box<Future<Item = (), Error = ()> + Send + 'static>>>>,
}
impl IOContextExecutor {
    // Queues a future for the runner thread. The push happens before the
    // unpark so the woken runner is guaranteed to see the new future.
    fn spawn<F>(&self, future: F)
    where
        F: Future<Item = (), Error = ()> + Send + 'static,
    {
        use tokio_executor::park::Unpark;
        self.pending_futures.lock().unwrap().push(Box::new(future));
        self.handle.unpark();
    }
}
/// Shared I/O context. Cheap to clone; the underlying reactor/executor is
/// reference-counted and torn down when the last clone is dropped.
#[derive(Clone)]
pub struct IOContext(Arc<IOContextInner>);
struct IOContextInner {
    // Registry key; removed from CONTEXTS on drop.
    name: String,
    // Left: multi-threaded pool executor; Right: single-threaded executor.
    pool: Either<thread_pool::ThreadPool, IOContextExecutor>,
    // Only used for dropping
    _shutdown: IOContextShutdown,
}
impl Drop for IOContextInner {
    // Remove this context from the global registry once nobody uses it.
    fn drop(&mut self) {
        let mut registry = CONTEXTS.lock().unwrap();
        gst_debug!(CONTEXT_CAT, "Finalizing context '{}'", self.name);
        registry.remove(&self.name);
    }
}
impl IOContext {
    /// Returns the context registered under `name`, creating it on first use.
    ///
    /// `n_threads` selects the execution mode: negative runs everything on a
    /// single reactor thread, 0 uses a thread pool with its default size and
    /// positive values a pool of exactly that many workers. `wait` is the
    /// minimum reactor loop iteration time in milliseconds.
    pub fn new(name: &str, n_threads: isize, wait: u32) -> Self {
        let mut contexts = CONTEXTS.lock().unwrap();
        // Reuse a still-alive context of the same name if one exists.
        if let Some(context) = contexts.get(name) {
            if let Some(context) = context.upgrade() {
                gst_debug!(CONTEXT_CAT, "Reusing existing context '{}'", name);
                return IOContext(context);
            }
        }
        let reactor = reactor::Reactor::new().unwrap();
        let (pool, shutdown) = if n_threads >= 0 {
            let handle = reactor.handle().clone();
            let shutdown = IOContextRunner::start(name, wait, reactor);
            // Install the shared reactor as thread default inside every pool
            // worker, so I/O objects used by spawned futures register on it.
            let mut pool_builder = thread_pool::Builder::new();
            pool_builder.around_worker(move |w, enter| {
                ::tokio_reactor::with_default(&handle, enter, |_| {
                    w.run();
                });
            });
            if n_threads > 0 {
                pool_builder.pool_size(n_threads as usize);
            }
            (Either::Left(pool_builder.build()), shutdown)
        } else {
            let (executor, shutdown) = IOContextRunner::start_single_threaded(name, wait, reactor);
            (Either::Right(executor), shutdown)
        };
        let context = Arc::new(IOContextInner {
            name: name.into(),
            pool,
            _shutdown: shutdown,
        });
        // Register (or replace a dead Weak entry) under this name.
        contexts.insert(name.into(), Arc::downgrade(&context));
        gst_debug!(CONTEXT_CAT, "Created new context '{}'", name);
        IOContext(context)
    }
    /// Spawns a future on this context's executor (pool or single thread).
    pub fn spawn<F>(&self, future: F)
    where
        F: Future<Item = (), Error = ()> + Send + 'static,
    {
        match self.0.pool {
            Either::Left(ref pool) => pool.spawn(future),
            Either::Right(ref pool) => pool.spawn(future),
        }
    }
}

View file

@ -33,6 +33,8 @@ extern crate either;
#[macro_use]
extern crate lazy_static;
mod iocontext;
mod udpsocket;
mod udpsrc;
fn plugin_init(plugin: &gst::Plugin) -> bool {

View file

@@ -0,0 +1,298 @@
// Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
use gst;
use gst::prelude::*;
use futures::{Async, Future, IntoFuture, Poll, Stream};
use futures::{future, task};
use futures::sync::oneshot;
use tokio::net;
use iocontext::*;
lazy_static!{
    // Debug category for all socket-related logging.
    static ref SOCKET_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-socket",
        gst::DebugColorFlags::empty(),
        "Thread-sharing Socket",
    );
}
// FIXME: Workaround for https://github.com/tokio-rs/tokio/issues/207
// Future that reports NotReady (and re-notifies itself) exactly once before
// resolving; lets the executor schedule other tasks in between.
struct YieldOnce<E>(Option<()>, PhantomData<E>);
impl<E> YieldOnce<E> {
    fn new() -> YieldOnce<E> {
        YieldOnce(None, PhantomData)
    }
}
impl<E> Future for YieldOnce<E> {
    type Item = ();
    type Error = E;
    // First poll: remember that we yielded, wake ourselves again and report
    // NotReady. Second poll: resolve immediately.
    fn poll(&mut self) -> Poll<(), E> {
        match self.0.take() {
            Some(()) => Ok(Async::Ready(())),
            None => {
                self.0 = Some(());
                task::current().notify();
                Ok(Async::NotReady)
            }
        }
    }
}
/// Shared handle around a UDP socket plus its receive state machine.
#[derive(Clone)]
pub struct Socket(Arc<Mutex<SocketInner>>);
// Lifecycle of the receive loop, driven by the element state transitions:
// Unscheduled -> Scheduled (schedule) -> Running (unpause) -> Scheduled
// (pause) -> Shutdown (shutdown) -> Unscheduled.
#[derive(PartialEq, Eq, Debug)]
enum SocketState {
    Unscheduled,
    Scheduled,
    Running,
    Shutdown,
}
struct SocketInner {
    // Owning element, used for log output.
    element: gst::Element,
    state: SocketState,
    socket: net::UdpSocket,
    // Pool the receive buffers are taken from; active while scheduled.
    buffer_pool: gst::BufferPool,
    // Task parked in SocketStream::poll, woken by unpause/pause/shutdown.
    current_task: Option<task::Task>,
    // Signalled by the spawned future when it finishes; awaited in shutdown().
    shutdown_receiver: Option<oneshot::Receiver<()>>,
    // Clock and base time for timestamping; Some only while Running.
    clock: Option<gst::Clock>,
    base_time: Option<gst::ClockTime>,
}
impl Socket {
    /// Wraps an already-bound UDP socket for `element`; receive buffers are
    /// acquired from `buffer_pool`. Starts in Unscheduled state.
    pub fn new(
        element: &gst::Element,
        socket: net::UdpSocket,
        buffer_pool: gst::BufferPool,
    ) -> Self {
        Socket(Arc::new(Mutex::new(SocketInner {
            element: element.clone(),
            state: SocketState::Unscheduled,
            socket: socket,
            buffer_pool: buffer_pool,
            current_task: None,
            shutdown_receiver: None,
            clock: None,
            base_time: None,
        })))
    }
    /// Activates the pool and spawns the receive loop on `io_context`;
    /// `func` is invoked for every received buffer. The loop stays blocked
    /// until unpause() is called.
    pub fn schedule<F: Fn(gst::Buffer) -> Result<(), gst::FlowError> + Send + 'static>(
        &self,
        io_context: &IOContext,
        func: F,
    ) {
        // Ready->Paused
        //
        // Need to wait for a possible shutdown to finish first
        // spawn() on the reactor, change state to Scheduled
        let stream = SocketStream(self.clone(), None);
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Scheduling socket");
        if inner.state == SocketState::Scheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already scheduled");
            return;
        }
        assert_eq!(inner.state, SocketState::Unscheduled);
        inner.state = SocketState::Scheduled;
        inner.buffer_pool.set_active(true).unwrap();
        // Channel used by shutdown() to wait for the spawned future to end.
        let (sender, receiver) = oneshot::channel::<()>();
        inner.shutdown_receiver = Some(receiver);
        let element_clone = inner.element.clone();
        io_context.spawn(
            stream
                .for_each(move |buffer| {
                    let res = func(buffer);
                    match res {
                        Ok(()) => future::Either::A(Ok(()).into_future()),
                        //Ok(()) => future::Either::A(YieldOnce::new()),
                        Err(err) => future::Either::B(Err(err).into_future()),
                    }
                })
                .then(move |res| {
                    gst_debug!(SOCKET_CAT, obj: &element_clone, "Socket finished {:?}", res);
                    // TODO: Do something with errors here?
                    let _ = sender.send(());
                    Ok(())
                }),
        );
    }
    /// Starts delivering buffers, timestamping against `clock`/`base_time`.
    pub fn unpause(&self, clock: gst::Clock, base_time: gst::ClockTime) {
        // Paused->Playing
        //
        // Change state to Running and signal task
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Unpausing socket");
        if inner.state == SocketState::Running {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already unpaused");
            return;
        }
        assert_eq!(inner.state, SocketState::Scheduled);
        inner.state = SocketState::Running;
        inner.clock = Some(clock);
        inner.base_time = Some(base_time);
        // Wake the stream task blocked in SocketStream::poll, if any.
        if let Some(task) = inner.current_task.take() {
            task.notify();
        }
    }
    /// Stops delivering buffers; the receive loop parks until unpause().
    pub fn pause(&self) {
        // Playing->Paused
        //
        // Change state to Scheduled and signal task
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Pausing socket");
        if inner.state == SocketState::Scheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already paused");
            return;
        }
        assert_eq!(inner.state, SocketState::Running);
        inner.state = SocketState::Scheduled;
        inner.clock = None;
        inner.base_time = None;
        if let Some(task) = inner.current_task.take() {
            task.notify();
        }
    }
    /// Ends the receive loop and blocks until its future has finished, then
    /// deactivates the pool and returns to Unscheduled.
    pub fn shutdown(&self) {
        // Paused->Ready
        //
        // Change state to Shutdown and signal task, wait for our future to be finished
        // Requires scheduled function to be unblocked! Pad must be deactivated before
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Shutting down socket");
        if inner.state == SocketState::Unscheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already shut down");
            return;
        }
        assert!(inner.state == SocketState::Scheduled || inner.state == SocketState::Running);
        inner.state = SocketState::Shutdown;
        if let Some(task) = inner.current_task.take() {
            task.notify();
        }
        let shutdown_receiver = inner.shutdown_receiver.take().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Waiting for socket to shut down");
        // Must release the lock: the stream task needs it to observe Shutdown.
        drop(inner);
        shutdown_receiver.wait().unwrap();
        let mut inner = self.0.lock().unwrap();
        inner.state = SocketState::Unscheduled;
        inner.buffer_pool.set_active(false).unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket shut down");
    }
}
impl Drop for SocketInner {
    fn drop(&mut self) {
        // shutdown() must have completed before the last reference goes away.
        assert_eq!(self.state, SocketState::Unscheduled);
    }
}
// Stream of received buffers; the second field keeps the mapped buffer being
// filled across NotReady polls so it is not re-acquired from the pool.
struct SocketStream(Socket, Option<gst::MappedBuffer<gst::buffer::Writable>>);
impl Stream for SocketStream {
    type Item = gst::Buffer;
    type Error = gst::FlowError;
    // Polls the UDP socket for the next packet. While only Scheduled
    // (paused) the task is parked in current_task so unpause()/shutdown()
    // can wake it; Shutdown ends the stream.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        let mut inner = (self.0).0.lock().unwrap();
        if inner.state == SocketState::Shutdown {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket shutting down");
            return Ok(Async::Ready(None));
        } else if inner.state == SocketState::Scheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket not running");
            inner.current_task = Some(task::current());
            return Ok(Async::NotReady);
        }
        assert_eq!(inner.state, SocketState::Running);
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Trying to read data");
        let (len, time) = {
            // Reuse the mapped buffer kept from a previous NotReady poll,
            // or acquire and map a fresh one from the pool.
            let mut buffer = match self.1 {
                Some(ref mut buffer) => buffer,
                None => match inner.buffer_pool.acquire_buffer(None) {
                    Ok(buffer) => {
                        self.1 = Some(buffer.into_mapped_buffer_writable().unwrap());
                        self.1.as_mut().unwrap()
                    }
                    Err(err) => {
                        gst_debug!(SOCKET_CAT, obj: &inner.element, "Failed to acquire buffer {:?}", err);
                        return Err(err.into_result().unwrap_err());
                    }
                },
            };
            match inner.socket.poll_recv(buffer.as_mut_slice()) {
                Ok(Async::NotReady) => {
                    gst_debug!(SOCKET_CAT, obj: &inner.element, "No data available");
                    inner.current_task = Some(task::current());
                    return Ok(Async::NotReady);
                }
                Err(err) => {
                    gst_debug!(SOCKET_CAT, obj: &inner.element, "Read error {:?}", err);
                    return Err(gst::FlowError::Error);
                }
                Ok(Async::Ready(len)) => {
                    // DTS is the running time: clock time minus base time.
                    let time = inner.clock.as_ref().unwrap().get_time();
                    let dts = time - inner.base_time.unwrap();
                    gst_debug!(SOCKET_CAT, obj: &inner.element, "Read {} bytes at {} (clock {})", len, dts, time);
                    (len, dts)
                }
            }
        };
        let mut buffer = self.1.take().unwrap().into_buffer();
        {
            let buffer = buffer.get_mut().unwrap();
            // Shrink to the number of bytes actually received.
            if len < buffer.get_size() {
                buffer.set_size(len);
            }
            buffer.set_dts(time);
        }
        // TODO: Only ever poll the second again in Xms, using tokio-timer
        Ok(Async::Ready(Some(buffer)))
    }
}

View file

@@ -24,548 +24,13 @@ use gst_plugin::properties::*;
use gst_plugin::object::*;
use gst_plugin::element::*;
use std::sync::{Arc, Mutex, Weak};
use std::sync::atomic;
use std::thread;
use std::sync::Mutex;
use std::u16;
use futures::{Async, Future, IntoFuture, Poll, Stream};
use futures::{future, task};
use futures::sync::oneshot;
use tokio::executor::thread_pool;
use tokio::reactor;
use tokio::net;
use std::collections::HashMap;
use std::marker::PhantomData;
use either::Either;
// Pre-refactor statics removed from udpsrc.rs by this commit; the context and
// socket categories now live in iocontext.rs and udpsocket.rs respectively.
lazy_static!{
    static ref CONTEXTS: Mutex<HashMap<String, Weak<IOContextInner>>> = Mutex::new(HashMap::new());
    static ref CONTEXT_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-context",
        gst::DebugColorFlags::empty(),
        "Thread-sharing Context",
    );
    static ref SOCKET_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-socket",
        gst::DebugColorFlags::empty(),
        "Thread-sharing Socket",
    );
}
// Our own simplified implementation of reactor::Background to allow hooking into its internals
// Removed by this commit; the shutdown-flag values moved to iocontext.rs.
const RUNNING: usize = 0;
const SHUTDOWN_NOW: usize = 1;
// Removed by this commit; moved unchanged to iocontext.rs.
struct IOContextRunner {
    name: String,
    shutdown: Arc<atomic::AtomicUsize>,
    pending_futures: Option<Arc<Mutex<Vec<Box<Future<Item = (), Error = ()> + Send + 'static>>>>>,
}
// Removed by this commit; moved to iocontext.rs. The only functional change
// there is that run() captures the if-let binding `pending_futures` instead
// of re-reading self.pending_futures inside the closure (which also leaves
// the binding unused here).
impl IOContextRunner {
    fn start_single_threaded(
        name: &str,
        wait: u32,
        reactor: reactor::Reactor,
    ) -> (IOContextExecutor, IOContextShutdown) {
        let handle = reactor.handle().clone();
        let handle2 = reactor.handle().clone();
        let shutdown = Arc::new(atomic::AtomicUsize::new(RUNNING));
        let shutdown_clone = shutdown.clone();
        let name_clone = name.into();
        let pending_futures = Arc::new(Mutex::new(Vec::new()));
        let pending_futures_clone = pending_futures.clone();
        let mut runner = IOContextRunner {
            shutdown: shutdown_clone,
            name: name_clone,
            pending_futures: Some(pending_futures),
        };
        let join = thread::spawn(move || {
            runner.run(wait, reactor);
        });
        let executor = IOContextExecutor {
            handle: handle,
            pending_futures: pending_futures_clone,
        };
        let shutdown = IOContextShutdown {
            name: name.into(),
            shutdown: shutdown,
            handle: handle2,
            join: Some(join),
        };
        (executor, shutdown)
    }
    fn start(name: &str, wait: u32, reactor: reactor::Reactor) -> IOContextShutdown {
        let handle = reactor.handle().clone();
        let shutdown = Arc::new(atomic::AtomicUsize::new(RUNNING));
        let shutdown_clone = shutdown.clone();
        let name_clone = name.into();
        let mut runner = IOContextRunner {
            shutdown: shutdown_clone,
            name: name_clone,
            pending_futures: None,
        };
        let join = thread::spawn(move || {
            runner.run(wait, reactor);
        });
        let shutdown = IOContextShutdown {
            name: name.into(),
            shutdown: shutdown,
            handle: handle,
            join: Some(join),
        };
        shutdown
    }
    fn run(&mut self, wait: u32, reactor: reactor::Reactor) {
        use std::time;
        let wait = time::Duration::from_millis(wait as u64);
        gst_debug!(CONTEXT_CAT, "Started reactor thread '{}'", self.name);
        if let Some(ref pending_futures) = self.pending_futures {
            use tokio::executor::current_thread;
            reactor.set_fallback().unwrap();
            let handle = reactor.handle();
            let mut enter = ::tokio_executor::enter().unwrap();
            let mut current_thread = current_thread::CurrentThread::new_with_park(reactor);
            ::tokio_reactor::with_default(&handle, &mut enter, |enter| loop {
                let now = time::Instant::now();
                if self.shutdown.load(atomic::Ordering::SeqCst) > RUNNING {
                    break;
                }
                {
                    // Note: re-reads self instead of using the if-let binding.
                    let mut pending_futures =
                        self.pending_futures.as_ref().unwrap().lock().unwrap();
                    while let Some(future) = pending_futures.pop() {
                        current_thread.spawn(future);
                    }
                }
                gst_trace!(CONTEXT_CAT, "Turning current thread '{}'", self.name);
                current_thread.enter(enter).turn(None).unwrap();
                gst_trace!(CONTEXT_CAT, "Turned current thread '{}'", self.name);
                let elapsed = now.elapsed();
                if elapsed < wait {
                    gst_trace!(CONTEXT_CAT, "Waiting for {:?} before polling again", wait - elapsed);
                    thread::sleep(wait - elapsed);
                }
            });
        } else {
            let mut reactor = reactor;
            loop {
                let now = time::Instant::now();
                if self.shutdown.load(atomic::Ordering::SeqCst) > RUNNING {
                    break;
                }
                gst_trace!(CONTEXT_CAT, "Turning reactor '{}'", self.name);
                reactor.turn(None).unwrap();
                gst_trace!(CONTEXT_CAT, "Turned reactor '{}'", self.name);
                let elapsed = now.elapsed();
                if elapsed < wait {
                    gst_trace!(CONTEXT_CAT, "Waiting for {:?} before polling again", wait - elapsed);
                    thread::sleep(wait - elapsed);
                }
            }
        }
    }
}
// Removed by this commit; moved unchanged to iocontext.rs.
impl Drop for IOContextRunner {
    fn drop(&mut self) {
        gst_debug!(CONTEXT_CAT, "Shut down reactor thread '{}'", self.name);
    }
}
// Removed by this commit; moved unchanged to iocontext.rs.
struct IOContextShutdown {
    name: String,
    shutdown: Arc<atomic::AtomicUsize>,
    handle: reactor::Handle,
    join: Option<thread::JoinHandle<()>>,
}
// Removed by this commit; moved unchanged to iocontext.rs.
impl Drop for IOContextShutdown {
    fn drop(&mut self) {
        use tokio_executor::park::Unpark;
        gst_debug!(CONTEXT_CAT, "Shutting down reactor thread '{}'", self.name);
        self.shutdown.store(SHUTDOWN_NOW, atomic::Ordering::SeqCst);
        gst_trace!(CONTEXT_CAT, "Waiting for reactor '{}' shutdown", self.name);
        // After being unparked, the next turn() is guaranteed to finish immediately,
        // as such there is no race condition between checking for shutdown and setting
        // shutdown.
        self.handle.unpark();
        let _ = self.join.take().unwrap().join();
    }
}
// Removed by this commit; moved unchanged to iocontext.rs.
struct IOContextExecutor {
    handle: reactor::Handle,
    pending_futures: Arc<Mutex<Vec<Box<Future<Item = (), Error = ()> + Send + 'static>>>>,
}
// Removed by this commit; moved unchanged to iocontext.rs.
impl IOContextExecutor {
    fn spawn<F>(&self, future: F)
    where
        F: Future<Item = (), Error = ()> + Send + 'static,
    {
        use tokio_executor::park::Unpark;
        self.pending_futures.lock().unwrap().push(Box::new(future));
        self.handle.unpark();
    }
}
// Removed by this commit; now `pub` in iocontext.rs.
#[derive(Clone)]
struct IOContext(Arc<IOContextInner>);
// Removed by this commit; moved unchanged to iocontext.rs.
struct IOContextInner {
    name: String,
    pool: Either<thread_pool::ThreadPool, IOContextExecutor>,
    // Only used for dropping
    _shutdown: IOContextShutdown,
}
// Removed by this commit; moved unchanged to iocontext.rs.
impl Drop for IOContextInner {
    fn drop(&mut self) {
        let mut contexts = CONTEXTS.lock().unwrap();
        gst_debug!(CONTEXT_CAT, "Finalizing context '{}'", self.name);
        contexts.remove(&self.name);
    }
}
// Removed by this commit; moved to iocontext.rs with new()/spawn() made pub.
impl IOContext {
    fn new(name: &str, n_threads: isize, wait: u32) -> Self {
        let mut contexts = CONTEXTS.lock().unwrap();
        if let Some(context) = contexts.get(name) {
            if let Some(context) = context.upgrade() {
                gst_debug!(CONTEXT_CAT, "Reusing existing context '{}'", name);
                return IOContext(context);
            }
        }
        let reactor = reactor::Reactor::new().unwrap();
        let (pool, shutdown) = if n_threads >= 0 {
            let handle = reactor.handle().clone();
            let shutdown = IOContextRunner::start(name, wait, reactor);
            let mut pool_builder = thread_pool::Builder::new();
            pool_builder.around_worker(move |w, enter| {
                ::tokio_reactor::with_default(&handle, enter, |_| {
                    w.run();
                });
            });
            if n_threads > 0 {
                pool_builder.pool_size(n_threads as usize);
            }
            (Either::Left(pool_builder.build()), shutdown)
        } else {
            let (executor, shutdown) = IOContextRunner::start_single_threaded(name, wait, reactor);
            (Either::Right(executor), shutdown)
        };
        let context = Arc::new(IOContextInner {
            name: name.into(),
            pool,
            _shutdown: shutdown,
        });
        contexts.insert(name.into(), Arc::downgrade(&context));
        gst_debug!(CONTEXT_CAT, "Created new context '{}'", name);
        IOContext(context)
    }
    fn spawn<F>(&self, future: F)
    where
        F: Future<Item = (), Error = ()> + Send + 'static,
    {
        match self.0.pool {
            Either::Left(ref pool) => pool.spawn(future),
            Either::Right(ref pool) => pool.spawn(future),
        }
    }
}
// FIXME: Workaround for https://github.com/tokio-rs/tokio/issues/207
// Removed by this commit; moved unchanged to udpsocket.rs.
struct YieldOnce<E>(Option<()>, PhantomData<E>);
impl<E> YieldOnce<E> {
    fn new() -> YieldOnce<E> {
        YieldOnce(None, PhantomData)
    }
}
// Removed by this commit; moved unchanged to udpsocket.rs.
impl<E> Future for YieldOnce<E> {
    type Item = ();
    type Error = E;
    fn poll(&mut self) -> Poll<(), E> {
        if let Some(_) = self.0.take() {
            Ok(Async::Ready(()))
        } else {
            self.0 = Some(());
            task::current().notify();
            Ok(Async::NotReady)
        }
    }
}
// Removed by this commit; now `pub` in udpsocket.rs.
#[derive(Clone)]
struct Socket(Arc<Mutex<SocketInner>>);
// Removed by this commit; moved unchanged to udpsocket.rs.
#[derive(PartialEq, Eq, Debug)]
enum SocketState {
    Unscheduled,
    Scheduled,
    Running,
    Shutdown,
}
// Removed by this commit; moved to udpsocket.rs, where `element` becomes a
// plain gst::Element instead of the gst_plugin Element wrapper.
struct SocketInner {
    element: Element,
    state: SocketState,
    socket: net::UdpSocket,
    buffer_pool: gst::BufferPool,
    current_task: Option<task::Task>,
    shutdown_receiver: Option<oneshot::Receiver<()>>,
    clock: Option<gst::Clock>,
    base_time: Option<gst::ClockTime>,
}
// Removed by this commit; moved to udpsocket.rs with the methods made pub and
// new() taking a &gst::Element.
impl Socket {
    fn new(element: &Element, socket: net::UdpSocket, buffer_pool: gst::BufferPool) -> Self {
        Socket(Arc::new(Mutex::new(SocketInner {
            element: element.clone(),
            state: SocketState::Unscheduled,
            socket: socket,
            buffer_pool: buffer_pool,
            current_task: None,
            shutdown_receiver: None,
            clock: None,
            base_time: None,
        })))
    }
    fn schedule<F: Fn(gst::Buffer) -> Result<(), gst::FlowError> + Send + 'static>(
        &self,
        io_context: &IOContext,
        func: F,
    ) {
        // Ready->Paused
        //
        // Need to wait for a possible shutdown to finish first
        // spawn() on the reactor, change state to Scheduled
        let stream = SocketStream(self.clone(), None);
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Scheduling socket");
        if inner.state == SocketState::Scheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already scheduled");
            return;
        }
        assert_eq!(inner.state, SocketState::Unscheduled);
        inner.state = SocketState::Scheduled;
        inner.buffer_pool.set_active(true).unwrap();
        let (sender, receiver) = oneshot::channel::<()>();
        inner.shutdown_receiver = Some(receiver);
        let element_clone = inner.element.clone();
        io_context.spawn(
            stream
                .for_each(move |buffer| {
                    let res = func(buffer);
                    match res {
                        Ok(()) => future::Either::A(Ok(()).into_future()),
                        //Ok(()) => future::Either::A(YieldOnce::new()),
                        Err(err) => future::Either::B(Err(err).into_future()),
                    }
                })
                .then(move |res| {
                    gst_debug!(SOCKET_CAT, obj: &element_clone, "Socket finished {:?}", res);
                    // TODO: Do something with errors here?
                    let _ = sender.send(());
                    Ok(())
                }),
        );
    }
    fn unpause(&self, clock: gst::Clock, base_time: gst::ClockTime) {
        // Paused->Playing
        //
        // Change state to Running and signal task
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Unpausing socket");
        if inner.state == SocketState::Running {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already unpaused");
            return;
        }
        assert_eq!(inner.state, SocketState::Scheduled);
        inner.state = SocketState::Running;
        inner.clock = Some(clock);
        inner.base_time = Some(base_time);
        if let Some(task) = inner.current_task.take() {
            task.notify();
        }
    }
    fn pause(&self) {
        // Playing->Paused
        //
        // Change state to Scheduled and signal task
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Pausing socket");
        if inner.state == SocketState::Scheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already paused");
            return;
        }
        assert_eq!(inner.state, SocketState::Running);
        inner.state = SocketState::Scheduled;
        inner.clock = None;
        inner.base_time = None;
        if let Some(task) = inner.current_task.take() {
            task.notify();
        }
    }
    fn shutdown(&self) {
        // Paused->Ready
        //
        // Change state to Shutdown and signal task, wait for our future to be finished
        // Requires scheduled function to be unblocked! Pad must be deactivated before
        let mut inner = self.0.lock().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Shutting down socket");
        if inner.state == SocketState::Unscheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already shut down");
            return;
        }
        assert!(inner.state == SocketState::Scheduled || inner.state == SocketState::Running);
        inner.state = SocketState::Shutdown;
        if let Some(task) = inner.current_task.take() {
            task.notify();
        }
        let shutdown_receiver = inner.shutdown_receiver.take().unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Waiting for socket to shut down");
        drop(inner);
        shutdown_receiver.wait().unwrap();
        let mut inner = self.0.lock().unwrap();
        inner.state = SocketState::Unscheduled;
        inner.buffer_pool.set_active(false).unwrap();
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket shut down");
    }
}
// Removed by this commit; moved unchanged to udpsocket.rs.
impl Drop for SocketInner {
    fn drop(&mut self) {
        assert_eq!(self.state, SocketState::Unscheduled);
    }
}
// Removed by this commit; moved unchanged to udpsocket.rs.
struct SocketStream(Socket, Option<gst::MappedBuffer<gst::buffer::Writable>>);
// Removed by this commit; moved unchanged to udpsocket.rs.
impl Stream for SocketStream {
    type Item = gst::Buffer;
    type Error = gst::FlowError;
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        let mut inner = (self.0).0.lock().unwrap();
        if inner.state == SocketState::Shutdown {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket shutting down");
            return Ok(Async::Ready(None));
        } else if inner.state == SocketState::Scheduled {
            gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket not running");
            inner.current_task = Some(task::current());
            return Ok(Async::NotReady);
        }
        assert_eq!(inner.state, SocketState::Running);
        gst_debug!(SOCKET_CAT, obj: &inner.element, "Trying to read data");
        let (len, time) = {
            let mut buffer = match self.1 {
                Some(ref mut buffer) => buffer,
                None => match inner.buffer_pool.acquire_buffer(None) {
                    Ok(buffer) => {
                        self.1 = Some(buffer.into_mapped_buffer_writable().unwrap());
                        self.1.as_mut().unwrap()
                    }
                    Err(err) => {
                        gst_debug!(SOCKET_CAT, obj: &inner.element, "Failed to acquire buffer {:?}", err);
                        return Err(err.into_result().unwrap_err());
                    }
                },
            };
            match inner.socket.poll_recv(buffer.as_mut_slice()) {
                Ok(Async::NotReady) => {
                    gst_debug!(SOCKET_CAT, obj: &inner.element, "No data available");
                    inner.current_task = Some(task::current());
                    return Ok(Async::NotReady);
                }
                Err(err) => {
                    gst_debug!(SOCKET_CAT, obj: &inner.element, "Read error {:?}", err);
                    return Err(gst::FlowError::Error);
                }
                Ok(Async::Ready(len)) => {
                    let time = inner.clock.as_ref().unwrap().get_time();
                    let dts = time - inner.base_time.unwrap();
                    gst_debug!(SOCKET_CAT, obj: &inner.element, "Read {} bytes at {} (clock {})", len, dts, time);
                    (len, dts)
                }
            }
        };
        let mut buffer = self.1.take().unwrap().into_buffer();
        {
            let buffer = buffer.get_mut().unwrap();
            if len < buffer.get_size() {
                buffer.set_size(len);
            }
            buffer.set_dts(time);
        }
        // TODO: Only ever poll the second again in Xms, using tokio-timer
        Ok(Async::Ready(Some(buffer)))
    }
}
use iocontext::*;
use udpsocket::*;
// Default values of the ts-udpsrc "address" and "port" properties.
const DEFAULT_ADDRESS: Option<&'static str> = Some("127.0.0.1");
const DEFAULT_PORT: u32 = 5000;
@@ -790,7 +255,11 @@ impl UdpSrc {
// TODO: Error handling
let mut state = self.state.lock().unwrap();
let io_context = IOContext::new(&settings.context, settings.context_threads as isize, settings.context_wait);
let io_context = IOContext::new(
&settings.context,
settings.context_threads as isize,
settings.context_wait,
);
let addr: IpAddr = match settings.address {
None => return Err(()),
@@ -847,7 +316,7 @@ impl UdpSrc {
config.set_params(None, settings.mtu, 0, 0);
buffer_pool.set_config(config).unwrap();
let socket = Socket::new(element, socket, buffer_pool);
let socket = Socket::new(&element.clone().upcast(), socket, buffer_pool);
let element_clone = element.clone();
socket.schedule(&io_context, move |buffer| {