threadshare: Schedule the pending queue in queue/proxysink only on EOS or the first buffer/buffer list

We will only get the custom sticky downstream event carrying the IO context
after stream-start and the other initial events, so we would potentially
block the current thread from inside another futures executor, which then
panics. Instead, just queue up those events for the time being and schedule
the pending queue at a later time (on EOS or the first buffer/buffer list).
This commit is contained in:
Sebastian Dröge 2018-05-16 17:32:35 +03:00
parent 3a8ce35e60
commit 51aa06d013
2 changed files with 184 additions and 134 deletions

View file

@ -215,7 +215,7 @@ impl Drop for SharedQueue {
let mut inner = self.0.lock().unwrap();
assert!(inner.have_sink);
inner.have_sink = false;
if let Some((Some(task), _)) = inner.pending_queue.take() {
if let Some((Some(task), _, _)) = inner.pending_queue.take() {
task.notify();
}
} else {
@ -232,7 +232,7 @@ struct SharedQueueInner {
name: String,
queue: Option<DataQueue>,
last_ret: gst::FlowReturn,
pending_queue: Option<(Option<task::Task>, VecDeque<DataQueueItem>)>,
pending_queue: Option<(Option<task::Task>, bool, VecDeque<DataQueueItem>)>,
pending_future_cancel: Option<futures::sync::oneshot::Sender<()>>,
have_sink: bool,
have_src: bool,
@ -388,9 +388,24 @@ impl ProxySink {
};
if let Err(item) = item {
if queue
.pending_queue
.as_ref()
.map(|(_, scheduled, _)| !scheduled)
.unwrap_or(true)
{
if queue.pending_queue.is_none() {
queue.pending_queue = Some((None, VecDeque::new()));
queue.pending_queue.as_mut().unwrap().1.push_back(item);
queue.pending_queue = Some((None, false, VecDeque::new()));
}
let schedule_now = match item {
DataQueueItem::Event(ref ev) if ev.get_type() != gst::EventType::Eos => {
false
}
_ => true,
};
queue.pending_queue.as_mut().unwrap().2.push_back(item);
gst_log!(
self.cat,
@ -398,6 +413,11 @@ impl ProxySink {
"Proxy is full - Pushing first item on pending queue"
);
if schedule_now {
gst_log!(self.cat, obj: element, "Scheduling pending queue now");
queue.pending_queue.as_mut().unwrap().1 = true;
let element_clone = element.clone();
let future = future::poll_fn(move || {
let sink = element_clone
@ -425,7 +445,8 @@ impl ProxySink {
..
} = *queue;
let res = if let Some((ref mut task, ref mut items)) = *pending_queue {
let res = if let Some((ref mut task, _, ref mut items)) = *pending_queue
{
if let &Some(ref queue) = queue {
let mut failed_item = None;
for item in items.drain(..) {
@ -485,8 +506,12 @@ impl ProxySink {
Some(future)
}
} else {
assert!(io_context.is_some());
queue.pending_queue.as_mut().unwrap().1.push_back(item);
gst_log!(self.cat, obj: element, "Scheduling pending queue later");
None
}
} else {
queue.pending_queue.as_mut().unwrap().2.push_back(item);
None
}
@ -645,7 +670,7 @@ impl ProxySink {
let mut queue = state.queue.as_ref().unwrap().0.lock().unwrap();
if let Some((Some(task), _)) = queue.pending_queue.take() {
if let Some((Some(task), _, _)) = queue.pending_queue.take() {
task.notify();
}
queue.last_ret = gst::FlowReturn::Flushing;
@ -908,7 +933,7 @@ impl ProxySrc {
let event = {
let state = self.state.lock().unwrap();
let queue = state.queue.as_ref().unwrap().0.lock().unwrap();
if let Some((Some(ref task), _)) = queue.pending_queue {
if let Some((Some(ref task), _, _)) = queue.pending_queue {
task.notify();
}

View file

@ -123,7 +123,7 @@ struct State {
io_context_in: Option<IOContext>,
pending_future_id_in: Option<PendingFutureId>,
queue: Option<DataQueue>,
pending_queue: Option<(Option<task::Task>, VecDeque<DataQueueItem>)>,
pending_queue: Option<(Option<task::Task>, bool, VecDeque<DataQueueItem>)>,
last_ret: gst::FlowReturn,
pending_future_cancel: Option<futures::sync::oneshot::Sender<()>>,
}
@ -292,9 +292,23 @@ impl Queue {
Err(item)
};
if let Err(item) = item {
if pending_queue
.as_ref()
.map(|(_, scheduled, _)| !scheduled)
.unwrap_or(true)
{
if pending_queue.is_none() {
*pending_queue = Some((None, VecDeque::new()));
pending_queue.as_mut().unwrap().1.push_back(item);
*pending_queue = Some((None, false, VecDeque::new()));
}
let schedule_now = match item {
DataQueueItem::Event(ref ev) if ev.get_type() != gst::EventType::Eos => {
false
}
_ => true,
};
pending_queue.as_mut().unwrap().2.push_back(item);
gst_log!(
self.cat,
@ -302,6 +316,11 @@ impl Queue {
"Queue is full - Pushing first item on pending queue"
);
if schedule_now {
gst_log!(self.cat, obj: element, "Scheduling pending queue now");
pending_queue.as_mut().unwrap().1 = true;
let element_clone = element.clone();
let future = future::poll_fn(move || {
let queue = element_clone.get_impl().downcast_ref::<Queue>().unwrap();
@ -322,7 +341,8 @@ impl Queue {
obj: &element_clone,
"Trying to empty pending queue"
);
let res = if let Some((ref mut task, ref mut items)) = *pending_queue {
let res = if let Some((ref mut task, _, ref mut items)) = *pending_queue
{
let mut failed_item = None;
for item in items.drain(..) {
if let Err(item) = dq.as_ref().unwrap().push(item) {
@ -372,9 +392,14 @@ impl Queue {
} else {
Some(future)
}
} else {
gst_log!(self.cat, obj: element, "Scheduling pending queue later");
None
}
} else {
assert!(io_context_in.is_some());
pending_queue.as_mut().unwrap().1.push_back(item);
pending_queue.as_mut().unwrap().2.push_back(item);
None
}
@ -562,7 +587,7 @@ impl Queue {
> {
let event = {
let state = self.state.lock().unwrap();
if let Some((Some(ref task), _)) = state.pending_queue {
if let Some((Some(ref task), _, _)) = state.pending_queue {
task.notify();
}
@ -793,7 +818,7 @@ impl Queue {
queue.pause();
queue.clear(&self.src_pad);
}
if let Some((Some(task), _)) = state.pending_queue.take() {
if let Some((Some(task), _, _)) = state.pending_queue.take() {
task.notify();
}
let _ = state.pending_future_cancel.take();