Mirror of https://gitlab.freedesktop.org/gstreamer/gstreamer.git
aggregator: Don't forward QOS events to sinkpads that had no buffer yet
Otherwise they will receive a QOS event that has earliest_time=0 (because we
can't have negative timestamps), and consider their buffer as too late.

https://bugzilla.gnome.org/show_bug.cgi?id=754356
parent 93d85bd361
commit fc76c936f4

1 changed file with 27 additions and 6 deletions
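To make the failure mode concrete, here is a minimal sketch of how a
QoS-aware element conventionally turns a QOS event into a drop decision.
It is illustrative only, not part of this commit: would_drop and the
clamping formula are assumptions, and real elements (e.g. GstVideoDecoder)
use more elaborate deadlines. A pad that has produced no buffer yet would
be handed an event carrying timestamp = 0 and diff = 0, so the deadline
collapses to 0 and even the very first buffer of the stream is judged late.

#include <gst/gst.h>

/* Hypothetical helper: derive the "too late" deadline the way QoS
 * consumers conventionally do. */
static gboolean
would_drop (GstEvent * event, GstClockTime buffer_running_time)
{
  GstQOSType type;
  gdouble proportion;
  GstClockTimeDiff diff;
  GstClockTime timestamp, earliest_time;

  gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);

  /* Everything scheduled before timestamp + diff counts as late;
   * GstClockTime is unsigned, so the deadline is clamped at 0. */
  if (diff < 0 && timestamp < (GstClockTime) (-diff))
    earliest_time = 0;
  else
    earliest_time = timestamp + diff;

  return buffer_running_time <= earliest_time;  /* TRUE = drop */
}

int
main (int argc, char **argv)
{
  GstEvent *event;

  gst_init (&argc, &argv);

  /* The event as seen by a pad that never received a buffer. */
  event = gst_event_new_qos (GST_QOS_TYPE_UNDERFLOW, 1.0, 0, 0);
  g_print ("drop first buffer? %d\n", would_drop (event, 0));  /* prints 1 */
  gst_event_unref (event);
  return 0;
}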
@@ -215,6 +215,8 @@ struct _GstAggregatorPadPrivate
   gboolean pending_flush_stop;
   gboolean pending_eos;
 
+  gboolean first_buffer;
+
   GQueue buffers;
   guint num_buffers;
   GstClockTime head_position;
@@ -311,6 +313,7 @@ typedef struct
   GstEvent *event;
   gboolean result;
   gboolean flush;
+  gboolean only_to_active_pads;
 
   gboolean one_actually_seeked;
 } EventData;
@@ -1086,6 +1089,8 @@ gst_aggregator_default_sink_event (GstAggregator * self,
       GST_OBJECT_UNLOCK (self);
     }
 
+    aggpad->priv->first_buffer = TRUE;
+
     /* We never forward the event */
     goto eat;
   }
@@ -1554,9 +1559,14 @@ gst_aggregator_event_forward_func (GstPad * pad, gpointer user_data)
   GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
 
   if (peer) {
-    ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
-    GST_DEBUG_OBJECT (pad, "return of event push is %d", ret);
-    gst_object_unref (peer);
+    if (evdata->only_to_active_pads && aggpad->priv->first_buffer) {
+      GST_DEBUG_OBJECT (pad, "not sending event to inactive pad");
+      ret = TRUE;
+    } else {
+      ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
+      GST_DEBUG_OBJECT (pad, "return of event push is %d", ret);
+      gst_object_unref (peer);
+    }
   }
 
   if (ret == FALSE) {
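The skip branch above deliberately reports ret = TRUE: a pad that is
filtered out must not be mistaken for a failed event push when the results
are aggregated. Below is a hedged sketch of the same dispatch pattern in
isolation, meant for gst_pad_forward()-style iteration; ForwardData,
forward_one and pad_has_seen_buffer are hypothetical names, not the
aggregator's internals.

#include <gst/gst.h>

/* Trimmed-down analogue of the EventData struct above (illustrative). */
typedef struct
{
  GstEvent *event;
  gboolean result;
  gboolean only_to_active_pads;
} ForwardData;

/* Hypothetical stand-in for the aggpad->priv->first_buffer flag; a real
 * implementation reads it from the pad's private structure. */
static gboolean
pad_has_seen_buffer (GstPad * pad)
{
  return GPOINTER_TO_INT (g_object_get_data (G_OBJECT (pad), "saw-buffer"));
}

static gboolean
forward_one (GstPad * pad, gpointer user_data)
{
  ForwardData *data = user_data;
  GstPad *peer = gst_pad_get_peer (pad);
  gboolean ret = TRUE;

  if (peer) {
    if (data->only_to_active_pads && !pad_has_seen_buffer (pad))
      GST_DEBUG_OBJECT (pad, "skipping pad that has no buffer yet");
    else
      ret = gst_pad_send_event (peer, gst_event_ref (data->event));
    gst_object_unref (peer);
  }

  data->result &= ret;
  return FALSE;                 /* keep iterating over all sinkpads */
}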
@@ -1602,7 +1612,7 @@ gst_aggregator_event_forward_func (GstPad * pad, gpointer user_data)
 
 static EventData
 gst_aggregator_forward_event_to_all_sinkpads (GstAggregator * self,
-    GstEvent * event, gboolean flush)
+    GstEvent * event, gboolean flush, gboolean only_to_active_pads)
 {
   EventData evdata;
 
@@ -1610,6 +1620,7 @@ gst_aggregator_forward_event_to_all_sinkpads (GstAggregator * self,
   evdata.result = TRUE;
   evdata.flush = flush;
   evdata.one_actually_seeked = FALSE;
+  evdata.only_to_active_pads = only_to_active_pads;
 
   /* We first need to set all pads as flushing in a first pass
    * as flush_start flush_stop is sometimes sent synchronously
@@ -1669,7 +1680,8 @@ gst_aggregator_do_seek (GstAggregator * self, GstEvent * event)
   GST_OBJECT_UNLOCK (self);
 
   /* forward the seek upstream */
-  evdata = gst_aggregator_forward_event_to_all_sinkpads (self, event, flush);
+  evdata =
+      gst_aggregator_forward_event_to_all_sinkpads (self, event, flush, FALSE);
   event = NULL;
 
   if (!evdata.result || !evdata.one_actually_seeked) {
@@ -1712,7 +1724,12 @@ gst_aggregator_default_src_event (GstAggregator * self, GstEvent * event)
     }
   }
 
-  evdata = gst_aggregator_forward_event_to_all_sinkpads (self, event, FALSE);
+  /* Don't forward QOS events to pads that had no active buffer yet. Otherwise
+   * they will receive a QOS event that has earliest_time=0 (because we can't
+   * have negative timestamps), and consider their buffer as too late */
+  evdata =
+      gst_aggregator_forward_event_to_all_sinkpads (self, event, FALSE,
+      GST_EVENT_TYPE (event) == GST_EVENT_QOS);
   res = evdata.result;
 
 done:
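For reference, a hedged sketch of where such a QOS event typically
originates (report_late_buffer is a hypothetical name): a downstream
element that noticed a late buffer pushes the event upstream through its
sink pad, and it eventually reaches gst_aggregator_default_src_event()
above, which after this change relays it only to sinkpads that have
already produced a buffer.

#include <gst/gst.h>

/* Illustrative only: report a late buffer upstream. A positive jitter
 * means the buffer arrived after its deadline. */
static void
report_late_buffer (GstPad * sinkpad, GstClockTime running_time,
    GstClockTimeDiff jitter, gdouble proportion)
{
  GstEvent *qos = gst_event_new_qos (GST_QOS_TYPE_UNDERFLOW, proportion,
      jitter, running_time);

  /* Events pushed on a sink pad travel upstream via its peer. */
  gst_pad_push_event (sinkpad, qos);
}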
@@ -2132,6 +2149,8 @@ gst_aggregator_pad_chain_internal (GstAggregator * self,
 
   buf_pts = GST_BUFFER_PTS (actual_buf);
 
+  aggpad->priv->first_buffer = FALSE;
+
   for (;;) {
     SRC_LOCK (self);
     PAD_LOCK (aggpad);
@@ -2417,6 +2436,8 @@ gst_aggregator_pad_init (GstAggregatorPad * pad)
 
   g_mutex_init (&pad->priv->flush_lock);
   g_mutex_init (&pad->priv->lock);
+
+  pad->priv->first_buffer = TRUE;
 }
 
 /**