aggregator: Protect data with the same mutex as GCond

Whenever a GCond is used, the safest pattern is to protect the
variable whose change is signalled by the GCond with the same
mutex that the GCond waits on.
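
A minimal sketch of this pattern with GLib primitives (the lock, cond
and running names are illustrative, not taken from the patch):

    #include <glib.h>

    static GMutex lock;             /* the mutex the GCond waits on */
    static GCond cond;              /* signals changes of `running` */
    static gboolean running = TRUE; /* protected by `lock` */

    static void
    stop_running (void)
    {
      g_mutex_lock (&lock);
      running = FALSE;          /* write the shared variable under the mutex... */
      g_cond_broadcast (&cond); /* ...and signal while still holding it */
      g_mutex_unlock (&lock);
    }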

https://bugzilla.gnome.org/show_bug.cgi?id=742684
commit 6379433a70 (parent d33e6a191e)
Author:    Olivier Crête, 2015-01-09 22:01:00 -05:00
Committed: Tim-Philipp Müller

@@ -166,7 +166,7 @@ GST_DEBUG_CATEGORY_STATIC (aggregator_debug);
         g_thread_self()); \
 } G_STMT_END
 
-#define SRC_STREAM_BROADCAST_UNLOCKED(self) G_STMT_START { \
+#define SRC_STREAM_BROADCAST(self) G_STMT_START { \
   GST_LOG_OBJECT (self, "Signaling src STREAM from thread %p", \
         g_thread_self()); \
   if (self->priv->aggregate_id) \
@@ -174,12 +174,6 @@ GST_DEBUG_CATEGORY_STATIC (aggregator_debug);
   g_cond_broadcast(&(self->priv->src_cond)); \
 } G_STMT_END
 
-#define SRC_STREAM_BROADCAST(self) G_STMT_START { \
-  SRC_STREAM_LOCK (self); \
-  SRC_STREAM_BROADCAST_UNLOCKED (self); \
-  SRC_STREAM_UNLOCK (self); \
-} G_STMT_END
-
 struct _GstAggregatorPadPrivate
 {
   gboolean pending_flush_start;
@@ -217,7 +211,7 @@ struct _GstAggregatorPrivate
   gint padcount;
 
   /* Our state is >= PAUSED */
-  gboolean running;
+  gboolean running;             /* protected by SRC_STREAM_LOCK */
 
   gint seqnum;
@@ -690,15 +684,16 @@ gst_aggregator_stop_srcpad_task (GstAggregator * self, GstEvent * flush_start)
   GST_INFO_OBJECT (self, "%s srcpad task",
       flush_start ? "Pausing" : "Stopping");
 
+  SRC_STREAM_LOCK (self);
   self->priv->running = FALSE;
   SRC_STREAM_BROADCAST (self);
+  SRC_STREAM_UNLOCK (self);
 
   if (flush_start) {
     res = gst_pad_push_event (self->srcpad, flush_start);
   }
 
   gst_pad_stop_task (self->srcpad);
-  SRC_STREAM_BROADCAST (self);
 
   return res;
 }
@@ -827,9 +822,11 @@ gst_aggregator_default_sink_event (GstAggregator * self,
        * all sinkpads -- Seeking is Done... sending FLUSH_STOP */
       gst_aggregator_flush (self);
       gst_pad_push_event (self->srcpad, event);
-      priv->send_eos = TRUE;
       event = NULL;
+      SRC_STREAM_LOCK (self);
+      priv->send_eos = TRUE;
       SRC_STREAM_BROADCAST (self);
+      SRC_STREAM_UNLOCK (self);
 
       GST_INFO_OBJECT (self, "Releasing source pad STREAM_LOCK");
       GST_PAD_STREAM_UNLOCK (self->srcpad);
@@ -849,6 +846,7 @@ gst_aggregator_default_sink_event (GstAggregator * self,
        * check for it. Mark pending_eos, eos will be set when steal_buffer is
        * called
        */
+      SRC_STREAM_LOCK (self);
       PAD_LOCK_EVENT (aggpad);
       if (!aggpad->buffer) {
         aggpad->eos = TRUE;
@@ -858,6 +856,7 @@ gst_aggregator_default_sink_event (GstAggregator * self,
       PAD_UNLOCK_EVENT (aggpad);
       SRC_STREAM_BROADCAST (self);
+      SRC_STREAM_UNLOCK (self);
       goto eat;
     }
     case GST_EVENT_SEGMENT:
@@ -994,12 +993,14 @@ gst_aggregator_release_pad (GstElement * element, GstPad * pad)
   GST_INFO_OBJECT (pad, "Removing pad");
 
+  SRC_STREAM_LOCK (self);
   g_atomic_int_set (&aggpad->priv->flushing, TRUE);
   tmpbuf = gst_aggregator_pad_steal_buffer (aggpad);
   gst_buffer_replace (&tmpbuf, NULL);
 
   gst_element_remove_pad (element, pad);
 
   SRC_STREAM_BROADCAST (self);
+  SRC_STREAM_UNLOCK (self);
 }
 
 static GstPad *
@@ -1260,8 +1261,12 @@ gst_aggregator_default_src_query (GstAggregator * self, GstQuery * query)
       /* Wake up the src thread again, due to changed latencies
        * or changed live-ness we might have to adjust if we wait
        * on a deadline at all and how long.
+       * This is only to unschedule the clock id, we don't really care
+       * about the GCond here.
        */
+      SRC_STREAM_LOCK (self);
       SRC_STREAM_BROADCAST (self);
+      SRC_STREAM_UNLOCK (self);
       return ret;
     }
     default:
@@ -1757,6 +1762,7 @@ gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
     aggclass->clip (self, aggpad, buffer, &actual_buf);
   }
 
+  SRC_STREAM_LOCK (self);
   PAD_LOCK_EVENT (aggpad);
   if (aggpad->buffer)
     gst_buffer_unref (aggpad->buffer);
@@ -1764,9 +1770,8 @@ gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
   PAD_UNLOCK_EVENT (aggpad);
   PAD_STREAM_UNLOCK (aggpad);
 
-  SRC_STREAM_LOCK (self);
   if (gst_aggregator_check_pads_ready (self))
-    SRC_STREAM_BROADCAST_UNLOCKED (self);
+    SRC_STREAM_BROADCAST (self);
   SRC_STREAM_UNLOCK (self);
 
   GST_DEBUG_OBJECT (aggpad, "Done chaining");
@@ -1877,8 +1882,8 @@ gst_aggregator_pad_activate_mode_func (GstPad * pad,
     PAD_BROADCAST_EVENT (aggpad);
     PAD_UNLOCK_EVENT (aggpad);
   } else {
-    g_atomic_int_set (&aggpad->priv->flushing, FALSE);
     PAD_LOCK_EVENT (aggpad);
+    g_atomic_int_set (&aggpad->priv->flushing, FALSE);
     PAD_BROADCAST_EVENT (aggpad);
     PAD_UNLOCK_EVENT (aggpad);
   }
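
For context, the waiter side of the same illustrative sketch shows what
the shared mutex buys: because the check and g_cond_wait() happen with
`lock` held, a broadcast can never fall between them and be lost, and
the while loop re-checks the variable after spurious wakeups:

    static void
    wait_until_stopped (void)
    {
      g_mutex_lock (&lock);
      while (running)               /* loop: wakeups may be spurious */
        g_cond_wait (&cond, &lock); /* atomically unlocks, waits, relocks */
      g_mutex_unlock (&lock);
    }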