aggregator: Rename confusingly named SRC_STREAM_LOCK macros to SRC_LOCK

This matches the name of the lock itself. It is also not a stream
lock, as it is not recursive and not held while pushing.

https://bugzilla.gnome.org/show_bug.cgi?id=742684
commit 17df37d8cb
parent 3a3f2b5343
Author: Olivier Crête
Date:   2015-02-18 15:11:14 -05:00

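For background on the rationale above: a GStreamer pad stream lock is recursive and stays held for the whole duration of a push, whereas src_lock is a plain GMutex paired with a GCond, taken only briefly around shared state. The following is a minimal sketch of the two locking disciplines, written in plain GLib rather than taken from the aggregator's actual code:

/* Minimal sketch (plain GLib, not the aggregator's actual code) of the
 * distinction the commit message draws: a pad "stream lock" is recursive
 * and held across a push; src_lock is a plain GMutex used with a GCond
 * and taken only briefly around shared state. */
#include <glib.h>

static GRecMutex stream_lock;   /* stream-lock style: recursive */
static GMutex src_lock;         /* src_lock style: plain, short-lived */
static GCond src_cond;
static gboolean data_ready;     /* state guarded by src_lock */

static void
stream_lock_style (void)
{
  g_rec_mutex_lock (&stream_lock);
  g_rec_mutex_lock (&stream_lock);      /* re-entry from the same thread is OK */
  /* ... data would be pushed downstream with the lock still held ... */
  g_rec_mutex_unlock (&stream_lock);
  g_rec_mutex_unlock (&stream_lock);
}

static void
src_lock_style (void)
{
  g_mutex_lock (&src_lock);             /* NOT recursive: relocking would deadlock */
  while (!data_ready)
    g_cond_wait (&src_cond, &src_lock); /* what SRC_WAIT amounts to */
  g_mutex_unlock (&src_lock);           /* dropped before any pushing happens */
}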

@@ -127,32 +127,32 @@ GST_DEBUG_CATEGORY_STATIC (aggregator_debug);
       g_thread_self()); \
   } G_STMT_END

-#define SRC_STREAM_LOCK(self) G_STMT_START { \
-  GST_TRACE_OBJECT (self, "Taking src STREAM lock from thread %p", \
+#define SRC_LOCK(self) G_STMT_START { \
+  GST_TRACE_OBJECT (self, "Taking src lock from thread %p", \
       g_thread_self()); \
   g_mutex_lock(&self->priv->src_lock); \
-  GST_TRACE_OBJECT (self, "Took src STREAM lock from thread %p", \
+  GST_TRACE_OBJECT (self, "Took src lock from thread %p", \
       g_thread_self()); \
   } G_STMT_END

-#define SRC_STREAM_UNLOCK(self) G_STMT_START { \
-  GST_TRACE_OBJECT (self, "Releasing src STREAM lock from thread %p", \
+#define SRC_UNLOCK(self) G_STMT_START { \
+  GST_TRACE_OBJECT (self, "Releasing src lock from thread %p", \
       g_thread_self()); \
   g_mutex_unlock(&self->priv->src_lock); \
-  GST_TRACE_OBJECT (self, "Released src STREAM lock from thread %p", \
+  GST_TRACE_OBJECT (self, "Released src lock from thread %p", \
       g_thread_self()); \
   } G_STMT_END

-#define SRC_STREAM_WAIT(self) G_STMT_START { \
-  GST_LOG_OBJECT (self, "Waiting for src STREAM on thread %p", \
+#define SRC_WAIT(self) G_STMT_START { \
+  GST_LOG_OBJECT (self, "Waiting for src on thread %p", \
       g_thread_self()); \
   g_cond_wait(&(self->priv->src_cond), &(self->priv->src_lock)); \
-  GST_LOG_OBJECT (self, "DONE Waiting for src STREAM on thread %p", \
+  GST_LOG_OBJECT (self, "DONE Waiting for src on thread %p", \
       g_thread_self()); \
   } G_STMT_END

-#define SRC_STREAM_BROADCAST(self) G_STMT_START { \
-  GST_LOG_OBJECT (self, "Signaling src STREAM from thread %p", \
+#define SRC_BROADCAST(self) G_STMT_START { \
+  GST_LOG_OBJECT (self, "Signaling src from thread %p", \
       g_thread_self()); \
   if (self->priv->aggregate_id) \
     gst_clock_id_unschedule (self->priv->aggregate_id); \
@@ -208,7 +208,7 @@ struct _GstAggregatorPrivate
   gint padcount;

   /* Our state is >= PAUSED */
-  gboolean running;             /* protected by SRC_STREAM_LOCK */
+  gboolean running;             /* protected by src_lock */

   gint seqnum;
   gboolean send_stream_start;   /* protected by srcpad stream lock */
@@ -529,7 +529,7 @@ gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)
   *timeout = FALSE;

-  SRC_STREAM_LOCK (self);
+  SRC_LOCK (self);

   GST_OBJECT_LOCK (self);
   gst_aggregator_get_latency_unlocked (self, &live, &latency_min, &latency_max);
@@ -537,14 +537,14 @@ gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)

   if (gst_aggregator_check_pads_ready (self)) {
     GST_DEBUG_OBJECT (self, "all pads have data");
-    SRC_STREAM_UNLOCK (self);
+    SRC_UNLOCK (self);
     return TRUE;
   }

   /* Before waiting, check if we're actually still running */
   if (!self->priv->running || !self->priv->send_eos) {
-    SRC_STREAM_UNLOCK (self);
+    SRC_UNLOCK (self);
     return FALSE;
   }
@@ -557,7 +557,7 @@ gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)
      * then check if we're ready now. If we return FALSE,
      * we will be directly called again.
      */
-    SRC_STREAM_WAIT (self);
+    SRC_WAIT (self);
   } else {
     GstClockTime base_time, time;
     GstClock *clock;
@@ -589,12 +589,12 @@ gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)

     self->priv->aggregate_id = gst_clock_new_single_shot_id (clock, time);
     gst_object_unref (clock);
-    SRC_STREAM_UNLOCK (self);
+    SRC_UNLOCK (self);

     jitter = 0;
     status = gst_clock_id_wait (self->priv->aggregate_id, &jitter);

-    SRC_STREAM_LOCK (self);
+    SRC_LOCK (self);
     if (self->priv->aggregate_id) {
       gst_clock_id_unref (self->priv->aggregate_id);
       self->priv->aggregate_id = NULL;
@@ -607,14 +607,14 @@ gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)

     /* we timed out */
     if (status == GST_CLOCK_OK || status == GST_CLOCK_EARLY) {
-      SRC_STREAM_UNLOCK (self);
+      SRC_UNLOCK (self);
       *timeout = TRUE;
       return TRUE;
     }
   }

   res = gst_aggregator_check_pads_ready (self);
-  SRC_STREAM_UNLOCK (self);
+  SRC_UNLOCK (self);
   return res;
 }
@@ -712,10 +712,10 @@ gst_aggregator_stop_srcpad_task (GstAggregator * self, GstEvent * flush_start)
   GST_INFO_OBJECT (self, "%s srcpad task",
       flush_start ? "Pausing" : "Stopping");

-  SRC_STREAM_LOCK (self);
+  SRC_LOCK (self);
   self->priv->running = FALSE;
-  SRC_STREAM_BROADCAST (self);
-  SRC_STREAM_UNLOCK (self);
+  SRC_BROADCAST (self);
+  SRC_UNLOCK (self);

   if (flush_start) {
     res = gst_pad_push_event (self->srcpad, flush_start);
@@ -859,10 +859,10 @@ gst_aggregator_default_sink_event (GstAggregator * self,
         gst_aggregator_flush (self);
         gst_pad_push_event (self->srcpad, event);
         event = NULL;
-        SRC_STREAM_LOCK (self);
+        SRC_LOCK (self);
         priv->send_eos = TRUE;
-        SRC_STREAM_BROADCAST (self);
-        SRC_STREAM_UNLOCK (self);
+        SRC_BROADCAST (self);
+        SRC_UNLOCK (self);

         GST_INFO_OBJECT (self, "Releasing source pad STREAM_LOCK");
         GST_PAD_STREAM_UNLOCK (self->srcpad);
@@ -885,7 +885,7 @@ gst_aggregator_default_sink_event (GstAggregator * self,
        * check for it. Mark pending_eos, eos will be set when steal_buffer is
        * called
        */
-      SRC_STREAM_LOCK (self);
+      SRC_LOCK (self);
       PAD_LOCK (aggpad);
       if (!aggpad->priv->buffer) {
         aggpad->priv->eos = TRUE;
@@ -894,8 +894,8 @@ gst_aggregator_default_sink_event (GstAggregator * self,
       }
       PAD_UNLOCK (aggpad);
-      SRC_STREAM_BROADCAST (self);
-      SRC_STREAM_UNLOCK (self);
+      SRC_BROADCAST (self);
+      SRC_UNLOCK (self);

       goto eat;
     }
     case GST_EVENT_SEGMENT:
@@ -1041,13 +1041,13 @@ gst_aggregator_release_pad (GstElement * element, GstPad * pad)

   GST_INFO_OBJECT (pad, "Removing pad");

-  SRC_STREAM_LOCK (self);
+  SRC_LOCK (self);
   g_atomic_int_set (&aggpad->priv->flushing, TRUE);
   gst_aggregator_pad_drop_buffer (aggpad);
   gst_element_remove_pad (element, pad);
-  SRC_STREAM_BROADCAST (self);
-  SRC_STREAM_UNLOCK (self);
+  SRC_BROADCAST (self);
+  SRC_UNLOCK (self);
 }

 static GstPad *
@@ -1196,10 +1196,10 @@ gst_aggregator_query_latency (GstAggregator * self, GstQuery * query)
   data.live = FALSE;

   /* query upstream's latency */
-  SRC_STREAM_LOCK (self);
+  SRC_LOCK (self);
   gst_aggregator_iterate_sinkpads (self,
       gst_aggregator_query_sink_latency_foreach, &data);
-  SRC_STREAM_UNLOCK (self);
+  SRC_UNLOCK (self);

   GST_OBJECT_LOCK (self);
   our_latency = self->priv->latency;
@@ -1313,9 +1313,9 @@ gst_aggregator_default_src_query (GstAggregator * self, GstQuery * query)
        * This is only to unschedule the clock id, we don't really care
        * about the GCond here.
        */
-      SRC_STREAM_LOCK (self);
-      SRC_STREAM_BROADCAST (self);
-      SRC_STREAM_UNLOCK (self);
+      SRC_LOCK (self);
+      SRC_BROADCAST (self);
+      SRC_UNLOCK (self);
       return ret;
     }
     default:
@@ -1833,7 +1833,7 @@ gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
     aggclass->clip (self, aggpad, buffer, &actual_buf);
   }

-  SRC_STREAM_LOCK (self);
+  SRC_LOCK (self);
   PAD_LOCK (aggpad);
   if (aggpad->priv->buffer)
     gst_buffer_unref (aggpad->priv->buffer);
@@ -1841,8 +1841,8 @@ gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
   PAD_UNLOCK (aggpad);
   PAD_FLUSH_UNLOCK (aggpad);

-  SRC_STREAM_BROADCAST (self);
-  SRC_STREAM_UNLOCK (self);
+  SRC_BROADCAST (self);
+  SRC_UNLOCK (self);

   GST_DEBUG_OBJECT (aggpad, "Done chaining");
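
Taken together, the renamed trio implements the classic mutex-plus-condition-variable handshake visible in the hunks above: the srcpad task SRC_WAITs under SRC_LOCK in gst_aggregator_wait_and_check() until a sinkpad thread (e.g. in gst_aggregator_pad_chain()) SRC_BROADCASTs a state change. A hedged, self-contained sketch of that shape; the Agg struct and function names below are illustrative, not the real aggregator API:

/* Sketch of the pattern behind SRC_LOCK/SRC_WAIT/SRC_BROADCAST: the
 * srcpad task sleeps on the condition variable until a sinkpad thread
 * signals a state change (new buffer, EOS, shutdown). Names are
 * illustrative, not the real GstAggregatorPrivate fields. */
#include <glib.h>

typedef struct
{
  GMutex src_lock;
  GCond src_cond;
  gboolean running;
  gboolean have_data;
} Agg;

/* srcpad task side, cf. gst_aggregator_wait_and_check(): the SRC_WAIT loop */
static gboolean
agg_wait_for_data (Agg * agg)
{
  gboolean alive;

  g_mutex_lock (&agg->src_lock);
  while (agg->running && !agg->have_data)
    g_cond_wait (&agg->src_cond, &agg->src_lock); /* unlocks while sleeping */
  alive = agg->running;
  agg->have_data = FALSE;
  g_mutex_unlock (&agg->src_lock);
  return alive;
}

/* sinkpad side, cf. gst_aggregator_pad_chain(): SRC_BROADCAST on new data */
static void
agg_signal_data (Agg * agg)
{
  g_mutex_lock (&agg->src_lock);
  agg->have_data = TRUE;
  g_cond_broadcast (&agg->src_cond);    /* wake the waiting srcpad task */
  g_mutex_unlock (&agg->src_lock);
}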