splitmuxsrc: Fix some deadlock conditions and a crash

When switching the splitmuxsrc state back to NULL quickly, it can
deadlock while shutting down part readers that are still starting up,
or crash if splitmuxsrc has already cleaned up the parts before the
async callback gets a chance to run.

Taking the state lock to post async-start / async-done messages can
deadlock when the state change function is trying to shut down the
element, so use finer-grained locks for that instead.
Author:    Jan Schmidt, 2020-03-24 00:23:24 +11:00
Committer: Nicolas Dufresne
Commit:    00a08c69ac (parent 8ef172d8b4)
4 changed files with 38 additions and 12 deletions
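Concretely, the change moves the async-start / async-done bookkeeping under a
small dedicated mutex, so posting those messages never contends with a thread
that already holds the element's state lock inside gst_element_set_state()
during shutdown. The following is a minimal sketch of that pattern only, not
the element's actual code: the MyAsyncBin struct and function names are
illustrative, and the message is posted with gst_element_post_message() for
brevity, whereas the real patch forwards it through the parent GstBin's
handle_message().

#include <gst/gst.h>

typedef struct {
  GstBin parent;
  GMutex msg_lock;          /* protects async_pending and message posting only;
                             * init it in instance_init, clear it in finalize */
  gboolean async_pending;
} MyAsyncBin;

static void
my_async_bin_do_async_start (MyAsyncBin * bin)
{
  GstMessage *message;

  /* Previously GST_STATE_LOCK was taken here; a thread sitting inside
   * gst_element_set_state() already owns that lock, so posting from another
   * thread could deadlock. A private mutex removes the circular wait. */
  g_mutex_lock (&bin->msg_lock);
  bin->async_pending = TRUE;
  message = gst_message_new_async_start (GST_OBJECT_CAST (bin));
  gst_element_post_message (GST_ELEMENT_CAST (bin), message);
  g_mutex_unlock (&bin->msg_lock);
}

static void
my_async_bin_do_async_done (MyAsyncBin * bin)
{
  g_mutex_lock (&bin->msg_lock);
  if (bin->async_pending) {
    GstMessage *message =
        gst_message_new_async_done (GST_OBJECT_CAST (bin), GST_CLOCK_TIME_NONE);

    gst_element_post_message (GST_ELEMENT_CAST (bin), message);
    bin->async_pending = FALSE;
  }
  g_mutex_unlock (&bin->msg_lock);
}

The diffs below apply exactly this swap to both the part reader and
splitmuxsrc itself, plus the lock-ordering fixes in start/stop.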

gst/multifile/gstsplitmuxpartreader.c

@@ -37,6 +37,9 @@ GST_DEBUG_CATEGORY_STATIC (splitmux_part_debug);
 #define SPLITMUX_PART_TYPE_LOCK(p) g_mutex_lock(&(p)->type_lock)
 #define SPLITMUX_PART_TYPE_UNLOCK(p) g_mutex_unlock(&(p)->type_lock)
+#define SPLITMUX_PART_MSG_LOCK(p) g_mutex_lock(&(p)->msg_lock)
+#define SPLITMUX_PART_MSG_UNLOCK(p) g_mutex_unlock(&(p)->msg_lock)
 typedef struct _GstSplitMuxPartPad
 {
   GstPad parent;
@@ -636,6 +639,7 @@ gst_splitmux_part_reader_init (GstSplitMuxPartReader * reader)
   g_cond_init (&reader->inactive_cond);
   g_mutex_init (&reader->lock);
   g_mutex_init (&reader->type_lock);
+  g_mutex_init (&reader->msg_lock);
   /* FIXME: Create elements on a state change */
   reader->src = gst_element_factory_make ("filesrc", NULL);
@@ -683,6 +687,7 @@ splitmux_part_reader_finalize (GObject * object)
   g_cond_clear (&reader->inactive_cond);
   g_mutex_clear (&reader->lock);
   g_mutex_clear (&reader->type_lock);
+  g_mutex_clear (&reader->msg_lock);
   g_free (reader->path);
@@ -694,12 +699,12 @@ do_async_start (GstSplitMuxPartReader * reader)
 {
   GstMessage *message;
-  GST_STATE_LOCK (reader);
+  SPLITMUX_PART_MSG_LOCK (reader);
   reader->async_pending = TRUE;
   message = gst_message_new_async_start (GST_OBJECT_CAST (reader));
   GST_BIN_CLASS (parent_class)->handle_message (GST_BIN_CAST (reader), message);
-  GST_STATE_UNLOCK (reader);
+  SPLITMUX_PART_MSG_UNLOCK (reader);
 }
 static void
@@ -707,7 +712,7 @@ do_async_done (GstSplitMuxPartReader * reader)
 {
   GstMessage *message;
-  GST_STATE_LOCK (reader);
+  SPLITMUX_PART_MSG_LOCK (reader);
   if (reader->async_pending) {
     message =
         gst_message_new_async_done (GST_OBJECT_CAST (reader),
@@ -717,7 +722,7 @@ do_async_done (GstSplitMuxPartReader * reader)
     reader->async_pending = FALSE;
   }
-  GST_STATE_UNLOCK (reader);
+  SPLITMUX_PART_MSG_UNLOCK (reader);
 }
 static void

gst/multifile/gstsplitmuxpartreader.h

@@ -80,6 +80,7 @@ struct _GstSplitMuxPartReader
   GCond inactive_cond;
   GMutex lock;
   GMutex type_lock;
+  GMutex msg_lock;
   GstSplitMuxPartReaderPadCb get_pad_cb;
   gpointer cb_data;

gst/multifile/gstsplitmuxsrc.c

@@ -331,13 +331,13 @@ do_async_start (GstSplitMuxSrc * splitmux)
 {
   GstMessage *message;
-  GST_STATE_LOCK (splitmux);
+  SPLITMUX_SRC_MSG_LOCK (splitmux);
   splitmux->async_pending = TRUE;
   message = gst_message_new_async_start (GST_OBJECT_CAST (splitmux));
   GST_BIN_CLASS (parent_class)->handle_message (GST_BIN_CAST (splitmux),
       message);
-  GST_STATE_UNLOCK (splitmux);
+  SPLITMUX_SRC_MSG_UNLOCK (splitmux);
 }
 static void
@@ -345,7 +345,7 @@ do_async_done (GstSplitMuxSrc * splitmux)
 {
   GstMessage *message;
-  GST_STATE_LOCK (splitmux);
+  SPLITMUX_SRC_MSG_LOCK (splitmux);
   if (splitmux->async_pending) {
     message =
         gst_message_new_async_done (GST_OBJECT_CAST (splitmux),
@@ -355,7 +355,7 @@ do_async_done (GstSplitMuxSrc * splitmux)
     splitmux->async_pending = FALSE;
   }
-  GST_STATE_UNLOCK (splitmux);
+  SPLITMUX_SRC_MSG_UNLOCK (splitmux);
 }
 static GstStateChangeReturn
@@ -408,10 +408,14 @@ gst_splitmux_src_change_state (GstElement * element, GstStateChange transition)
 static void
 gst_splitmux_src_activate_first_part (GstSplitMuxSrc * splitmux)
 {
-  if (!gst_splitmux_src_activate_part (splitmux, 0, GST_SEEK_FLAG_NONE)) {
-    GST_ELEMENT_ERROR (splitmux, RESOURCE, OPEN_READ, (NULL),
-        ("Failed to activate first part for playback"));
+  SPLITMUX_SRC_LOCK (splitmux);
+  if (splitmux->running) {
+    if (!gst_splitmux_src_activate_part (splitmux, 0, GST_SEEK_FLAG_NONE)) {
+      GST_ELEMENT_ERROR (splitmux, RESOURCE, OPEN_READ, (NULL),
+          ("Failed to activate first part for playback"));
+    }
   }
+  SPLITMUX_SRC_UNLOCK (splitmux);
 }
 static GstBusSyncReply
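The hunk above addresses the crash from the commit message: the part-activation
callback could run after a quick shutdown had already freed the parts, so it
now takes the splitmuxsrc lock and re-checks the running flag before touching
them. A stripped-down sketch of that guard follows, using purely hypothetical
names (PartState, on_first_part_ready) rather than anything from the element,
and assuming the stop path clears running under the same lock before it
invalidates the parts.

#include <glib-object.h>

typedef struct {
  GMutex lock;           /* same lock the stop path takes */
  gboolean running;      /* TRUE between start() and stop() */
  GObject **parts;       /* per-part readers, freed during shutdown */
  guint num_parts;
} PartState;

/* Hypothetical callback that may fire after a fast shutdown already began. */
static void
on_first_part_ready (PartState * state)
{
  g_mutex_lock (&state->lock);
  if (state->running) {
    /* Only here is it safe to touch state->parts: a stop that has already
     * cleared 'running' (and may be about to free the parts) makes this
     * branch unreachable, so a late callback simply does nothing. */
    g_message ("activating first part (%u parts still valid)", state->num_parts);
  }
  g_mutex_unlock (&state->lock);
}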
@@ -887,6 +891,14 @@ gst_splitmux_src_start (GstSplitMuxSrc * splitmux)
   gchar **files;
   guint i;
+  SPLITMUX_SRC_LOCK (splitmux);
+  if (splitmux->running) {
+    /* splitmux is still running / stopping. We can't start again yet */
+    SPLITMUX_SRC_UNLOCK (splitmux);
+    return FALSE;
+  }
+  SPLITMUX_SRC_UNLOCK (splitmux);
   GST_DEBUG_OBJECT (splitmux, "Starting");
   g_signal_emit (splitmux, signals[SIGNAL_FORMAT_LOCATION], 0, &files);
@@ -980,7 +992,10 @@ gst_splitmux_src_stop (GstSplitMuxSrc * splitmux)
   GST_DEBUG_OBJECT (splitmux, "Stopping");
-  /* Stop and destroy all parts */
+  SPLITMUX_SRC_UNLOCK (splitmux);
+  /* Stop and destroy all parts. We don't need the lock here,
+   * because all parts were created in _start() */
   for (i = 0; i < splitmux->num_created_parts; i++) {
     if (splitmux->parts[i] == NULL)
       continue;
@@ -988,6 +1003,7 @@ gst_splitmux_src_stop (GstSplitMuxSrc * splitmux)
     g_object_unref (splitmux->parts[i]);
     splitmux->parts[i] = NULL;
   }
+  SPLITMUX_SRC_LOCK (splitmux);
   SPLITMUX_SRC_PADS_WLOCK (splitmux);
   pads_list = splitmux->pads;
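The _start() and _stop() hunks above complete the picture: _start() refuses to
run while a previous stop is still winding down, and _stop() drops the
splitmuxsrc lock around the part-reader teardown, then re-takes it, because a
reader that is still starting up may call back into the element (as in the
guard sketched earlier) and would otherwise block on the lock held by the
stopping thread. Continuing the PartState sketch, a teardown in that spirit
could look like the following; the placement of running = FALSE and the helper
names are assumptions, not the element's real code.

static void
my_src_stop (PartState * state)
{
  guint i;

  g_mutex_lock (&state->lock);
  state->running = FALSE;          /* late callbacks now bail out early */
  g_mutex_unlock (&state->lock);   /* don't hold the lock across teardown */

  /* Shutting a reader down can block until its callbacks have finished,
   * and those callbacks take state->lock, so it must not be held here.
   * Walking the array without the lock is safe because the parts are only
   * ever created in start() and only this function destroys them. */
  for (i = 0; i < state->num_parts; i++) {
    if (state->parts[i] == NULL)
      continue;
    g_object_unref (state->parts[i]);
    state->parts[i] = NULL;
  }

  g_mutex_lock (&state->lock);
  /* ... remove pads and reset the remaining state under the lock ... */
  g_mutex_unlock (&state->lock);
}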

gst/multifile/gstsplitmuxsrc.h

@@ -44,6 +44,7 @@ struct _GstSplitMuxSrc
   GstBin parent;
   GMutex lock;
+  GMutex msg_lock;
   gboolean running;
   gchar *location; /* OBJECT_LOCK */
@@ -108,6 +109,9 @@ gboolean register_splitmuxsrc (GstPlugin * plugin);
 #define SPLITMUX_SRC_LOCK(s) g_mutex_lock(&(s)->lock)
 #define SPLITMUX_SRC_UNLOCK(s) g_mutex_unlock(&(s)->lock)
+#define SPLITMUX_SRC_MSG_LOCK(s) g_mutex_lock(&(s)->msg_lock)
+#define SPLITMUX_SRC_MSG_UNLOCK(s) g_mutex_unlock(&(s)->msg_lock)
 #define SPLITMUX_SRC_PADS_WLOCK(s) g_rw_lock_writer_lock(&(s)->pads_rwlock)
 #define SPLITMUX_SRC_PADS_WUNLOCK(s) g_rw_lock_writer_unlock(&(s)->pads_rwlock)
 #define SPLITMUX_SRC_PADS_RLOCK(s) g_rw_lock_reader_lock(&(s)->pads_rwlock)