clock: Avoid creating a weakref with every entry

Creating and destroying weakrefs takes a write lock on a global
`GRWLock`. This makes for a very contended lock when the pipeline has
many synchronizing elements.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2533>
This commit is contained in:
Jan Alexander Steffens (heftig) 2022-05-18 17:03:27 +02:00 committed by Tim-Philipp Müller
parent 4357c72451
commit a4d7ae7af4
2 changed files with 71 additions and 9 deletions

View file

@ -514,11 +514,14 @@ struct _GstDynamicTypeFactoryClass {
/* private flag used by GstBus / GstMessage */
#define GST_MESSAGE_FLAG_ASYNC_DELIVERY (GST_MINI_OBJECT_FLAG_LAST << 0)
/* private type used by GstClock */
typedef struct _GstClockWeakRef GstClockWeakRef;
/* private struct used by GstClock and GstSystemClock */
struct _GstClockEntryImpl
{
GstClockEntry entry;
GWeakRef clock;
GstClockWeakRef *weakref;
GDestroyNotify destroy_entry;
gpointer padding[21]; /* padding for allowing e.g. systemclock
* to add data in lieu of overridable

View file

@ -132,6 +132,60 @@ enum
#define GST_CLOCK_SLAVE_LOCK(clock) g_mutex_lock (&GST_CLOCK_CAST (clock)->priv->slave_lock)
#define GST_CLOCK_SLAVE_UNLOCK(clock) g_mutex_unlock (&GST_CLOCK_CAST (clock)->priv->slave_lock)
/* An atomically ref-counted wrapper around a GWeakRef for a GstClock, created
* by the clock and shared with all its clock entries.
*
* This exists because g_weak_ref_ operations are quite expensive and operate
* with a global GRWLock. _get takes a reader lock, _init and _clear take a
* writer lock. We want to avoid having a GWeakRef in every clock entry.
*
* FIXME: Simplify this with g_atomic_rc_box_new (GWeakRef) once we can depend
* on GLib 2.58.
*/
struct _GstClockWeakRef
{
  gint refcount;                /* atomic; manipulated via g_atomic_int_add */
  GWeakRef clock;               /* weak ref to the owning GstClock */
};
/* Allocate a shared weak-ref wrapper for @clock with an initial
 * refcount of 1. Freed by gst_clock_weak_ref_unref() when the last
 * reference is dropped. */
static GstClockWeakRef *
gst_clock_weak_ref_new (GstClock * clock)
{
  GstClockWeakRef *self;

  self = g_slice_new (GstClockWeakRef);
  self->refcount = 1;
  g_weak_ref_init (&self->clock, clock);

  return self;
}
/* Atomically take an additional reference; returns @weakref for
 * convenient chaining by the caller. */
static GstClockWeakRef *
gst_clock_weak_ref_ref (GstClockWeakRef * weakref)
{
  (void) g_atomic_int_add (&weakref->refcount, 1);

  return weakref;
}
/* Atomically drop one reference. When the last reference goes away,
 * clear the wrapped GWeakRef and release the wrapper itself. */
static void
gst_clock_weak_ref_unref (GstClockWeakRef * weakref)
{
  gint prev;

  /* g_atomic_int_add returns the value *before* the decrement */
  prev = g_atomic_int_add (&weakref->refcount, -1);
  g_return_if_fail (prev > 0);

  if (G_LIKELY (prev != 1))
    return;

  g_weak_ref_clear (&weakref->clock);
  g_slice_free (GstClockWeakRef, weakref);
}
/* Resolve the weak reference: returns a strong ref to the clock, or
 * NULL if the clock has already been disposed. Caller owns the ref. */
static GstClock *
gst_clock_weak_ref_get (GstClockWeakRef * weakref)
{
  return g_weak_ref_get (&weakref->clock);
}
struct _GstClockPrivate
{
GMutex slave_lock; /* order: SLAVE_LOCK, OBJECT_LOCK */
@ -165,11 +219,13 @@ struct _GstClockPrivate
gint post_count;
gboolean synced;
GstClockWeakRef *weakref;
};
typedef struct _GstClockEntryImpl GstClockEntryImpl;
#define GST_CLOCK_ENTRY_CLOCK_WEAK_REF(entry) (&((GstClockEntryImpl *)(entry))->clock)
#define GST_CLOCK_ENTRY_CLOCK_WEAK_REF(entry) (((GstClockEntryImpl *)(entry))->weakref)
/* seqlocks */
#define read_seqbegin(clock) \
@ -260,7 +316,8 @@ gst_clock_entry_new (GstClock * clock, GstClockTime time,
entry->_clock = clock;
#endif
#endif
g_weak_ref_init (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry), clock);
GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry) =
gst_clock_weak_ref_ref (clock->priv->weakref);
entry->type = type;
entry->time = time;
entry->interval = interval;
@ -374,7 +431,7 @@ _gst_clock_id_free (GstClockID id)
if (entry_impl->destroy_entry)
entry_impl->destroy_entry (entry_impl);
g_weak_ref_clear (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
gst_clock_weak_ref_unref (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
/* FIXME: add tracer hook for struct allocations such as clock entries */
@ -531,7 +588,7 @@ gst_clock_id_wait (GstClockID id, GstClockTimeDiff * jitter)
entry = (GstClockEntry *) id;
requested = GST_CLOCK_ENTRY_TIME (entry);
clock = g_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
clock = gst_clock_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
if (G_UNLIKELY (clock == NULL))
goto invalid_entry;
@ -613,7 +670,7 @@ gst_clock_id_wait_async (GstClockID id,
entry = (GstClockEntry *) id;
requested = GST_CLOCK_ENTRY_TIME (entry);
clock = g_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
clock = gst_clock_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
if (G_UNLIKELY (clock == NULL))
goto invalid_entry;
@ -676,7 +733,7 @@ gst_clock_id_unschedule (GstClockID id)
g_return_if_fail (id != NULL);
entry = (GstClockEntry *) id;
clock = g_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
clock = gst_clock_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
if (G_UNLIKELY (clock == NULL))
goto invalid_entry;
@ -769,6 +826,7 @@ gst_clock_init (GstClock * clock)
priv->timeout = DEFAULT_TIMEOUT;
priv->times = g_new0 (GstClockTime, 4 * priv->window_size);
priv->times_temp = priv->times + 2 * priv->window_size;
priv->weakref = gst_clock_weak_ref_new (clock);
}
static void
@ -801,6 +859,7 @@ gst_clock_finalize (GObject * object)
clock->priv->times_temp = NULL;
GST_CLOCK_SLAVE_UNLOCK (clock);
gst_clock_weak_ref_unref (clock->priv->weakref);
g_mutex_clear (&clock->priv->slave_lock);
g_cond_clear (&clock->priv->sync_cond);
@ -1374,7 +1433,7 @@ gst_clock_id_get_clock (GstClockID id)
g_return_val_if_fail (id != NULL, NULL);
entry = (GstClockEntry *) id;
return g_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
return gst_clock_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
}
/**
@ -1401,7 +1460,7 @@ gst_clock_id_uses_clock (GstClockID id, GstClock * clock)
g_return_val_if_fail (clock != NULL, FALSE);
entry = (GstClockEntry *) id;
entry_clock = g_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
entry_clock = gst_clock_weak_ref_get (GST_CLOCK_ENTRY_CLOCK_WEAK_REF (entry));
if (entry_clock == clock)
ret = TRUE;