memory: expose the internal locking api

Expose the internally used methods for locking and unlocking the object. Pass
the access mode to the unlock function for extra checks and because we need it
for the EXCLUSIVE locks.
Make some new defines to specify the desired locking.
Add a new EXCLUSIVE lock mode which will increment the shared counter. Objects
with a shared counter > 1 will not be lockable in WRITE mode.
This commit is contained in:
Wim Taymans 2012-07-03 09:48:32 +02:00
parent 0f69f9e44a
commit 93f279cd5a
3 changed files with 105 additions and 16 deletions

View file

@ -68,6 +68,31 @@ can free the memory. The GstMemoryFreeFunction of the allocator will be called
to cleanup the memory.
Sharing
-------
GstMemory objects can be shared between multiple GstBuffer objects. It is
important that when a thread writes to the shared memory, the other
buffers do not see the changes.
We add a separate shared counter that counts the number of objects that share
this GstMemory object. The counter is initially 0, meaning that the object is
not shared with any object. When a GstBuffer (or other object) adds a ref to
the GstMemory, it will also increase the shared count.
When the GstMemory is removed from the buffer, the ref count and the shared
counter will be decreased.
We don't want to use the refcount for this purpose because language bindings
might keep arbitrary references to the object.
A GstMemory object with a shared counter > 1 is not writable. Any attempt to
map with WRITE access or resize will fail. _make_mapped() with WRITE access
will make a copy.
Memory layout
~~~~~~~~~~~~~

View file

@ -117,6 +117,12 @@ static GstAllocator *_default_allocator;
/* our predefined allocators */
static GstAllocator *_default_mem_impl;
/* mem->state packs three fields into a single atomic gint
 * (with GST_LOCK_FLAG_LAST == (1 << 4), see GstLockFlags in the header):
 *   bits 0-3  : access-mode flags (GST_LOCK_FLAG_READ/WRITE/EXCLUSIVE)
 *   bits 4-15 : lock refcount, counted in steps of LOCK_ONE
 *   bits 16+  : shared counter, counted in steps of SHARE_ONE
 */
#define SHARE_ONE (1 << 16)     /* +1 on the shared counter */
#define LOCK_ONE (GST_LOCK_FLAG_LAST)   /* +1 on the lock refcount */
#define FLAG_MASK (GST_LOCK_FLAG_LAST - 1)      /* isolates the access-mode flag bits */
#define LOCK_MASK ((SHARE_ONE - 1) - FLAG_MASK) /* isolates the lock-refcount bits */
#define LOCK_FLAG_MASK (SHARE_ONE - 1)  /* refcount + flags: everything below the shared counter */
static GstMemory *
_gst_memory_copy (GstMemory * mem)
{
@ -146,7 +152,8 @@ _default_mem_init (GstMemoryDefault * mem, GstMemoryFlags flags,
mem->mem.allocator = _default_mem_impl;
mem->mem.parent = parent ? gst_memory_ref (parent) : NULL;
mem->mem.state = (flags & GST_MEMORY_FLAG_READONLY ? 0x1 : 0);
mem->mem.state = (flags & GST_MEMORY_FLAG_READONLY ? GST_LOCK_FLAG_READ : 0);
mem->mem.state |= (flags & GST_MEMORY_FLAG_NO_SHARE ? SHARE_ONE : 0);
mem->mem.maxsize = maxsize;
mem->mem.align = align;
mem->mem.offset = offset;
@ -496,24 +503,43 @@ gst_memory_resize (GstMemory * mem, gssize offset, gsize size)
mem->size = size;
}
/* NOTE(review): this span is a rendered diff hunk -- the removed ("old") and
 * added ("new") lines of gst_memory_lock() are interleaved with no +/- markers.
 * The annotations below mark which side each ambiguous line belongs to,
 * inferred from the commit message (expose the lock API, pass GstLockFlags,
 * replace magic numbers with the new defines, add EXCLUSIVE locking). */
/* old (removed): private function taking GstMapFlags */
static gboolean
gst_memory_lock (GstMemory * mem, GstMapFlags flags)
/* new (added): public function with gtk-doc header, taking GstLockFlags */
/**
 * gst_memory_lock:
 * @mem: a #GstMemory
 * @flags: #GstLockFlags
 *
 * Lock the memory with the specified access mode in @flags.
 *
 * Returns: %TRUE if the memory could be locked.
 */
gboolean
gst_memory_lock (GstMemory * mem, GstLockFlags flags)
{
gint access_mode, state, newstate;
/* old: magic number 3; new: FLAG_MASK covers all defined flag bits */
access_mode = flags & 3;
access_mode = flags & FLAG_MASK;
do {
/* CAS retry loop: recompute newstate from a fresh snapshot until it wins */
state = g_atomic_int_get (&mem->state);
/* old (removed): "state == 0" fast path that set the state unconditionally */
if (state == 0) {
/* new (added): a pure EXCLUSIVE lock bumps the shared counter rather than
 * the lock refcount; the EXCLUSIVE bit is then cleared from @flags */
if (flags == GST_LOCK_FLAG_EXCLUSIVE) {
/* shared ref */
newstate = state + SHARE_ONE;
flags &= ~GST_LOCK_FLAG_EXCLUSIVE;
}
/* new (added): refuse WRITE access while more than one object shares us */
/* shared counter > 1 and write access */
if (state > SHARE_ONE && flags & GST_LOCK_FLAG_WRITE)
goto lock_failed;
if ((state & LOCK_FLAG_MASK) == 0) {
/* nothing mapped, set access_mode and refcount */
/* old: hard-coded 4; new: symbolic LOCK_ONE, preserving the shared bits */
newstate = 4 | access_mode;
newstate = state | LOCK_ONE | access_mode;
} else {
/* access_mode must match */
if ((state & access_mode) != access_mode)
goto lock_failed;
/* increase refcount */
/* old: hard-coded 4; new: symbolic LOCK_ONE */
newstate = state + 4;
newstate = state + LOCK_ONE;
}
} while (!g_atomic_int_compare_and_exchange (&mem->state, state, newstate));
@ -527,22 +553,39 @@ lock_failed:
}
}
/* NOTE(review): rendered diff hunk -- old and new lines of gst_memory_unlock()
 * are interleaved with no +/- markers; annotations below mark each side. */
/* old (removed): private function, no flags argument */
static void
gst_memory_unlock (GstMemory * mem)
/* new (added): public function taking the same GstLockFlags as the lock call */
/**
 * gst_memory_unlock:
 * @mem: a #GstMemory
 * @flags: #GstLockFlags
 *
 * Unlock the memory with the specified access mode in @flags.
 */
void
gst_memory_unlock (GstMemory * mem, GstLockFlags flags)
{
/* old declaration (removed) / new declaration (added) */
gint state, newstate;
gint access_mode, state, newstate;
/* NOTE(review): uses a literal 3 here while gst_memory_lock() uses FLAG_MASK
 * (which is 15 with GST_LOCK_FLAG_LAST == 1 << 4) -- looks inconsistent;
 * verify whether this should be FLAG_MASK as well */
access_mode = flags & 3;
do {
state = g_atomic_int_get (&mem->state);
/* new (added): EXCLUSIVE unlock drops one unit from the shared counter and
 * clears the EXCLUSIVE bit before the normal refcount handling below */
if (flags == GST_LOCK_FLAG_EXCLUSIVE) {
/* shared counter */
g_return_if_fail (state >= SHARE_ONE);
newstate = state - SHARE_ONE;
flags &= ~GST_LOCK_FLAG_EXCLUSIVE;
}
/* new (added): extra check that the unlock mode matches the held mode */
g_return_if_fail ((state & access_mode) == access_mode);
/* decrease the refcount */
/* old: hard-coded 4; new: symbolic LOCK_ONE */
newstate = state - 4;
newstate = state - LOCK_ONE;
/* last refcount, unset access_mode */
/* old pair (removed): zeroed the whole state; new pair (added): clears only
 * the refcount+flag bits, preserving the shared counter above SHARE_ONE */
if (newstate < 4)
newstate = 0;
if ((newstate & LOCK_FLAG_MASK) == access_mode)
newstate = state & ~LOCK_FLAG_MASK;
} while (!g_atomic_int_compare_and_exchange (&mem->state, state, newstate));
}
/**
* gst_memory_make_mapped:
* @mem: (transfer full): a #GstMemory
@ -646,7 +689,7 @@ error:
{
/* something went wrong, restore the original state again */
GST_CAT_ERROR (GST_CAT_MEMORY, "mem %p: map failed", mem);
gst_memory_unlock (mem);
gst_memory_unlock (mem, flags);
return FALSE;
}
}
@ -668,7 +711,7 @@ gst_memory_unmap (GstMemory * mem, GstMapInfo * info)
g_return_if_fail (g_atomic_int_get (&mem->state) >= 4);
mem->allocator->info.mem_unmap (mem);
gst_memory_unlock (mem);
gst_memory_unlock (mem, info->flags);
}
/**

View file

@ -463,8 +463,29 @@ gst_memory_unref (GstMemory * memory)
gst_mini_object_unref (GST_MINI_OBJECT_CAST (memory));
}
/* locking */
/**
* GstLockFlags:
* @GST_LOCK_FLAG_READ: lock for read access
* @GST_LOCK_FLAG_WRITE: lock for write access
* @GST_LOCK_FLAG_EXCLUSIVE: lock for exclusive access
* @GST_LOCK_FLAG_LAST: first flag that can be used for custom purposes
*
* Flags used when locking memory
*/
/* Access modes accepted by gst_memory_lock() / gst_memory_unlock().
 * Note: bit 3 (1 << 3) is unused -- GST_LOCK_FLAG_LAST is (1 << 4), leaving
 * one spare bit between EXCLUSIVE and the first custom flag. */
typedef enum {
GST_LOCK_FLAG_READ = (1 << 0),  /* lock for read access */
GST_LOCK_FLAG_WRITE = (1 << 1), /* lock for write access */
GST_LOCK_FLAG_EXCLUSIVE = (1 << 2),     /* counts on the shared counter, not the lock refcount */
GST_LOCK_FLAG_LAST = (1 << 4)   /* first flag that can be used for custom purposes */
} GstLockFlags;
/* NOTE(review): declared here but no definition is visible in this chunk --
 * presumably reports the shared/exclusive state of @mem; confirm in the .c */
gboolean gst_memory_is_exclusive (GstMemory *mem);
/* lock/unlock @mem in the access mode given by @flags (see GstLockFlags) */
gboolean gst_memory_lock (GstMemory *mem, GstLockFlags flags);
void gst_memory_unlock (GstMemory *mem, GstLockFlags flags);
/* getting/setting memory properties */
gsize gst_memory_get_sizes (GstMemory *mem, gsize *offset, gsize *maxsize);
void gst_memory_resize (GstMemory *mem, gssize offset, gsize size);