Added element-level locking support, implemented for cothreads with a mutex held whenever an element is actually currently running.

Original commit message from CVS:
Added element-level locking support, implemented for cothreads with a mutex
held whenever an element is actually currently running.  This should be done
with atomic variables eventually, but the best I can find is to use
<asm/spinlock.h> from the kernel, which brings in too much for my taste.

Also increased cothread stack size to 32KB and max cothread count to 64,
which fits within the default pthread 2MB stack.  It would probably be good
to do a pthread_attr_getstacksize() to verify that it's 2MB, and if
linuxthreads had a pthread_attr_setstacksize(), we could remove these
limits.
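As a worked check of the numbers above (64 cothreads x 32 KB = 2 MB), here is a minimal sketch of the verification the message proposes. It assumes a GNU/Linux libc that provides pthread_getattr_np() (a GNU extension; the linuxthreads of that era may not have it), and the helper name cothread_stack_fits() is invented for illustration, not part of the commit.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

/* Hypothetical helper: check that COTHREAD_MAXTHREADS * COTHREAD_STACKSIZE
 * (64 * 32768 bytes = 2 MB) really fits in the calling thread's stack. */
static int
cothread_stack_fits (void)
{
  pthread_attr_t attr;
  size_t stacksize = 0;

  /* pthread_getattr_np() is a GNU extension; if it is missing we cannot
   * tell, which is exactly the situation the commit message describes. */
  if (pthread_getattr_np (pthread_self (), &attr) != 0)
    return 1;
  pthread_attr_getstacksize (&attr, &stacksize);
  pthread_attr_destroy (&attr);

  printf ("thread stack is %lu bytes\n", (unsigned long) stacksize);
  return stacksize >= 64 * 32768;
}

int
main (void)
{
  return cothread_stack_fits () ? 0 : 1;
}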
Erik Walthinsen 2001-04-25 22:32:17 +00:00
parent 2e56cca65e
commit 284bfca414
6 changed files with 145 additions and 5 deletions


@@ -76,6 +76,13 @@ cothread_init (void)
ctx->threads[0]->sp = (int *)CURRENT_STACK_FRAME;
ctx->threads[0]->pc = 0;
// initialize the lock
#ifdef COTHREAD_ATOMIC
atomic_set (&ctx->threads[0]->lock, 0);
#else
ctx->threads[0]->lock = g_mutex_new();
#endif
GST_INFO (GST_CAT_COTHREADS,"0th thread is %p at sp:%p",ctx->threads[0], ctx->threads[0]->sp);
// we consider the initiating process to be cothread 0
@@ -176,6 +183,11 @@ cothread_stub (void)
GST_DEBUG_ENTER("");
thread->flags |= COTHREAD_STARTED;
#ifdef COTHREAD_ATOMIC
// do something here to lock
#else
g_mutex_lock(thread->lock);
#endif
while (1) {
thread->func(thread->argc,thread->argv);
// we do this to avoid ever returning, we just switch to 0th thread
@@ -262,6 +274,20 @@ cothread_switch (cothread_state *thread)
#endif
if (current == thread) goto selfswitch;
// unlock the current thread, we're out of that context now
#ifdef COTHREAD_ATOMIC
// do something to unlock the cothread
#else
g_mutex_unlock(current->lock);
#endif
// lock the next cothread before we even switch to it
#ifdef COTHREAD_ATOMIC
// do something to lock the cothread
#else
g_mutex_lock(thread->lock);
#endif
// find the number of the thread to switch to
GST_INFO (GST_CAT_COTHREAD_SWITCH,"switching from cothread #%d to cothread #%d",
ctx->current,thread->threadnum);
@@ -313,3 +339,35 @@ selfswitch:
g_print("cothread: trying to switch to same thread, legal but not necessary\n");
return;
}
void
cothread_lock (cothread_state *thread)
{
#ifdef COTHREAD_ATOMIC
// do something to lock the cothread
#else
g_mutex_lock(thread->lock);
#endif
}
gboolean
cothread_trylock (cothread_state *thread)
{
#ifdef COTHREAD_ATOMIC
// do something to try to lock the cothread
#else
return g_mutex_trylock(thread->lock);
#endif
}
void
cothread_unlock (cothread_state *thread)
{
#ifdef COTHREAD_ATOMIC
// do something to unlock the cothread
#else
g_mutex_unlock(thread->lock);
#endif
}
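The COTHREAD_ATOMIC branches above are left as placeholders because, as the commit message says, <asm/atomic.h> by itself offers no usable test-and-set. Purely as a sketch of what they could become, here is a spinlock built on C11 <stdatomic.h> instead of the kernel headers; the names cothread_spinlock, cothread_spin_lock, cothread_spin_trylock and cothread_spin_unlock are invented and not part of the tree.

#include <stdatomic.h>
#include <sched.h>

/* Hypothetical stand-in for the GMutex/atomic_t lock field.
 * Declare and initialize as: cothread_spinlock lock = { ATOMIC_FLAG_INIT }; */
typedef struct { atomic_flag busy; } cothread_spinlock;

static void
cothread_spin_lock (cothread_spinlock *l)
{
  /* spin until we are the one who flips the flag from clear to set */
  while (atomic_flag_test_and_set_explicit (&l->busy, memory_order_acquire))
    sched_yield ();
}

static int
cothread_spin_trylock (cothread_spinlock *l)
{
  /* succeeds only if the flag was clear before we set it */
  return !atomic_flag_test_and_set_explicit (&l->busy, memory_order_acquire);
}

static void
cothread_spin_unlock (cothread_spinlock *l)
{
  atomic_flag_clear_explicit (&l->busy, memory_order_release);
}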


@@ -26,8 +26,14 @@
#include <glib.h>
#include <setjmp.h>
#define COTHREAD_STACKSIZE 8192
#define COTHREAD_MAXTHREADS 16
#ifdef HAVE_ATOMIC_H
#include <asm/atomic.h>
#endif
#undef COTHREAD_ATOMIC
#define COTHREAD_STACKSIZE 32768
#define COTHREAD_MAXTHREADS 64
#define STACK_SIZE 0x200000
#ifndef CURRENT_STACK_FRAME
@@ -51,10 +57,16 @@ struct _cothread_state {
int flags;
void *sp;
jmp_buf jmp;
/* is this needed any more? */
void *top_sp;
void *pc;
jmp_buf jmp;
#ifdef COTHREAD_ATOMIC
atomic_t lock;
#else
GMutex *lock;
#endif
};
struct _cothread_context {
@@ -73,6 +85,10 @@ void cothread_switch (cothread_state *thread);
void cothread_set_data (cothread_state *thread, gchar *key, gpointer data);
gpointer cothread_get_data (cothread_state *thread, gchar *key);
void cothread_lock (cothread_state *thread);
gboolean cothread_trylock (cothread_state *thread);
void cothread_unlock (cothread_state *thread);
cothread_state* cothread_main (cothread_context *ctx);
#endif /* __COTHREAD_H__ */


@ -49,6 +49,9 @@ enum {
static void gst_element_class_init (GstElementClass *klass);
static void gst_element_init (GstElement *element);
static void gst_element_set_arg (GtkObject *object, GtkArg *arg, guint id);
static void gst_element_get_arg (GtkObject *object, GtkArg *arg, guint id);
static void gst_element_real_destroy (GtkObject *object);
static GstElementStateReturn gst_element_change_state (GstElement *element);
@@ -68,8 +71,8 @@ GtkType gst_element_get_type(void) {
sizeof(GstElementClass),
(GtkClassInitFunc)gst_element_class_init,
(GtkObjectInitFunc)gst_element_init,
(GtkArgSetFunc)NULL,
(GtkArgGetFunc)NULL,
(GtkArgSetFunc)gst_element_set_arg,
(GtkArgGetFunc)gst_element_get_arg,
(GtkClassInitFunc)NULL,
};
element_type = gtk_type_unique(GST_TYPE_OBJECT,&element_info);
@@ -116,6 +119,8 @@ gst_element_class_init (GstElementClass *klass)
gtk_object_class_add_signals (gtkobject_class, gst_element_signals, LAST_SIGNAL);
gtkobject_class->set_arg = gst_element_set_arg;
gtkobject_class->get_arg = gst_element_get_arg;
gtkobject_class->destroy = gst_element_real_destroy;
gstobject_class->save_thyself = gst_element_save_thyself;
@@ -138,6 +143,35 @@ gst_element_init (GstElement *element)
element->sched = NULL;
}
static void
gst_element_set_arg (GtkObject *object, GtkArg *arg, guint id)
{
GstElementClass *oclass = GST_ELEMENT_CLASS (object->klass);
GST_SCHEDULE_LOCK_ELEMENT ( GST_ELEMENT_SCHED(object), GST_ELEMENT(object) );
if (oclass->set_arg)
(oclass->set_arg)(object,arg,id);
GST_SCHEDULE_UNLOCK_ELEMENT ( GST_ELEMENT_SCHED(object), GST_ELEMENT(object) );
}
static void
gst_element_get_arg (GtkObject *object, GtkArg *arg, guint id)
{
GstElementClass *oclass = GST_ELEMENT_CLASS (object->klass);
GST_SCHEDULE_LOCK_ELEMENT (GST_ELEMENT_SCHED(object), GST_ELEMENT(object) );
if (oclass->get_arg)
(oclass->get_arg)(object,arg,id);
GST_SCHEDULE_UNLOCK_ELEMENT (GST_ELEMENT_SCHED(object), GST_ELEMENT(object) );
}
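For readers unfamiliar with the GtkObject arg machinery, the shape of gst_element_set_arg() above is simply: take the element's lock, chain to the subclass handler, release. The standalone sketch below shows that shape with a plain pthread mutex standing in for the cothread lock; every name in it (element_t, example_set_arg, element_set_arg) is invented for illustration and nothing here is GStreamer API.

#include <pthread.h>
#include <stdio.h>

typedef struct element element_t;
struct element {
  pthread_mutex_t lock;                                 /* stands in for the cothread lock */
  void (*set_arg) (element_t *e, int arg, int value);   /* stands in for the class hook    */
  int value;
};

/* the "subclass" handler: only ever called with the lock already held */
static void
example_set_arg (element_t *e, int arg, int value)
{
  (void) arg;
  e->value = value;
}

/* the "base class" wrapper, shaped like gst_element_set_arg() above */
static void
element_set_arg (element_t *e, int arg, int value)
{
  pthread_mutex_lock (&e->lock);        /* the element cannot be running meanwhile */
  if (e->set_arg)
    e->set_arg (e, arg, value);
  pthread_mutex_unlock (&e->lock);
}

int
main (void)
{
  element_t e = { PTHREAD_MUTEX_INITIALIZER, example_set_arg, 0 };
  element_set_arg (&e, 0, 42);
  printf ("value = %d\n", e.value);
  return 0;
}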
/**
* gst_element_new:
*


@@ -167,6 +167,14 @@ struct _GstElementClass {
void (*error) (GstElement *element,gchar *error);
void (*eos) (GstElement *element);
/* local pointers for get/set */
void (*set_arg) (GtkObject *object,
GtkArg *arg,
guint arg_id);
void (*get_arg) (GtkObject *object,
GtkArg *arg,
guint arg_id);
/* change the element state */
GstElementStateReturn (*change_state) (GstElement *element);
/* request a new pad */


@@ -773,6 +773,18 @@ void gst_bin_schedule_func(GstBin *bin) {
}
*/
static void
gst_schedule_lock_element (GstSchedule *sched,GstElement *element)
{
cothread_lock(element->threadstate);
}
static void
gst_schedule_unlock_element (GstSchedule *sched,GstElement *element)
{
cothread_unlock(element->threadstate);
}
/*************** INCREMENTAL SCHEDULING CODE STARTS HERE ***************/
@@ -814,6 +826,8 @@ gst_schedule_init (GstSchedule *schedule)
schedule->remove_element = GST_DEBUG_FUNCPTR(gst_schedule_remove_element);
schedule->enable_element = GST_DEBUG_FUNCPTR(gst_schedule_enable_element);
schedule->disable_element = GST_DEBUG_FUNCPTR(gst_schedule_disable_element);
schedule->lock_element = GST_DEBUG_FUNCPTR(gst_schedule_lock_element);
schedule->unlock_element = GST_DEBUG_FUNCPTR(gst_schedule_unlock_element);
schedule->pad_connect = GST_DEBUG_FUNCPTR(gst_schedule_pad_connect);
schedule->pad_disconnect = GST_DEBUG_FUNCPTR(gst_schedule_pad_disconnect);
schedule->iterate = GST_DEBUG_FUNCPTR(gst_schedule_iterate);


@@ -64,6 +64,8 @@ struct _GstSchedule {
void (*remove_element) (GstSchedule *sched, GstElement *element);
void (*enable_element) (GstSchedule *sched, GstElement *element);
void (*disable_element) (GstSchedule *sched, GstElement *element);
void (*lock_element) (GstSchedule *sched, GstElement *element);
void (*unlock_element) (GstSchedule *sched, GstElement *element);
void (*pad_connect) (GstSchedule *sched, GstPad *srcpad, GstPad *sinkpad);
void (*pad_disconnect) (GstSchedule *sched, GstPad *srcpad, GstPad *sinkpad);
gboolean (*iterate) (GstSchedule *sched);
@@ -83,6 +85,14 @@ struct _GstScheduleClass {
GST_SCHEDULE_SAFETY ((sched)->enable_element((sched),(element)))
#define GST_SCHEDULE_DISABLE_ELEMENT(sched,element) \
GST_SCHEDULE_SAFETY ((sched)->disable_element((sched),(element)))
#define GST_SCHEDULE_LOCK_ELEMENT(sched,element) \
if ((sched)->lock_element != NULL) \
((sched)->lock_element((sched),(element)))
#define GST_SCHEDULE_UNLOCK_ELEMENT(sched,element) \
if ((sched)->unlock_element != NULL) \
((sched)->unlock_element((sched),(element)))
#define GST_SCHEDULE_PAD_CONNECT(sched,srcpad,sinkpad) \
GST_SCHEDULE_SAFETY ((sched)->pad_connect((sched),(srcpad),(sinkpad)))
#define GST_SCHEDULE_PAD_DISCONNECT(sched,srcpad,sinkpad) \