gstreamer/gst/gstbin.c


/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2000 Wim Taymans <wtay@chello.be>
*
* gstbin.c: GstBin container object and support code
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
//#define GST_DEBUG_ENABLED
#include "config.h"
#include "gst_private.h"
#include "gstbin.h"
#include "gstscheduler.h"
GstElementDetails gst_bin_details = {
"Generic bin",
"Bin",
"Simple container object",
VERSION,
"Erik Walthinsen <omega@cse.ogi.edu>",
"(C) 1999",
};
static void gst_bin_real_destroy (GtkObject *object);
static GstElementStateReturn gst_bin_change_state (GstElement *element);
static GstElementStateReturn gst_bin_change_state_norecurse (GstBin *bin);
static gboolean gst_bin_change_state_type (GstBin *bin,
GstElementState state,
GtkType type);
static void gst_bin_create_plan_func (GstBin *bin);
static gboolean gst_bin_iterate_func (GstBin *bin);
static xmlNodePtr gst_bin_save_thyself (GstObject *object, xmlNodePtr parent);
static void gst_bin_restore_thyself (GstObject *object, xmlNodePtr self);
/* Bin signals and args */
enum {
OBJECT_ADDED,
LAST_SIGNAL
};
enum {
ARG_0,
/* FILL ME */
};
static void gst_bin_class_init (GstBinClass *klass);
static void gst_bin_init (GstBin *bin);
static GstElementClass *parent_class = NULL;
static guint gst_bin_signals[LAST_SIGNAL] = { 0 };
GtkType
gst_bin_get_type (void)
{
static GtkType bin_type = 0;
if (!bin_type) {
static const GtkTypeInfo bin_info = {
"GstBin",
sizeof(GstBin),
sizeof(GstBinClass),
(GtkClassInitFunc)gst_bin_class_init,
(GtkObjectInitFunc)gst_bin_init,
(GtkArgSetFunc)NULL,
(GtkArgGetFunc)NULL,
(GtkClassInitFunc)NULL,
};
bin_type = gtk_type_unique (GST_TYPE_ELEMENT, &bin_info);
}
return bin_type;
}
static void
gst_bin_class_init (GstBinClass *klass)
{
GtkObjectClass *gtkobject_class;
GstObjectClass *gstobject_class;
GstElementClass *gstelement_class;
gtkobject_class = (GtkObjectClass*)klass;
gstobject_class = (GstObjectClass*)klass;
gstelement_class = (GstElementClass*)klass;
parent_class = gtk_type_class (GST_TYPE_ELEMENT);
gst_bin_signals[OBJECT_ADDED] =
gtk_signal_new ("object_added", GTK_RUN_FIRST, gtkobject_class->type,
GTK_SIGNAL_OFFSET (GstBinClass, object_added),
gtk_marshal_NONE__POINTER, GTK_TYPE_NONE, 1,
GST_TYPE_ELEMENT);
gtk_object_class_add_signals (gtkobject_class, gst_bin_signals, LAST_SIGNAL);
klass->change_state_type = gst_bin_change_state_type;
klass->create_plan = gst_bin_create_plan_func;
// klass->schedule = gst_bin_schedule_func;
klass->iterate = gst_bin_iterate_func;
gstobject_class->save_thyself = gst_bin_save_thyself;
gstobject_class->restore_thyself = gst_bin_restore_thyself;
gstelement_class->change_state = gst_bin_change_state;
gtkobject_class->destroy = gst_bin_real_destroy;
}
static void
gst_bin_init (GstBin *bin)
{
// in general, we prefer to use cothreads for most things
GST_FLAG_SET (bin, GST_BIN_FLAG_PREFER_COTHREADS);
bin->numchildren = 0;
bin->children = NULL;
bin->eos_providers = NULL;
bin->num_eos_providers = 0;
bin->chains = NULL;
bin->eoscond = g_cond_new ();
// FIXME temporary testing measure
// bin->use_cothreads = TRUE;
}
/**
* gst_bin_new:
* @name: name of new bin
*
* Create a new bin with given name.
*
* Returns: new bin
*/
GstElement*
gst_bin_new (const gchar *name)
{
return gst_elementfactory_make ("bin", name);
}
static inline void
gst_bin_reset_element_sched (GstElement *element, GstSchedule *sched)
{
GST_INFO_ELEMENT (GST_CAT_PARENTAGE, element, "resetting element's scheduler");
// first remove the element from its current schedule, if any
// if (GST_ELEMENT_SCHED(element))
// GST_SCHEDULE_REMOVE_ELEMENT (GST_ELEMENT_SCHED(element), element);
// then set the new manager
gst_element_set_sched (element,sched);
// and add it to the new scheduler
// if (sched)
// GST_SCHEDULE_ADD_ELEMENT (sched, element);
}
void
gst_bin_set_element_sched (GstElement *element,GstSchedule *sched)
{
GstSchedule *realsched = NULL;
GList *children;
GstElement *child;
g_return_if_fail (element != NULL);
g_return_if_fail (GST_IS_ELEMENT(element));
g_return_if_fail (sched != NULL);
g_return_if_fail (GST_IS_SCHEDULE(sched));
GST_INFO (GST_CAT_SCHEDULING, "setting element \"%s\" sched to %p",GST_ELEMENT_NAME(element),
sched);
// if it's actually a Bin
if (GST_IS_BIN(element)) {
// figure out which element is the manager
if (GST_FLAG_IS_SET(element,GST_BIN_FLAG_MANAGER)) {
realsched = GST_ELEMENT_SCHED(element);
GST_INFO_ELEMENT (GST_CAT_PARENTAGE, element, "setting children's schedule to own sched");
} else {
realsched = sched;
GST_INFO_ELEMENT (GST_CAT_PARENTAGE, element, "setting children's schedule to parent's");
GST_SCHEDULE_ADD_ELEMENT (sched, element);
}
// set the children's schedule
children = GST_BIN(element)->children;
while (children) {
child = GST_ELEMENT (children->data);
children = g_list_next(children);
gst_bin_set_element_sched (child, realsched);
}
// otherwise, if it's just a regular old element
} else {
//g_print("calling schedule_add_element (%p, \"%s\")\n",sched, GST_ELEMENT_NAME(element));
GST_SCHEDULE_ADD_ELEMENT (sched, element);
}
}
void
gst_bin_unset_element_sched (GstElement *element)
{
GList *children;
GstElement *child;
g_return_if_fail (element != NULL);
g_return_if_fail (GST_IS_ELEMENT(element));
// if it's actually a Bin
if (GST_IS_BIN(element)) {
// for each child, remove them from their schedule
children = GST_BIN(element)->children;
while (children) {
child = GST_ELEMENT (children->data);
children = g_list_next(children);
gst_bin_unset_element_sched (child);
}
// otherwise, if it's just a regular old element
} else {
if (GST_ELEMENT_SCHED (element))
GST_SCHEDULE_REMOVE_ELEMENT (GST_ELEMENT_SCHED(element), element);
}
}
/**
* gst_bin_add:
* @bin: #GstBin to add element to
* @element: #GstElement to add to bin
*
* Add the given element to the bin. Set the element's parent, and thus
* add a reference.
*/
void
gst_bin_add (GstBin *bin,
GstElement *element)
{
g_return_if_fail (bin != NULL);
g_return_if_fail (GST_IS_BIN (bin));
g_return_if_fail (element != NULL);
g_return_if_fail (GST_IS_ELEMENT (element));
GST_DEBUG_ENTER ("");
// must not be in PLAYING state in order to modify bin
// g_return_if_fail (GST_STATE (bin) != GST_STATE_PLAYING);
// the element must not already have a parent
g_return_if_fail (GST_ELEMENT_PARENT(element) == NULL);
// then check to see if the element's name is already taken in the bin
g_return_if_fail (gst_object_check_uniqueness (bin->children, GST_ELEMENT_NAME(element)) == TRUE);
// set the element's parent and add the element to the bin's list of children
gst_object_set_parent (GST_OBJECT (element), GST_OBJECT (bin));
bin->children = g_list_append (bin->children, element);
bin->numchildren++;
///// now we have to deal with manager stuff
// we can only do this if there's a scheduler:
// if we're not a manager, and aren't attached to anything, we have no sched (yet)
if (GST_ELEMENT_SCHED(bin) != NULL)
gst_bin_set_element_sched (element, GST_ELEMENT_SCHED(bin));
GST_INFO_ELEMENT (GST_CAT_PARENTAGE, bin, "added child \"%s\"", GST_ELEMENT_NAME (element));
gtk_signal_emit (GTK_OBJECT (bin), gst_bin_signals[OBJECT_ADDED], element);
}
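/* Example (sketch): typical use of gst_bin_new() and gst_bin_add(). The
 * factory name "fakesrc" is an assumption; any registered elementfactory
 * name can be used in its place.
 *
 *   GstElement *bin, *src;
 *
 *   bin = gst_bin_new ("example_bin");
 *   src = gst_elementfactory_make ("fakesrc", "source");
 *   if (src != NULL)
 *     gst_bin_add (GST_BIN (bin), src);
 */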
/**
* gst_bin_remove:
* @bin: #GstBin to remove element from
* @element: #GstElement to remove
*
* Remove the element from its associated bin, unparenting as well.
*/
void
gst_bin_remove (GstBin *bin,
GstElement *element)
{
g_return_if_fail (bin != NULL);
g_return_if_fail (GST_IS_BIN (bin));
g_return_if_fail (element != NULL);
g_return_if_fail (GST_IS_ELEMENT (element));
g_return_if_fail (bin->children != NULL);
// must not be in PLAYING state in order to modify bin
g_return_if_fail (GST_STATE (bin) != GST_STATE_PLAYING);
// the element must have its parent set to the current bin
g_return_if_fail (GST_ELEMENT_PARENT(element) == (GstElement *)bin);
// the element must be in the bin's list of children
if (g_list_find(bin->children, element) == NULL) {
// FIXME this should be a warning!!!
GST_ERROR_OBJECT(bin,element,"no such element in bin");
return;
}
// remove this element from the list of managed elements
gst_bin_unset_element_sched (element);
// now remove the element from the list of elements
gst_object_unparent (GST_OBJECT (element));
bin->children = g_list_remove (bin->children, element);
bin->numchildren--;
GST_INFO_ELEMENT (GST_CAT_PARENTAGE, bin, "removed child %s", GST_ELEMENT_NAME (element));
/* if we're down to zero children, force state to NULL */
if (bin->numchildren == 0)
gst_element_set_state (GST_ELEMENT (bin), GST_STATE_NULL);
}
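/* Example (sketch): removing a child added earlier (the hypothetical "src"
 * element from the gst_bin_add() sketch above). The bin must not be in
 * PLAYING state, and the element is unparented by the call.
 *
 *   gst_element_set_state (GST_ELEMENT (bin), GST_STATE_NULL);
 *   gst_bin_remove (GST_BIN (bin), src);
 */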
static GstElementStateReturn
gst_bin_change_state (GstElement *element)
{
GstBin *bin;
GList *children;
GstElement *child;
GstElementStateReturn ret;
GST_DEBUG_ENTER("(\"%s\")",GST_ELEMENT_NAME (element));
g_return_val_if_fail (GST_IS_BIN (element), GST_STATE_FAILURE);
bin = GST_BIN (element);
// GST_DEBUG (GST_CAT_STATES,"currently %d(%s), %d(%s) pending\n",GST_STATE (element),
// _gst_print_statename (GST_STATE (element)), GST_STATE_PENDING (element),
// _gst_print_statename (GST_STATE_PENDING (element)));
GST_INFO_ELEMENT (GST_CAT_STATES, element, "changing bin's state from %s to %s",
_gst_print_statename (GST_STATE (element)),
_gst_print_statename (GST_STATE_PENDING (element)));
// g_return_val_if_fail(bin->numchildren != 0, GST_STATE_FAILURE);
/*
switch (GST_STATE_TRANSITION (element)) {
case GST_STATE_NULL_TO_READY:
{
GstObject *parent;
parent = gst_object_get_parent (GST_OBJECT (element));
if (!parent || !GST_IS_BIN (parent))
gst_bin_create_plan (bin);
else
GST_DEBUG (GST_CAT_STATES,"not creating plan for '%s'\n",GST_ELEMENT_NAME (bin));
break;
}
case GST_STATE_READY_TO_NULL:
GST_FLAG_UNSET (bin, GST_BIN_FLAG_MANAGER);
default:
break;
}
*/
// g_print("-->\n");
children = bin->children;
while (children) {
child = GST_ELEMENT (children->data);
GST_DEBUG (GST_CAT_STATES,"setting state on '%s'\n",GST_ELEMENT_NAME (child));
switch (gst_element_set_state (child, GST_STATE_PENDING (element))) {
case GST_STATE_FAILURE:
GST_DEBUG (GST_CAT_STATES,"child '%s' failed to go to state %d(%s)\n", GST_ELEMENT_NAME (child),
GST_STATE_PENDING (element), _gst_print_statename (GST_STATE_PENDING (element)));
GST_STATE_PENDING (element) = GST_STATE_NONE_PENDING;
return GST_STATE_FAILURE;
break;
case GST_STATE_ASYNC:
GST_DEBUG (GST_CAT_STATES,"child '%s' is changing state asynchronously\n", GST_ELEMENT_NAME (child));
break;
}
// g_print("\n");
children = g_list_next (children);
}
// g_print("<-- \"%s\"\n",GST_OBJECT_NAME(bin));
GST_INFO_ELEMENT (GST_CAT_STATES, element, "done changing bin's state from %s to %s",
_gst_print_statename (GST_STATE (element)),
_gst_print_statename (GST_STATE_PENDING (element)));
return gst_bin_change_state_norecurse (bin);
}
static GstElementStateReturn
gst_bin_change_state_norecurse (GstBin *bin)
{
if (GST_ELEMENT_CLASS (parent_class)->change_state) {
GST_DEBUG(GST_CAT_STATES, "setting bin's own state\n");
return GST_ELEMENT_CLASS (parent_class)->change_state (GST_ELEMENT (bin));
} else
return GST_STATE_FAILURE;
}
static gboolean
gst_bin_change_state_type(GstBin *bin,
GstElementState state,
GtkType type)
{
GList *children;
GstElement *child;
// g_print("gst_bin_change_state_type(\"%s\",%d,%d);\n",
// GST_OBJECT_NAME(bin))),state,type);
g_return_val_if_fail (GST_IS_BIN (bin), FALSE);
g_return_val_if_fail (bin->numchildren != 0, FALSE);
// g_print("-->\n");
children = bin->children;
while (children) {
child = GST_ELEMENT (children->data);
if (GST_IS_BIN (child)) {
if (!gst_bin_set_state_type (GST_BIN (child), state,type))
return FALSE;
} else if (GTK_CHECK_TYPE (child,type)) {
if (!gst_element_set_state (child,state))
return FALSE;
}
// g_print("\n");
children = g_list_next (children);
}
if (type == GST_TYPE_BIN)
gst_element_set_state (GST_ELEMENT (bin),state);
return TRUE;
}
/**
* gst_bin_set_state_type:
* @bin: #GstBin to set the state
* @state: the new state to set the elements to
* @type: the type of elements to change
*
* Sets the state of only those objects of the given type.
*
* Returns: indication of whether the state change was successful
*/
gboolean
gst_bin_set_state_type (GstBin *bin,
GstElementState state,
GtkType type)
{
GstBinClass *oclass;
GST_DEBUG (GST_CAT_STATES,"gst_bin_set_state_type(\"%s\",%d,%d)\n",
GST_ELEMENT_NAME (bin), state,type);
g_return_val_if_fail (bin != NULL, FALSE);
g_return_val_if_fail (GST_IS_BIN (bin), FALSE);
oclass = GST_BIN_CLASS (GTK_OBJECT (bin)->klass);
if (oclass->change_state_type)
(oclass->change_state_type) (bin,state,type);
return TRUE;
}
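/* Example (sketch): changing the state of only a particular class of
 * children. GST_TYPE_THREAD and GST_STATE_PAUSED are assumptions here; any
 * GtkType/GstElementState pair can be passed.
 *
 *   gst_bin_set_state_type (GST_BIN (bin), GST_STATE_PAUSED, GST_TYPE_THREAD);
 */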
static void
gst_bin_real_destroy (GtkObject *object)
{
GstBin *bin = GST_BIN (object);
GList *children;
GstElement *child;
GST_DEBUG (0,"in gst_bin_real_destroy()\n");
children = bin->children;
while (children) {
child = GST_ELEMENT (children->data);
gst_element_destroy (child);
children = g_list_next (children);
}
g_list_free (bin->children);
}
/**
* gst_bin_get_by_name:
* @bin: #GstBin to search
* @name: the element name to search for
*
* Get the element with the given name from this bin.
*
* Returns: the element with the given name
*/
GstElement*
gst_bin_get_by_name (GstBin *bin,
const gchar *name)
{
GList *children;
GstElement *child;
g_return_val_if_fail (bin != NULL, NULL);
g_return_val_if_fail (GST_IS_BIN (bin), NULL);
g_return_val_if_fail (name != NULL, NULL);
GST_INFO_ELEMENT (GST_CAT_PARENTAGE, bin, "looking up child element %s", name);
children = bin->children;
while (children) {
child = GST_ELEMENT (children->data);
if (!strcmp (GST_OBJECT_NAME(child),name))
return child;
if (GST_IS_BIN (child)) {
GstElement *res = gst_bin_get_by_name (GST_BIN (child), name);
if (res)
return res;
}
children = g_list_next (children);
}
return NULL;
}
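/* Example (sketch): looking up a child by name; the search recurses into
 * sub-bins. "source" is the hypothetical element name used in the
 * gst_bin_add() sketch above.
 *
 *   GstElement *found;
 *
 *   found = gst_bin_get_by_name (GST_BIN (bin), "source");
 *   if (found == NULL)
 *     g_print ("no element named \"source\" in this bin\n");
 */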
/**
* gst_bin_get_by_name_recurse_up:
* @bin: #GstBin to search
* @name: the element name to search for
*
* Get the element with the given name from this bin. If the
* element is not found, a recursion is performed on the parent bin.
*
* Returns: the element with the given name
*/
GstElement*
gst_bin_get_by_name_recurse_up (GstBin *bin,
const gchar *name)
{
GstElement *result = NULL;
GstObject *parent;
g_return_val_if_fail (bin != NULL, NULL);
g_return_val_if_fail (GST_IS_BIN (bin), NULL);
g_return_val_if_fail (name != NULL, NULL);
result = gst_bin_get_by_name (bin, name);
if (result)
return result;
parent = gst_object_get_parent (GST_OBJECT (bin));
if (parent && GST_IS_BIN (parent)) {
result = gst_bin_get_by_name_recurse_up (GST_BIN (parent), name);
}
return result;
}
/**
* gst_bin_get_list:
* @bin: #GstBin to get the list from
*
* Get the list of elements in this bin.
*
* Returns: a GList of elements
*/
GList*
gst_bin_get_list (GstBin *bin)
{
g_return_val_if_fail (bin != NULL, NULL);
g_return_val_if_fail (GST_IS_BIN (bin), NULL);
return bin->children;
}
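/* Example (sketch): walking the children of a bin. gst_bin_get_list()
 * returns the bin's internal list directly, so the caller should not free
 * or modify it.
 *
 *   GList *walk = gst_bin_get_list (GST_BIN (bin));
 *
 *   while (walk) {
 *     g_print ("child: %s\n", GST_ELEMENT_NAME (GST_ELEMENT (walk->data)));
 *     walk = g_list_next (walk);
 *   }
 */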
static xmlNodePtr
gst_bin_save_thyself (GstObject *object,
xmlNodePtr parent)
{
GstBin *bin = GST_BIN (object);
xmlNodePtr childlist, elementnode;
GList *children;
GstElement *child;
if (GST_OBJECT_CLASS (parent_class)->save_thyself)
GST_OBJECT_CLASS (parent_class)->save_thyself (GST_OBJECT (bin), parent);
childlist = xmlNewChild (parent, NULL, "children", NULL);
GST_INFO_ELEMENT (GST_CAT_XML, bin, "saving %d children", bin->numchildren);
children = bin->children;
while (children) {
child = GST_ELEMENT (children->data);
elementnode = xmlNewChild (childlist, NULL, "element", NULL);
gst_object_save_thyself (GST_OBJECT (child), elementnode);
children = g_list_next (children);
}
return childlist;
}
static void
gst_bin_restore_thyself (GstObject *object,
xmlNodePtr self)
{
GstBin *bin = GST_BIN (object);
xmlNodePtr field = self->xmlChildrenNode;
xmlNodePtr childlist;
while (field) {
if (!strcmp (field->name, "children")) {
GST_INFO_ELEMENT (GST_CAT_XML, GST_ELEMENT (object), "loading children");
childlist = field->xmlChildrenNode;
while (childlist) {
if (!strcmp (childlist->name, "element")) {
GstElement *element = gst_element_load_thyself (childlist, GST_OBJECT (bin));
gst_bin_add (bin, element);
}
childlist = childlist->next;
}
}
field = field->next;
}
}
void
gst_bin_use_cothreads (GstBin *bin,
gboolean enabled)
{
g_return_if_fail (GST_IS_BIN (bin));
bin->use_cothreads = enabled;
}
/**
* gst_bin_iterate:
* @bin: #GstBin to iterate
*
* Iterates over the elements in this bin.
*
* Returns: TRUE if the bin did something useful. This value
* can be used to determine whether the bin has reached EOS.
*/
gboolean
gst_bin_iterate (GstBin *bin)
{
GstBinClass *oclass;
gboolean eos = TRUE;
GST_DEBUG_ENTER("(\"%s\")",GST_ELEMENT_NAME (bin));
oclass = GST_BIN_CLASS (GTK_OBJECT (bin)->klass);
if (oclass->iterate)
eos = (oclass->iterate) (bin);
GST_DEBUG_LEAVE("(\"%s\")",GST_ELEMENT_NAME (bin));
return eos;
}
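/* Example (sketch): a minimal iteration loop, run after elements have been
 * added and connected. Per gst_bin_iterate_func() below, only the managing
 * bin (a pipeline or thread, not a plain bin) can usefully be iterated;
 * the "pipeline" factory name is an assumption.
 *
 *   GstElement *pipeline = gst_elementfactory_make ("pipeline", "example");
 *
 *   gst_element_set_state (pipeline, GST_STATE_PLAYING);
 *   while (gst_bin_iterate (GST_BIN (pipeline)));
 *   gst_element_set_state (pipeline, GST_STATE_NULL);
 */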
/**
* gst_bin_create_plan:
* @bin: #GstBin to create the plan for
*
* Let the bin figure out how to handle its children.
*/
void
gst_bin_create_plan (GstBin *bin)
{
GstBinClass *oclass;
oclass = GST_BIN_CLASS (GTK_OBJECT (bin)->klass);
if (oclass->create_plan)
(oclass->create_plan) (bin);
}
/* our internal element fired EOS; decrement the number of pending EOS children */
static void
gst_bin_received_eos (GstElement *element, GstBin *bin)
{
GST_INFO_ELEMENT (GST_CAT_PLANNING, bin, "child %s fired eos, pending %d", GST_ELEMENT_NAME (element),
bin->num_eos_providers);
GST_LOCK (bin);
if (bin->num_eos_providers) {
bin->num_eos_providers--;
g_cond_signal (bin->eoscond);
}
GST_UNLOCK (bin);
}
/**
* gst_bin_schedule:
* @bin: #GstBin to schedule
*
* Let the bin figure out how to schedule its children.
*/
void
gst_bin_schedule (GstBin *bin)
{
GstBinClass *oclass;
oclass = GST_BIN_CLASS (GTK_OBJECT (bin)->klass);
if (oclass->schedule)
(oclass->schedule) (bin);
}
typedef struct {
gulong offset;
gulong size;
} region_struct;
/* seriously deprecated!!! */
static void
gst_bin_create_plan_func (GstBin *bin)
{
/*
GstElement *manager;
GList *elements;
GstElement *element;
#ifdef GST_DEBUG_ENABLED
const gchar *elementname;
#endif
GSList *pending = NULL;
GstBin *pending_bin;
*/
GST_DEBUG_ENTER("(\"%s\")",GST_ELEMENT_NAME (bin));
GST_INFO_ELEMENT (GST_CAT_PLANNING, bin, "creating plan");
/*
// first figure out which element is the manager of this and all child elements
// if we're a managing bin ourselves, that'd be us
if (GST_FLAG_IS_SET (bin, GST_BIN_FLAG_MANAGER)) {
manager = GST_ELEMENT (bin);
GST_DEBUG (0,"setting manager to self\n");
// otherwise, it's what our parent says it is
} else {
manager = gst_element_get_manager (GST_ELEMENT (bin));
if (!manager) {
GST_DEBUG (0,"manager not set for element \"%s\" assuming manager is self\n", GST_ELEMENT_NAME (bin));
manager = GST_ELEMENT (bin);
GST_FLAG_SET (bin, GST_BIN_FLAG_MANAGER);
}
GST_DEBUG (0,"setting manager to \"%s\"\n", GST_ELEMENT_NAME (manager));
}
gst_element_set_manager (GST_ELEMENT (bin), manager);
*/
/*
// perform the first recursive pass of plan generation
// we set the manager of every element but those who manage themselves
// the need for cothreads is also determined recursively
GST_DEBUG (0,"performing first-phase recursion\n");
bin->need_cothreads = bin->use_cothreads;
if (bin->need_cothreads)
GST_DEBUG (0,"requiring cothreads because we're forced to\n");
elements = bin->children;
while (elements) {
element = GST_ELEMENT (elements->data);
elements = g_list_next (elements);
#ifdef GST_DEBUG_ENABLED
elementname = GST_ELEMENT_NAME (element);
#endif
GST_DEBUG (0,"have element \"%s\"\n",elementname);
// // first set their manager
// GST_DEBUG (0,"setting manager of \"%s\" to \"%s\"\n",elementname,GST_ELEMENT_NAME (manager));
// gst_element_set_manager (element, manager);
// we do recursion and such for Bins
if (GST_IS_BIN (element)) {
// recurse into the child Bin
GST_DEBUG (0,"recursing into child Bin \"%s\" with manager \"%s\"\n",elementname,
GST_ELEMENT_NAME (element->manager));
gst_bin_create_plan (GST_BIN (element));
GST_DEBUG (0,"after recurse got manager \"%s\"\n",
GST_ELEMENT_NAME (element->manager));
// check to see if it needs cothreads and isn't self-managing
if (((GST_BIN (element))->need_cothreads) && !GST_FLAG_IS_SET(element,GST_BIN_FLAG_MANAGER)) {
GST_DEBUG (0,"requiring cothreads because child bin \"%s\" does\n",elementname);
bin->need_cothreads = TRUE;
}
} else {
// then we need to determine whether they need cothreads
// if it's a loop-based element, use cothreads
if (element->loopfunc != NULL) {
GST_DEBUG (0,"requiring cothreads because \"%s\" is a loop-based element\n",elementname);
GST_FLAG_SET (element, GST_ELEMENT_USE_COTHREAD);
// if it's a 'complex' element, use cothreads
} else if (GST_FLAG_IS_SET (element, GST_ELEMENT_COMPLEX)) {
GST_DEBUG (0,"requiring cothreads because \"%s\" is complex\n",elementname);
GST_FLAG_SET (element, GST_ELEMENT_USE_COTHREAD);
// if the element has more than one sink pad, use cothreads
} else if (element->numsinkpads > 1) {
GST_DEBUG (0,"requiring cothreads because \"%s\" has more than one sink pad\n",elementname);
GST_FLAG_SET (element, GST_ELEMENT_USE_COTHREAD);
}
if (GST_FLAG_IS_SET (element, GST_ELEMENT_USE_COTHREAD))
bin->need_cothreads = TRUE;
}
}
// if we're not a manager thread, we're done.
if (!GST_FLAG_IS_SET (bin, GST_BIN_FLAG_MANAGER)) {
GST_DEBUG_LEAVE("(\"%s\")",GST_ELEMENT_NAME (bin));
return;
}
*/
/*
// clear previous plan state
g_list_free (bin->managed_elements);
bin->managed_elements = NULL;
bin->num_managed_elements = 0;
// find all the managed children
// here we pull off the trick of walking an entire arbitrary tree without recursion
GST_DEBUG (0,"attempting to find all the elements to manage\n");
pending = g_slist_prepend (pending, bin);
do {
// retrieve the top of the stack and pop it
pending_bin = GST_BIN (pending->data);
pending = g_slist_remove (pending, pending_bin);
// walk the list of elements, find bins, and do stuff
GST_DEBUG (0,"checking Bin \"%s\" for managed elements\n",
GST_ELEMENT_NAME (pending_bin));
elements = pending_bin->children;
while (elements) {
element = GST_ELEMENT (elements->data);
elements = g_list_next (elements);
#ifdef GST_DEBUG_ENABLED
elementname = GST_ELEMENT_NAME (element);
#endif
// if it's ours, add it to the list
if (element->manager == GST_ELEMENT(bin)) {
// if it's a Bin, add it to the list of Bins to check
if (GST_IS_BIN (element)) {
GST_DEBUG (0,"flattened recurse into \"%s\"\n",elementname);
pending = g_slist_prepend (pending, element);
// otherwise add it to the list of elements
} else {
GST_DEBUG (0,"found element \"%s\" that I manage\n",elementname);
bin->managed_elements = g_list_prepend (bin->managed_elements, element);
bin->num_managed_elements++;
}
}
// else it's not ours and we need to wait for EOS notifications
else {
GST_DEBUG (0,"setting up EOS signal from \"%s\" to \"%s\"\n", elementname,
gst_element_get_name (GST_ELEMENT(bin)->manager));
gtk_signal_connect (GTK_OBJECT (element), "eos", gst_bin_received_eos, GST_ELEMENT(bin)->manager);
bin->eos_providers = g_list_prepend (bin->eos_providers, element);
bin->num_eos_providers++;
}
}
} while (pending);
*/
/*
GST_DEBUG (0,"have %d elements to manage, implementing plan\n",bin->num_managed_elements);
gst_bin_schedule(bin);
GST_DEBUG (0, "gstbin \"%s\", eos providers:%d\n",
GST_ELEMENT_NAME (bin),
bin->num_eos_providers);
*/
GST_DEBUG_LEAVE("(\"%s\")",GST_ELEMENT_NAME (bin));
}
static gboolean
gst_bin_iterate_func (GstBin *bin)
{
// only iterate if this is the manager bin
if (GST_ELEMENT_SCHED(bin)->parent == bin) {
return GST_SCHEDULE_ITERATE(GST_ELEMENT_SCHED(bin));
} else {
GST_DEBUG (GST_CAT_SCHEDULING, "this bin can't be iterated on!\n");
}
return FALSE;
}
/*
{
GList *chains;
_GstBinChain *chain;
GList *entries;
GstElement *entry;
GList *pads;
GstPad *pad;
GstBuffer *buf = NULL;
gint num_scheduled = 0;
gboolean eos = FALSE;
GST_DEBUG_ENTER("(\"%s\")", GST_ELEMENT_NAME (bin));
g_return_val_if_fail (bin != NULL, TRUE);
g_return_val_if_fail (GST_IS_BIN (bin), TRUE);
g_return_val_if_fail (GST_STATE (bin) == GST_STATE_PLAYING, TRUE);
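  // the early returns above report TRUE so a misused bin doesn't look like EOS;
  // the real result is computed at the bottom as !eos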
// step through all the chains
chains = bin->chains;
while (chains) {
chain = (_GstBinChain *)(chains->data);
chains = g_list_next (chains);
if (!chain->need_scheduling) continue;
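    // each chain is driven in one of two ways: chains containing loop-based
    // elements need cothreads and are kicked off by switching into one of them,
    // while purely chain/get-based chains are driven directly from here by
    // pulling from the entry elements' source pads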
if (chain->need_cothreads) {
GList *entries;
// all we really have to do is switch to the first child
// FIXME this should be lots more intelligent about where to start
GST_DEBUG (0,"starting iteration via cothreads\n");
entries = chain->elements;
entry = NULL;
// find an element with a threadstate to start with
while (entries) {
entry = GST_ELEMENT (entries->data);
if (entry->threadstate)
break;
entries = g_list_next (entries);
}
      // if we couldn't find one, report it and bail out of this chain
      if (entries == NULL) {
        GST_ERROR (GST_ELEMENT (bin), "no cothreaded elements found!");
        continue;
      }
GST_FLAG_SET (entry, GST_ELEMENT_COTHREAD_STOPPING);
GST_DEBUG (0,"set COTHREAD_STOPPING flag on \"%s\"(@%p)\n",
GST_ELEMENT_NAME (entry),entry);
cothread_switch (entry->threadstate);
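      // once the element honours COTHREAD_STOPPING and its wrapper returns,
      // control falls back to this context and we move on to the next chain.
      // Illustrative only (modelled on plugins/cobin/spindentity.c): the loop an
      // element runs inside its cothread is expected to look roughly like
      //
      //   do {
      //     buf = gst_pad_pull (element->sinkpad);
      //     gst_pad_push (element->srcpad, buf);
      //   } while (!GST_ELEMENT_IS_COTHREAD_STOPPING (element));
      //
      // so that the flag set above makes it return promptly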
} else {
GST_DEBUG (0,"starting iteration via chain-functions\n");
entries = chain->entries;
g_assert (entries != NULL);
while (entries) {
entry = GST_ELEMENT (entries->data);
entries = g_list_next (entries);
GST_DEBUG (0,"have entry \"%s\"\n",GST_ELEMENT_NAME (entry));
if (GST_IS_BIN (entry)) {
gst_bin_iterate (GST_BIN (entry));
} else {
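          // a non-bin entry is driven by hand: call the getfunc of each of its
          // source pads and push whatever buffer comes out to the peer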
pads = entry->pads;
while (pads) {
pad = GST_PAD (pads->data);
if (GST_RPAD_DIRECTION(pad) == GST_PAD_SRC) {
GST_DEBUG (0,"calling getfunc of %s:%s\n",GST_DEBUG_PAD_NAME(pad));
if (GST_REAL_PAD(pad)->getfunc == NULL)
fprintf(stderr, "error, no getfunc in \"%s\"\n", GST_ELEMENT_NAME (entry));
else
buf = (GST_REAL_PAD(pad)->getfunc)(pad);
              if (buf) {
                gst_pad_push (pad, buf);
                buf = NULL;	// don't re-push a stale buffer on a later pad
              }
}
pads = g_list_next (pads);
}
}
}
}
num_scheduled++;
}
  // check whether nothing belonging to this bin got scheduled this time around
if (!num_scheduled) {
// are there any other elements that are still busy?
if (bin->num_eos_providers) {
GST_LOCK (bin);
GST_DEBUG (0,"waiting for eos providers\n");
g_cond_wait (bin->eoscond, GST_GET_LOCK(bin));
GST_DEBUG (0,"num eos providers %d\n", bin->num_eos_providers);
GST_UNLOCK (bin);
}
else {
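      // nothing was scheduled and nobody else can still produce data,
      // so the bin as a whole is at EOS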
gst_element_signal_eos (GST_ELEMENT (bin));
eos = TRUE;
}
}
GST_DEBUG_LEAVE("(%s)", GST_ELEMENT_NAME (bin));
return !eos;
}
*/