/* Gnome-Streamer
 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#include <gstasyncdisksrc.h>

GstElementDetails gst_asyncdisksrc_details = {
  "Asynchronous Disk Source",
  "Source/File",
  "Read from arbitrary point in a file",
  VERSION,
  "Erik Walthinsen <omega@cse.ogi.edu>",
  "(C) 1999",
};


/* AsyncDiskSrc signals and args */
enum {
  /* FILL ME */
  LAST_SIGNAL
};

enum {
  ARG_0,
  ARG_LOCATION,
  ARG_BYTESPERREAD,
  ARG_OFFSET,
  ARG_SIZE,
};

static void gst_asyncdisksrc_class_init (GstAsyncDiskSrcClass *klass);
static void gst_asyncdisksrc_init (GstAsyncDiskSrc *asyncdisksrc);

static void gst_asyncdisksrc_set_arg (GtkObject *object, GtkArg *arg, guint id);
static void gst_asyncdisksrc_get_arg (GtkObject *object, GtkArg *arg, guint id);

static GstBuffer * gst_asyncdisksrc_get (GstPad *pad);
static GstBuffer * gst_asyncdisksrc_get_region (GstPad *pad, gulong offset, gulong size);

static GstElementStateReturn gst_asyncdisksrc_change_state (GstElement *element);


static GstSrcClass *parent_class = NULL;
//static guint gst_asyncdisksrc_signals[LAST_SIGNAL] = { 0 };

GtkType
gst_asyncdisksrc_get_type(void)
{
  static GtkType asyncdisksrc_type = 0;

  if (!asyncdisksrc_type) {
    static const GtkTypeInfo asyncdisksrc_info = {
      "GstAsyncDiskSrc",
      sizeof(GstAsyncDiskSrc),
      sizeof(GstAsyncDiskSrcClass),
      (GtkClassInitFunc)gst_asyncdisksrc_class_init,
      (GtkObjectInitFunc)gst_asyncdisksrc_init,
      (GtkArgSetFunc)gst_asyncdisksrc_set_arg,
      (GtkArgGetFunc)gst_asyncdisksrc_get_arg,
      (GtkClassInitFunc)NULL,
    };
    asyncdisksrc_type = gtk_type_unique (GST_TYPE_SRC, &asyncdisksrc_info);
  }
  return asyncdisksrc_type;
}

static void
gst_asyncdisksrc_class_init (GstAsyncDiskSrcClass *klass)
{
  GtkObjectClass *gtkobject_class;
  GstElementClass *gstelement_class;
  GstSrcClass *gstsrc_class;

  gtkobject_class = (GtkObjectClass*)klass;
  gstelement_class = (GstElementClass*)klass;
  gstsrc_class = (GstSrcClass*)klass;

  parent_class = gtk_type_class (GST_TYPE_SRC);

  gtk_object_add_arg_type ("GstAsyncDiskSrc::location", GST_TYPE_FILENAME,
                           GTK_ARG_READWRITE, ARG_LOCATION);
  gtk_object_add_arg_type ("GstAsyncDiskSrc::bytesperread", GTK_TYPE_INT,
                           GTK_ARG_READWRITE, ARG_BYTESPERREAD);
  gtk_object_add_arg_type ("GstAsyncDiskSrc::offset", GTK_TYPE_LONG,
                           GTK_ARG_READWRITE, ARG_OFFSET);
  gtk_object_add_arg_type ("GstAsyncDiskSrc::size", GTK_TYPE_LONG,
                           GTK_ARG_READABLE, ARG_SIZE);

  gtkobject_class->set_arg = gst_asyncdisksrc_set_arg;
  gtkobject_class->get_arg = gst_asyncdisksrc_get_arg;

  gstelement_class->change_state = gst_asyncdisksrc_change_state;
}
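
/* Illustrative sketch, not part of this element: with the GtkObject argument
 * types registered above, an application would configure an asyncdisksrc
 * roughly like this.  The factory name "asyncdisksrc" and the file path are
 * assumptions for illustration only.
 *
 *   GstElement *src;
 *
 *   src = gst_elementfactory_make ("asyncdisksrc", "disk_source");
 *   gtk_object_set (GTK_OBJECT (src),
 *                   "location", "/tmp/some.file",
 *                   "bytesperread", 4096,
 *                   NULL);
 */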

static void
gst_asyncdisksrc_init (GstAsyncDiskSrc *asyncdisksrc)
{
  GST_FLAG_SET (asyncdisksrc, GST_SRC_ASYNC);

  g_print("init\n");

  asyncdisksrc->srcpad = gst_pad_new ("src", GST_PAD_SRC);
  gst_pad_set_get_function (asyncdisksrc->srcpad,gst_asyncdisksrc_get);
  gst_pad_set_getregion_function (asyncdisksrc->srcpad,gst_asyncdisksrc_get_region);
  gst_element_add_pad (GST_ELEMENT (asyncdisksrc), asyncdisksrc->srcpad);

  asyncdisksrc->filename = NULL;
  asyncdisksrc->fd = 0;
  asyncdisksrc->size = 0;
  asyncdisksrc->map = NULL;
  asyncdisksrc->curoffset = 0;
  asyncdisksrc->bytes_per_read = 4096;
  asyncdisksrc->seq = 0;
  asyncdisksrc->new_seek = FALSE;
}

static void
gst_asyncdisksrc_set_arg (GtkObject *object, GtkArg *arg, guint id)
{
  GstAsyncDiskSrc *src;

  /* it's not null if we got it, but it might not be ours */
  g_return_if_fail (GST_IS_ASYNCDISKSRC (object));

  src = GST_ASYNCDISKSRC (object);

  switch (id) {
    case ARG_LOCATION:
      /* the element must be stopped in order to do this */
      g_return_if_fail (GST_STATE (src) < GST_STATE_PLAYING);

      if (src->filename) g_free (src->filename);
      /* clear the filename if we get a NULL (is that possible?) */
      if (GTK_VALUE_STRING (*arg) == NULL) {
        gst_element_set_state (GST_ELEMENT (object), GST_STATE_NULL);
        src->filename = NULL;
      /* otherwise set the new filename */
      } else {
        src->filename = g_strdup (GTK_VALUE_STRING (*arg));
      }
      break;
    case ARG_BYTESPERREAD:
      src->bytes_per_read = GTK_VALUE_INT (*arg);
      break;
    case ARG_OFFSET:
      src->curoffset = GTK_VALUE_LONG (*arg);
      src->new_seek = TRUE;
      break;
    default:
      break;
  }
}

static void
gst_asyncdisksrc_get_arg (GtkObject *object, GtkArg *arg, guint id)
{
  GstAsyncDiskSrc *src;

  /* it's not null if we got it, but it might not be ours */
  g_return_if_fail (GST_IS_ASYNCDISKSRC (object));

  src = GST_ASYNCDISKSRC (object);

  switch (id) {
    case ARG_LOCATION:
      GTK_VALUE_STRING (*arg) = src->filename;
      break;
    case ARG_BYTESPERREAD:
      GTK_VALUE_INT (*arg) = src->bytes_per_read;
      break;
    case ARG_OFFSET:
      GTK_VALUE_LONG (*arg) = src->curoffset;
      break;
    case ARG_SIZE:
      GTK_VALUE_LONG (*arg) = src->size;
      break;
    default:
      arg->type = GTK_TYPE_INVALID;
      break;
  }
}

/**
* gst_asyncdisksrc_get:
* @pad: #GstPad to get a buffer from
*
* Get a new buffer from the asyncdisksrc at the current offset.
*/
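
/* Usage sketch (an assumption for illustration, not code from this file):
 * a peer element does not call gst_asyncdisksrc_get() directly; it pulls
 * from its own sink pad, and the pad machinery should end up invoking the
 * get function installed in gst_asyncdisksrc_init(), roughly:
 *
 *   GstBuffer *buf;
 *
 *   buf = gst_pad_pull (sinkpad);   // "sinkpad" is the assumed peer pad
 *   ...
 *   gst_buffer_unref (buf);
 */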
static GstBuffer *
gst_asyncdisksrc_get (GstPad *pad)
{
  GstAsyncDiskSrc *src;
  GstBuffer *buf;

  g_return_val_if_fail (pad != NULL, NULL);
  src = GST_ASYNCDISKSRC (gst_pad_get_parent(pad));
  g_return_val_if_fail (GST_FLAG_IS_SET (src, GST_ASYNCDISKSRC_OPEN), NULL);

  /* deal with EOF state */
  if (src->curoffset >= src->size) {
    gst_src_signal_eos (GST_SRC (src));
    return NULL;
  }

  /* create the buffer */
  // FIXME: should eventually use a bufferpool for this
  buf = gst_buffer_new ();
  g_return_val_if_fail (buf != NULL, NULL);

  /* simply set the buffer to point to the correct region of the file */
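  /* Descriptive note: src->map points at the element's in-memory mapping of
   * the file (presumably set up elsewhere via mmap(2), given the
   * <sys/mman.h> include), so no data is copied here; the DONTFREE flag set
   * below keeps the buffer code from freeing memory the element still owns. */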
  GST_BUFFER_DATA (buf) = src->map + src->curoffset;
  GST_BUFFER_OFFSET (buf) = src->curoffset;
  GST_BUFFER_FLAG_SET (buf, GST_BUFFER_DONTFREE);

  if ((src->curoffset + src->bytes_per_read) > src->size) {
    GST_BUFFER_SIZE (buf) = src->size - src->curoffset;
    // FIXME: set the buffer's EOF bit here
  } else
    GST_BUFFER_SIZE (buf) = src->bytes_per_read;

src->curoffset += GST_BUFFER_SIZE (buf);
|
2000-11-06 00:15:51 +00:00
|
|
|
|
2000-12-04 10:52:30 +00:00
|
|
|
if (src->new_seek) {
|
2000-11-06 00:15:51 +00:00
|
|
|
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLUSH);
|
2000-12-04 10:52:30 +00:00
|
|
|
src->new_seek = FALSE;
|
2000-11-06 00:15:51 +00:00
|
|
|
}
|
2000-04-09 21:36:56 +00:00
|
|
|
|
2000-12-20 09:39:43 +00:00
|
|
|
/* we're done, return the buffer */
|
|
|
|
return buf;
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
WARNING: Don't grab this update unless you're really, REALLY sure.
Original commit message from CVS:
WARNING: Don't grab this update unless you're really, REALLY sure.
WARNING: Wait for the next one.
Whole lotta changes here, including a few random bits:
examples/*/Makefile: updated to use `libtool gcc`, not just `gcc`
gst/
gstbuffer.h: updated to new flag style
gst.c, gstdebug.h: added new debugging for function ptrs
gstpipeline.c: set type of parent_class to the class, not the object
gstthread.c: ditto
plugins/
cdparanoia/cdparanoia.c: added an argument type, updated some defaults
cobin/spindentity.c: updated to new do/while loopfunction style
mp3encode/lame/gstlame.c: argument types, whole lotta lame options
tests/: various changes
Now, for the big changes: Once again, the scheduling system has changed.
And once again, it broke a whole bunch of things. The gist of the change
is that there is now a function pointer for gst_pad_push and gst_pad_pull,
instead of a hard-wired function. Well, currently they are functions, but
that's for debugging purposes only, they just call the function pointer
after spewing lots of DEBUG().
This changed the GstPad structure a bit, and the GstPad API as well.
Where elements used to provide chain() and pull() functions, they provide
chain() and get() functions. gst_pad_set_pull[region]_function has been
changed to gst_pad_set_get[region]_function. This means all the elements
out there that used to have pull functions need to be updated. The calls
to that function have been changed in the normal elements, but the names
of the functions passed are still _pull[region](), which is an aesthetic
issue more than anything.
As for what doesn't work yet, just about anything dealing with Connections
is hosed, meaning threaded stuff won't work. This will be fixed about 12
hours from now, after I've slept, etc. The simplefake.c test works in
both cothreaded and chained cases, but not much else will work due to the
Connection problem. Needless to say, don't grab this unless you *need*
these features *now*, else wait to update this stuff until tomorrow.
I'm going to sleep now.
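As a hedged illustration of that rename (assuming the new setters are spelled gst_pad_set_get_function and gst_pad_set_getregion_function as the message implies, and that this element keeps its source pad in a srcpad member): a source now attaches its get-style functions to the pad at init time rather than exporting a push() on the element.

  /* roughly what gst_asyncdisksrc_init() would now do */
  gst_pad_set_get_function (asyncdisksrc->srcpad, gst_asyncdisksrc_get);
  gst_pad_set_getregion_function (asyncdisksrc->srcpad, gst_asyncdisksrc_get_region);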
2000-12-16 10:18:09 +00:00
|
|
|
* gst_asyncdisksrc_get_region:
|
2000-01-30 09:03:00 +00:00
|
|
|
* @pad: #GstPad to get the buffer from
|
|
|
|
* @offset: offset in file
|
|
|
|
* @size: number of bytes
|
|
|
|
*
|
|
|
|
* Get a new buffer from the asyncdisksrc of the given size at the given offset.
|
|
|
|
*/
|
2000-12-20 09:39:43 +00:00
|
|
|
static GstBuffer *
|
2000-12-16 10:18:09 +00:00
|
|
|
gst_asyncdisksrc_get_region (GstPad *pad, gulong offset, gulong size)
|
2000-11-06 00:15:51 +00:00
|
|
|
{
|
2000-12-04 10:52:30 +00:00
|
|
|
GstAsyncDiskSrc *src;
|
2000-01-30 09:03:00 +00:00
|
|
|
GstBuffer *buf;
|
|
|
|
|
2000-12-20 09:39:43 +00:00
|
|
|
g_return_val_if_fail (pad != NULL, NULL);
|
2000-12-04 10:52:30 +00:00
|
|
|
|
|
|
|
src = GST_ASYNCDISKSRC (gst_pad_get_parent(pad));
|
|
|
|
|
2000-12-20 09:39:43 +00:00
|
|
|
g_return_val_if_fail (GST_IS_ASYNCDISKSRC (src), NULL);
|
|
|
|
g_return_val_if_fail (GST_FLAG_IS_SET (src, GST_ASYNCDISKSRC_OPEN), NULL);
|
2000-11-06 00:15:51 +00:00
|
|
|
|
2000-01-30 09:03:00 +00:00
|
|
|
/* deal with EOF state */
|
2000-12-04 10:52:30 +00:00
|
|
|
if (offset >= src->size) {
|
|
|
|
gst_src_signal_eos (GST_SRC (src));
|
2000-12-20 09:39:43 +00:00
|
|
|
return NULL;
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* create the buffer */
|
|
|
|
// FIXME: should eventually use a bufferpool for this
|
2000-11-06 00:15:51 +00:00
|
|
|
buf = gst_buffer_new ();
|
2000-12-20 09:39:43 +00:00
|
|
|
g_return_val_if_fail (buf != NULL, NULL);
|
2000-01-30 09:03:00 +00:00
|
|
|
|
|
|
|
/* simply set the buffer to point to the correct region of the file */
|
2000-12-04 10:52:30 +00:00
|
|
|
GST_BUFFER_DATA (buf) = src->map + offset;
|
2000-11-06 00:15:51 +00:00
|
|
|
GST_BUFFER_OFFSET (buf) = offset;
|
|
|
|
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_DONTFREE);
|
2000-01-30 09:03:00 +00:00
|
|
|
|
2000-12-04 10:52:30 +00:00
|
|
|
if ((offset + size) > src->size) {
|
|
|
|
GST_BUFFER_SIZE (buf) = src->size - offset;
|
2000-01-30 09:03:00 +00:00
|
|
|
// FIXME: set the buffer's EOF bit here
|
|
|
|
} else
|
2000-11-06 00:15:51 +00:00
|
|
|
GST_BUFFER_SIZE (buf) = size;
|
2000-01-30 09:03:00 +00:00
|
|
|
|
|
|
|
/* we're done, return the buffer */
|
2000-12-04 10:52:30 +00:00
|
|
|
return buf;
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-07-21 03:57:51 +00:00
|
|
|
/* open the file and mmap it, necessary to go to READY state */
|
2000-11-06 00:15:51 +00:00
|
|
|
static
|
|
|
|
gboolean gst_asyncdisksrc_open_file (GstAsyncDiskSrc *src)
|
|
|
|
{
|
|
|
|
g_return_val_if_fail (!GST_FLAG_IS_SET (src, GST_ASYNCDISKSRC_OPEN), FALSE);
|
2000-01-30 09:03:00 +00:00
|
|
|
|
|
|
|
/* open the file */
|
2000-11-06 00:15:51 +00:00
|
|
|
src->fd = open (src->filename, O_RDONLY);
|
2000-01-30 09:03:00 +00:00
|
|
|
if (src->fd < 0) {
|
2000-11-06 00:15:51 +00:00
|
|
|
gst_element_error (GST_ELEMENT (src), "opening file");
|
2000-01-30 09:03:00 +00:00
|
|
|
return FALSE;
|
|
|
|
} else {
|
|
|
|
/* find the file length */
|
2000-11-06 00:15:51 +00:00
|
|
|
src->size = lseek (src->fd, 0, SEEK_END);
|
|
|
|
lseek (src->fd, 0, SEEK_SET);
|
2000-01-30 09:03:00 +00:00
|
|
|
/* map the file into memory */
|
2000-11-06 00:15:51 +00:00
|
|
|
src->map = mmap (NULL, src->size, PROT_READ, MAP_SHARED, src->fd, 0);
|
|
|
|
madvise (src->map, src->size, MADV_SEQUENTIAL);	/* we expect sequential reads */
|
2000-01-30 09:03:00 +00:00
|
|
|
/* collapse state if that failed */
|
|
|
|
if (src->map == MAP_FAILED) {
|
2000-11-06 00:15:51 +00:00
|
|
|
close (src->fd);
|
|
|
|
gst_element_error (GST_ELEMENT (src), "mmapping file");
|
2000-01-30 09:03:00 +00:00
|
|
|
return FALSE;
|
|
|
|
}
|
2000-11-06 00:15:51 +00:00
|
|
|
GST_FLAG_SET (src, GST_ASYNCDISKSRC_OPEN);
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* unmap and close the file */
|
2000-11-06 00:15:51 +00:00
|
|
|
static void
|
|
|
|
gst_asyncdisksrc_close_file (GstAsyncDiskSrc *src)
|
|
|
|
{
|
|
|
|
g_return_if_fail (GST_FLAG_IS_SET (src, GST_ASYNCDISKSRC_OPEN));
|
2000-01-30 09:03:00 +00:00
|
|
|
|
|
|
|
/* unmap the file from memory */
|
2000-11-06 00:15:51 +00:00
|
|
|
munmap (src->map, src->size);
|
2000-01-30 09:03:00 +00:00
|
|
|
/* close the file */
|
2000-11-06 00:15:51 +00:00
|
|
|
close (src->fd);
|
2000-01-30 09:03:00 +00:00
|
|
|
|
|
|
|
/* zero out a lot of our state */
|
|
|
|
src->fd = 0;
|
|
|
|
src->size = 0;
|
|
|
|
src->map = NULL;
|
|
|
|
src->curoffset = 0;
|
|
|
|
src->seq = 0;
|
|
|
|
|
2000-11-06 00:15:51 +00:00
|
|
|
GST_FLAG_UNSET (src, GST_ASYNCDISKSRC_OPEN);
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-11-06 00:15:51 +00:00
|
|
|
static
|
|
|
|
GstElementStateReturn gst_asyncdisksrc_change_state (GstElement *element)
|
|
|
|
{
|
|
|
|
g_return_val_if_fail (GST_IS_ASYNCDISKSRC (element), GST_STATE_FAILURE);
|
2000-01-30 09:03:00 +00:00
|
|
|
|
2000-11-06 00:15:51 +00:00
|
|
|
if (GST_STATE_PENDING (element) == GST_STATE_NULL) {
|
|
|
|
if (GST_FLAG_IS_SET (element, GST_ASYNCDISKSRC_OPEN))
|
|
|
|
gst_asyncdisksrc_close_file (GST_ASYNCDISKSRC (element));
|
2000-07-21 03:57:51 +00:00
|
|
|
} else {
|
2000-11-06 00:15:51 +00:00
|
|
|
if (!GST_FLAG_IS_SET (element, GST_ASYNCDISKSRC_OPEN)) {
|
|
|
|
if (!gst_asyncdisksrc_open_file (GST_ASYNCDISKSRC (element)))
|
2000-07-21 03:57:51 +00:00
|
|
|
return GST_STATE_FAILURE;
|
|
|
|
}
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
|
|
|
|
2000-11-06 00:15:51 +00:00
|
|
|
if (GST_ELEMENT_CLASS (parent_class)->change_state)
|
|
|
|
return GST_ELEMENT_CLASS (parent_class)->change_state (element);
|
2000-07-21 03:57:51 +00:00
|
|
|
|
|
|
|
return GST_STATE_SUCCESS;
|
2000-01-30 09:03:00 +00:00
|
|
|
}
|
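A brief, hedged usage note on the state handling above (assuming the standard gst_element_set_state() entry point of this API generation): callers never open or close the file directly; they only change the element's state, and change_state invokes the open/close helpers as needed.

  gst_element_set_state (GST_ELEMENT (asyncdisksrc), GST_STATE_READY);  /* leaving NULL opens and mmaps the file */
  gst_element_set_state (GST_ELEMENT (asyncdisksrc), GST_STATE_NULL);   /* returning to NULL munmaps and closes it */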