/* GStreamer
 * Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
 *                    2000 Wim Taymans <wtay@chello.be>
 *                    2003 Colin Walters <cwalters@gnome.org>
 *
 * gstqueue.c:
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "gst_private.h"

#include "gstqueue.h"
#include "gstscheduler.h"
#include "gstevent.h"
#include "gstinfo.h"
#include "gsterror.h"

static GstElementDetails gst_queue_details = GST_ELEMENT_DETAILS (
  "Queue",
  "Generic",
  "Simple data queue",
  "Erik Walthinsen <omega@cse.ogi.edu>"
);

/* Queue signals and args */
enum {
  SIGNAL_UNDERRUN,
  SIGNAL_RUNNING,
  SIGNAL_OVERRUN,
  LAST_SIGNAL
};

enum {
  ARG_0,
  /* FIXME: don't we have another way of doing this
   * "Gstreamer format" (frame/byte/time) queries? */
  ARG_CUR_LEVEL_BUFFERS,
  ARG_CUR_LEVEL_BYTES,
  ARG_CUR_LEVEL_TIME,
  ARG_MAX_SIZE_BUFFERS,
  ARG_MAX_SIZE_BYTES,
  ARG_MAX_SIZE_TIME,
  ARG_MIN_THRESHOLD_BUFFERS,
  ARG_MIN_THRESHOLD_BYTES,
  ARG_MIN_THRESHOLD_TIME,
  ARG_LEAKY,
  ARG_MAY_DEADLOCK,
  ARG_BLOCK_TIMEOUT
  /* FILL ME */
};

typedef struct _GstQueueEventResponse {
  GstEvent *event;
  gboolean ret, handled;
} GstQueueEventResponse;

static void     gst_queue_base_init          (GstQueueClass *klass);
static void     gst_queue_class_init         (GstQueueClass *klass);
static void     gst_queue_init               (GstQueue      *queue);
static void     gst_queue_dispose            (GObject       *object);

static void     gst_queue_set_property       (GObject       *object,
                                              guint          prop_id,
                                              const GValue  *value,
                                              GParamSpec    *pspec);
static void     gst_queue_get_property       (GObject       *object,
                                              guint          prop_id,
                                              GValue        *value,
                                              GParamSpec    *pspec);

static void     gst_queue_chain              (GstPad        *pad,
                                              GstData       *data);
static GstData *gst_queue_get                (GstPad        *pad);

static gboolean gst_queue_handle_src_event   (GstPad        *pad,
                                              GstEvent      *event);

static GstCaps *gst_queue_getcaps            (GstPad        *pad);
static GstPadLinkReturn
                gst_queue_link               (GstPad        *pad,
                                              const GstCaps *caps);
static void     gst_queue_locked_flush       (GstQueue      *queue);

static GstElementStateReturn
                gst_queue_change_state       (GstElement    *element);
static gboolean gst_queue_release_locks      (GstElement    *element);

#define GST_TYPE_QUEUE_LEAKY (queue_leaky_get_type ())

static GType
queue_leaky_get_type (void)
{
  static GType queue_leaky_type = 0;
  static GEnumValue queue_leaky[] = {
    { GST_QUEUE_NO_LEAK,         "0", "Not Leaky" },
    { GST_QUEUE_LEAK_UPSTREAM,   "1", "Leaky on Upstream" },
    { GST_QUEUE_LEAK_DOWNSTREAM, "2", "Leaky on Downstream" },
    { 0, NULL, NULL },
  };

  if (!queue_leaky_type) {
    queue_leaky_type = g_enum_register_static ("GstQueueLeaky", queue_leaky);
  }
  return queue_leaky_type;
}

static GstElementClass *parent_class = NULL;
static guint gst_queue_signals[LAST_SIGNAL] = { 0 };

GType
gst_queue_get_type (void)
{
  static GType queue_type = 0;

  if (!queue_type) {
    static const GTypeInfo queue_info = {
      sizeof (GstQueueClass),
      (GBaseInitFunc) gst_queue_base_init,
      NULL,
      (GClassInitFunc) gst_queue_class_init,
      NULL,
      NULL,
      sizeof (GstQueue),
      4,
      (GInstanceInitFunc) gst_queue_init,
      NULL
    };

    queue_type = g_type_register_static (GST_TYPE_ELEMENT,
                                         "GstQueue", &queue_info, 0);
  }

  return queue_type;
}

static void
gst_queue_base_init (GstQueueClass *klass)
{
  GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);

  gst_element_class_set_details (gstelement_class, &gst_queue_details);
}

static void
gst_queue_class_init (GstQueueClass *klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);

  parent_class = g_type_class_peek_parent (klass);

  /* signals */
  gst_queue_signals[SIGNAL_UNDERRUN] =
    g_signal_new ("underrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
                  G_STRUCT_OFFSET (GstQueueClass, underrun), NULL, NULL,
                  g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
  gst_queue_signals[SIGNAL_RUNNING] =
    g_signal_new ("running", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
                  G_STRUCT_OFFSET (GstQueueClass, running), NULL, NULL,
                  g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);
  gst_queue_signals[SIGNAL_OVERRUN] =
    g_signal_new ("overrun", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_FIRST,
                  G_STRUCT_OFFSET (GstQueueClass, overrun), NULL, NULL,
                  g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);

  /* properties */
  g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BYTES,
    g_param_spec_uint ("current-level-bytes", "Current level (kB)",
                       "Current amount of data in the queue (bytes)",
                       0, G_MAXUINT, 0, G_PARAM_READABLE));
  g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_BUFFERS,
    g_param_spec_uint ("current-level-buffers", "Current level (buffers)",
                       "Current number of buffers in the queue",
                       0, G_MAXUINT, 0, G_PARAM_READABLE));
  g_object_class_install_property (gobject_class, ARG_CUR_LEVEL_TIME,
    g_param_spec_uint64 ("current-level-time", "Current level (ns)",
                         "Current amount of data in the queue (in ns)",
                         0, G_MAXUINT64, 0, G_PARAM_READABLE));

  g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BYTES,
    g_param_spec_uint ("max-size-bytes", "Max. size (kB)",
                       "Max. amount of data in the queue (bytes, 0=disable)",
                       0, G_MAXUINT, 0, G_PARAM_READWRITE));
  g_object_class_install_property (gobject_class, ARG_MAX_SIZE_BUFFERS,
    g_param_spec_uint ("max-size-buffers", "Max. size (buffers)",
                       "Max. number of buffers in the queue (0=disable)",
                       0, G_MAXUINT, 0, G_PARAM_READWRITE));
  g_object_class_install_property (gobject_class, ARG_MAX_SIZE_TIME,
    g_param_spec_uint64 ("max-size-time", "Max. size (ns)",
                         "Max. amount of data in the queue (in ns, 0=disable)",
                         0, G_MAXUINT64, 0, G_PARAM_READWRITE));

  g_object_class_install_property (gobject_class, ARG_MIN_THRESHOLD_BYTES,
    g_param_spec_uint ("min-threshold-bytes", "Min. threshold (kB)",
                       "Min. amount of data in the queue to allow reading (bytes, 0=disable)",
                       0, G_MAXUINT, 0, G_PARAM_READWRITE));
  g_object_class_install_property (gobject_class, ARG_MIN_THRESHOLD_BUFFERS,
    g_param_spec_uint ("min-threshold-buffers", "Min. threshold (buffers)",
                       "Min. number of buffers in the queue to allow reading (0=disable)",
                       0, G_MAXUINT, 0, G_PARAM_READWRITE));
  g_object_class_install_property (gobject_class, ARG_MIN_THRESHOLD_TIME,
    g_param_spec_uint64 ("min-threshold-time", "Min. threshold (ns)",
                         "Min. amount of data in the queue to allow reading (in ns, 0=disable)",
                         0, G_MAXUINT64, 0, G_PARAM_READWRITE));

  g_object_class_install_property (gobject_class, ARG_LEAKY,
    g_param_spec_enum ("leaky", "Leaky",
                       "Where the queue leaks, if at all",
                       GST_TYPE_QUEUE_LEAKY, GST_QUEUE_NO_LEAK, G_PARAM_READWRITE));
  g_object_class_install_property (gobject_class, ARG_MAY_DEADLOCK,
    g_param_spec_boolean ("may_deadlock", "May Deadlock",
                          "The queue may deadlock if it's full and not PLAYING",
                          TRUE, G_PARAM_READWRITE));
  g_object_class_install_property (gobject_class, ARG_BLOCK_TIMEOUT,
    g_param_spec_uint64 ("block_timeout", "Timeout for Block",
                         "Nanoseconds until blocked queue times out and returns filler event. "
                         "Value of -1 disables timeout",
                         0, G_MAXUINT64, -1, G_PARAM_READWRITE));

  /* set several parent class virtual functions */
  gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_queue_dispose);
  gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_queue_set_property);
  gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_queue_get_property);

  gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_queue_change_state);
  gstelement_class->release_locks = GST_DEBUG_FUNCPTR (gst_queue_release_locks);
}

static void
gst_queue_init (GstQueue *queue)
{
  /* scheduling on this kind of element is, well, interesting */
  GST_FLAG_SET (queue, GST_ELEMENT_DECOUPLED);
  GST_FLAG_SET (queue, GST_ELEMENT_EVENT_AWARE);

  queue->sinkpad = gst_pad_new ("sink", GST_PAD_SINK);
  gst_pad_set_chain_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_chain));
  gst_element_add_pad (GST_ELEMENT (queue), queue->sinkpad);
  gst_pad_set_link_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_link));
  gst_pad_set_getcaps_function (queue->sinkpad, GST_DEBUG_FUNCPTR (gst_queue_getcaps));
  gst_pad_set_active (queue->sinkpad, TRUE);

  queue->srcpad = gst_pad_new ("src", GST_PAD_SRC);
  gst_pad_set_get_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_get));
  gst_element_add_pad (GST_ELEMENT (queue), queue->srcpad);
  gst_pad_set_link_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_link));
  gst_pad_set_getcaps_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_getcaps));
  gst_pad_set_event_function (queue->srcpad, GST_DEBUG_FUNCPTR (gst_queue_handle_src_event));
  gst_pad_set_active (queue->srcpad, TRUE);

  queue->cur_level.buffers = 0;      /* no content */
  queue->cur_level.bytes = 0;        /* no content */
  queue->cur_level.time = 0;         /* no content */
  queue->max_size.buffers = 250;     /* high limit */
  queue->max_size.bytes = 0;         /* unlimited */
  queue->max_size.time = 0;          /* unlimited */
  queue->min_threshold.buffers = 0;  /* no threshold */
  queue->min_threshold.bytes = 0;    /* no threshold */
  queue->min_threshold.time = 0;     /* no threshold */

  queue->leaky = GST_QUEUE_NO_LEAK;
  queue->may_deadlock = TRUE;
  queue->block_timeout = GST_CLOCK_TIME_NONE;
  queue->interrupt = FALSE;
  queue->flush = FALSE;

  queue->qlock = g_mutex_new ();
  queue->item_add = g_cond_new ();
  queue->item_del = g_cond_new ();
  queue->event_done = g_cond_new ();
  queue->events = g_queue_new ();
  queue->queue = g_queue_new ();

  GST_CAT_DEBUG_OBJECT (GST_CAT_THREAD, queue,
                        "initialized queue's not_empty & not_full conditions");
}

static void
gst_queue_dispose (GObject *object)
{
  GstQueue *queue = GST_QUEUE (object);

  gst_element_set_state (GST_ELEMENT (queue), GST_STATE_NULL);

  while (!g_queue_is_empty (queue->queue)) {
    GstData *data = g_queue_pop_head (queue->queue);
    gst_data_unref (data);
  }
  g_queue_free (queue->queue);
  g_mutex_free (queue->qlock);
  g_cond_free (queue->item_add);
  g_cond_free (queue->item_del);
  g_cond_free (queue->event_done);
  while (!g_queue_is_empty (queue->events)) {
    GstEvent *event = g_queue_pop_head (queue->events);
    gst_event_unref (event);
  }

  if (G_OBJECT_CLASS (parent_class)->dispose)
    G_OBJECT_CLASS (parent_class)->dispose (object);
}
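
/* Caps handling: as long as the queue still holds data, both pads report
 * (and only accept) the caps that were negotiated when that data was queued;
 * otherwise caps queries and link attempts are proxied through to the
 * element's opposite pad. */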

static GstCaps *
gst_queue_getcaps (GstPad *pad)
{
  GstQueue *queue;

  queue = GST_QUEUE (gst_pad_get_parent (pad));

  if (queue->cur_level.bytes > 0) {
    return gst_caps_copy (queue->negotiated_caps);
  }

  return gst_pad_proxy_getcaps (pad);
}

static GstPadLinkReturn
gst_queue_link (GstPad *pad, const GstCaps *caps)
{
  GstQueue *queue;
  GstPadLinkReturn link_ret;

  queue = GST_QUEUE (gst_pad_get_parent (pad));

  if (queue->cur_level.bytes > 0) {
    if (gst_caps_is_equal_fixed (caps, queue->negotiated_caps)) {
      return GST_PAD_LINK_OK;
    }
    return GST_PAD_LINK_REFUSED;
  }

  link_ret = gst_pad_proxy_pad_link (pad, caps);

  if (GST_PAD_LINK_SUCCESSFUL (link_ret)) {
    /* we store an extra copy of the negotiated caps, just in case
     * the pads become unnegotiated while we have buffers */
    gst_caps_replace (&queue->negotiated_caps, gst_caps_copy (caps));
  }

  return link_ret;
}
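
/* Drop everything that is currently queued and reset the level counters.
 * Must be called with the queue lock held; it also wakes up a writer that
 * is blocked waiting for space. */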

static void
gst_queue_locked_flush (GstQueue *queue)
{
  while (!g_queue_is_empty (queue->queue)) {
    GstData *data = g_queue_pop_head (queue->queue);
    /* First lose the reference we added when putting that data in the queue */
    gst_data_unref (data);
    /* Then lose another reference because we are supposed to destroy that
     * data when flushing */
    gst_data_unref (data);
  }
  queue->timeval = NULL;
  queue->cur_level.buffers = 0;
  queue->cur_level.bytes = 0;
  queue->cur_level.time = 0;

  /* make sure any pending buffers to be added are flushed too */
  queue->flush = TRUE;

  /* we deleted something... */
  g_cond_signal (queue->item_del);
}
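
/* Send the events that gst_queue_handle_src_event() queued for us upstream
 * (via the default event handler on the source pad) and signal the waiting
 * handler when each one is done. */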

static void
gst_queue_handle_pending_events (GstQueue *queue)
{
  /* check for events to send upstream */
  while (!g_queue_is_empty (queue->events)) {
    GstQueueEventResponse *er = g_queue_pop_head (queue->events);
    GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "sending event upstream");
    er->ret = gst_pad_event_default (queue->srcpad, er->event);
    er->handled = TRUE;
    g_cond_signal (queue->event_done);
    GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "event sent");
  }
}
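
/* Log the current fill state of the queue: current/min/max levels for
 * buffers, bytes and time, plus the total number of items in the GQueue. */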

#define STATUS(queue, msg) \
  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, \
                      "(%s:%s) " msg ": %u of %u-%u buffers, %u of %u-%u " \
                      "bytes, %" G_GUINT64_FORMAT " of %" G_GUINT64_FORMAT \
                      "-%" G_GUINT64_FORMAT " ns, %u elements", \
                      GST_DEBUG_PAD_NAME (pad), \
                      queue->cur_level.buffers, \
                      queue->min_threshold.buffers, \
                      queue->max_size.buffers, \
                      queue->cur_level.bytes, \
                      queue->min_threshold.bytes, \
                      queue->max_size.bytes, \
                      queue->cur_level.time, \
                      queue->min_threshold.time, \
                      queue->max_size.time, \
                      queue->queue->length)
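
/* Chain function for the sink pad: queues incoming buffers and events.
 * When the configured max-size limits are reached it either leaks data
 * (according to the "leaky" property) or blocks until space is freed. */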

static void
gst_queue_chain (GstPad *pad,
                 GstData *data)
{
  GstQueue *queue;

  g_return_if_fail (pad != NULL);
  g_return_if_fail (GST_IS_PAD (pad));
  g_return_if_fail (data != NULL);

  queue = GST_QUEUE (GST_OBJECT_PARENT (pad));

restart:
  /* we have to lock the queue since we span threads */
  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "locking t:%p", g_thread_self ());
  g_mutex_lock (queue->qlock);
  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "locked t:%p", g_thread_self ());

  gst_queue_handle_pending_events (queue);

  /* assume we don't need to flush this buffer when the queue is filled */
  queue->flush = FALSE;

  if (GST_IS_EVENT (data)) {
    switch (GST_EVENT_TYPE (data)) {
      case GST_EVENT_FLUSH:
        STATUS (queue, "received flush event");
        gst_queue_locked_flush (queue);
        STATUS (queue, "after flush");
        break;
      case GST_EVENT_EOS:
        STATUS (queue, "received EOS");
        break;
      default:
        /* we put the event in the queue, we don't have to act ourselves */
        GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
                            "adding event %p of type %d",
                            data, GST_EVENT_TYPE (data));
        break;
    }
  }

  if (GST_IS_BUFFER (data))
    GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
                        "adding buffer %p of size %d",
                        data, GST_BUFFER_SIZE (data));

  /* We make space available if we're "full" according to whatever
   * the user defined as "full". Note that this only applies to buffers.
   * We always handle events and they don't count in our statistics. */
  if (GST_IS_BUFFER (data) &&
      ((queue->max_size.buffers > 0 &&
        queue->cur_level.buffers >= queue->max_size.buffers) ||
       (queue->max_size.bytes > 0 &&
        queue->cur_level.bytes >= queue->max_size.bytes) ||
       (queue->max_size.time > 0 &&
        queue->cur_level.time >= queue->max_size.time))) {
    g_mutex_unlock (queue->qlock);
    g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_OVERRUN], 0);
    g_mutex_lock (queue->qlock);

    /* how are we going to make space for this buffer? */
    switch (queue->leaky) {
      /* leak current buffer */
      case GST_QUEUE_LEAK_UPSTREAM:
        GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
                              "queue is full, leaking buffer on upstream end");
        /* now we can clean up and exit right away */
        g_mutex_unlock (queue->qlock);
        goto out_unref;

      /* leak first buffer in the queue */
      case GST_QUEUE_LEAK_DOWNSTREAM: {
        /* this is a bit hacky. We'll manually iterate the list
         * and find the first buffer from the head on. We'll
         * unref that and "fix up" the GQueue object... */
        GList *item;
        GstData *leak = NULL;

        GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
                              "queue is full, leaking buffer on downstream end");

        for (item = queue->queue->head; item != NULL; item = item->next) {
          if (GST_IS_BUFFER (item->data)) {
            leak = item->data;
            break;
          }
        }

        /* if we didn't find anything, it means we have no buffers
         * in here. That cannot happen, since we had >= 1 bufs */
        g_assert (leak);

        /* Now remove it from the list, fixing up the GQueue
         * CHECKME: is a queue->head the first or the last item? */
        item = g_list_delete_link (queue->queue->head, item);
        queue->queue->head = g_list_first (item);
        queue->queue->tail = g_list_last (item);
        queue->queue->length--;

        /* and unref the data at the end. Twice, because we keep a ref
         * to make things read-only. Also keep our list uptodate. */
        queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
        queue->cur_level.buffers--;
        if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
          queue->cur_level.time -= GST_BUFFER_DURATION (data);

        gst_data_unref (data);
        gst_data_unref (data);
        break;
      }

      default:
        g_warning ("Unknown leaky type, using default");
        /* fall-through */

      /* don't leak. Instead, wait for space to be available */
      case GST_QUEUE_NO_LEAK:
        STATUS (queue, "pre-full wait");

        while ((queue->max_size.buffers > 0 &&
                queue->cur_level.buffers >= queue->max_size.buffers) ||
               (queue->max_size.bytes > 0 &&
                queue->cur_level.bytes >= queue->max_size.bytes) ||
               (queue->max_size.time > 0 &&
                queue->cur_level.time >= queue->max_size.time)) {
          /* if there's a pending state change for this queue
           * or its manager, switch back to iterator so bottom
           * half of state change executes */
          if (queue->interrupt) {
            GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "interrupted");
            g_mutex_unlock (queue->qlock);
            if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->sinkpad),
                                         GST_ELEMENT (queue))) {
              goto out_unref;
            }
            /* if we got here because we were unlocked after a
             * flush, we don't need to add the buffer to the
             * queue again */
            if (queue->flush) {
              GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
                                    "not adding pending buffer after flush");
              goto out_unref;
            }
            GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
                                  "adding pending buffer after interrupt");
            goto restart;
          }

          if (GST_STATE (queue) != GST_STATE_PLAYING) {
            /* this means the other end is shut down. Try to
             * signal to resolve the error */
            if (!queue->may_deadlock) {
              g_mutex_unlock (queue->qlock);
              gst_data_unref (data);
              GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
                                 ("deadlock found, shutting down source pad elements"));
              /* we don't go to out_unref here, since we want to
               * unref the buffer *before* calling GST_ELEMENT_ERROR */
              return;
            } else {
              GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
                                      "%s: waiting for the app to restart "
                                      "source pad elements",
                                      GST_ELEMENT_NAME (queue));
            }
          }

          /* OK, we've got a serious issue here. Imagine the situation
           * where the puller (next element) is sending an event here,
           * so it cannot pull events from the queue, and we cannot
           * push data further because the queue is 'full' and therefore,
           * we wait here (and do not handle events): deadlock! to solve
           * that, we handle pending upstream events here, too. */
          gst_queue_handle_pending_events (queue);

          STATUS (queue, "waiting for item_del signal");
          g_cond_wait (queue->item_del, queue->qlock);
          STATUS (queue, "received item_del signal");
        }

        STATUS (queue, "post-full wait");
        g_mutex_unlock (queue->qlock);
        g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
        g_mutex_lock (queue->qlock);
        break;
    }
  }

  /* put the buffer on the tail of the list. We keep a reference,
   * so that the data is read-only while in here. There's a good
   * reason to do so: we have a size and time counter, and any
   * modification to the content could change any of the two. */
  gst_data_ref (data);
  g_queue_push_tail (queue->queue, data);

  /* Note that we only add buffers (not events) to the statistics */
  if (GST_IS_BUFFER (data)) {
    queue->cur_level.buffers++;
    queue->cur_level.bytes += GST_BUFFER_SIZE (data);
    if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
      queue->cur_level.time += GST_BUFFER_DURATION (data);
  }

  STATUS (queue, "+ level");

  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "signalling item_add");
  g_cond_signal (queue->item_add);
  g_mutex_unlock (queue->qlock);

  return;

out_unref:
  gst_data_unref (data);
  return;
}
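
/* Get function for the source pad: waits until the queue holds enough data
 * (according to the min-threshold properties), pops the head item, updates
 * the level statistics and returns the buffer or event downstream. */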

static GstData *
gst_queue_get (GstPad *pad)
{
  GstQueue *queue;
  GstData *data;

  g_return_val_if_fail (pad != NULL, NULL);
  g_return_val_if_fail (GST_IS_PAD (pad), NULL);

  queue = GST_QUEUE (gst_pad_get_parent (pad));

restart:
  /* have to lock for thread-safety */
  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
                      "locking t:%p", g_thread_self ());
  g_mutex_lock (queue->qlock);
  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
                      "locked t:%p", g_thread_self ());

  if (queue->queue->length == 0 ||
      (queue->min_threshold.buffers > 0 &&
       queue->cur_level.buffers < queue->min_threshold.buffers) ||
      (queue->min_threshold.bytes > 0 &&
       queue->cur_level.bytes < queue->min_threshold.bytes) ||
      (queue->min_threshold.time > 0 &&
       queue->cur_level.time < queue->min_threshold.time)) {
    g_mutex_unlock (queue->qlock);
    g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_UNDERRUN], 0);
    g_mutex_lock (queue->qlock);

    STATUS (queue, "pre-empty wait");
    while (queue->queue->length == 0 ||
           (queue->min_threshold.buffers > 0 &&
            queue->cur_level.buffers < queue->min_threshold.buffers) ||
           (queue->min_threshold.bytes > 0 &&
            queue->cur_level.bytes < queue->min_threshold.bytes) ||
           (queue->min_threshold.time > 0 &&
            queue->cur_level.time < queue->min_threshold.time)) {
      /* if there's a pending state change for this queue or its
       * manager, switch back to iterator so bottom half of state
       * change executes. */
      if (queue->interrupt) {
        GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue, "interrupted");
        g_mutex_unlock (queue->qlock);
        if (gst_scheduler_interrupt (gst_pad_get_scheduler (queue->srcpad),
                                     GST_ELEMENT (queue)))
          return GST_DATA (gst_event_new (GST_EVENT_INTERRUPT));
        goto restart;
      }
      if (GST_STATE (queue) != GST_STATE_PLAYING) {
        /* this means the other end is shut down */
        if (!queue->may_deadlock) {
          g_mutex_unlock (queue->qlock);
          GST_ELEMENT_ERROR (queue, CORE, THREAD, (NULL),
                             ("deadlock found, shutting down sink pad elements"));
          goto restart;
        } else {
          GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
                                  "%s: waiting for the app to restart "
                                  "source pad elements",
                                  GST_ELEMENT_NAME (queue));
        }
      }

      STATUS (queue, "waiting for item_add");

      if (queue->block_timeout != GST_CLOCK_TIME_NONE) {
        GTimeVal timeout;
        g_get_current_time (&timeout);
        g_time_val_add (&timeout, queue->block_timeout / 1000);
        if (!g_cond_timed_wait (queue->item_add, queue->qlock, &timeout)) {
          g_mutex_unlock (queue->qlock);
          GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
                                  "Sending filler event");
          return GST_DATA (gst_event_new_filler ());
        }
      } else {
        g_cond_wait (queue->item_add, queue->qlock);
      }
      STATUS (queue, "got item_add signal");
    }

    STATUS (queue, "post-empty wait");
    g_mutex_unlock (queue->qlock);
    g_signal_emit (G_OBJECT (queue), gst_queue_signals[SIGNAL_RUNNING], 0);
    g_mutex_lock (queue->qlock);
  }

  /* There's something in the list now, whatever it is */
  data = g_queue_pop_head (queue->queue);
  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue,
                      "retrieved data %p from queue", data);

  if (data == NULL)
    return NULL;

  if (GST_IS_BUFFER (data)) {
    /* Update statistics */
    queue->cur_level.buffers--;
    queue->cur_level.bytes -= GST_BUFFER_SIZE (data);
    if (GST_BUFFER_DURATION (data) != GST_CLOCK_TIME_NONE)
      queue->cur_level.time -= GST_BUFFER_DURATION (data);
  }

  /* Now that we're done, we can lose our own reference to
   * the item, since we're no longer in danger. */
  gst_data_unref (data);

  STATUS (queue, "after _get()");

  GST_CAT_LOG_OBJECT (GST_CAT_DATAFLOW, queue, "signalling item_del");
  g_cond_signal (queue->item_del);
  g_mutex_unlock (queue->qlock);

  /* FIXME: I suppose this needs to be locked, since the EOS
   * bit affects the pipeline state. However, that bit is
   * locked too so it'd cause a deadlock. */
  if (GST_IS_EVENT (data)) {
    GstEvent *event = GST_EVENT (data);
    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_EOS:
        GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
                              "queue \"%s\" eos",
                              GST_ELEMENT_NAME (queue));
        gst_element_set_eos (GST_ELEMENT (queue));
        break;
      default:
        break;
    }
  }

  return data;
}
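
/* Event handler for the source pad. While PLAYING, the event is handed to
 * the thread running the chain function (see gst_queue_handle_pending_events())
 * and we wait, with a timeout, for the result; otherwise the event is handled
 * directly and a FLUSH or flushing SEEK empties the queue. */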

static gboolean
gst_queue_handle_src_event (GstPad *pad,
                            GstEvent *event)
{
  GstQueue *queue = GST_QUEUE (gst_pad_get_parent (pad));
  gboolean res;

  g_mutex_lock (queue->qlock);

  if (gst_element_get_state (GST_ELEMENT (queue)) == GST_STATE_PLAYING) {
    GstQueueEventResponse er;

    /* push the event to the queue and wait for upstream consumption */
    er.event = event;
    er.handled = FALSE;
    g_queue_push_tail (queue->events, &er);
    GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
                            "Preparing for loop for event handler");
    /* see the chain function on why this is here - it prevents a deadlock */
    g_cond_signal (queue->item_del);
    while (!er.handled) {
      GTimeVal timeout;
      g_get_current_time (&timeout);
      g_time_val_add (&timeout, 500 * 1000); /* half a second */
      if (!g_cond_timed_wait (queue->event_done, queue->qlock, &timeout) &&
          !er.handled) {
        GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
                                "timeout in upstream event handling");
        /* remove ourselves from the pending list. Since we're
         * locked, others cannot reference this anymore. */
        queue->queue->head = g_list_remove (queue->queue->head, &er);
        queue->queue->head = g_list_first (queue->queue->head);
        queue->queue->tail = g_list_last (queue->queue->head);
        queue->queue->length--;
        res = FALSE;
        goto handled;
      }
    }
    GST_CAT_WARNING_OBJECT (GST_CAT_DATAFLOW, queue,
                            "Event handled");
    res = er.ret;
  } else {
    res = gst_pad_event_default (pad, event);

    switch (GST_EVENT_TYPE (event)) {
      case GST_EVENT_FLUSH:
        GST_CAT_DEBUG_OBJECT (GST_CAT_DATAFLOW, queue,
                              "FLUSH event, flushing queue\n");
        gst_queue_locked_flush (queue);
        break;
      case GST_EVENT_SEEK:
        if (GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_FLUSH) {
          gst_queue_locked_flush (queue);
        }
      default:
        break;
    }
  }
handled:
  g_mutex_unlock (queue->qlock);

  return res;
}

static gboolean
gst_queue_release_locks (GstElement *element)
{
  GstQueue *queue;

  queue = GST_QUEUE (element);

  g_mutex_lock (queue->qlock);
  queue->interrupt = TRUE;
  g_cond_signal (queue->item_add);
  g_cond_signal (queue->item_del);
  g_mutex_unlock (queue->qlock);

  return TRUE;
}
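
/* State change handler: flushes the queue when entering READY, and on
 * PAUSED->PLAYING checks that the sink pad is linked and that the two pads
 * actually bridge different schedulers before chaining up to the parent. */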

static GstElementStateReturn
gst_queue_change_state (GstElement *element)
{
  GstQueue *queue;
  GstElementStateReturn ret = GST_STATE_SUCCESS;

  queue = GST_QUEUE (element);

  GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "starting state change");

  /* lock the queue so another thread (not in sync with this thread's state)
   * can't call this queue's _get (or whatever)
   */
  g_mutex_lock (queue->qlock);

  switch (GST_STATE_TRANSITION (element)) {
    case GST_STATE_NULL_TO_READY:
      gst_queue_locked_flush (queue);
      break;
    case GST_STATE_PAUSED_TO_PLAYING:
      if (!GST_PAD_IS_LINKED (queue->sinkpad)) {
        GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
                              "queue %s is not linked",
                              GST_ELEMENT_NAME (queue));
        /* FIXME can this be? */
        g_cond_signal (queue->item_add);

        ret = GST_STATE_FAILURE;
        goto error;
      } else {
        GstScheduler *src_sched, *sink_sched;

        src_sched = gst_pad_get_scheduler (GST_PAD (queue->srcpad));
        sink_sched = gst_pad_get_scheduler (GST_PAD (queue->sinkpad));

        if (src_sched == sink_sched) {
          GST_CAT_DEBUG_OBJECT (GST_CAT_STATES, queue,
                                "queue %s does not connect different schedulers",
                                GST_ELEMENT_NAME (queue));

          g_warning ("queue %s does not connect different schedulers",
                     GST_ELEMENT_NAME (queue));

          ret = GST_STATE_FAILURE;
          goto error;
        }
      }
      queue->interrupt = FALSE;
      break;
    case GST_STATE_PAUSED_TO_READY:
      gst_queue_locked_flush (queue);
      break;
    default:
      break;
  }

  if (GST_ELEMENT_CLASS (parent_class)->change_state)
    ret = GST_ELEMENT_CLASS (parent_class)->change_state (element);

  /* this is an ugly hack to make sure our pads are always active.
   * Reason for this is that pad activation for the queue element
   * depends on 2 schedulers (ugh) */
  gst_pad_set_active (queue->sinkpad, TRUE);
  gst_pad_set_active (queue->srcpad, TRUE);

error:
  g_mutex_unlock (queue->qlock);

  GST_CAT_LOG_OBJECT (GST_CAT_STATES, element, "done with state change");

  return ret;
}

static void
gst_queue_set_property (GObject *object,
                        guint prop_id,
                        const GValue *value,
                        GParamSpec *pspec)
{
  GstQueue *queue = GST_QUEUE (object);

  /* someone could change levels here, and since this
   * affects the get/put funcs, we need to lock for safety. */
  g_mutex_lock (queue->qlock);

  switch (prop_id) {
    case ARG_MAX_SIZE_BYTES:
      queue->max_size.bytes = g_value_get_uint (value);
      break;
    case ARG_MAX_SIZE_BUFFERS:
      queue->max_size.buffers = g_value_get_uint (value);
      break;
    case ARG_MAX_SIZE_TIME:
      queue->max_size.time = g_value_get_uint64 (value);
      break;
    case ARG_MIN_THRESHOLD_BYTES:
      queue->min_threshold.bytes = g_value_get_uint (value);
      break;
    case ARG_MIN_THRESHOLD_BUFFERS:
      queue->min_threshold.buffers = g_value_get_uint (value);
      break;
    case ARG_MIN_THRESHOLD_TIME:
      queue->min_threshold.time = g_value_get_uint64 (value);
      break;
    case ARG_LEAKY:
      queue->leaky = g_value_get_enum (value);
      break;
    case ARG_MAY_DEADLOCK:
      queue->may_deadlock = g_value_get_boolean (value);
      break;
    case ARG_BLOCK_TIMEOUT:
      queue->block_timeout = g_value_get_uint64 (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }

  g_mutex_unlock (queue->qlock);
}

static void
gst_queue_get_property (GObject *object,
                        guint prop_id,
                        GValue *value,
                        GParamSpec *pspec)
{
  GstQueue *queue = GST_QUEUE (object);

  switch (prop_id) {
    case ARG_CUR_LEVEL_BYTES:
      g_value_set_uint (value, queue->cur_level.bytes);
      break;
    case ARG_CUR_LEVEL_BUFFERS:
      g_value_set_uint (value, queue->cur_level.buffers);
      break;
    case ARG_CUR_LEVEL_TIME:
      g_value_set_uint64 (value, queue->cur_level.time);
      break;
    case ARG_MAX_SIZE_BYTES:
      g_value_set_uint (value, queue->max_size.bytes);
      break;
    case ARG_MAX_SIZE_BUFFERS:
      g_value_set_uint (value, queue->max_size.buffers);
      break;
    case ARG_MAX_SIZE_TIME:
      g_value_set_uint64 (value, queue->max_size.time);
      break;
    case ARG_MIN_THRESHOLD_BYTES:
      g_value_set_uint (value, queue->min_threshold.bytes);
      break;
    case ARG_MIN_THRESHOLD_BUFFERS:
      g_value_set_uint (value, queue->min_threshold.buffers);
      break;
    case ARG_MIN_THRESHOLD_TIME:
      g_value_set_uint64 (value, queue->min_threshold.time);
      break;
    case ARG_LEAKY:
      g_value_set_enum (value, queue->leaky);
      break;
    case ARG_MAY_DEADLOCK:
      g_value_set_boolean (value, queue->may_deadlock);
      break;
    case ARG_BLOCK_TIMEOUT:
      g_value_set_uint64 (value, queue->block_timeout);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}