/* gstreamer/gst-libs/gst/audio/gstaudiobasesrc.c */

/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2005 Wim Taymans <wim@fluendo.com>
*
 *
* gstaudiobasesrc.c:
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/**
 *
* SECTION:gstaudiobasesrc
* @short_description: Base class for audio sources
* @see_also: #GstAudioSrc, #GstAudioRingBuffer.
*
* This is the base class for audio sources. Subclasses need to implement the
* ::create_ringbuffer vmethod. This base class will then take care of
* reading samples from the ringbuffer, synchronisation and flushing.
*
* Last reviewed on 2006-09-27 (0.10.12)
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <string.h>
#include "gstaudiobasesrc.h"
#include "gst/gst-i18n-plugin.h"
GST_DEBUG_CATEGORY_STATIC (gst_audio_base_src_debug);
#define GST_CAT_DEFAULT gst_audio_base_src_debug
GType
2011-11-11 10:52:47 +00:00
gst_audio_base_src_slave_method_get_type (void)
{
static volatile gsize slave_method_type = 0;
/* FIXME 0.11: nick should be "retimestamp" not "re-timestamp" */
static const GEnumValue slave_method[] = {
2011-11-11 10:52:47 +00:00
{GST_AUDIO_BASE_SRC_SLAVE_RESAMPLE,
"GST_AUDIO_BASE_SRC_SLAVE_RESAMPLE", "resample"},
{GST_AUDIO_BASE_SRC_SLAVE_RETIMESTAMP,
"GST_AUDIO_BASE_SRC_SLAVE_RETIMESTAMP", "re-timestamp"},
{GST_AUDIO_BASE_SRC_SLAVE_SKEW, "GST_AUDIO_BASE_SRC_SLAVE_SKEW", "skew"},
{GST_AUDIO_BASE_SRC_SLAVE_NONE, "GST_AUDIO_BASE_SRC_SLAVE_NONE", "none"},
{0, NULL, NULL},
};
if (g_once_init_enter (&slave_method_type)) {
GType tmp =
2011-11-11 10:52:47 +00:00
g_enum_register_static ("GstAudioBaseSrcSlaveMethod", slave_method);
g_once_init_leave (&slave_method_type, tmp);
}
return (GType) slave_method_type;
}
#define GST_AUDIO_BASE_SRC_GET_PRIVATE(obj) \
(G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_AUDIO_BASE_SRC, GstAudioBaseSrcPrivate))
struct _GstAudioBaseSrcPrivate
{
/* the clock slaving algorithm in use */
GstAudioBaseSrcSlaveMethod slave_method;
};
/* BaseAudioSrc signals and args */
enum
{
/* FILL ME */
LAST_SIGNAL
};
#define DEFAULT_BUFFER_TIME ((200 * GST_MSECOND) / GST_USECOND)
#define DEFAULT_LATENCY_TIME ((10 * GST_MSECOND) / GST_USECOND)
#define DEFAULT_ACTUAL_BUFFER_TIME -1
#define DEFAULT_ACTUAL_LATENCY_TIME -1
#define DEFAULT_PROVIDE_CLOCK TRUE
#define DEFAULT_SLAVE_METHOD GST_AUDIO_BASE_SRC_SLAVE_SKEW
enum
{
PROP_0,
PROP_BUFFER_TIME,
PROP_LATENCY_TIME,
PROP_ACTUAL_BUFFER_TIME,
PROP_ACTUAL_LATENCY_TIME,
PROP_PROVIDE_CLOCK,
PROP_SLAVE_METHOD,
PROP_LAST
};
/* One-time type initialisation run from G_DEFINE_TYPE_WITH_CODE: sets up the
 * debug category and, when NLS is enabled, binds the translation domain. */
static void
_do_init (GType type)
{
  GST_DEBUG_CATEGORY_INIT (gst_audio_base_src_debug, "audiobasesrc", 0,
      "audiobasesrc element");

#ifdef ENABLE_NLS
  GST_DEBUG ("binding text domain %s to locale dir %s", GETTEXT_PACKAGE,
      LOCALEDIR);
  bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR);
  bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8");
#endif /* ENABLE_NLS */
}
#define gst_audio_base_src_parent_class parent_class
G_DEFINE_TYPE_WITH_CODE (GstAudioBaseSrc, gst_audio_base_src, GST_TYPE_PUSH_SRC,
_do_init (g_define_type_id));
static void gst_audio_base_src_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_audio_base_src_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static void gst_audio_base_src_dispose (GObject * object);
static GstStateChangeReturn gst_audio_base_src_change_state (GstElement *
element, GstStateChange transition);
static GstClock *gst_audio_base_src_provide_clock (GstElement * elem);
static GstClockTime gst_audio_base_src_get_time (GstClock * clock,
GstAudioBaseSrc * src);
static GstFlowReturn gst_audio_base_src_create (GstBaseSrc * bsrc,
guint64 offset, guint length, GstBuffer ** buf);
static gboolean gst_audio_base_src_event (GstBaseSrc * bsrc, GstEvent * event);
static void gst_audio_base_src_get_times (GstBaseSrc * bsrc,
GstBuffer * buffer, GstClockTime * start, GstClockTime * end);
static gboolean gst_audio_base_src_setcaps (GstBaseSrc * bsrc, GstCaps * caps);
static gboolean gst_audio_base_src_query (GstBaseSrc * bsrc, GstQuery * query);
static GstCaps *gst_audio_base_src_fixate (GstBaseSrc * bsrc, GstCaps * caps);
/* static guint gst_audio_base_src_signals[LAST_SIGNAL] = { 0 }; */
static void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_class_init (GstAudioBaseSrcClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
GstBaseSrcClass *gstbasesrc_class;
gobject_class = (GObjectClass *) klass;
gstelement_class = (GstElementClass *) klass;
gstbasesrc_class = (GstBaseSrcClass *) klass;
2011-11-11 10:52:47 +00:00
g_type_class_add_private (klass, sizeof (GstAudioBaseSrcPrivate));
2011-11-11 10:52:47 +00:00
gobject_class->set_property = gst_audio_base_src_set_property;
gobject_class->get_property = gst_audio_base_src_get_property;
gobject_class->dispose = gst_audio_base_src_dispose;
g_object_class_install_property (gobject_class, PROP_BUFFER_TIME,
g_param_spec_int64 ("buffer-time", "Buffer Time",
"Size of audio buffer in microseconds", 1,
Use G_PARAM_STATIC_STRINGS everywhere for GParamSpecs that use static strings (i.e. all). This gives us less memory u... Original commit message from CVS: * configure.ac: * ext/alsa/gstalsamixerelement.c: (gst_alsa_mixer_element_class_init): * ext/alsa/gstalsasink.c: (gst_alsasink_class_init): * ext/alsa/gstalsasrc.c: (gst_alsasrc_class_init): * ext/cdparanoia/gstcdparanoiasrc.c: (gst_cd_paranoia_src_class_init): * ext/gio/gstgiosink.c: (gst_gio_sink_class_init): * ext/gio/gstgiosrc.c: (gst_gio_src_class_init): * ext/gio/gstgiostreamsink.c: (gst_gio_stream_sink_class_init): * ext/gio/gstgiostreamsrc.c: (gst_gio_stream_src_class_init): * ext/gnomevfs/gstgnomevfssink.c: (gst_gnome_vfs_sink_class_init): * ext/gnomevfs/gstgnomevfssrc.c: (gst_gnome_vfs_src_class_init): * ext/ogg/gstoggmux.c: (gst_ogg_mux_class_init): * ext/pango/gsttextoverlay.c: (gst_text_overlay_class_init): * ext/pango/gsttextrender.c: (gst_text_render_class_init): * ext/theora/theoradec.c: (gst_theora_dec_class_init): * ext/theora/theoraenc.c: (gst_theora_enc_class_init): * ext/theora/theoraparse.c: (gst_theora_parse_class_init): * ext/vorbis/vorbisenc.c: (gst_vorbis_enc_class_init): * gst-libs/gst/audio/gstaudiofiltertemplate.c: (gst_audio_filter_template_class_init): * gst-libs/gst/audio/gstbaseaudiosink.c: (gst_base_audio_sink_class_init): * gst-libs/gst/audio/gstbaseaudiosrc.c: (gst_base_audio_src_class_init): * gst-libs/gst/cdda/gstcddabasesrc.c: (gst_cdda_base_src_class_init): * gst-libs/gst/interfaces/mixertrack.c: (gst_mixer_track_class_init): * gst-libs/gst/rtp/gstbasertpdepayload.c: (gst_base_rtp_depayload_class_init): * gst-libs/gst/rtp/gstbasertppayload.c: (gst_basertppayload_class_init): * gst/audioconvert/gstaudioconvert.c: (gst_audio_convert_class_init): * gst/audiorate/gstaudiorate.c: (gst_audio_rate_class_init): * gst/audioresample/gstaudioresample.c: (gst_audioresample_class_init): * gst/audiotestsrc/gstaudiotestsrc.c: (gst_audio_test_src_class_init): * gst/gdp/gstgdppay.c: 
(gst_gdp_pay_class_init): * gst/playback/gstdecodebin2.c: (gst_decode_bin_class_init): * gst/playback/gstplaybasebin.c: (gst_play_base_bin_class_init), (preroll_unlinked): * gst/playback/gstplaybin.c: (gst_play_bin_class_init): * gst/playback/gstplaybin2.c: (gst_play_bin_class_init): * gst/playback/gstplaysink.c: (gst_play_sink_class_init): * gst/playback/gstqueue2.c: (gst_queue_class_init): * gst/playback/gststreaminfo.c: (gst_stream_info_class_init): * gst/playback/gststreamselector.c: (gst_selector_pad_class_init), (gst_stream_selector_class_init): * gst/playback/gsturidecodebin.c: (gst_uri_decode_bin_class_init): * gst/subparse/gstsubparse.c: (gst_sub_parse_class_init): * gst/tcp/gstmultifdsink.c: (gst_multi_fd_sink_class_init): * gst/tcp/gsttcpclientsink.c: (gst_tcp_client_sink_class_init): * gst/tcp/gsttcpclientsrc.c: (gst_tcp_client_src_class_init): * gst/tcp/gsttcpserversink.c: (gst_tcp_server_sink_class_init): * gst/tcp/gsttcpserversrc.c: (gst_tcp_server_src_class_init): * gst/videorate/gstvideorate.c: (gst_video_rate_class_init): * gst/videoscale/gstvideoscale.c: (gst_video_scale_class_init): * gst/videotestsrc/gstvideotestsrc.c: (gst_video_test_src_class_init): * gst/volume/gstvolume.c: (gst_volume_class_init): * sys/v4l/gstv4lelement.c: (gst_v4lelement_class_init): * sys/v4l/gstv4lmjpegsink.c: (gst_v4lmjpegsink_class_init): * sys/v4l/gstv4lmjpegsrc.c: (gst_v4lmjpegsrc_class_init): * sys/v4l/gstv4lsrc.c: (gst_v4lsrc_class_init): * sys/ximage/ximagesink.c: (gst_ximagesink_class_init): * sys/xvimage/xvimagesink.c: (gst_xvimagesink_class_init): Use G_PARAM_STATIC_STRINGS everywhere for GParamSpecs that use static strings (i.e. all). This gives us less memory usage, fewer allocations and thus less memory defragmentation. Depend on core CVS for this. Fixes bug #523806.
2008-03-22 15:00:53 +00:00
G_MAXINT64, DEFAULT_BUFFER_TIME,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_LATENCY_TIME,
g_param_spec_int64 ("latency-time", "Latency Time",
"Audio latency in microseconds", 1,
Use G_PARAM_STATIC_STRINGS everywhere for GParamSpecs that use static strings (i.e. all). This gives us less memory u... Original commit message from CVS: * configure.ac: * ext/alsa/gstalsamixerelement.c: (gst_alsa_mixer_element_class_init): * ext/alsa/gstalsasink.c: (gst_alsasink_class_init): * ext/alsa/gstalsasrc.c: (gst_alsasrc_class_init): * ext/cdparanoia/gstcdparanoiasrc.c: (gst_cd_paranoia_src_class_init): * ext/gio/gstgiosink.c: (gst_gio_sink_class_init): * ext/gio/gstgiosrc.c: (gst_gio_src_class_init): * ext/gio/gstgiostreamsink.c: (gst_gio_stream_sink_class_init): * ext/gio/gstgiostreamsrc.c: (gst_gio_stream_src_class_init): * ext/gnomevfs/gstgnomevfssink.c: (gst_gnome_vfs_sink_class_init): * ext/gnomevfs/gstgnomevfssrc.c: (gst_gnome_vfs_src_class_init): * ext/ogg/gstoggmux.c: (gst_ogg_mux_class_init): * ext/pango/gsttextoverlay.c: (gst_text_overlay_class_init): * ext/pango/gsttextrender.c: (gst_text_render_class_init): * ext/theora/theoradec.c: (gst_theora_dec_class_init): * ext/theora/theoraenc.c: (gst_theora_enc_class_init): * ext/theora/theoraparse.c: (gst_theora_parse_class_init): * ext/vorbis/vorbisenc.c: (gst_vorbis_enc_class_init): * gst-libs/gst/audio/gstaudiofiltertemplate.c: (gst_audio_filter_template_class_init): * gst-libs/gst/audio/gstbaseaudiosink.c: (gst_base_audio_sink_class_init): * gst-libs/gst/audio/gstbaseaudiosrc.c: (gst_base_audio_src_class_init): * gst-libs/gst/cdda/gstcddabasesrc.c: (gst_cdda_base_src_class_init): * gst-libs/gst/interfaces/mixertrack.c: (gst_mixer_track_class_init): * gst-libs/gst/rtp/gstbasertpdepayload.c: (gst_base_rtp_depayload_class_init): * gst-libs/gst/rtp/gstbasertppayload.c: (gst_basertppayload_class_init): * gst/audioconvert/gstaudioconvert.c: (gst_audio_convert_class_init): * gst/audiorate/gstaudiorate.c: (gst_audio_rate_class_init): * gst/audioresample/gstaudioresample.c: (gst_audioresample_class_init): * gst/audiotestsrc/gstaudiotestsrc.c: (gst_audio_test_src_class_init): * gst/gdp/gstgdppay.c: 
(gst_gdp_pay_class_init): * gst/playback/gstdecodebin2.c: (gst_decode_bin_class_init): * gst/playback/gstplaybasebin.c: (gst_play_base_bin_class_init), (preroll_unlinked): * gst/playback/gstplaybin.c: (gst_play_bin_class_init): * gst/playback/gstplaybin2.c: (gst_play_bin_class_init): * gst/playback/gstplaysink.c: (gst_play_sink_class_init): * gst/playback/gstqueue2.c: (gst_queue_class_init): * gst/playback/gststreaminfo.c: (gst_stream_info_class_init): * gst/playback/gststreamselector.c: (gst_selector_pad_class_init), (gst_stream_selector_class_init): * gst/playback/gsturidecodebin.c: (gst_uri_decode_bin_class_init): * gst/subparse/gstsubparse.c: (gst_sub_parse_class_init): * gst/tcp/gstmultifdsink.c: (gst_multi_fd_sink_class_init): * gst/tcp/gsttcpclientsink.c: (gst_tcp_client_sink_class_init): * gst/tcp/gsttcpclientsrc.c: (gst_tcp_client_src_class_init): * gst/tcp/gsttcpserversink.c: (gst_tcp_server_sink_class_init): * gst/tcp/gsttcpserversrc.c: (gst_tcp_server_src_class_init): * gst/videorate/gstvideorate.c: (gst_video_rate_class_init): * gst/videoscale/gstvideoscale.c: (gst_video_scale_class_init): * gst/videotestsrc/gstvideotestsrc.c: (gst_video_test_src_class_init): * gst/volume/gstvolume.c: (gst_volume_class_init): * sys/v4l/gstv4lelement.c: (gst_v4lelement_class_init): * sys/v4l/gstv4lmjpegsink.c: (gst_v4lmjpegsink_class_init): * sys/v4l/gstv4lmjpegsrc.c: (gst_v4lmjpegsrc_class_init): * sys/v4l/gstv4lsrc.c: (gst_v4lsrc_class_init): * sys/ximage/ximagesink.c: (gst_ximagesink_class_init): * sys/xvimage/xvimagesink.c: (gst_xvimagesink_class_init): Use G_PARAM_STATIC_STRINGS everywhere for GParamSpecs that use static strings (i.e. all). This gives us less memory usage, fewer allocations and thus less memory defragmentation. Depend on core CVS for this. Fixes bug #523806.
2008-03-22 15:00:53 +00:00
G_MAXINT64, DEFAULT_LATENCY_TIME,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
/**
2011-11-11 10:52:47 +00:00
* GstAudioBaseSrc:actual-buffer-time:
*
* Actual configured size of audio buffer in microseconds.
*
* Since: 0.10.20
**/
g_object_class_install_property (gobject_class, PROP_ACTUAL_BUFFER_TIME,
g_param_spec_int64 ("actual-buffer-time", "Actual Buffer Time",
"Actual configured size of audio buffer in microseconds",
DEFAULT_ACTUAL_BUFFER_TIME, G_MAXINT64, DEFAULT_ACTUAL_BUFFER_TIME,
G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
/**
2011-11-11 10:52:47 +00:00
* GstAudioBaseSrc:actual-latency-time:
*
* Actual configured audio latency in microseconds.
*
* Since: 0.10.20
**/
g_object_class_install_property (gobject_class, PROP_ACTUAL_LATENCY_TIME,
g_param_spec_int64 ("actual-latency-time", "Actual Latency Time",
"Actual configured audio latency in microseconds",
DEFAULT_ACTUAL_LATENCY_TIME, G_MAXINT64, DEFAULT_ACTUAL_LATENCY_TIME,
G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PROVIDE_CLOCK,
g_param_spec_boolean ("provide-clock", "Provide Clock",
"Provide a clock to be used as the global pipeline clock",
Use G_PARAM_STATIC_STRINGS everywhere for GParamSpecs that use static strings (i.e. all). This gives us less memory u... Original commit message from CVS: * configure.ac: * ext/alsa/gstalsamixerelement.c: (gst_alsa_mixer_element_class_init): * ext/alsa/gstalsasink.c: (gst_alsasink_class_init): * ext/alsa/gstalsasrc.c: (gst_alsasrc_class_init): * ext/cdparanoia/gstcdparanoiasrc.c: (gst_cd_paranoia_src_class_init): * ext/gio/gstgiosink.c: (gst_gio_sink_class_init): * ext/gio/gstgiosrc.c: (gst_gio_src_class_init): * ext/gio/gstgiostreamsink.c: (gst_gio_stream_sink_class_init): * ext/gio/gstgiostreamsrc.c: (gst_gio_stream_src_class_init): * ext/gnomevfs/gstgnomevfssink.c: (gst_gnome_vfs_sink_class_init): * ext/gnomevfs/gstgnomevfssrc.c: (gst_gnome_vfs_src_class_init): * ext/ogg/gstoggmux.c: (gst_ogg_mux_class_init): * ext/pango/gsttextoverlay.c: (gst_text_overlay_class_init): * ext/pango/gsttextrender.c: (gst_text_render_class_init): * ext/theora/theoradec.c: (gst_theora_dec_class_init): * ext/theora/theoraenc.c: (gst_theora_enc_class_init): * ext/theora/theoraparse.c: (gst_theora_parse_class_init): * ext/vorbis/vorbisenc.c: (gst_vorbis_enc_class_init): * gst-libs/gst/audio/gstaudiofiltertemplate.c: (gst_audio_filter_template_class_init): * gst-libs/gst/audio/gstbaseaudiosink.c: (gst_base_audio_sink_class_init): * gst-libs/gst/audio/gstbaseaudiosrc.c: (gst_base_audio_src_class_init): * gst-libs/gst/cdda/gstcddabasesrc.c: (gst_cdda_base_src_class_init): * gst-libs/gst/interfaces/mixertrack.c: (gst_mixer_track_class_init): * gst-libs/gst/rtp/gstbasertpdepayload.c: (gst_base_rtp_depayload_class_init): * gst-libs/gst/rtp/gstbasertppayload.c: (gst_basertppayload_class_init): * gst/audioconvert/gstaudioconvert.c: (gst_audio_convert_class_init): * gst/audiorate/gstaudiorate.c: (gst_audio_rate_class_init): * gst/audioresample/gstaudioresample.c: (gst_audioresample_class_init): * gst/audiotestsrc/gstaudiotestsrc.c: (gst_audio_test_src_class_init): * gst/gdp/gstgdppay.c: 
(gst_gdp_pay_class_init): * gst/playback/gstdecodebin2.c: (gst_decode_bin_class_init): * gst/playback/gstplaybasebin.c: (gst_play_base_bin_class_init), (preroll_unlinked): * gst/playback/gstplaybin.c: (gst_play_bin_class_init): * gst/playback/gstplaybin2.c: (gst_play_bin_class_init): * gst/playback/gstplaysink.c: (gst_play_sink_class_init): * gst/playback/gstqueue2.c: (gst_queue_class_init): * gst/playback/gststreaminfo.c: (gst_stream_info_class_init): * gst/playback/gststreamselector.c: (gst_selector_pad_class_init), (gst_stream_selector_class_init): * gst/playback/gsturidecodebin.c: (gst_uri_decode_bin_class_init): * gst/subparse/gstsubparse.c: (gst_sub_parse_class_init): * gst/tcp/gstmultifdsink.c: (gst_multi_fd_sink_class_init): * gst/tcp/gsttcpclientsink.c: (gst_tcp_client_sink_class_init): * gst/tcp/gsttcpclientsrc.c: (gst_tcp_client_src_class_init): * gst/tcp/gsttcpserversink.c: (gst_tcp_server_sink_class_init): * gst/tcp/gsttcpserversrc.c: (gst_tcp_server_src_class_init): * gst/videorate/gstvideorate.c: (gst_video_rate_class_init): * gst/videoscale/gstvideoscale.c: (gst_video_scale_class_init): * gst/videotestsrc/gstvideotestsrc.c: (gst_video_test_src_class_init): * gst/volume/gstvolume.c: (gst_volume_class_init): * sys/v4l/gstv4lelement.c: (gst_v4lelement_class_init): * sys/v4l/gstv4lmjpegsink.c: (gst_v4lmjpegsink_class_init): * sys/v4l/gstv4lmjpegsrc.c: (gst_v4lmjpegsrc_class_init): * sys/v4l/gstv4lsrc.c: (gst_v4lsrc_class_init): * sys/ximage/ximagesink.c: (gst_ximagesink_class_init): * sys/xvimage/xvimagesink.c: (gst_xvimagesink_class_init): Use G_PARAM_STATIC_STRINGS everywhere for GParamSpecs that use static strings (i.e. all). This gives us less memory usage, fewer allocations and thus less memory defragmentation. Depend on core CVS for this. Fixes bug #523806.
2008-03-22 15:00:53 +00:00
DEFAULT_PROVIDE_CLOCK, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_SLAVE_METHOD,
g_param_spec_enum ("slave-method", "Slave Method",
"Algorithm to use to match the rate of the masterclock",
2011-11-11 10:52:47 +00:00
GST_TYPE_AUDIO_BASE_SRC_SLAVE_METHOD, DEFAULT_SLAVE_METHOD,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
gstelement_class->change_state =
2011-11-11 10:52:47 +00:00
GST_DEBUG_FUNCPTR (gst_audio_base_src_change_state);
gstelement_class->provide_clock =
2011-11-11 10:52:47 +00:00
GST_DEBUG_FUNCPTR (gst_audio_base_src_provide_clock);
2011-11-11 10:52:47 +00:00
gstbasesrc_class->set_caps = GST_DEBUG_FUNCPTR (gst_audio_base_src_setcaps);
gstbasesrc_class->event = GST_DEBUG_FUNCPTR (gst_audio_base_src_event);
gstbasesrc_class->query = GST_DEBUG_FUNCPTR (gst_audio_base_src_query);
gstbasesrc_class->get_times =
2011-11-11 10:52:47 +00:00
GST_DEBUG_FUNCPTR (gst_audio_base_src_get_times);
gstbasesrc_class->create = GST_DEBUG_FUNCPTR (gst_audio_base_src_create);
gstbasesrc_class->fixate = GST_DEBUG_FUNCPTR (gst_audio_base_src_fixate);
/* ref class from a thread-safe context to work around missing bit of
* thread-safety in GObject */
g_type_class_ref (GST_TYPE_AUDIO_CLOCK);
g_type_class_ref (GST_TYPE_AUDIO_RING_BUFFER);
}
static void
2011-11-11 11:00:52 +00:00
gst_audio_base_src_init (GstAudioBaseSrc * audiobasesrc)
{
2011-11-11 11:00:52 +00:00
audiobasesrc->priv = GST_AUDIO_BASE_SRC_GET_PRIVATE (audiobasesrc);
2011-11-11 11:00:52 +00:00
audiobasesrc->buffer_time = DEFAULT_BUFFER_TIME;
audiobasesrc->latency_time = DEFAULT_LATENCY_TIME;
if (DEFAULT_PROVIDE_CLOCK)
GST_OBJECT_FLAG_SET (audiobasesrc, GST_ELEMENT_FLAG_PROVIDE_CLOCK);
else
GST_OBJECT_FLAG_UNSET (audiobasesrc, GST_ELEMENT_FLAG_PROVIDE_CLOCK);
2011-11-11 11:00:52 +00:00
audiobasesrc->priv->slave_method = DEFAULT_SLAVE_METHOD;
/* reset blocksize we use latency time to calculate a more useful
* value based on negotiated format. */
2011-11-11 11:00:52 +00:00
GST_BASE_SRC (audiobasesrc)->blocksize = 0;
2011-11-11 11:00:52 +00:00
audiobasesrc->clock = gst_audio_clock_new ("GstAudioSrcClock",
(GstAudioClockGetTimeFunc) gst_audio_base_src_get_time, audiobasesrc,
2011-11-10 12:50:08 +00:00
NULL);
/* we are always a live source */
2011-11-11 11:00:52 +00:00
gst_base_src_set_live (GST_BASE_SRC (audiobasesrc), TRUE);
/* we operate in time */
2011-11-11 11:00:52 +00:00
gst_base_src_set_format (GST_BASE_SRC (audiobasesrc), GST_FORMAT_TIME);
}
static void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_dispose (GObject * object)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src;
2011-11-11 10:52:47 +00:00
src = GST_AUDIO_BASE_SRC (object);
GST_OBJECT_LOCK (src);
if (src->clock) {
gst_audio_clock_invalidate (src->clock);
gst_object_unref (src->clock);
src->clock = NULL;
}
if (src->ringbuffer) {
gst_object_unparent (GST_OBJECT_CAST (src->ringbuffer));
src->ringbuffer = NULL;
}
GST_OBJECT_UNLOCK (src);
G_OBJECT_CLASS (parent_class)->dispose (object);
}
static GstClock *
2011-11-11 10:52:47 +00:00
gst_audio_base_src_provide_clock (GstElement * elem)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src;
GstClock *clock;
2011-11-11 10:52:47 +00:00
src = GST_AUDIO_BASE_SRC (elem);
/* we have no ringbuffer (must be NULL state) */
if (src->ringbuffer == NULL)
goto wrong_state;
if (!gst_audio_ring_buffer_is_acquired (src->ringbuffer))
goto wrong_state;
GST_OBJECT_LOCK (src);
if (!GST_OBJECT_FLAG_IS_SET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK))
goto clock_disabled;
clock = GST_CLOCK_CAST (gst_object_ref (src->clock));
GST_OBJECT_UNLOCK (src);
return clock;
/* ERRORS */
wrong_state:
{
GST_DEBUG_OBJECT (src, "ringbuffer not acquired");
return NULL;
}
clock_disabled:
{
GST_DEBUG_OBJECT (src, "clock provide disabled");
GST_OBJECT_UNLOCK (src);
return NULL;
}
}
static GstClockTime
2011-11-11 10:52:47 +00:00
gst_audio_base_src_get_time (GstClock * clock, GstAudioBaseSrc * src)
{
guint64 raw, samples;
guint delay;
GstClockTime result;
if (G_UNLIKELY (src->ringbuffer == NULL
|| src->ringbuffer->spec.info.rate == 0))
return GST_CLOCK_TIME_NONE;
raw = samples = gst_audio_ring_buffer_samples_done (src->ringbuffer);
/* the number of samples not yet processed, this is still queued in the
* device (not yet read for capture). */
delay = gst_audio_ring_buffer_delay (src->ringbuffer);
samples += delay;
result = gst_util_uint64_scale_int (samples, GST_SECOND,
src->ringbuffer->spec.info.rate);
GST_DEBUG_OBJECT (src,
2009-10-09 12:07:24 +00:00
"processed samples: raw %" G_GUINT64_FORMAT ", delay %u, real %"
G_GUINT64_FORMAT ", time %" GST_TIME_FORMAT, raw, delay, samples,
GST_TIME_ARGS (result));
return result;
}
/**
2011-11-11 10:52:47 +00:00
* gst_audio_base_src_set_provide_clock:
* @src: a #GstAudioBaseSrc
* @provide: new state
*
* Controls whether @src will provide a clock or not. If @provide is %TRUE,
* gst_element_provide_clock() will return a clock that reflects the datarate
* of @src. If @provide is %FALSE, gst_element_provide_clock() will return NULL.
*
* Since: 0.10.16
*/
void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_set_provide_clock (GstAudioBaseSrc * src, gboolean provide)
{
2011-11-11 10:52:47 +00:00
g_return_if_fail (GST_IS_AUDIO_BASE_SRC (src));
GST_OBJECT_LOCK (src);
if (provide)
GST_OBJECT_FLAG_SET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK);
else
GST_OBJECT_FLAG_UNSET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK);
GST_OBJECT_UNLOCK (src);
}
/**
2011-11-11 10:52:47 +00:00
* gst_audio_base_src_get_provide_clock:
* @src: a #GstAudioBaseSrc
*
* Queries whether @src will provide a clock or not. See also
2011-11-11 10:52:47 +00:00
* gst_audio_base_src_set_provide_clock.
*
* Returns: %TRUE if @src will provide a clock.
*
* Since: 0.10.16
*/
gboolean
2011-11-11 10:52:47 +00:00
gst_audio_base_src_get_provide_clock (GstAudioBaseSrc * src)
{
gboolean result;
2011-11-11 10:52:47 +00:00
g_return_val_if_fail (GST_IS_AUDIO_BASE_SRC (src), FALSE);
GST_OBJECT_LOCK (src);
result = GST_OBJECT_FLAG_IS_SET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK);
GST_OBJECT_UNLOCK (src);
return result;
}
/**
2011-11-11 10:52:47 +00:00
* gst_audio_base_src_set_slave_method:
* @src: a #GstAudioBaseSrc
* @method: the new slave method
*
* Controls how clock slaving will be performed in @src.
*
* Since: 0.10.20
*/
void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_set_slave_method (GstAudioBaseSrc * src,
GstAudioBaseSrcSlaveMethod method)
{
2011-11-11 10:52:47 +00:00
g_return_if_fail (GST_IS_AUDIO_BASE_SRC (src));
GST_OBJECT_LOCK (src);
src->priv->slave_method = method;
GST_OBJECT_UNLOCK (src);
}
/**
2011-11-11 10:52:47 +00:00
* gst_audio_base_src_get_slave_method:
* @src: a #GstAudioBaseSrc
*
* Get the current slave method used by @src.
*
* Returns: The current slave method used by @src.
*
* Since: 0.10.20
*/
2011-11-11 10:52:47 +00:00
GstAudioBaseSrcSlaveMethod
gst_audio_base_src_get_slave_method (GstAudioBaseSrc * src)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrcSlaveMethod result;
2011-11-11 10:52:47 +00:00
g_return_val_if_fail (GST_IS_AUDIO_BASE_SRC (src), -1);
GST_OBJECT_LOCK (src);
result = src->priv->slave_method;
GST_OBJECT_UNLOCK (src);
return result;
}
static void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src;
2011-11-11 10:52:47 +00:00
src = GST_AUDIO_BASE_SRC (object);
switch (prop_id) {
case PROP_BUFFER_TIME:
src->buffer_time = g_value_get_int64 (value);
break;
case PROP_LATENCY_TIME:
src->latency_time = g_value_get_int64 (value);
break;
case PROP_PROVIDE_CLOCK:
2011-11-11 10:52:47 +00:00
gst_audio_base_src_set_provide_clock (src, g_value_get_boolean (value));
break;
case PROP_SLAVE_METHOD:
2011-11-11 10:52:47 +00:00
gst_audio_base_src_set_slave_method (src, g_value_get_enum (value));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src;
2011-11-11 10:52:47 +00:00
src = GST_AUDIO_BASE_SRC (object);
switch (prop_id) {
case PROP_BUFFER_TIME:
g_value_set_int64 (value, src->buffer_time);
break;
case PROP_LATENCY_TIME:
g_value_set_int64 (value, src->latency_time);
break;
case PROP_ACTUAL_BUFFER_TIME:
GST_OBJECT_LOCK (src);
if (src->ringbuffer && src->ringbuffer->acquired)
g_value_set_int64 (value, src->ringbuffer->spec.buffer_time);
else
g_value_set_int64 (value, DEFAULT_ACTUAL_BUFFER_TIME);
GST_OBJECT_UNLOCK (src);
break;
case PROP_ACTUAL_LATENCY_TIME:
GST_OBJECT_LOCK (src);
if (src->ringbuffer && src->ringbuffer->acquired)
g_value_set_int64 (value, src->ringbuffer->spec.latency_time);
else
g_value_set_int64 (value, DEFAULT_ACTUAL_LATENCY_TIME);
GST_OBJECT_UNLOCK (src);
break;
case PROP_PROVIDE_CLOCK:
2011-11-11 10:52:47 +00:00
g_value_set_boolean (value, gst_audio_base_src_get_provide_clock (src));
break;
case PROP_SLAVE_METHOD:
2011-11-11 10:52:47 +00:00
g_value_set_enum (value, gst_audio_base_src_get_slave_method (src));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static GstCaps *
2011-11-11 10:52:47 +00:00
gst_audio_base_src_fixate (GstBaseSrc * bsrc, GstCaps * caps)
{
GstStructure *s;
2012-03-11 18:04:41 +00:00
caps = gst_caps_make_writable (caps);
s = gst_caps_get_structure (caps, 0);
/* fields for all formats */
gst_structure_fixate_field_nearest_int (s, "rate", GST_AUDIO_DEF_RATE);
gst_structure_fixate_field_nearest_int (s, "channels",
GST_AUDIO_DEF_CHANNELS);
gst_structure_fixate_field_string (s, "format", GST_AUDIO_DEF_FORMAT);
2012-03-11 18:04:41 +00:00
caps = GST_BASE_SRC_CLASS (parent_class)->fixate (bsrc, caps);
return caps;
}
static gboolean
2011-11-11 10:52:47 +00:00
gst_audio_base_src_setcaps (GstBaseSrc * bsrc, GstCaps * caps)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc);
GstAudioRingBufferSpec *spec;
gint bpf, rate;
spec = &src->ringbuffer->spec;
spec->buffer_time = src->buffer_time;
spec->latency_time = src->latency_time;
GST_OBJECT_LOCK (src);
if (!gst_audio_ring_buffer_parse_caps (spec, caps)) {
GST_OBJECT_UNLOCK (src);
goto parse_error;
}
bpf = GST_AUDIO_INFO_BPF (&spec->info);
rate = GST_AUDIO_INFO_RATE (&spec->info);
/* calculate suggested segsize and segtotal */
spec->segsize = rate * bpf * spec->latency_time / GST_MSECOND;
spec->segtotal = spec->buffer_time / spec->latency_time;
GST_OBJECT_UNLOCK (src);
GST_DEBUG ("release old ringbuffer");
gst_audio_ring_buffer_release (src->ringbuffer);
gst_audio_ring_buffer_debug_spec_buff (spec);
GST_DEBUG ("acquire new ringbuffer");
if (!gst_audio_ring_buffer_acquire (src->ringbuffer, spec))
goto acquire_error;
/* calculate actual latency and buffer times */
spec->latency_time = spec->segsize * GST_MSECOND / (rate * bpf);
spec->buffer_time =
spec->segtotal * spec->segsize * GST_MSECOND / (rate * bpf);
gst_audio_ring_buffer_debug_spec_buff (spec);
g_object_notify (G_OBJECT (src), "actual-buffer-time");
g_object_notify (G_OBJECT (src), "actual-latency-time");
return TRUE;
/* ERRORS */
parse_error:
{
GST_DEBUG ("could not parse caps");
return FALSE;
}
acquire_error:
{
GST_DEBUG ("could not acquire ringbuffer");
return FALSE;
}
}
static void
2011-11-11 10:52:47 +00:00
gst_audio_base_src_get_times (GstBaseSrc * bsrc, GstBuffer * buffer,
GstClockTime * start, GstClockTime * end)
{
/* no need to sync to a clock here, we schedule the samples based
* on our own clock for the moment. */
*start = GST_CLOCK_TIME_NONE;
*end = GST_CLOCK_TIME_NONE;
}
static gboolean
2011-11-11 10:52:47 +00:00
gst_audio_base_src_query (GstBaseSrc * bsrc, GstQuery * query)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc);
gboolean res = FALSE;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_LATENCY:
{
GstClockTime min_latency, max_latency;
GstAudioRingBufferSpec *spec;
gint bpf, rate;
GST_OBJECT_LOCK (src);
if (G_UNLIKELY (src->ringbuffer == NULL
|| src->ringbuffer->spec.info.rate == 0)) {
GST_OBJECT_UNLOCK (src);
goto done;
}
spec = &src->ringbuffer->spec;
rate = GST_AUDIO_INFO_RATE (&spec->info);
bpf = GST_AUDIO_INFO_BPF (&spec->info);
/* we have at least 1 segment of latency */
min_latency =
gst_util_uint64_scale_int (spec->segsize, GST_SECOND, rate * bpf);
/* we cannot delay more than the buffersize else we lose data */
max_latency =
gst_util_uint64_scale_int (spec->segtotal * spec->segsize, GST_SECOND,
rate * bpf);
GST_OBJECT_UNLOCK (src);
GST_DEBUG_OBJECT (src,
"report latency min %" GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
/* we are always live, the min latency is 1 segment and the max latency is
* the complete buffer of segments. */
gst_query_set_latency (query, TRUE, min_latency, max_latency);
res = TRUE;
break;
}
case GST_QUERY_SCHEDULING:
{
2011-11-18 16:58:58 +00:00
/* We allow limited pull base operation. Basically pulling can be
* done on any number of bytes as long as the offset is -1 or
* sequentially increasing. */
gst_query_set_scheduling (query, GST_SCHEDULING_FLAG_SEQUENTIAL, 1, -1,
0);
gst_query_add_scheduling_mode (query, GST_PAD_MODE_PULL);
gst_query_add_scheduling_mode (query, GST_PAD_MODE_PUSH);
res = TRUE;
break;
}
default:
res = GST_BASE_SRC_CLASS (parent_class)->query (bsrc, query);
break;
}
done:
return res;
}
static gboolean
2011-11-11 10:52:47 +00:00
gst_audio_base_src_event (GstBaseSrc * bsrc, GstEvent * event)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc);
2011-11-10 15:24:12 +00:00
gboolean res, forward;
2011-11-10 15:24:12 +00:00
res = FALSE;
forward = TRUE;
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_FLUSH_START:
GST_DEBUG_OBJECT (bsrc, "flush-start");
gst_audio_ring_buffer_pause (src->ringbuffer);
gst_audio_ring_buffer_clear_all (src->ringbuffer);
break;
case GST_EVENT_FLUSH_STOP:
GST_DEBUG_OBJECT (bsrc, "flush-stop");
/* always resync on sample after a flush */
src->next_sample = -1;
gst_audio_ring_buffer_clear_all (src->ringbuffer);
break;
case GST_EVENT_SEEK:
GST_DEBUG_OBJECT (bsrc, "refuse to seek");
2011-11-10 15:24:12 +00:00
forward = FALSE;
break;
default:
2011-11-10 15:24:12 +00:00
GST_DEBUG_OBJECT (bsrc, "forward event %p", event);
break;
}
2011-11-10 15:24:12 +00:00
if (forward)
res = GST_BASE_SRC_CLASS (parent_class)->event (bsrc, event);
return res;
}
/* get the next offset in the ringbuffer for reading samples.
* If the next sample is too far away, this function will position itself to the
* next most recent sample, creating discontinuity */
static guint64
2011-11-11 10:52:47 +00:00
gst_audio_base_src_get_offset (GstAudioBaseSrc * src)
{
guint64 sample;
gint readseg, segdone, segtotal, sps;
gint diff;
/* assume we can append to the previous sample */
sample = src->next_sample;
sps = src->ringbuffer->samples_per_seg;
segtotal = src->ringbuffer->spec.segtotal;
/* get the currently processed segment */
segdone = g_atomic_int_get (&src->ringbuffer->segdone)
- src->ringbuffer->segbase;
if (sample != -1) {
GST_DEBUG_OBJECT (src, "at segment %d and sample %" G_GUINT64_FORMAT,
segdone, sample);
/* figure out the segment and the offset inside the segment where
* the sample should be read from. */
readseg = sample / sps;
/* see how far away it is from the read segment, normally segdone (where new
* data is written in the ringbuffer) is bigger than readseg (where we are
* reading). */
diff = segdone - readseg;
if (diff >= segtotal) {
GST_DEBUG_OBJECT (src, "dropped, align to segment %d", segdone);
/* sample would be dropped, position to next playable position */
sample = ((guint64) (segdone)) * sps;
}
} else {
/* no previous sample, go to the current position */
GST_DEBUG_OBJECT (src, "first sample, align to current %d", segdone);
sample = ((guint64) (segdone)) * sps;
readseg = segdone;
}
GST_DEBUG_OBJECT (src,
"reading from %d, we are at %d, sample %" G_GUINT64_FORMAT, readseg,
segdone, sample);
return sample;
}
static GstFlowReturn
2011-11-11 10:52:47 +00:00
gst_audio_base_src_create (GstBaseSrc * bsrc, guint64 offset, guint length,
GstBuffer ** outbuf)
{
GstFlowReturn ret;
2011-11-11 10:52:47 +00:00
GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc);
GstBuffer *buf;
2012-01-20 15:11:54 +00:00
GstMapInfo info;
guint8 *ptr;
guint samples, total_samples;
guint64 sample;
gint bpf, rate;
GstAudioRingBuffer *ringbuffer;
GstAudioRingBufferSpec *spec;
guint read;
GstClockTime timestamp, duration;
GstClock *clock;
ringbuffer = src->ringbuffer;
spec = &ringbuffer->spec;
if (G_UNLIKELY (!gst_audio_ring_buffer_is_acquired (ringbuffer)))
goto wrong_state;
bpf = GST_AUDIO_INFO_BPF (&spec->info);
rate = GST_AUDIO_INFO_RATE (&spec->info);
if ((length == 0 && bsrc->blocksize == 0) || length == -1)
/* no length given, use the default segment size */
length = spec->segsize;
else
/* make sure we round down to an integral number of samples */
length -= length % bpf;
/* figure out the offset in the ringbuffer */
if (G_UNLIKELY (offset != -1)) {
sample = offset / bpf;
/* if a specific offset was given it must be the next sequential
* offset we expect or we fail for now. */
if (src->next_sample != -1 && sample != src->next_sample)
goto wrong_offset;
} else {
/* calculate the sequentially next sample we need to read. This can jump and
* create a DISCONT. */
2011-11-11 10:52:47 +00:00
sample = gst_audio_base_src_get_offset (src);
}
GST_DEBUG_OBJECT (src, "reading from sample %" G_GUINT64_FORMAT " length %u",
sample, length);
/* get the number of samples to read */
total_samples = samples = length / bpf;
/* use the basesrc allocation code to use bufferpools or custom allocators */
ret = GST_BASE_SRC_CLASS (parent_class)->alloc (bsrc, offset, length, &buf);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto alloc_failed;
2012-01-20 15:11:54 +00:00
gst_buffer_map (buf, &info, GST_MAP_WRITE);
ptr = info.data;
do {
read = gst_audio_ring_buffer_read (ringbuffer, sample, ptr, samples);
GST_DEBUG_OBJECT (src, "read %u of %u", read, samples);
/* if we read all, we're done */
if (read == samples)
break;
/* else something interrupted us and we wait for playing again. */
GST_DEBUG_OBJECT (src, "wait playing");
if (gst_base_src_wait_playing (bsrc) != GST_FLOW_OK)
goto stopped;
GST_DEBUG_OBJECT (src, "continue playing");
/* read next samples */
sample += read;
samples -= read;
ptr += read * bpf;
} while (TRUE);
2012-01-20 15:11:54 +00:00
gst_buffer_unmap (buf, &info);
/* mark discontinuity if needed */
if (G_UNLIKELY (sample != src->next_sample) && src->next_sample != -1) {
GST_WARNING_OBJECT (src,
"create DISCONT of %" G_GUINT64_FORMAT " samples at sample %"
G_GUINT64_FORMAT, sample - src->next_sample, sample);
GST_ELEMENT_WARNING (src, CORE, CLOCK,
(_("Can't record audio fast enough")),
("Dropped %" G_GUINT64_FORMAT " samples. This is most likely because "
"downstream can't keep up and is consuming samples too slowly.",
sample - src->next_sample));
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
}
src->next_sample = sample + samples;
/* get the normal timestamp to get the duration. */
timestamp = gst_util_uint64_scale_int (sample, GST_SECOND, rate);
duration = gst_util_uint64_scale_int (src->next_sample, GST_SECOND,
rate) - timestamp;
GST_OBJECT_LOCK (src);
if (!(clock = GST_ELEMENT_CLOCK (src)))
goto no_sync;
if (clock != src->clock) {
/* we are slaved, check how to handle this */
switch (src->priv->slave_method) {
2011-11-11 10:52:47 +00:00
case GST_AUDIO_BASE_SRC_SLAVE_RESAMPLE:
/* not implemented, use skew algorithm. This algorithm should
* work on the readout pointer and produces more or less samples based
* on the clock drift */
2011-11-11 10:52:47 +00:00
case GST_AUDIO_BASE_SRC_SLAVE_SKEW:
{
GstClockTime running_time;
GstClockTime base_time;
GstClockTime current_time;
guint64 running_time_sample;
gint running_time_segment;
gint last_read_segment;
gint segment_skew;
gint sps;
gint segments_written;
gint last_written_segment;
/* get the amount of segments written from the device by now */
segments_written = g_atomic_int_get (&ringbuffer->segdone);
/* subtract the base to segments_written to get the number of the
last written segment in the ringbuffer (one segment written = segment 0) */
last_written_segment = segments_written - ringbuffer->segbase - 1;
/* samples per segment */
sps = ringbuffer->samples_per_seg;
/* get the current time */
current_time = gst_clock_get_time (clock);
/* get the basetime */
base_time = GST_ELEMENT_CAST (src)->base_time;
/* get the running_time */
running_time = current_time - base_time;
/* the running_time converted to a sample (relative to the ringbuffer) */
running_time_sample =
gst_util_uint64_scale_int (running_time, rate, GST_SECOND);
/* the segmentnr corresponding to running_time, round down */
running_time_segment = running_time_sample / sps;
/* the segment currently read from the ringbuffer */
last_read_segment = sample / sps;
/* the skew we have between running_time and the ringbuffertime (last written to) */
segment_skew = running_time_segment - last_written_segment;
GST_DEBUG_OBJECT (bsrc,
"\n running_time = %"
GST_TIME_FORMAT
"\n timestamp = %"
GST_TIME_FORMAT
"\n running_time_segment = %d"
"\n last_written_segment = %d"
"\n segment_skew (running time segment - last_written_segment) = %d"
"\n last_read_segment = %d",
GST_TIME_ARGS (running_time), GST_TIME_ARGS (timestamp),
running_time_segment, last_written_segment, segment_skew,
last_read_segment);
/* Resync the ringbuffer if:
*
* 1. We are more than the length of the ringbuffer behind.
* The length of the ringbuffer then gets to dictate
* the threshold for what is considered "too late"
*
* 2. If this is our first buffer.
* We know that we should catch up to running_time
* the first time we are ran.
*/
if ((segment_skew >= ringbuffer->spec.segtotal) ||
(last_read_segment == 0)) {
gint new_read_segment;
gint segment_diff;
guint64 new_sample;
/* the difference between running_time and the last written segment */
segment_diff = running_time_segment - last_written_segment;
/* advance the ringbuffer */
gst_audio_ring_buffer_advance (ringbuffer, segment_diff);
/* we move the new read segment to the last known written segment */
new_read_segment =
g_atomic_int_get (&ringbuffer->segdone) - ringbuffer->segbase;
/* we calculate the new sample value */
new_sample = ((guint64) new_read_segment) * sps;
/* and get the relative time to this -> our new timestamp */
timestamp = gst_util_uint64_scale_int (new_sample, GST_SECOND, rate);
/* we update the next sample accordingly */
src->next_sample = new_sample + samples;
GST_DEBUG_OBJECT (bsrc,
"Timeshifted the ringbuffer with %d segments: "
"Updating the timestamp to %" GST_TIME_FORMAT ", "
"and src->next_sample to %" G_GUINT64_FORMAT, segment_diff,
GST_TIME_ARGS (timestamp), src->next_sample);
}
break;
}
2011-11-11 10:52:47 +00:00
case GST_AUDIO_BASE_SRC_SLAVE_RETIMESTAMP:
{
GstClockTime base_time, latency;
/* We are slaved to another clock, take running time of the pipeline clock and
* timestamp against it. Somebody else in the pipeline should figure out the
* clock drift. We keep the duration we calculated above. */
timestamp = gst_clock_get_time (clock);
base_time = GST_ELEMENT_CAST (src)->base_time;
if (GST_CLOCK_DIFF (timestamp, base_time) < 0)
timestamp -= base_time;
else
timestamp = 0;
/* subtract latency */
latency = gst_util_uint64_scale_int (total_samples, GST_SECOND, rate);
if (timestamp > latency)
timestamp -= latency;
else
timestamp = 0;
}
2011-11-11 10:52:47 +00:00
case GST_AUDIO_BASE_SRC_SLAVE_NONE:
break;
}
} else {
GstClockTime base_time;
/* to get the timestamp against the clock we also need to add our offset */
timestamp = gst_audio_clock_adjust (clock, timestamp);
/* we are not slaved, subtract base_time */
base_time = GST_ELEMENT_CAST (src)->base_time;
if (GST_CLOCK_DIFF (timestamp, base_time) < 0) {
timestamp -= base_time;
GST_LOG_OBJECT (src,
"buffer timestamp %" GST_TIME_FORMAT " (base_time %" GST_TIME_FORMAT
")", GST_TIME_ARGS (timestamp), GST_TIME_ARGS (base_time));
} else {
GST_LOG_OBJECT (src,
"buffer timestamp 0, ts %" GST_TIME_FORMAT " <= base_time %"
GST_TIME_FORMAT, GST_TIME_ARGS (timestamp),
GST_TIME_ARGS (base_time));
timestamp = 0;
}
}
no_sync:
GST_OBJECT_UNLOCK (src);
GST_BUFFER_TIMESTAMP (buf) = timestamp;
GST_BUFFER_DURATION (buf) = duration;
GST_BUFFER_OFFSET (buf) = sample;
GST_BUFFER_OFFSET_END (buf) = sample + samples;
*outbuf = buf;
return GST_FLOW_OK;
/* ERRORS */
wrong_state:
{
GST_DEBUG_OBJECT (src, "ringbuffer in wrong state");
return GST_FLOW_FLUSHING;
}
wrong_offset:
{
GST_ELEMENT_ERROR (src, RESOURCE, SEEK,
(NULL), ("resource can only be operated on sequentially but offset %"
G_GUINT64_FORMAT " was given", offset));
return GST_FLOW_ERROR;
}
alloc_failed:
{
GST_DEBUG_OBJECT (src, "alloc failed: %s", gst_flow_get_name (ret));
return ret;
}
stopped:
{
gst_buffer_unref (buf);
GST_DEBUG_OBJECT (src, "ringbuffer stopped");
return GST_FLOW_FLUSHING;
}
}
/**
2011-11-11 10:52:47 +00:00
* gst_audio_base_src_create_ringbuffer:
* @src: a #GstAudioBaseSrc.
*
* Create and return the #GstAudioRingBuffer for @src. This function will call the
* ::create_ringbuffer vmethod and will set @src as the parent of the returned
* buffer (see gst_object_set_parent()).
*
* Returns: The new ringbuffer of @src.
*/
GstAudioRingBuffer *
2011-11-11 10:52:47 +00:00
gst_audio_base_src_create_ringbuffer (GstAudioBaseSrc * src)
{
2011-11-11 10:52:47 +00:00
GstAudioBaseSrcClass *bclass;
GstAudioRingBuffer *buffer = NULL;
2011-11-11 10:52:47 +00:00
bclass = GST_AUDIO_BASE_SRC_GET_CLASS (src);
if (bclass->create_ringbuffer)
buffer = bclass->create_ringbuffer (src);
if (G_LIKELY (buffer))
gst_object_set_parent (GST_OBJECT_CAST (buffer), GST_OBJECT_CAST (src));
return buffer;
}
/* Element state-change handler.
 *
 * Going up: NULL->READY creates the ringbuffer (resetting the provided audio
 * clock) and opens the device; READY->PAUSED un-flushes the ringbuffer and
 * advertises our clock; PAUSED->PLAYING allows capture to start.
 * Going down (after chaining up to the parent): PAUSED->READY releases the
 * ringbuffer, READY->NULL closes the device and destroys the ringbuffer. */
static GstStateChangeReturn
gst_audio_base_src_change_state (GstElement * element,
    GstStateChange transition)
{
  GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS;
  GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (element);

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      GST_DEBUG_OBJECT (src, "NULL->READY");
      GST_OBJECT_LOCK (src);
      if (src->ringbuffer == NULL) {
        /* restart the provided audio clock from 0 before creating the
         * ringbuffer that will drive it */
        gst_audio_clock_reset (GST_AUDIO_CLOCK (src->clock), 0);
        src->ringbuffer = gst_audio_base_src_create_ringbuffer (src);
      }
      GST_OBJECT_UNLOCK (src);
      /* open the capture device; on failure the subclass is expected to have
       * posted a meaningful error message (see open_failed below) */
      if (!gst_audio_ring_buffer_open_device (src->ringbuffer))
        goto open_failed;
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      GST_DEBUG_OBJECT (src, "READY->PAUSED");
      /* forget the previous read position so the next create() resyncs */
      src->next_sample = -1;
      gst_audio_ring_buffer_set_flushing (src->ringbuffer, FALSE);
      /* not allowed to start capturing until PLAYING */
      gst_audio_ring_buffer_may_start (src->ringbuffer, FALSE);
      /* Only post clock-provide messages if this is the clock that
       * we've created. If the subclass has overridden it the subclass
       * should post this message whenever necessary */
      if (src->clock && GST_IS_AUDIO_CLOCK (src->clock) &&
          GST_AUDIO_CLOCK_CAST (src->clock)->func ==
          (GstAudioClockGetTimeFunc) gst_audio_base_src_get_time)
        gst_element_post_message (element,
            gst_message_new_clock_provide (GST_OBJECT_CAST (element),
                src->clock, TRUE));
      break;
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
      GST_DEBUG_OBJECT (src, "PAUSED->PLAYING");
      /* capture may now start as soon as the ringbuffer is started */
      gst_audio_ring_buffer_may_start (src->ringbuffer, TRUE);
      break;
    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
      GST_DEBUG_OBJECT (src, "PLAYING->PAUSED");
      gst_audio_ring_buffer_may_start (src->ringbuffer, FALSE);
      gst_audio_ring_buffer_pause (src->ringbuffer);
      break;
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      GST_DEBUG_OBJECT (src, "PAUSED->READY");
      /* Only post clock-lost messages if this is the clock that
       * we've created. If the subclass has overridden it the subclass
       * should post this message whenever necessary */
      if (src->clock && GST_IS_AUDIO_CLOCK (src->clock) &&
          GST_AUDIO_CLOCK_CAST (src->clock)->func ==
          (GstAudioClockGetTimeFunc) gst_audio_base_src_get_time)
        gst_element_post_message (element,
            gst_message_new_clock_lost (GST_OBJECT_CAST (element), src->clock));
      /* flush before chaining up so blocked reads are released */
      gst_audio_ring_buffer_set_flushing (src->ringbuffer, TRUE);
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  /* downward cleanup happens after the parent class has processed the
   * transition */
  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      GST_DEBUG_OBJECT (src, "PAUSED->READY");
      gst_audio_ring_buffer_release (src->ringbuffer);
      break;
    case GST_STATE_CHANGE_READY_TO_NULL:
      GST_DEBUG_OBJECT (src, "READY->NULL");
      gst_audio_ring_buffer_close_device (src->ringbuffer);
      GST_OBJECT_LOCK (src);
      /* unparenting drops our reference; a new ringbuffer is created on the
       * next NULL->READY transition */
      gst_object_unparent (GST_OBJECT_CAST (src->ringbuffer));
      src->ringbuffer = NULL;
      GST_OBJECT_UNLOCK (src);
      break;
    default:
      break;
  }

  return ret;

  /* ERRORS */
open_failed:
  {
    /* subclass must post a meaningful error message */
    GST_DEBUG_OBJECT (src, "open failed");
    return GST_STATE_CHANGE_FAILURE;
  }
}