Merge branch 'master' into 0.11

Conflicts:
	ext/ogg/gstoggmux.c
	gst-libs/gst/audio/audio.c
	gst-libs/gst/audio/audio.h
	gst-libs/gst/audio/multichannel.h
	gst-libs/gst/pbutils/Makefile.am
	gst-libs/gst/pbutils/gstdiscoverer.c
	gst/playback/gstplaysinkaudioconvert.c
	gst/playback/gstplaysinkvideoconvert.c
	win32/common/libgstaudio.def
Wim Taymans 2011-08-29 11:37:36 +02:00
commit e1287b97ab
27 changed files with 5721 additions and 53 deletions

View file

@ -2254,13 +2254,16 @@ gst_discoverer_stream_info_get_stream_type_nick
gst_discoverer_info_get_audio_streams
gst_discoverer_info_get_container_streams
gst_discoverer_info_get_streams
gst_discoverer_info_get_subtitle_streams
gst_discoverer_info_get_video_streams
gst_discoverer_audio_info_get_bitrate
gst_discoverer_audio_info_get_channels
gst_discoverer_audio_info_get_depth
gst_discoverer_audio_info_get_language
gst_discoverer_audio_info_get_max_bitrate
gst_discoverer_audio_info_get_sample_rate
gst_discoverer_container_info_get_streams
gst_discoverer_subtitle_info_get_language
gst_discoverer_video_info_get_bitrate
gst_discoverer_video_info_get_depth
gst_discoverer_video_info_get_framerate_denom
@ -2308,6 +2311,7 @@ gst_discoverer_info_get_type
gst_discoverer_info_copy
gst_discoverer_result_get_type
gst_discoverer_stream_info_get_type
gst_discoverer_subtitle_info_get_type
gst_discoverer_video_info_get_type
</SECTION>

View file

@ -60,7 +60,7 @@ GST_DEBUG_CATEGORY (gst_ogg_demux_setup_debug);
static ogg_packet *
_ogg_packet_copy (const ogg_packet * packet)
{
ogg_packet *ret = g_new0 (ogg_packet, 1);
ogg_packet *ret = g_slice_new (ogg_packet);
*ret = *packet;
ret->packet = g_memdup (packet->packet, packet->bytes);
@ -72,13 +72,13 @@ static void
_ogg_packet_free (ogg_packet * packet)
{
g_free (packet->packet);
g_free (packet);
g_slice_free (ogg_packet, packet);
}
static ogg_page *
gst_ogg_page_copy (ogg_page * page)
{
ogg_page *p = g_new0 (ogg_page, 1);
ogg_page *p = g_slice_new (ogg_page);
/* make a copy of the page */
p->header = g_memdup (page->header, page->header_len);
@ -94,7 +94,7 @@ gst_ogg_page_free (ogg_page * page)
{
g_free (page->header);
g_free (page->body);
g_free (page);
g_slice_free (ogg_page, page);
}
static gboolean gst_ogg_demux_collect_chain_info (GstOggDemux * ogg,
@ -1097,7 +1097,7 @@ choked:
static GstOggChain *
gst_ogg_chain_new (GstOggDemux * ogg)
{
GstOggChain *chain = g_new0 (GstOggChain, 1);
GstOggChain *chain = g_slice_new0 (GstOggChain);
GST_DEBUG_OBJECT (ogg, "creating new chain %p", chain);
chain->ogg = ogg;
@ -1124,7 +1124,7 @@ gst_ogg_chain_free (GstOggChain * chain)
gst_object_unref (pad);
}
g_array_free (chain->streams, TRUE);
g_free (chain);
g_slice_free (GstOggChain, chain);
}
static void

View file

@ -313,6 +313,18 @@ gst_ogg_mux_sink_event (GstPad * pad, GstEvent * event)
gst_segment_init (&ogg_pad->segment, GST_FORMAT_TIME);
break;
}
case GST_EVENT_TAG:{
GstTagList *tags;
gst_event_parse_tag (event, &tags);
tags = gst_tag_list_merge (ogg_pad->tags, tags, GST_TAG_MERGE_APPEND);
if (ogg_pad->tags)
gst_tag_list_free (ogg_pad->tags);
ogg_pad->tags = tags;
GST_DEBUG_OBJECT (ogg_mux, "Got tags %" GST_PTR_FORMAT, ogg_pad->tags);
break;
}
default:
break;
}
@ -1143,6 +1155,66 @@ gst_ogg_mux_byte_writer_put_string_utf8 (GstByteWriter * bw, const char *s)
gst_byte_writer_put_data (bw, (const guint8 *) s, strlen (s));
}
static void
gst_ogg_mux_add_fisbone_message_header (GstOggMux * mux, GstByteWriter * bw,
const char *tag, const char *value)
{
/* It is valid to pass NULL as the value to omit the tag */
if (!value)
return;
GST_DEBUG_OBJECT (mux, "Adding fisbone message header %s: %s", tag, value);
gst_ogg_mux_byte_writer_put_string_utf8 (bw, tag);
gst_ogg_mux_byte_writer_put_string_utf8 (bw, ": ");
gst_ogg_mux_byte_writer_put_string_utf8 (bw, value);
gst_ogg_mux_byte_writer_put_string_utf8 (bw, "\r\n");
}
static void
gst_ogg_mux_add_fisbone_message_header_from_tags (GstOggMux * mux,
GstByteWriter * bw, const char *header, const char *tag,
const GstTagList * tags)
{
GString *s;
guint size = gst_tag_list_get_tag_size (tags, tag), n;
GST_DEBUG_OBJECT (mux, "Found %u tags for name %s", size, tag);
if (size == 0)
return;
s = g_string_new ("");
for (n = 0; n < size; ++n) {
gchar *tmp;
if (n)
g_string_append (s, ", ");
gst_tag_list_get_string_index (tags, tag, n, &tmp);
g_string_append (s, tmp);
g_free (tmp);
}
gst_ogg_mux_add_fisbone_message_header (mux, bw, header, s->str);
g_string_free (s, TRUE);
}
/* This is a basic placeholder to generate roles for the tracks.
For streams with more than one video track, every video track will get
tagged with a "video/main" role, but we have no way of knowing
which one is the main one, if any. We could just pick one. For
audio, it's more complicated as we don't know which is music,
which is dubbing, etc. For kate, we could take a pretty good
guess based on the category, as role essentially is category.
For now, leave this as is. */
static const char *
gst_ogg_mux_get_default_role (GstOggPadData * pad)
{
const char *type = gst_ogg_stream_get_media_type (&pad->map);
if (type) {
if (!strncmp (type, "video/", strlen ("video/")))
return "video/main";
if (!strncmp (type, "audio/", strlen ("audio/")))
return "audio/main";
if (!strcmp (type + strlen (type) - strlen ("kate"), "kate"))
return "text/caption";
}
return NULL;
}
static void
gst_ogg_mux_make_fisbone (GstOggMux * mux, ogg_stream_state * os,
GstOggPadData * pad)
@ -1165,10 +1237,14 @@ gst_ogg_mux_make_fisbone (GstOggMux * mux, ogg_stream_state * os,
gst_byte_writer_put_uint8 (&bw, pad->map.granuleshift);
gst_byte_writer_fill (&bw, 0, 3); /* padding */
/* message header fields - MIME type for now */
gst_ogg_mux_byte_writer_put_string_utf8 (&bw, "Content-Type: ");
gst_ogg_mux_byte_writer_put_string_utf8 (&bw,
gst_ogg_mux_add_fisbone_message_header (mux, &bw, "Content-Type",
gst_ogg_stream_get_media_type (&pad->map));
gst_ogg_mux_byte_writer_put_string_utf8 (&bw, "\r\n");
gst_ogg_mux_add_fisbone_message_header (mux, &bw, "Role",
gst_ogg_mux_get_default_role (pad));
gst_ogg_mux_add_fisbone_message_header_from_tags (mux, &bw, "Language",
GST_TAG_LANGUAGE_CODE, pad->tags);
gst_ogg_mux_add_fisbone_message_header_from_tags (mux, &bw, "Title",
GST_TAG_TITLE, pad->tags);
gst_ogg_mux_submit_skeleton_header_packet (mux, os,
gst_byte_writer_reset_and_get_buffer (&bw), 0, 0);
@ -1920,6 +1996,11 @@ gst_ogg_mux_clear_collectpads (GstCollectPads * collect)
oggpad->buffer = NULL;
}
if (oggpad->tags) {
gst_tag_list_free (oggpad->tags);
oggpad->tags = NULL;
}
gst_segment_init (&oggpad->segment, GST_FORMAT_TIME);
}
}

View file

@ -83,6 +83,8 @@ typedef struct
gint64 keyframe_granule; /* granule of last preceding keyframe */
GstPadEventFunction collect_event;
GstTagList *tags;
}
GstOggPadData;

View file

@ -117,7 +117,7 @@ free_stream (GstOggStream * stream)
g_list_foreach (stream->unknown_pages, (GFunc) gst_mini_object_unref, NULL);
g_list_foreach (stream->stored_buffers, (GFunc) gst_mini_object_unref, NULL);
g_free (stream);
g_slice_free (GstOggStream, stream);
}
static void
@ -140,7 +140,7 @@ gst_ogg_parse_new_stream (GstOggParse * parser, ogg_page * page)
GST_DEBUG_OBJECT (parser, "creating new stream %08x", serialno);
stream = g_new0 (GstOggStream, 1);
stream = g_slice_new0 (GstOggStream);
stream->serialno = serialno;
stream->in_headers = 1;

View file

@ -607,7 +607,7 @@ theora_enc_sink_getcaps (GstPad * pad, GstCaps * filter)
peer = gst_pad_get_peer (encoder->srcpad);
if (peer) {
const GstCaps *templ_caps;
GstCaps *peer_caps;
GstCaps *peer_caps, *tmp_caps;
GstStructure *s;
guint i, n;
@ -625,8 +625,9 @@ theora_enc_sink_getcaps (GstPad * pad, GstCaps * filter)
templ_caps = gst_pad_get_pad_template_caps (pad);
caps = gst_caps_intersect (peer_caps, templ_caps);
caps = gst_caps_intersect (caps, theora_enc_src_caps);
tmp_caps = gst_caps_intersect (peer_caps, templ_caps);
caps = gst_caps_intersect (tmp_caps, theora_enc_src_caps);
gst_caps_unref (tmp_caps);
gst_caps_unref (peer_caps);
gst_object_unref (peer);
peer = NULL;

View file

@ -5,23 +5,23 @@
SUBDIRS = \
interfaces \
tag \
audio \
cdda \
fft \
floatcast \
netbuffer \
riff \
rtp \
sdp \
rtsp \
video \
pbutils \
audio \
riff \
app
noinst_HEADERS = gettext.h gst-i18n-plugin.h
# dependencies:
audio: interfaces
audio: interfaces pbutils
cdda: tag
@ -29,10 +29,8 @@ riff: tag audio
rtsp: sdp
pbutils: video
INDEPENDENT_SUBDIRS = \
interfaces tag fft floatcast netbuffer rtp sdp video app
interfaces tag fft floatcast netbuffer pbutils rtp sdp video app
.PHONY: independent-subdirs $(INDEPENDENT_SUBDIRS)

View file

@ -22,6 +22,8 @@ libgstaudio_@GST_MAJORMINOR@_la_SOURCES = \
gstaudioclock.c \
mixerutils.c \
multichannel.c \
gstbaseaudiodecoder.c \
gstbaseaudioencoder.c \
gstbaseaudiosink.c \
gstbaseaudiosrc.c \
gstaudiofilter.c \
@ -36,6 +38,8 @@ libgstaudio_@GST_MAJORMINOR@include_HEADERS = \
gstringbuffer.h \
gstaudioclock.h \
gstaudiofilter.h \
gstbaseaudiodecoder.h \
gstbaseaudioencoder.h \
gstbaseaudiosink.h \
gstbaseaudiosrc.h \
gstaudiosink.h \
@ -49,6 +53,7 @@ nodist_libgstaudio_@GST_MAJORMINOR@include_HEADERS = \
libgstaudio_@GST_MAJORMINOR@_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS) $(GST_CFLAGS)
libgstaudio_@GST_MAJORMINOR@_la_LIBADD = $(GST_BASE_LIBS) $(GST_LIBS) \
$(top_builddir)/gst-libs/gst/pbutils/libgstpbutils-@GST_MAJORMINOR@.la \
$(top_builddir)/gst-libs/gst/interfaces/libgstinterfaces-@GST_MAJORMINOR@.la
libgstaudio_@GST_MAJORMINOR@_la_LDFLAGS = $(GST_LIB_LDFLAGS) $(GST_ALL_LDFLAGS) $(GST_LT_LDFLAGS)

View file

@ -504,6 +504,437 @@ done:
}
#define SINT (GST_AUDIO_FORMAT_FLAG_INTEGER | GST_AUDIO_FORMAT_FLAG_SIGNED)
#define UINT (GST_AUDIO_FORMAT_FLAG_INTEGER)
#define MAKE_FORMAT(str,flags,end,width,depth,silent) \
{ GST_AUDIO_FORMAT_ ##str, G_STRINGIFY(str), flags, end, width, depth, silent }
#define SILENT_0 { 0, 0, 0, 0, 0, 0, 0, 0 }
#define SILENT_U8 { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }
#define SILENT_U16_LE { 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80 }
#define SILENT_U16_BE { 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 }
#define SILENT_U24_LE { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00 }
#define SILENT_U24_BE { 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00 }
#define SILENT_U32_LE { 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 }
#define SILENT_U32_BE { 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00 }
#define SILENT_U24_3LE { 0x00, 0x00, 0x80, 0x00, 0x00, 0x80 }
#define SILENT_U24_3BE { 0x80, 0x00, 0x00, 0x80, 0x00, 0x00 }
#define SILENT_U20_3LE { 0x00, 0x00, 0x08, 0x00, 0x00, 0x08 }
#define SILENT_U20_3BE { 0x08, 0x00, 0x00, 0x08, 0x00, 0x00 }
#define SILENT_U18_3LE { 0x00, 0x00, 0x02, 0x00, 0x00, 0x02 }
#define SILENT_U18_3BE { 0x02, 0x00, 0x00, 0x02, 0x00, 0x00 }
static GstAudioFormatInfo formats[] = {
{GST_AUDIO_FORMAT_UNKNOWN, "UNKNOWN", 0, 0, 0, 0},
/* 8 bit */
MAKE_FORMAT (S8, SINT, 0, 8, 8, SILENT_0),
MAKE_FORMAT (U8, UINT, 0, 8, 8, SILENT_U8),
/* 16 bit */
MAKE_FORMAT (S16_LE, SINT, G_LITTLE_ENDIAN, 16, 16, SILENT_0),
MAKE_FORMAT (S16_BE, SINT, G_BIG_ENDIAN, 16, 16, SILENT_0),
MAKE_FORMAT (U16_LE, UINT, G_LITTLE_ENDIAN, 16, 16, SILENT_U16_LE),
MAKE_FORMAT (U16_BE, UINT, G_BIG_ENDIAN, 16, 16, SILENT_U16_BE),
/* 24 bit in low 3 bytes of 32 bits */
MAKE_FORMAT (S24_LE, SINT, G_LITTLE_ENDIAN, 32, 24, SILENT_0),
MAKE_FORMAT (S24_BE, SINT, G_BIG_ENDIAN, 32, 24, SILENT_0),
MAKE_FORMAT (U24_LE, UINT, G_LITTLE_ENDIAN, 32, 24, SILENT_U24_LE),
MAKE_FORMAT (U24_BE, UINT, G_BIG_ENDIAN, 32, 24, SILENT_U24_BE),
/* 32 bit */
MAKE_FORMAT (S32_LE, SINT, G_LITTLE_ENDIAN, 32, 32, SILENT_0),
MAKE_FORMAT (S32_BE, SINT, G_BIG_ENDIAN, 32, 32, SILENT_0),
MAKE_FORMAT (U32_LE, UINT, G_LITTLE_ENDIAN, 32, 32, SILENT_U32_LE),
MAKE_FORMAT (U32_BE, UINT, G_BIG_ENDIAN, 32, 32, SILENT_U32_BE),
/* 24 bit in 3 bytes */
MAKE_FORMAT (S24_3LE, SINT, G_LITTLE_ENDIAN, 24, 24, SILENT_0),
MAKE_FORMAT (S24_3BE, SINT, G_BIG_ENDIAN, 24, 24, SILENT_0),
MAKE_FORMAT (U24_3LE, UINT, G_LITTLE_ENDIAN, 24, 24, SILENT_U24_3LE),
MAKE_FORMAT (U24_3BE, UINT, G_BIG_ENDIAN, 24, 24, SILENT_U24_3BE),
/* 20 bit in 3 bytes */
MAKE_FORMAT (S20_3LE, SINT, G_LITTLE_ENDIAN, 24, 20, SILENT_0),
MAKE_FORMAT (S20_3BE, SINT, G_BIG_ENDIAN, 24, 20, SILENT_0),
MAKE_FORMAT (U20_3LE, UINT, G_LITTLE_ENDIAN, 24, 20, SILENT_U20_3LE),
MAKE_FORMAT (U20_3BE, UINT, G_BIG_ENDIAN, 24, 20, SILENT_U20_3BE),
/* 18 bit in 3 bytes */
MAKE_FORMAT (S18_3LE, SINT, G_LITTLE_ENDIAN, 24, 18, SILENT_0),
MAKE_FORMAT (S18_3BE, SINT, G_BIG_ENDIAN, 24, 18, SILENT_0),
MAKE_FORMAT (U18_3LE, UINT, G_LITTLE_ENDIAN, 24, 18, SILENT_U18_3LE),
MAKE_FORMAT (U18_3BE, UINT, G_BIG_ENDIAN, 24, 18, SILENT_U18_3BE),
/* float */
MAKE_FORMAT (F32_LE, GST_AUDIO_FORMAT_FLAG_FLOAT, G_LITTLE_ENDIAN, 32, 32,
SILENT_0),
MAKE_FORMAT (F32_BE, GST_AUDIO_FORMAT_FLAG_FLOAT, G_BIG_ENDIAN, 32, 32,
SILENT_0),
MAKE_FORMAT (F64_LE, GST_AUDIO_FORMAT_FLAG_FLOAT, G_LITTLE_ENDIAN, 64, 64,
SILENT_0),
MAKE_FORMAT (F64_BE, GST_AUDIO_FORMAT_FLAG_FLOAT, G_BIG_ENDIAN, 64, 64,
SILENT_0)
};
static GstAudioFormat
gst_audio_format_from_caps_structure (const GstStructure * s)
{
gint endianness, width, depth;
guint i;
if (gst_structure_has_name (s, "audio/x-raw-int")) {
gboolean sign;
if (!gst_structure_get_boolean (s, "signed", &sign))
goto missing_field_signed;
if (!gst_structure_get_int (s, "endianness", &endianness))
goto missing_field_endianness;
if (!gst_structure_get_int (s, "width", &width))
goto missing_field_width;
if (!gst_structure_get_int (s, "depth", &depth))
goto missing_field_depth;
for (i = 0; i < G_N_ELEMENTS (formats); i++) {
if (GST_AUDIO_FORMAT_INFO_IS_INTEGER (&formats[i]) &&
sign == GST_AUDIO_FORMAT_INFO_IS_SIGNED (&formats[i]) &&
GST_AUDIO_FORMAT_INFO_ENDIANNESS (&formats[i]) == endianness &&
GST_AUDIO_FORMAT_INFO_WIDTH (&formats[i]) == width &&
GST_AUDIO_FORMAT_INFO_DEPTH (&formats[i]) == depth) {
return GST_AUDIO_FORMAT_INFO_FORMAT (&formats[i]);
}
}
} else if (gst_structure_has_name (s, "audio/x-raw-float")) {
/* fallbacks are for backwards compatibility (is this needed at all?) */
if (!gst_structure_get_int (s, "endianness", &endianness)) {
GST_WARNING ("float audio caps without endianness %" GST_PTR_FORMAT, s);
endianness = G_BYTE_ORDER;
}
if (!gst_structure_get_int (s, "width", &width)) {
GST_WARNING ("float audio caps without width %" GST_PTR_FORMAT, s);
width = 32;
}
for (i = 0; i < G_N_ELEMENTS (formats); i++) {
if (GST_AUDIO_FORMAT_INFO_IS_FLOAT (&formats[i]) &&
GST_AUDIO_FORMAT_INFO_ENDIANNESS (&formats[i]) == endianness &&
GST_AUDIO_FORMAT_INFO_WIDTH (&formats[i]) == width) {
return GST_AUDIO_FORMAT_INFO_FORMAT (&formats[i]);
}
}
}
/* no match */
return GST_AUDIO_FORMAT_UNKNOWN;
missing_field_signed:
{
GST_ERROR ("missing 'signed' field in audio caps %" GST_PTR_FORMAT, s);
return GST_AUDIO_FORMAT_UNKNOWN;
}
missing_field_endianness:
{
GST_ERROR ("missing 'endianness' field in audio caps %" GST_PTR_FORMAT, s);
return GST_AUDIO_FORMAT_UNKNOWN;
}
missing_field_depth:
{
GST_ERROR ("missing 'depth' field in audio caps %" GST_PTR_FORMAT, s);
return GST_AUDIO_FORMAT_UNKNOWN;
}
missing_field_width:
{
GST_ERROR ("missing 'width' field in audio caps %" GST_PTR_FORMAT, s);
return GST_AUDIO_FORMAT_UNKNOWN;
}
}
/* FIXME: remove these if we don't actually go for deep alloc positions */
void
gst_audio_info_init (GstAudioInfo * info)
{
memset (info, 0, sizeof (GstAudioInfo));
}
void
gst_audio_info_clear (GstAudioInfo * info)
{
memset (info, 0, sizeof (GstAudioInfo));
}
GstAudioInfo *
gst_audio_info_copy (GstAudioInfo * info)
{
return (GstAudioInfo *) g_slice_copy (sizeof (GstAudioInfo), info);
}
void
gst_audio_info_free (GstAudioInfo * info)
{
g_slice_free (GstAudioInfo, info);
}
static void
gst_audio_info_set_format (GstAudioInfo * info, GstAudioFormat format,
gint rate, gint channels)
{
const GstAudioFormatInfo *finfo;
g_return_if_fail (info != NULL);
g_return_if_fail (format != GST_AUDIO_FORMAT_UNKNOWN);
finfo = &formats[format];
info->flags = 0;
info->finfo = finfo;
info->rate = rate;
info->channels = channels;
info->bpf = (finfo->width * channels) / 8;
}
/* from multichannel.c */
void priv_gst_audio_info_fill_default_channel_positions (GstAudioInfo * info);
/**
* gst_audio_info_from_caps:
* @info: a #GstAudioInfo
* @caps: a #GstCaps
*
* Parse @caps and update @info.
*
* Returns: TRUE if @caps could be parsed
*
* Since: 0.10.36
*/
gboolean
gst_audio_info_from_caps (GstAudioInfo * info, const GstCaps * caps)
{
GstStructure *str;
GstAudioFormat format;
gint rate, channels;
const GValue *pos_val_arr, *pos_val_entry;
gint i;
g_return_val_if_fail (info != NULL, FALSE);
g_return_val_if_fail (caps != NULL, FALSE);
g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);
GST_DEBUG ("parsing caps %" GST_PTR_FORMAT, caps);
str = gst_caps_get_structure (caps, 0);
format = gst_audio_format_from_caps_structure (str);
if (format == GST_AUDIO_FORMAT_UNKNOWN)
goto unknown_format;
if (!gst_structure_get_int (str, "rate", &rate))
goto no_rate;
if (!gst_structure_get_int (str, "channels", &channels))
goto no_channels;
gst_audio_info_set_format (info, format, rate, channels);
pos_val_arr = gst_structure_get_value (str, "channel-positions");
if (pos_val_arr) {
if (channels <= G_N_ELEMENTS (info->position)) {
for (i = 0; i < channels; i++) {
pos_val_entry = gst_value_array_get_value (pos_val_arr, i);
info->position[i] = g_value_get_enum (pos_val_entry);
}
} else {
/* for that many channels, the positions are always NONE */
for (i = 0; i < G_N_ELEMENTS (info->position); i++)
info->position[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
info->flags |= GST_AUDIO_FLAG_DEFAULT_POSITIONS;
}
} else {
info->flags |= GST_AUDIO_FLAG_DEFAULT_POSITIONS;
priv_gst_audio_info_fill_default_channel_positions (info);
}
return TRUE;
/* ERROR */
unknown_format:
{
GST_ERROR ("unknown format given");
return FALSE;
}
no_rate:
{
GST_ERROR ("no rate property given");
return FALSE;
}
no_channels:
{
GST_ERROR ("no channels property given");
return FALSE;
}
}
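As a usage note for gst_audio_info_from_caps() above, here is a small hypothetical helper (not part of this commit) that fills a stack-allocated GstAudioInfo from fixed caps and reads back the derived fields; the helper name and debug output are illustrative only.
/* Hypothetical helper: fill a GstAudioInfo from fixed caps and inspect it */
static gboolean
inspect_audio_caps (const GstCaps * caps)
{
  GstAudioInfo info;

  gst_audio_info_init (&info);
  if (!gst_audio_info_from_caps (&info, caps))
    return FALSE;               /* not fixed raw-audio caps */

  GST_DEBUG ("rate %d, channels %d, %d bytes per frame",
      GST_AUDIO_INFO_RATE (&info), GST_AUDIO_INFO_CHANNELS (&info),
      GST_AUDIO_INFO_BPF (&info));

  return TRUE;
}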
/**
* gst_audio_info_to_caps:
* @info: a #GstAudioInfo
*
* Convert the values of @info into a #GstCaps.
*
* Returns: (transfer full): the new #GstCaps containing the
* info of @info.
*
* Since: 0.10.36
*/
GstCaps *
gst_audio_info_to_caps (GstAudioInfo * info)
{
GstCaps *caps;
g_return_val_if_fail (info != NULL, NULL);
g_return_val_if_fail (info->finfo != NULL, NULL);
g_return_val_if_fail (info->finfo->format != GST_AUDIO_FORMAT_UNKNOWN, NULL);
if (GST_AUDIO_FORMAT_INFO_IS_INTEGER (info->finfo)) {
caps = gst_caps_new_simple ("audio/x-raw-int",
"width", G_TYPE_INT, GST_AUDIO_INFO_WIDTH (info),
"depth", G_TYPE_INT, GST_AUDIO_INFO_DEPTH (info),
"endianness", G_TYPE_INT,
GST_AUDIO_FORMAT_INFO_ENDIANNESS (info->finfo), "signed",
G_TYPE_BOOLEAN, GST_AUDIO_FORMAT_INFO_IS_SIGNED (info->finfo), "rate",
G_TYPE_INT, GST_AUDIO_INFO_RATE (info), "channels", G_TYPE_INT,
GST_AUDIO_INFO_CHANNELS (info), NULL);
} else if (GST_AUDIO_FORMAT_INFO_IS_FLOAT (info->finfo)) {
caps = gst_caps_new_simple ("audio/x-raw-float",
"width", G_TYPE_INT, GST_AUDIO_INFO_WIDTH (info),
"endianness", G_TYPE_INT,
GST_AUDIO_FORMAT_INFO_ENDIANNESS (info->finfo), "rate", G_TYPE_INT,
GST_AUDIO_INFO_RATE (info), "channels", G_TYPE_INT,
GST_AUDIO_INFO_CHANNELS (info), NULL);
} else {
GST_ERROR ("unknown audio format, neither integer nor float");
return NULL;
}
if (info->channels > 2) {
GValue pos_val_arr = { 0 }, pos_val_entry = { 0 };
GstStructure *str;
gint i;
/* build gvaluearray from positions */
g_value_init (&pos_val_arr, GST_TYPE_ARRAY);
g_value_init (&pos_val_entry, GST_TYPE_AUDIO_CHANNEL_POSITION);
for (i = 0; i < info->channels; i++) {
/* if we have many many channels, all positions are NONE */
if (info->channels <= 64)
g_value_set_enum (&pos_val_entry, info->position[i]);
else
g_value_set_enum (&pos_val_entry, GST_AUDIO_CHANNEL_POSITION_NONE);
gst_value_array_append_value (&pos_val_arr, &pos_val_entry);
}
g_value_unset (&pos_val_entry);
/* add to structure */
str = gst_caps_get_structure (caps, 0);
gst_structure_set_value (str, "channel-positions", &pos_val_arr);
g_value_unset (&pos_val_arr);
}
return caps;
}
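And the reverse direction: a hypothetical round-trip sketch that parses caps into a GstAudioInfo and serializes it back with gst_audio_info_to_caps(); the function name is made up and the snippet is not part of this diff.
/* Hypothetical round-trip: caps -> GstAudioInfo -> caps */
static GstCaps *
normalize_audio_caps (const GstCaps * caps)
{
  GstAudioInfo info;

  gst_audio_info_init (&info);
  if (!gst_audio_info_from_caps (&info, caps))
    return NULL;

  /* rebuilds audio/x-raw-int or audio/x-raw-float caps, adding
   * channel-positions when there are more than two channels */
  return gst_audio_info_to_caps (&info);
}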
/**
* gst_audio_info_convert:
* @info: a #GstAudioInfo
* @src_format: #GstFormat of the @src_value
* @src_value: value to convert
* @dest_format: #GstFormat of the @dest_value
* @dest_value: pointer to destination value
*
* Converts among various #GstFormat types. This function handles
* GST_FORMAT_BYTES, GST_FORMAT_TIME, and GST_FORMAT_DEFAULT. For
* raw audio, GST_FORMAT_DEFAULT corresponds to audio frames. This
* function can be used to handle pad queries of the type GST_QUERY_CONVERT.
*
* Returns: TRUE if the conversion was successful.
*
* Since: 0.10.36
*/
gboolean
gst_audio_info_convert (GstAudioInfo * info,
GstFormat src_fmt, gint64 src_val, GstFormat dest_fmt, gint64 * dest_val)
{
gboolean res = TRUE;
gint bpf, rate;
GST_DEBUG ("converting value %" G_GINT64_FORMAT " from %s (%d) to %s (%d)",
src_val, gst_format_get_name (src_fmt), src_fmt,
gst_format_get_name (dest_fmt), dest_fmt);
if (src_fmt == dest_fmt || src_val == -1) {
*dest_val = src_val;
goto done;
}
/* get important info */
bpf = GST_AUDIO_INFO_BPF (info);
rate = GST_AUDIO_INFO_RATE (info);
if (bpf == 0 || rate == 0) {
GST_DEBUG ("no rate or bpf configured");
res = FALSE;
goto done;
}
switch (src_fmt) {
case GST_FORMAT_BYTES:
switch (dest_fmt) {
case GST_FORMAT_TIME:
*dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val / bpf, rate);
break;
case GST_FORMAT_DEFAULT:
*dest_val = src_val / bpf;
break;
default:
res = FALSE;
break;
}
break;
case GST_FORMAT_DEFAULT:
switch (dest_fmt) {
case GST_FORMAT_TIME:
*dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val, rate);
break;
case GST_FORMAT_BYTES:
*dest_val = src_val * bpf;
break;
default:
res = FALSE;
break;
}
break;
case GST_FORMAT_TIME:
switch (dest_fmt) {
case GST_FORMAT_DEFAULT:
*dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
break;
case GST_FORMAT_BYTES:
*dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
*dest_val *= bpf;
break;
default:
res = FALSE;
break;
}
break;
default:
res = FALSE;
break;
}
done:
GST_DEBUG ("ret=%d result %" G_GINT64_FORMAT, res, *dest_val);
return res;
}
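For gst_audio_info_convert(), a small hypothetical wrapper showing the typical shape of a GST_QUERY_CONVERT handler: mapping a byte position to a timestamp. It assumes the info was configured via gst_audio_info_from_caps(); the wrapper name is invented.
/* Hypothetical helper: translate a byte offset into a timestamp */
static GstClockTime
bytes_to_time (GstAudioInfo * info, gint64 bytes)
{
  gint64 time = -1;

  /* requires rate and bpf to be configured in @info */
  if (!gst_audio_info_convert (info, GST_FORMAT_BYTES, bytes,
          GST_FORMAT_TIME, &time))
    return GST_CLOCK_TIME_NONE;

  return (GstClockTime) time;
}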
/**
* gst_audio_buffer_clip:
* @buffer: The buffer to clip.

File diff suppressed because it is too large

View file

@ -0,0 +1,275 @@
/* GStreamer
* Copyright (C) 2009 Igalia S.L.
* Author: Iago Toral Quiroga <itoral@igalia.com>
* Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
* Copyright (C) 2011 Nokia Corporation. All rights reserved.
* Contact: Stefan Kost <stefan.kost@nokia.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef _GST_BASE_AUDIO_DECODER_H_
#define _GST_BASE_AUDIO_DECODER_H_
#ifndef GST_USE_UNSTABLE_API
#warning "GstBaseAudioDecoder is unstable API and may change in future."
#warning "You can define GST_USE_UNSTABLE_API to avoid this warning."
#endif
#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <gst/base/gstadapter.h>
G_BEGIN_DECLS
#define GST_TYPE_BASE_AUDIO_DECODER \
(gst_base_audio_decoder_get_type())
#define GST_BASE_AUDIO_DECODER(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoder))
#define GST_BASE_AUDIO_DECODER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoderClass))
#define GST_BASE_AUDIO_DECODER_GET_CLASS(obj) \
(G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoderClass))
#define GST_IS_BASE_AUDIO_DECODER(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_DECODER))
#define GST_IS_BASE_AUDIO_DECODER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_DECODER))
/**
* GST_BASE_AUDIO_DECODER_SINK_NAME:
*
* The name of the template for the sink pad.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_DECODER_SINK_NAME "sink"
/**
* GST_BASE_AUDIO_DECODER_SRC_NAME:
*
* The name of the template for the source pad.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_DECODER_SRC_NAME "src"
/**
* GST_BASE_AUDIO_DECODER_SRC_PAD:
* @obj: base audio codec instance
*
* Gives the pointer to the source #GstPad object of the element.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_DECODER_SRC_PAD(obj) (((GstBaseAudioDecoder *) (obj))->srcpad)
/**
* GST_BASE_AUDIO_DECODER_SINK_PAD:
* @obj: base audio codec instance
*
* Gives the pointer to the sink #GstPad object of the element.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_DECODER_SINK_PAD(obj) (((GstBaseAudioDecoder *) (obj))->sinkpad)
typedef struct _GstBaseAudioDecoder GstBaseAudioDecoder;
typedef struct _GstBaseAudioDecoderClass GstBaseAudioDecoderClass;
typedef struct _GstBaseAudioDecoderPrivate GstBaseAudioDecoderPrivate;
/* do not use this one, use macro below */
GstFlowReturn _gst_base_audio_decoder_error (GstBaseAudioDecoder *dec, gint weight,
GQuark domain, gint code,
gchar *txt, gchar *debug,
const gchar *file, const gchar *function,
gint line);
/**
* GST_BASE_AUDIO_DECODER_ERROR:
* @el: the base audio decoder element that generates the error
* @weight: element defined weight of the error, added to error count
* @domain: like CORE, LIBRARY, RESOURCE or STREAM (see #gstreamer-GstGError)
* @code: error code defined for that domain (see #gstreamer-GstGError)
* @text: the message to display (format string and args enclosed in
* parentheses)
* @debug: debugging information for the message (format string and args
* enclosed in parentheses)
* @ret: variable to receive return value
*
* Utility function that audio decoder elements can use when they encounter
* a data processing error that may be fatal for the current "data unit" but
* need not prevent subsequent decoding. Such errors are counted and if there
* are too many, as configured in the context's max_errors, the pipeline will
* post an error message and the application will be requested to stop further
* media processing. Otherwise, it is considered a "glitch" and only a warning
* is logged. In either case, @ret is set to the proper value to
* return to upstream/caller (indicating either GST_FLOW_ERROR or GST_FLOW_OK).
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \
G_STMT_START { \
gchar *__txt = _gst_element_error_printf text; \
gchar *__dbg = _gst_element_error_printf debug; \
GstBaseAudioDecoder *dec = GST_BASE_AUDIO_DECODER (el); \
ret = _gst_base_audio_decoder_error (dec, w, GST_ ## domain ## _ERROR, \
GST_ ## domain ## _ERROR_ ## code, __txt, __dbg, __FILE__, \
GST_FUNCTION, __LINE__); \
} G_STMT_END
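To illustrate the macro, a hypothetical handle_frame fragment; my_dec_decode_frame() is an invented stand-in for a real codec call and is not part of this API.
/* Hypothetical example: report a decode glitch and keep going */
static GstFlowReturn
my_dec_handle_frame (GstBaseAudioDecoder * bdec, GstBuffer * buffer)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out = NULL;

  if (buffer == NULL)
    return GST_FLOW_OK;         /* drain request, nothing buffered here */

  /* my_dec_decode_frame() is a made-up helper standing in for the codec */
  if (!my_dec_decode_frame (bdec, buffer, &out)) {
    /* weight 1 is added to the error count; the macro sets @ret to
     * GST_FLOW_OK for a tolerated glitch, or GST_FLOW_ERROR once the
     * configured max-errors is exceeded */
    GST_BASE_AUDIO_DECODER_ERROR (bdec, 1, STREAM, DECODE,
        ("could not decode frame"), ("corrupt or truncated data"), ret);
    return ret;
  }

  return gst_base_audio_decoder_finish_frame (bdec, out, 1);
}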
/**
* GstBaseAudioDecoder:
*
* The opaque #GstBaseAudioDecoder data structure.
*
* Since: 0.10.36
*/
struct _GstBaseAudioDecoder
{
GstElement element;
/*< protected >*/
/* source and sink pads */
GstPad *sinkpad;
GstPad *srcpad;
/* MT-protected (with STREAM_LOCK) */
GstSegment segment;
/*< private >*/
GstBaseAudioDecoderPrivate *priv;
gpointer _gst_reserved[GST_PADDING_LARGE];
};
/**
* GstBaseAudioDecoderClass:
* @start: Optional.
* Called when the element starts processing.
* Allows opening external resources.
* @stop: Optional.
* Called when the element stops processing.
* Allows closing external resources.
* @set_format: Notifies subclass of incoming data format (caps).
* @parse: Optional.
* Allows chopping incoming data into manageable units (frames)
* for subsequent decoding. This division is at subclass
* discretion and may or may not correspond to 1 (or more)
* frames as defined by audio format.
* @handle_frame: Provides input data (or NULL to clear any remaining data)
* to subclass. Input data ref management is performed by
* base class, subclass should not care or intervene.
* @flush: Optional.
* Instructs subclass to clear any codec caches and discard
* any pending samples and not yet returned encoded data.
* @hard indicates whether a FLUSH is being processed,
* or otherwise a DISCONT (or conceptually similar).
* @event: Optional.
* Event handler on the sink pad. This function should return
* TRUE if the event was handled and should be discarded
* (i.e. not unref'ed).
* @pre_push: Optional.
* Called just prior to pushing (encoded data) buffer downstream.
* Subclass has full discretionary access to buffer,
* and a not OK flow return will abort downstream pushing.
*
* Subclasses can override any of the available virtual methods or not, as
* needed. At minimum @handle_frame (and likely @set_format) needs to be
* overridden.
*
* Since: 0.10.36
*/
struct _GstBaseAudioDecoderClass
{
GstElementClass parent_class;
/*< public >*/
/* virtual methods for subclasses */
gboolean (*start) (GstBaseAudioDecoder *dec);
gboolean (*stop) (GstBaseAudioDecoder *dec);
gboolean (*set_format) (GstBaseAudioDecoder *dec,
GstCaps *caps);
GstFlowReturn (*parse) (GstBaseAudioDecoder *dec,
GstAdapter *adapter,
gint *offset, gint *length);
GstFlowReturn (*handle_frame) (GstBaseAudioDecoder *dec,
GstBuffer *buffer);
void (*flush) (GstBaseAudioDecoder *dec, gboolean hard);
GstFlowReturn (*pre_push) (GstBaseAudioDecoder *dec,
GstBuffer **buffer);
gboolean (*event) (GstBaseAudioDecoder *dec,
GstEvent *event);
/*< private >*/
gpointer _gst_reserved[GST_PADDING_LARGE];
};
GstFlowReturn gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * dec,
GstBuffer * buf, gint frames);
/* context parameters */
GstAudioInfo * gst_base_audio_decoder_get_audio_info (GstBaseAudioDecoder * dec);
void gst_base_audio_decoder_set_plc_aware (GstBaseAudioDecoder * dec,
gboolean plc);
gint gst_base_audio_decoder_get_plc_aware (GstBaseAudioDecoder * dec);
void gst_base_audio_decoder_set_byte_time (GstBaseAudioDecoder * dec,
gboolean enabled);
gint gst_base_audio_decoder_get_byte_time (GstBaseAudioDecoder * dec);
gint gst_base_audio_decoder_get_delay (GstBaseAudioDecoder * dec);
void gst_base_audio_decoder_set_max_errors (GstBaseAudioDecoder * dec,
gint num);
gint gst_base_audio_decoder_get_max_errors (GstBaseAudioDecoder * dec);
void gst_base_audio_decoder_set_latency (GstBaseAudioDecoder * dec,
GstClockTime min, GstClockTime max);
void gst_base_audio_decoder_get_latency (GstBaseAudioDecoder * dec,
GstClockTime * min, GstClockTime * max);
void gst_base_audio_decoder_get_parse_state (GstBaseAudioDecoder * dec,
gboolean * sync, gboolean * eos);
/* object properties */
void gst_base_audio_decoder_set_plc (GstBaseAudioDecoder * dec,
gboolean enabled);
gboolean gst_base_audio_decoder_get_plc (GstBaseAudioDecoder * dec);
void gst_base_audio_decoder_set_min_latency (GstBaseAudioDecoder * dec,
gint64 num);
gint64 gst_base_audio_decoder_get_min_latency (GstBaseAudioDecoder * dec);
void gst_base_audio_decoder_set_tolerance (GstBaseAudioDecoder * dec,
gint64 tolerance);
gint64 gst_base_audio_decoder_get_tolerance (GstBaseAudioDecoder * dec);
GType gst_base_audio_decoder_get_type (void);
G_END_DECLS
#endif
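To connect the virtual-method documentation above with actual use, here is a minimal hypothetical subclass skeleton. Every My*/my_* name is invented for illustration and not part of this commit; a real element would additionally install pad templates, register itself, and push decoded buffers via gst_base_audio_decoder_finish_frame().
/* Minimal hypothetical subclass skeleton (all names are illustrative) */
typedef struct _MyAudioDec
{
  GstBaseAudioDecoder parent;
} MyAudioDec;

typedef struct _MyAudioDecClass
{
  GstBaseAudioDecoderClass parent_class;
} MyAudioDecClass;

G_DEFINE_TYPE (MyAudioDec, my_audio_dec, GST_TYPE_BASE_AUDIO_DECODER);

static gboolean
my_audio_dec_set_format (GstBaseAudioDecoder * dec, GstCaps * caps)
{
  /* configure the decoder library from @caps here */
  return TRUE;
}

static GstFlowReturn
my_audio_dec_handle_frame (GstBaseAudioDecoder * dec, GstBuffer * buffer)
{
  /* NULL means "drain whatever is pending" */
  if (buffer == NULL)
    return GST_FLOW_OK;

  /* a real decoder would decode @buffer and hand the result to
   * gst_base_audio_decoder_finish_frame (dec, decoded, 1);
   * this skeleton simply drops the data */
  return GST_FLOW_OK;
}

static void
my_audio_dec_class_init (MyAudioDecClass * klass)
{
  GstBaseAudioDecoderClass *base_class = (GstBaseAudioDecoderClass *) klass;

  base_class->set_format = GST_DEBUG_FUNCPTR (my_audio_dec_set_format);
  base_class->handle_frame = GST_DEBUG_FUNCPTR (my_audio_dec_handle_frame);
}

static void
my_audio_dec_init (MyAudioDec * dec)
{
}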

File diff suppressed because it is too large

View file

@ -0,0 +1,235 @@
/* GStreamer
* Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
* Copyright (C) 2011 Nokia Corporation. All rights reserved.
* Contact: Stefan Kost <stefan.kost@nokia.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __GST_BASE_AUDIO_ENCODER_H__
#define __GST_BASE_AUDIO_ENCODER_H__
#ifndef GST_USE_UNSTABLE_API
#warning "GstBaseAudioEncoder is unstable API and may change in future."
#warning "You can define GST_USE_UNSTABLE_API to avoid this warning."
#endif
#include <gst/gst.h>
#include <gst/audio/audio.h>
G_BEGIN_DECLS
#define GST_TYPE_BASE_AUDIO_ENCODER (gst_base_audio_encoder_get_type())
#define GST_BASE_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoder))
#define GST_BASE_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoderClass))
#define GST_BASE_AUDIO_ENCODER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoderClass))
#define GST_IS_BASE_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_ENCODER))
#define GST_IS_BASE_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_ENCODER))
#define GST_BASE_AUDIO_ENCODER_CAST(obj) ((GstBaseAudioEncoder *)(obj))
/**
* GST_BASE_AUDIO_ENCODER_SINK_NAME:
*
* the name of the template for the sink pad
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_ENCODER_SINK_NAME "sink"
/**
* GST_BASE_AUDIO_ENCODER_SRC_NAME:
*
* the name of the template for the source pad
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_ENCODER_SRC_NAME "src"
/**
* GST_BASE_AUDIO_ENCODER_SRC_PAD:
* @obj: base audio encoder instance
*
* Gives the pointer to the source #GstPad object of the element.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_ENCODER_SRC_PAD(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->srcpad)
/**
* GST_BASE_AUDIO_ENCODER_SINK_PAD:
* @obj: base audio encoder instance
*
* Gives the pointer to the sink #GstPad object of the element.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_ENCODER_SINK_PAD(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->sinkpad)
/**
* GST_BASE_AUDIO_ENCODER_SEGMENT:
* @obj: base audio encoder instance
*
* Gives the segment of the element.
*
* Since: 0.10.36
*/
#define GST_BASE_AUDIO_ENCODER_SEGMENT(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->segment)
typedef struct _GstBaseAudioEncoder GstBaseAudioEncoder;
typedef struct _GstBaseAudioEncoderClass GstBaseAudioEncoderClass;
typedef struct _GstBaseAudioEncoderPrivate GstBaseAudioEncoderPrivate;
/**
* GstBaseAudioEncoder:
* @element: the parent element.
*
* The opaque #GstBaseAudioEncoder data structure.
*
* Since: 0.10.36
*/
struct _GstBaseAudioEncoder {
GstElement element;
/*< protected >*/
/* source and sink pads */
GstPad *sinkpad;
GstPad *srcpad;
/* MT-protected (with STREAM_LOCK) */
GstSegment segment;
/*< private >*/
GstBaseAudioEncoderPrivate *priv;
gpointer _gst_reserved[GST_PADDING_LARGE];
};
/**
* GstBaseAudioEncoderClass:
* @start: Optional.
* Called when the element starts processing.
* Allows opening external resources.
* @stop: Optional.
* Called when the element stops processing.
* Allows closing external resources.
* @set_format: Notifies subclass of incoming data format.
* GstAudioInfo contains the format according to provided caps.
* @handle_frame: Provides input samples (or NULL to clear any remaining data)
* according to directions as provided by subclass in the
* #GstBaseAudioEncoderContext. Input data ref management
* is performed by base class, subclass should not care or
* intervene.
* @flush: Optional.
* Instructs subclass to clear any codec caches and discard
* any pending samples and not yet returned encoded data.
* @event: Optional.
* Event handler on the sink pad. This function should return
* TRUE if the event was handled and should be discarded
* (i.e. not unref'ed).
* @pre_push: Optional.
* Called just prior to pushing (encoded data) buffer downstream.
* Subclass has full discretionary access to buffer,
* and a not OK flow return will abort downstream pushing.
* @getcaps: Optional.
* Allows for a custom sink getcaps implementation (e.g.
* for multichannel input specification). If not implemented,
* default returns gst_base_audio_encoder_proxy_getcaps
* applied to sink template caps.
*
* Subclasses can override any of the available virtual methods or not, as
* needed. At minimum @set_format and @handle_frame need to be overridden.
*
* Since: 0.10.36
*/
struct _GstBaseAudioEncoderClass {
GstElementClass parent_class;
/*< public >*/
/* virtual methods for subclasses */
gboolean (*start) (GstBaseAudioEncoder *enc);
gboolean (*stop) (GstBaseAudioEncoder *enc);
gboolean (*set_format) (GstBaseAudioEncoder *enc,
GstAudioInfo *info);
GstFlowReturn (*handle_frame) (GstBaseAudioEncoder *enc,
GstBuffer *buffer);
void (*flush) (GstBaseAudioEncoder *enc);
GstFlowReturn (*pre_push) (GstBaseAudioEncoder *enc,
GstBuffer **buffer);
gboolean (*event) (GstBaseAudioEncoder *enc,
GstEvent *event);
GstCaps * (*getcaps) (GstBaseAudioEncoder *enc);
/*< private >*/
gpointer _gst_reserved[GST_PADDING_LARGE];
};
GType gst_base_audio_encoder_get_type (void);
GstFlowReturn gst_base_audio_encoder_finish_frame (GstBaseAudioEncoder * enc,
GstBuffer *buffer, gint samples);
GstCaps * gst_base_audio_encoder_proxy_getcaps (GstBaseAudioEncoder * enc,
GstCaps * caps);
/* context parameters */
GstAudioInfo * gst_base_audio_encoder_get_audio_info (GstBaseAudioEncoder * enc);
gint gst_base_audio_encoder_get_frame_samples (GstBaseAudioEncoder * enc);
void gst_base_audio_encoder_set_frame_samples (GstBaseAudioEncoder * enc,
gint num);
gint gst_base_audio_encoder_get_frame_max (GstBaseAudioEncoder * enc);
void gst_base_audio_encoder_set_frame_max (GstBaseAudioEncoder * enc,
gint num);
gint gst_base_audio_encoder_get_lookahead (GstBaseAudioEncoder * enc);
void gst_base_audio_encoder_set_lookahead (GstBaseAudioEncoder * enc,
gint num);
void gst_base_audio_encoder_get_latency (GstBaseAudioEncoder * enc,
GstClockTime * min, GstClockTime * max);
void gst_base_audio_encoder_set_latency (GstBaseAudioEncoder * enc,
GstClockTime min, GstClockTime max);
/* object properties */
void gst_base_audio_encoder_set_mark_granule (GstBaseAudioEncoder * enc,
gboolean enabled);
gboolean gst_base_audio_encoder_get_mark_granule (GstBaseAudioEncoder * enc);
void gst_base_audio_encoder_set_perfect_timestamp (GstBaseAudioEncoder * enc,
gboolean enabled);
gboolean gst_base_audio_encoder_get_perfect_timestamp (GstBaseAudioEncoder * enc);
void gst_base_audio_encoder_set_hard_resync (GstBaseAudioEncoder * enc,
gboolean enabled);
gboolean gst_base_audio_encoder_get_hard_resync (GstBaseAudioEncoder * enc);
void gst_base_audio_encoder_set_tolerance (GstBaseAudioEncoder * enc,
gint64 tolerance);
gint64 gst_base_audio_encoder_get_tolerance (GstBaseAudioEncoder * enc);
G_END_DECLS
#endif /* __GST_BASE_AUDIO_ENCODER_H__ */
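Similarly for the encoder base class, a brief hypothetical set_format showing how a subclass might use the context setters declared above. The 1024-sample block size and the function name are invented, and the vmethod would be installed from class_init just like the decoder sketch earlier.
/* Hypothetical encoder set_format: tell the base class how many samples
 * the codec consumes per frame so it can batch input accordingly */
static gboolean
my_audio_enc_set_format (GstBaseAudioEncoder * enc, GstAudioInfo * info)
{
  /* a made-up codec taking fixed blocks of 1024 samples, one block at a time */
  gst_base_audio_encoder_set_frame_samples (enc, 1024);
  gst_base_audio_encoder_set_frame_max (enc, 1);

  /* report roughly one block of latency */
  gst_base_audio_encoder_set_latency (enc,
      gst_util_uint64_scale (1024, GST_SECOND, GST_AUDIO_INFO_RATE (info)),
      gst_util_uint64_scale (1024, GST_SECOND, GST_AUDIO_INFO_RATE (info)));

  return TRUE;
}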

View file

@ -288,6 +288,32 @@ gst_audio_get_channel_positions (GstStructure * str)
return pos;
}
void priv_gst_audio_info_fill_default_channel_positions (GstAudioInfo * info);
void
priv_gst_audio_info_fill_default_channel_positions (GstAudioInfo * info)
{
guint channels, i;
g_assert (info != NULL);
channels = GST_AUDIO_INFO_CHANNELS (info);
g_assert (channels > 0);
if (channels <= NUM_DEF_CHANS) {
/* just return some default channel layout if we have one */
for (i = 0; i < channels; ++i)
info->position[i] = default_positions[channels - 1][i];
} else {
/* for many many channels, the positions are always NONE */
for (i = 0; i < G_N_ELEMENTS (info->position); i++)
info->position[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
}
info->flags |= GST_AUDIO_FLAG_DEFAULT_POSITIONS;
}
/**
* gst_audio_set_channel_positions:
* @str: A #GstStructure to set channel positions on.

View file

@ -17,12 +17,12 @@
* Boston, MA 02111-1307, USA.
*/
#include <gst/audio/audio.h>
#include <gst/audio/audio-enumtypes.h>
#ifndef __GST_AUDIO_MULTICHANNEL_H__
#define __GST_AUDIO_MULTICHANNEL_H__
#include <gst/gst.h>
#include <gst/audio/audio-enumtypes.h>
G_BEGIN_DECLS
/**

View file

@ -85,7 +85,6 @@ GstPbutils-@GST_MAJORMINOR@.gir: $(INTROSPECTION_SCANNER) libgstpbutils-@GST_MAJ
-DGST_USE_UNSTABLE_API \
-I$(top_srcdir)/gst-libs \
-I$(top_builddir)/gst-libs \
--add-include-path=$(srcdir)/../video \
--add-include-path=`$(PKG_CONFIG) --variable=girdir gstreamer-@GST_MAJORMINOR@` \
--library=libgstpbutils-@GST_MAJORMINOR@.la \
--library-path=`$(PKG_CONFIG) --variable=libdir gstreamer-@GST_MAJORMINOR@` \
@ -93,7 +92,6 @@ GstPbutils-@GST_MAJORMINOR@.gir: $(INTROSPECTION_SCANNER) libgstpbutils-@GST_MAJ
--include=Gst-@GST_MAJORMINOR@ \
--libtool="$(top_builddir)/libtool" \
--pkg gstreamer-@GST_MAJORMINOR@ \
--pkg gstreamer-video-@GST_MAJORMINOR@ \
--pkg-export gstreamer-pbutils-@GST_MAJORMINOR@ \
--add-init-section="gst_init(NULL,NULL);" \
--output $@ \
@ -113,7 +111,6 @@ typelibs_DATA = $(BUILT_GIRSOURCES:.gir=.typelib)
$(AM_V_GEN)PKG_CONFIG_PATH="$(GST_PKG_CONFIG_PATH)" \
$(INTROSPECTION_COMPILER) \
--includedir=$(srcdir) \
--includedir=$(srcdir)/../video \
--includedir=$(builddir) \
--includedir=`$(PKG_CONFIG) --variable=girdir gstreamer-@GST_MAJORMINOR@` \
$(INTROSPECTION_COMPILER_OPTS) $< -o $(@F)

View file

static GstDiscovererAudioInfo
* gst_discoverer_audio_info_copy_int (GstDiscovererAudioInfo * ptr);
static GstDiscovererVideoInfo
* gst_discoverer_video_info_copy_int (GstDiscovererVideoInfo * ptr);
static GstDiscovererSubtitleInfo
* gst_discoverer_subtitle_info_copy_int (GstDiscovererSubtitleInfo * ptr);
/* Per-stream information */
G_DEFINE_TYPE (GstDiscovererStreamInfo, gst_discoverer_stream_info,
@ -104,6 +107,11 @@ gst_discoverer_info_copy_int (GstDiscovererStreamInfo * info,
ret = (GstDiscovererStreamInfo *)
gst_discoverer_video_info_copy_int ((GstDiscovererVideoInfo *) info);
} else if (ltyp == GST_TYPE_DISCOVERER_SUBTITLE_INFO) {
ret = (GstDiscovererStreamInfo *)
gst_discoverer_subtitle_info_copy_int ((GstDiscovererSubtitleInfo *)
info);
} else
ret = gst_discoverer_stream_info_new ();
@ -192,16 +200,23 @@ gst_stream_container_info_copy_int (GstDiscovererContainerInfo * ptr,
G_DEFINE_TYPE (GstDiscovererAudioInfo, gst_discoverer_audio_info,
GST_TYPE_DISCOVERER_STREAM_INFO);
static void
gst_discoverer_audio_info_finalize (GstDiscovererAudioInfo * info)
{
g_free (info->language);
}
static void
gst_discoverer_audio_info_class_init (GstDiscovererAudioInfoClass * klass)
{
/* Nothing to initialize */
klass->finalize =
(GstMiniObjectFinalizeFunction) gst_discoverer_audio_info_finalize;
}
static void
gst_discoverer_audio_info_init (GstDiscovererAudioInfo * info)
{
/* Nothing to initialize */
info->language = NULL;
}
static GstDiscovererAudioInfo *
@ -223,6 +238,49 @@ gst_discoverer_audio_info_copy_int (GstDiscovererAudioInfo * ptr)
ret->depth = ptr->depth;
ret->bitrate = ptr->bitrate;
ret->max_bitrate = ptr->max_bitrate;
ret->language = g_strdup (ptr->language);
return ret;
}
/* Subtitle information */
G_DEFINE_TYPE (GstDiscovererSubtitleInfo, gst_discoverer_subtitle_info,
GST_TYPE_DISCOVERER_STREAM_INFO);
static void
gst_discoverer_subtitle_info_init (GstDiscovererSubtitleInfo * info)
{
info->language = NULL;
}
static void
gst_discoverer_subtitle_info_finalize (GstDiscovererSubtitleInfo * info)
{
g_free (info->language);
}
static void
gst_discoverer_subtitle_info_class_init (GstMiniObjectClass * klass)
{
klass->finalize =
(GstMiniObjectFinalizeFunction) gst_discoverer_subtitle_info_finalize;
}
static GstDiscovererSubtitleInfo *
gst_discoverer_subtitle_info_new (void)
{
return (GstDiscovererSubtitleInfo *)
gst_mini_object_new (GST_TYPE_DISCOVERER_SUBTITLE_INFO);
}
static GstDiscovererSubtitleInfo *
gst_discoverer_subtitle_info_copy_int (GstDiscovererSubtitleInfo * ptr)
{
GstDiscovererSubtitleInfo *ret;
ret = gst_discoverer_subtitle_info_new ();
ret->language = g_strdup (ptr->language);
return ret;
}
@ -432,6 +490,25 @@ gst_discoverer_info_get_video_streams (GstDiscovererInfo * info)
return gst_discoverer_info_get_streams (info, GST_TYPE_DISCOVERER_VIDEO_INFO);
}
/**
* gst_discoverer_info_get_subtitle_streams:
* @info: a #GstDiscovererInfo
*
* Finds all the #GstDiscovererSubtitleInfo contained in @info
*
* Returns: (transfer full) (element-type Gst.DiscovererStreamInfo): A #GList of
* matching #GstDiscovererStreamInfo. The caller should free it with
* gst_discoverer_stream_info_list_free().
*
* Since: 0.10.36
*/
GList *
gst_discoverer_info_get_subtitle_streams (GstDiscovererInfo * info)
{
return gst_discoverer_info_get_streams (info,
GST_TYPE_DISCOVERER_SUBTITLE_INFO);
}
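As a usage note for the new accessor, a hypothetical snippet that lists the language of every subtitle stream found by the discoverer; the function name is made up and error handling is omitted.
/* Hypothetical: print the language of each discovered subtitle stream */
static void
print_subtitle_languages (GstDiscovererInfo * info)
{
  GList *streams, *l;

  streams = gst_discoverer_info_get_subtitle_streams (info);
  for (l = streams; l != NULL; l = l->next) {
    GstDiscovererSubtitleInfo *sinfo = (GstDiscovererSubtitleInfo *) l->data;
    const gchar *lang = gst_discoverer_subtitle_info_get_language (sinfo);

    g_print ("subtitle stream: language %s\n", lang ? lang : "unknown");
  }
  gst_discoverer_stream_info_list_free (streams);
}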
/**
* gst_discoverer_info_get_container_streams:
* @info: a #GstDiscovererInfo
@ -474,6 +551,8 @@ gst_discoverer_stream_info_get_stream_type_nick (GstDiscovererStreamInfo * info)
else
return "video";
}
if (GST_IS_DISCOVERER_SUBTITLE_INFO (info))
return "subtitles";
return "unknown";
}
@ -672,6 +751,17 @@ AUDIO_INFO_ACCESSOR_CODE (bitrate, guint, 0);
AUDIO_INFO_ACCESSOR_CODE (max_bitrate, guint, 0);
/**
* gst_discoverer_audio_info_get_language:
* @info: a #GstDiscovererAudioInfo
*
* Returns: the language of the stream, or NULL if unknown.
*
* Since: 0.10.36
*/
AUDIO_INFO_ACCESSOR_CODE (language, const gchar *, NULL);
/* GstDiscovererVideoInfo */
#define VIDEO_INFO_ACCESSOR_CODE(fieldname, type, failval) \
@ -811,6 +901,24 @@ gst_discoverer_video_info_is_image (const GstDiscovererVideoInfo * info)
return info->is_image;
}
/* GstDiscovererSubtitleInfo */
#define SUBTITLE_INFO_ACCESSOR_CODE(fieldname, type, failval) \
GENERIC_ACCESSOR_CODE(gst_discoverer_subtitle_info, GstDiscovererSubtitleInfo*, \
GST_TYPE_DISCOVERER_SUBTITLE_INFO, \
fieldname, type, failval)
/**
* gst_discoverer_subtitle_info_get_language:
* @info: a #GstDiscovererSubtitleInfo
*
* Returns: the language of the stream, or NULL if unknown.
*
* Since: 0.10.36
*/
SUBTITLE_INFO_ACCESSOR_CODE (language, const gchar *, NULL);
/* GstDiscovererInfo */
#define DISCOVERER_INFO_ACCESSOR_CODE(fieldname, type, failval) \

View file

@ -44,7 +44,6 @@
#include "config.h"
#endif
#include <gst/video/video.h>
#include "pbutils.h"
#include "pbutils-marshal.h"
#include "pbutils-private.h"
@ -438,6 +437,21 @@ _event_probe (GstPad * pad, GstProbeType type, GstEvent * event,
return GST_PROBE_OK;
}
static gboolean
is_subtitle_caps (const GstCaps * caps)
{
static GstCaps *subs_caps = NULL;
if (!subs_caps) {
subs_caps = gst_caps_from_string ("text/plain; text/x-pango-markup; "
"subpicture/x-pgs; subpicture/x-dvb; application/x-subtitle-unknown; "
"application/x-ssa; application/x-ass; subtitle/x-kate; "
"application/x-kate; video/x-dvd-subpicture; ");
}
return gst_caps_can_intersect (caps, subs_caps);
}
static void
uridecodebin_pad_added_cb (GstElement * uridecodebin, GstPad * pad,
GstDiscoverer * dc)
@ -445,14 +459,6 @@ uridecodebin_pad_added_cb (GstElement * uridecodebin, GstPad * pad,
PrivateStream *ps;
GstPad *sinkpad = NULL;
GstCaps *caps;
static GstCaps *subs_caps = NULL;
if (!subs_caps) {
subs_caps = gst_caps_from_string ("text/plain; text/x-pango-markup; "
"subpicture/x-pgs; subpicture/x-dvb; application/x-subtitle-unknown; "
"application/x-ssa; application/x-ass; subtitle/x-kate; "
"video/x-dvd-subpicture; ");
}
GST_DEBUG_OBJECT (dc, "pad %s:%s", GST_DEBUG_PAD_NAME (pad));
@ -471,8 +477,8 @@ uridecodebin_pad_added_cb (GstElement * uridecodebin, GstPad * pad,
caps = gst_pad_get_caps (pad, NULL);
if (gst_caps_can_intersect (caps, subs_caps)) {
/* Subtitle streams are sparse and don't provide any information - don't
if (is_subtitle_caps (caps)) {
/* Subtitle streams are sparse and may not provide any information - don't
* wait for data to preroll */
g_object_set (ps->sink, "async", FALSE, NULL);
}
@ -661,6 +667,14 @@ collect_information (GstDiscoverer * dc, const GstStructure * st,
gst_structure_free (tags_st);
}
if (!info->language && ((GstDiscovererStreamInfo *) info)->tags) {
gchar *language;
if (gst_tag_list_get_string (((GstDiscovererStreamInfo *) info)->tags,
GST_TAG_LANGUAGE_CODE, &language)) {
info->language = language;
}
}
return (GstDiscovererStreamInfo *) info;
} else if (g_str_has_prefix (name, "video/") ||
@ -709,6 +723,44 @@ collect_information (GstDiscoverer * dc, const GstStructure * st,
return (GstDiscovererStreamInfo *) info;
} else if (is_subtitle_caps (caps)) {
GstDiscovererSubtitleInfo *info;
if (parent)
info = (GstDiscovererSubtitleInfo *) parent;
else {
info = (GstDiscovererSubtitleInfo *)
gst_mini_object_new (GST_TYPE_DISCOVERER_SUBTITLE_INFO);
info->parent.caps = caps;
}
if (gst_structure_id_has_field (st, _TAGS_QUARK)) {
const gchar *language;
gst_structure_id_get (st, _TAGS_QUARK,
GST_TYPE_STRUCTURE, &tags_st, NULL);
language = gst_structure_get_string (caps_st, GST_TAG_LANGUAGE_CODE);
if (language)
info->language = g_strdup (language);
/* FIXME: Is it worth it to remove the tags we've parsed? */
info->parent.tags = gst_tag_list_merge (info->parent.tags,
(GstTagList *) tags_st, GST_TAG_MERGE_REPLACE);
gst_structure_free (tags_st);
}
if (!info->language && ((GstDiscovererStreamInfo *) info)->tags) {
gchar *language;
if (gst_tag_list_get_string (((GstDiscovererStreamInfo *) info)->tags,
GST_TAG_LANGUAGE_CODE, &language)) {
info->language = language;
}
}
return (GstDiscovererStreamInfo *) info;
} else {
/* None of the above - populate what information we can */
GstDiscovererStreamInfo *info;
@ -792,6 +844,9 @@ child_is_raw_stream (GstCaps * parent, GstCaps * child)
return TRUE;
}
if (is_subtitle_caps (parent))
return TRUE;
return FALSE;
}
@ -970,7 +1025,7 @@ discoverer_collect (GstDiscoverer * dc)
* caps named image/<foo> (the exception being MJPEG video which is also
* type image/jpeg), and should consist of precisely one stream (actually
* initially there are 2, the image and raw stream, but we squash these
* while parsing the stream topology). At some ponit, if we find that these
* while parsing the stream topology). At some point, if we find that these
* conditions are not sufficient, we can count the number of decoders and
* parsers in the chain, and if there's more than one decoder, or any
* parser at all, we should not mark this as an image.

View file

@ -110,6 +110,7 @@ guint gst_discoverer_audio_info_get_sample_rate(const GstDiscovererAudioInfo* in
guint gst_discoverer_audio_info_get_depth(const GstDiscovererAudioInfo* info);
guint gst_discoverer_audio_info_get_bitrate(const GstDiscovererAudioInfo* info);
guint gst_discoverer_audio_info_get_max_bitrate(const GstDiscovererAudioInfo* info);
const gchar * gst_discoverer_audio_info_get_language(const GstDiscovererAudioInfo* info);
/**
* GstDiscovererVideoInfo:
@ -140,6 +141,26 @@ guint gst_discoverer_video_info_get_bitrate(const GstDiscovererVideoIn
guint gst_discoverer_video_info_get_max_bitrate(const GstDiscovererVideoInfo* info);
gboolean gst_discoverer_video_info_is_image(const GstDiscovererVideoInfo* info);
/**
* GstDiscovererSubtitleInfo:
*
* #GstDiscovererStreamInfo specific to subtitle streams (this includes text and
* image based ones).
*
* Since: 0.10.36
*/
#define GST_TYPE_DISCOVERER_SUBTITLE_INFO \
(gst_discoverer_subtitle_info_get_type ())
#define GST_DISCOVERER_SUBTITLE_INFO(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_DISCOVERER_SUBTITLE_INFO, GstDiscovererSubtitleInfo))
#define GST_IS_DISCOVERER_SUBTITLE_INFO(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_DISCOVERER_SUBTITLE_INFO))
typedef struct _GstDiscovererSubtitleInfo GstDiscovererSubtitleInfo;
typedef GstMiniObjectClass GstDiscovererSubtitleInfoClass;
GType gst_discoverer_subtitle_info_get_type (void);
const gchar * gst_discoverer_subtitle_info_get_language(const GstDiscovererSubtitleInfo* info);
/**
* GstDiscovererResult:
* @GST_DISCOVERER_OK: The discovery was successful
@ -199,6 +220,7 @@ GList * gst_discoverer_info_get_streams (GstDiscovererInfo *in
GType streamtype);
GList * gst_discoverer_info_get_audio_streams (GstDiscovererInfo *info);
GList * gst_discoverer_info_get_video_streams (GstDiscovererInfo *info);
GList * gst_discoverer_info_get_subtitle_streams (GstDiscovererInfo *info);
GList * gst_discoverer_info_get_container_streams (GstDiscovererInfo *info);
void gst_discoverer_stream_info_list_free (GList *infos);

View file

@ -44,6 +44,8 @@ struct _GstDiscovererAudioInfo {
guint bitrate;
guint max_bitrate;
gchar *language;
};
struct _GstDiscovererVideoInfo {
@ -64,6 +66,12 @@ struct _GstDiscovererVideoInfo {
gboolean is_image;
};
struct _GstDiscovererSubtitleInfo {
GstDiscovererStreamInfo parent;
gchar *language;
};
struct _GstDiscovererInfo {
GObject parent;

View file

@ -20,7 +20,7 @@
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#include "config.h"
#endif
#include <string.h>
@ -44,6 +44,9 @@ struct _GstBaseRTPPayloadPrivate
gboolean perfect_rtptime;
gint notified_first_timestamp;
guint64 base_offset;
gint64 base_rtime;
gint64 prop_max_ptime;
gint64 caps_max_ptime;
};
@ -293,6 +296,8 @@ gst_basertppayload_init (GstBaseRTPPayload * basertppayload, gpointer g_class)
basertppayload->min_ptime = DEFAULT_MIN_PTIME;
basertppayload->priv->perfect_rtptime = DEFAULT_PERFECT_RTPTIME;
basertppayload->abidata.ABI.ptime_multiple = DEFAULT_PTIME_MULTIPLE;
basertppayload->priv->base_offset = GST_BUFFER_OFFSET_NONE;
basertppayload->priv->base_rtime = GST_BUFFER_OFFSET_NONE;
basertppayload->media = NULL;
basertppayload->encoding_name = NULL;
@ -393,6 +398,8 @@ gst_basertppayload_event_default (GstBaseRTPPayload * basertppayload,
segment = &basertppayload->segment;
gst_event_copy_segment (event, segment);
basertppayload->priv->base_offset = GST_BUFFER_OFFSET_NONE;
GST_DEBUG_OBJECT (basertppayload,
"configured SEGMENT %" GST_SEGMENT_FORMAT, segment);
res = gst_pad_event_default (basertppayload->sinkpad, event);
@ -772,9 +779,11 @@ gst_basertppayload_prepare_push (GstBaseRTPPayload * payload,
}
/* convert to RTP time */
if (priv->perfect_rtptime && data.offset != GST_BUFFER_OFFSET_NONE) {
if (priv->perfect_rtptime && data.offset != GST_BUFFER_OFFSET_NONE &&
priv->base_offset != GST_BUFFER_OFFSET_NONE) {
/* if we have an offset, use that for making an RTP timestamp */
data.rtptime = payload->ts_base + data.offset;
data.rtptime = payload->ts_base + priv->base_rtime +
data.offset - priv->base_offset;
GST_LOG_OBJECT (payload,
"Using offset %" G_GUINT64_FORMAT " for RTP timestamp", data.offset);
} else if (GST_CLOCK_TIME_IS_VALID (data.timestamp)) {
@ -793,6 +802,8 @@ gst_basertppayload_prepare_push (GstBaseRTPPayload * payload,
GST_TIME_ARGS (rtime));
rtime =
gst_util_uint64_scale_int (rtime, payload->clock_rate, GST_SECOND);
priv->base_offset = data.offset;
priv->base_rtime = rtime;
}
/* add running_time in clock-rate units to the base timestamp */
data.rtptime = payload->ts_base + rtime;
@ -1041,6 +1052,7 @@ gst_basertppayload_change_state (GstElement * element,
basertppayload->ts_base = basertppayload->ts_offset;
basertppayload->timestamp = basertppayload->ts_base;
g_atomic_int_set (&basertppayload->priv->notified_first_timestamp, 1);
priv->base_offset = GST_BUFFER_OFFSET_NONE;
break;
default:
break;

View file

@ -1262,9 +1262,9 @@ gen_video_chain (GstPlaySink * playsink, gboolean raw, gboolean async)
}
/* find ts-offset element */
chain->ts_offset =
gst_object_replace ((GstObject **) & chain->ts_offset, (GstObject *)
gst_play_sink_find_property_sinks (playsink, chain->sink, "ts-offset",
G_TYPE_INT64);
G_TYPE_INT64));
/* create a bin to hold objects, as we create them we add them to this bin so
* that when something goes wrong we only need to unref the bin */
@ -1387,9 +1387,10 @@ setup_video_chain (GstPlaySink * playsink, gboolean raw, gboolean async)
return FALSE;
/* find ts-offset element */
chain->ts_offset =
gst_object_replace ((GstObject **) & chain->ts_offset, (GstObject *)
gst_play_sink_find_property_sinks (playsink, chain->sink, "ts-offset",
G_TYPE_INT64);
G_TYPE_INT64));
/* if we can disable async behaviour of the sink, we can avoid adding a
* queue for the audio chain. */
@ -1704,9 +1705,9 @@ gen_audio_chain (GstPlaySink * playsink, gboolean raw)
}
/* find ts-offset element */
chain->ts_offset =
gst_object_replace ((GstObject **) & chain->ts_offset, (GstObject *)
gst_play_sink_find_property_sinks (playsink, chain->sink, "ts-offset",
G_TYPE_INT64);
G_TYPE_INT64));
/* check if the sink, or something within the sink, has the volume property.
* If it does we don't need to add a volume element. */
@ -1887,9 +1888,9 @@ setup_audio_chain (GstPlaySink * playsink, gboolean raw)
return FALSE;
/* find ts-offset element */
chain->ts_offset =
gst_object_replace ((GstObject **) & chain->ts_offset, (GstObject *)
gst_play_sink_find_property_sinks (playsink, chain->sink, "ts-offset",
G_TYPE_INT64);
G_TYPE_INT64));
/* check if the sink, or something within the sink, has the volume property.
* If it does we don't need to add a volume element. */
@ -2303,6 +2304,7 @@ gst_play_sink_reconfigure (GstPlaySink * playsink)
add_chain (GST_PLAY_CHAIN (playsink->videochain), FALSE);
activate_chain (GST_PLAY_CHAIN (playsink->videochain), FALSE);
g_object_unref (playsink->videochain->ts_offset);
playsink->videochain->ts_offset = NULL;
}
@ -2356,6 +2358,7 @@ gst_play_sink_reconfigure (GstPlaySink * playsink)
disconnect_chain (playsink->audiochain, playsink);
playsink->audiochain->volume = NULL;
playsink->audiochain->mute = NULL;
g_object_unref (playsink->audiochain->ts_offset);
playsink->audiochain->ts_offset = NULL;
free_chain ((GstPlayChain *) playsink->audiochain);
playsink->audiochain = NULL;
@ -2426,6 +2429,7 @@ gst_play_sink_reconfigure (GstPlaySink * playsink)
disconnect_chain (playsink->audiochain, playsink);
playsink->audiochain->volume = NULL;
playsink->audiochain->mute = NULL;
g_object_unref (playsink->audiochain->ts_offset);
playsink->audiochain->ts_offset = NULL;
}
add_chain (GST_PLAY_CHAIN (playsink->audiochain), FALSE);
@ -3020,14 +3024,14 @@ caps_notify_cb (GstPad * pad, GParamSpec * unused, GstPlaySink * playsink)
if (pad == playsink->audio_pad) {
raw = is_raw_pad (pad);
reconfigure = (! !playsink->audio_pad_raw != ! !raw)
reconfigure = (!!playsink->audio_pad_raw != !!raw)
&& playsink->audiochain;
GST_DEBUG_OBJECT (pad,
"Audio caps changed: raw %d reconfigure %d caps %" GST_PTR_FORMAT, raw,
reconfigure, caps);
} else if (pad == playsink->video_pad) {
raw = is_raw_pad (pad);
reconfigure = (! !playsink->video_pad_raw != ! !raw)
reconfigure = (!!playsink->video_pad_raw != !!raw)
&& playsink->videochain;
GST_DEBUG_OBJECT (pad,
"Video caps changed: raw %d reconfigure %d caps %" GST_PTR_FORMAT, raw,
@ -3418,6 +3422,7 @@ gst_play_sink_change_state (GstElement * element, GstStateChange transition)
disconnect_chain (playsink->audiochain, playsink);
playsink->audiochain->volume = NULL;
playsink->audiochain->mute = NULL;
g_object_unref (playsink->audiochain->ts_offset);
playsink->audiochain->ts_offset = NULL;
}
ret = GST_STATE_CHANGE_SUCCESS;
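
Editor's sketch, not from the patch: gst_object_replace() refs the object it stores (and unrefs any previous value), which is why the playsink chains above now also unref ts_offset when a chain is torn down. The ownership pattern, reduced to a hypothetical pair of helpers:

#include <gst/gst.h>

/* Store an element in a chain-owned slot. gst_object_replace() drops the
 * previous value (if any) and takes a new reference, so the slot owns a
 * ref that must be released on teardown. */
static void
store_ts_offset (GstObject ** slot, GstElement * ts_offset_element)
{
  gst_object_replace (slot, GST_OBJECT_CAST (ts_offset_element));
}

static void
release_ts_offset (GstObject ** slot)
{
  if (*slot) {
    gst_object_unref (*slot);
    *slot = NULL;
  }
}
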

View file

@ -72,6 +72,28 @@ post_missing_element_message (GstPlaySinkAudioConvert * self,
gst_element_post_message (GST_ELEMENT_CAST (self), msg);
}
static void
distribute_running_time (GstElement * element, const GstSegment * segment)
{
GstEvent *event;
GstPad *pad;
pad = gst_element_get_static_pad (element, "sink");
if (segment->accum) {
event = gst_event_new_new_segment_full (FALSE, segment->rate,
segment->applied_rate, segment->format, 0, segment->accum, 0);
gst_pad_send_event (pad, event);
}
event = gst_event_new_new_segment_full (FALSE, segment->rate,
segment->applied_rate, segment->format,
segment->start, segment->stop, segment->time);
gst_pad_send_event (pad, event);
gst_object_unref (pad);
}
static GstProbeReturn
pad_blocked_cb (GstPad * pad, GstProbeType type, gpointer type_data,
gpointer user_data)

View file

@ -72,6 +72,28 @@ post_missing_element_message (GstPlaySinkVideoConvert * self,
gst_element_post_message (GST_ELEMENT_CAST (self), msg);
}
static void
distribute_running_time (GstElement * element, const GstSegment * segment)
{
GstEvent *event;
GstPad *pad;
pad = gst_element_get_static_pad (element, "sink");
if (segment->accum) {
event = gst_event_new_new_segment_full (FALSE, segment->rate,
segment->applied_rate, segment->format, 0, segment->accum, 0);
gst_pad_send_event (pad, event);
}
event = gst_event_new_new_segment_full (FALSE, segment->rate,
segment->applied_rate, segment->format,
segment->start, segment->stop, segment->time);
gst_pad_send_event (pad, event);
gst_object_unref (pad);
}
static GstProbeReturn
pad_blocked_cb (GstPad * pad, GstProbeType type, gpointer type_data,
gpointer user_data)
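
Editor's note: both convert bins gain the same distribute_running_time() helper. A plausible use, sketched under the assumption that it runs while the bin's sink pad is blocked and a freshly added converter needs the segment context before data resumes (prepare_element is hypothetical; distribute_running_time is the helper shown above):

#include <gst/gst.h>

/* Bring a newly added element up to the bin's state and replay the
 * accumulated plus current newsegment into it, so its notion of running
 * time matches the rest of the stream when the pad is unblocked. */
static void
prepare_element (GstElement * conv, const GstSegment * segment)
{
  gst_element_sync_state_with_parent (conv);
  distribute_running_time (conv, segment);
}
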

View file

@ -47,6 +47,7 @@ gst_stream_audio_information_to_string (GstDiscovererStreamInfo * info,
GstDiscovererAudioInfo *audio_info;
GString *s;
gchar *tmp;
const gchar *ctmp;
int len = 400;
const GstTagList *tags;
GstCaps *caps;
@ -72,6 +73,8 @@ gst_stream_audio_information_to_string (GstDiscovererStreamInfo * info,
}
audio_info = (GstDiscovererAudioInfo *) info;
ctmp = gst_discoverer_audio_info_get_language (audio_info);
my_g_string_append_printf (s, "Language: %s\n", ctmp ? ctmp : "<unknown>");
my_g_string_append_printf (s, "Channels: %u\n",
gst_discoverer_audio_info_get_channels (audio_info));
my_g_string_append_printf (s, "Sample rate: %u\n",
@ -171,6 +174,57 @@ gst_stream_video_information_to_string (GstDiscovererStreamInfo * info,
return g_string_free (s, FALSE);
}
static gchar *
gst_stream_subtitle_information_to_string (GstDiscovererStreamInfo * info,
gint depth)
{
GstDiscovererSubtitleInfo *subtitle_info;
GString *s;
gchar *tmp;
const gchar *ctmp;
int len = 400;
const GstTagList *tags;
GstCaps *caps;
g_return_val_if_fail (info != NULL, NULL);
s = g_string_sized_new (len);
my_g_string_append_printf (s, "Codec:\n");
caps = gst_discoverer_stream_info_get_caps (info);
tmp = gst_caps_to_string (caps);
gst_caps_unref (caps);
my_g_string_append_printf (s, " %s\n", tmp);
g_free (tmp);
my_g_string_append_printf (s, "Additional info:\n");
if (gst_discoverer_stream_info_get_misc (info)) {
tmp = gst_structure_to_string (gst_discoverer_stream_info_get_misc (info));
my_g_string_append_printf (s, " %s\n", tmp);
g_free (tmp);
} else {
my_g_string_append_printf (s, " None\n");
}
subtitle_info = (GstDiscovererSubtitleInfo *) info;
ctmp = gst_discoverer_subtitle_info_get_language (subtitle_info);
my_g_string_append_printf (s, "Language: %s\n", ctmp ? ctmp : "<unknown>");
my_g_string_append_printf (s, "Tags:\n");
tags = gst_discoverer_stream_info_get_tags (info);
if (tags) {
tmp = gst_structure_to_string ((GstStructure *) tags);
my_g_string_append_printf (s, " %s\n", tmp);
g_free (tmp);
} else {
my_g_string_append_printf (s, " None\n");
}
if (verbose)
my_g_string_append_printf (s, "\n");
return g_string_free (s, FALSE);
}
static void
print_stream_info (GstDiscovererStreamInfo * info, void *depth)
{
@ -204,6 +258,10 @@ print_stream_info (GstDiscovererStreamInfo * info, void *depth)
desc =
gst_stream_video_information_to_string (info,
GPOINTER_TO_INT (depth) + 1);
else if (GST_IS_DISCOVERER_SUBTITLE_INFO (info))
desc =
gst_stream_subtitle_information_to_string (info,
GPOINTER_TO_INT (depth) + 1);
if (desc) {
g_print ("%s", desc);
g_free (desc);
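
A minimal sketch, not part of the patch, of how an application might consume the new subtitle API exposed here (assuming a GstDiscovererInfo obtained elsewhere, e.g. from gst_discoverer_discover_uri()):

#include <gst/pbutils/pbutils.h>

/* Print the language of each subtitle stream found by the discoverer. */
static void
print_subtitle_languages (GstDiscovererInfo * info)
{
  GList *streams, *l;

  streams = gst_discoverer_info_get_subtitle_streams (info);
  for (l = streams; l != NULL; l = l->next) {
    GstDiscovererSubtitleInfo *sinfo = l->data;
    const gchar *lang = gst_discoverer_subtitle_info_get_language (sinfo);

    g_print ("subtitle stream: language %s\n", lang ? lang : "<unknown>");
  }
  gst_discoverer_stream_info_list_free (streams);
}
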

View file

@ -1,4 +1,5 @@
EXPORTS
_gst_base_audio_decoder_error
gst_audio_buffer_clip
gst_audio_channel_position_get_type
gst_audio_check_channel_positions
@ -31,6 +32,47 @@ EXPORTS
gst_audio_set_structure_channel_positions_list
gst_audio_sink_get_type
gst_audio_src_get_type
gst_audio_structure_set_int
gst_base_audio_decoder_finish_frame
gst_base_audio_decoder_get_audio_info
gst_base_audio_decoder_get_byte_time
gst_base_audio_decoder_get_delay
gst_base_audio_decoder_get_latency
gst_base_audio_decoder_get_max_errors
gst_base_audio_decoder_get_min_latency
gst_base_audio_decoder_get_parse_state
gst_base_audio_decoder_get_plc
gst_base_audio_decoder_get_plc_aware
gst_base_audio_decoder_get_tolerance
gst_base_audio_decoder_get_type
gst_base_audio_decoder_set_byte_time
gst_base_audio_decoder_set_latency
gst_base_audio_decoder_set_max_errors
gst_base_audio_decoder_set_min_latency
gst_base_audio_decoder_set_plc
gst_base_audio_decoder_set_plc_aware
gst_base_audio_decoder_set_tolerance
gst_base_audio_encoder_finish_frame
gst_base_audio_encoder_get_audio_info
gst_base_audio_encoder_get_frame_max
gst_base_audio_encoder_get_frame_samples
gst_base_audio_encoder_get_hard_resync
gst_base_audio_encoder_get_latency
gst_base_audio_encoder_get_lookahead
gst_base_audio_encoder_get_mark_granule
gst_base_audio_encoder_get_perfect_timestamp
gst_base_audio_encoder_get_tolerance
gst_base_audio_encoder_get_type
gst_base_audio_encoder_proxy_getcaps
gst_base_audio_encoder_set_frame_max
gst_base_audio_encoder_set_frame_samples
gst_base_audio_encoder_set_hard_resync
gst_base_audio_encoder_set_latency
gst_base_audio_encoder_set_lookahead
gst_base_audio_encoder_set_mark_granule
gst_base_audio_encoder_set_perfect_timestamp
gst_base_audio_encoder_set_tolerance
gst_base_audio_sink_create_ringbuffer
gst_base_audio_sink_get_drift_tolerance
gst_base_audio_sink_get_provide_clock

View file

@ -13,6 +13,7 @@ EXPORTS
gst_discoverer_audio_info_get_bitrate
gst_discoverer_audio_info_get_channels
gst_discoverer_audio_info_get_depth
gst_discoverer_audio_info_get_language
gst_discoverer_audio_info_get_max_bitrate
gst_discoverer_audio_info_get_sample_rate
gst_discoverer_audio_info_get_type
@ -31,6 +32,7 @@ EXPORTS
gst_discoverer_info_get_stream_info
gst_discoverer_info_get_stream_list
gst_discoverer_info_get_streams
gst_discoverer_info_get_subtitle_streams
gst_discoverer_info_get_tags
gst_discoverer_info_get_type
gst_discoverer_info_get_uri
@ -47,6 +49,8 @@ EXPORTS
gst_discoverer_stream_info_get_tags
gst_discoverer_stream_info_get_type
gst_discoverer_stream_info_list_free
gst_discoverer_subtitle_info_get_language
gst_discoverer_subtitle_info_get_type
gst_discoverer_video_info_get_bitrate
gst_discoverer_video_info_get_depth
gst_discoverer_video_info_get_framerate_denom